
Merge tag 'kvm-riscv-6.8-1' of https://github.com/kvm-riscv/linux into HEAD

KVM/riscv changes for 6.8 part #1

- KVM_GET_REG_LIST improvement for vector registers
- Generate the ISA extension reg_list using macros in the get-reg-list selftest
- Steal-time accounting support, along with a selftest
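
As a rough sketch of the flow the first two items extend, userspace sizes and then fetches the per-vcpu register list with KVM_GET_REG_LIST (standard KVM uapi; the vcpu_fd setup and error handling are assumed and elided):

    #include <linux/kvm.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>

    /* A first call with n = 0 fails with E2BIG but reports the count. */
    struct kvm_reg_list probe = { .n = 0 };
    ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe);

    /* A second call fetches the ids, which now include the vector and
     * SBI STA state registers added by this series. */
    struct kvm_reg_list *list = calloc(1, sizeof(*list) + probe.n * sizeof(__u64));
    list->n = probe.n;
    ioctl(vcpu_fd, KVM_GET_REG_LIST, list);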
Paolo Bonzini, 2024-01-02 13:19:40 -05:00
commit 9cc52627c7
294 changed files with 3934 additions and 2354 deletions

@ -191,6 +191,10 @@ Gao Xiang <xiang@kernel.org> <gaoxiang25@huawei.com>
Gao Xiang <xiang@kernel.org> <hsiangkao@aol.com>
Gao Xiang <xiang@kernel.org> <hsiangkao@linux.alibaba.com>
Gao Xiang <xiang@kernel.org> <hsiangkao@redhat.com>
Geliang Tang <geliang.tang@linux.dev> <geliang.tang@suse.com>
Geliang Tang <geliang.tang@linux.dev> <geliangtang@xiaomi.com>
Geliang Tang <geliang.tang@linux.dev> <geliangtang@gmail.com>
Geliang Tang <geliang.tang@linux.dev> <geliangtang@163.com>
Georgi Djakov <djakov@kernel.org> <georgi.djakov@linaro.org>
Gerald Schaefer <gerald.schaefer@linux.ibm.com> <geraldsc@de.ibm.com>
Gerald Schaefer <gerald.schaefer@linux.ibm.com> <gerald.schaefer@de.ibm.com>


@ -3985,9 +3985,9 @@
vulnerability. System may allow data leaks with this
option.
no-steal-acc [X86,PV_OPS,ARM64,PPC/PSERIES] Disable paravirtualized
steal time accounting. steal time is computed, but
won't influence scheduler behaviour
no-steal-acc [X86,PV_OPS,ARM64,PPC/PSERIES,RISCV] Disable
paravirtualized steal time accounting. steal time is
computed, but won't influence scheduler behaviour
nosync [HW,M68K] Disables sync negotiation for all devices.
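
With RISCV added to the tag list above, a guest can opt out the same way as on the other architectures, via its kernel command line, e.g.:

    ... root=/dev/vda no-steal-acc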


@ -15,9 +15,11 @@ allOf:
properties:
compatible:
enum:
- fsl,imx23-ocotp
- fsl,imx28-ocotp
items:
- enum:
- fsl,imx23-ocotp
- fsl,imx28-ocotp
- const: fsl,ocotp
reg:
maxItems: 1
@ -35,7 +37,7 @@ unevaluatedProperties: false
examples:
- |
ocotp: efuse@8002c000 {
compatible = "fsl,imx28-ocotp";
compatible = "fsl,imx28-ocotp", "fsl,ocotp";
#address-cells = <1>;
#size-cells = <1>;
reg = <0x8002c000 0x2000>;


@ -6050,10 +6050,8 @@ M: Mikulas Patocka <mpatocka@redhat.com>
M: dm-devel@lists.linux.dev
L: dm-devel@lists.linux.dev
S: Maintained
W: http://sources.redhat.com/dm
Q: http://patchwork.kernel.org/project/dm-devel/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm.git
T: quilt http://people.redhat.com/agk/patches/linux/editing/
F: Documentation/admin-guide/device-mapper/
F: drivers/md/Kconfig
F: drivers/md/Makefile
@ -9526,6 +9524,7 @@ F: drivers/bus/hisi_lpc.c
HISILICON NETWORK SUBSYSTEM 3 DRIVER (HNS3)
M: Yisen Zhuang <yisen.zhuang@huawei.com>
M: Salil Mehta <salil.mehta@huawei.com>
M: Jijie Shao <shaojijie@huawei.com>
L: netdev@vger.kernel.org
S: Maintained
W: http://www.hisilicon.com


@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 7
SUBLEVEL = 0
EXTRAVERSION = -rc6
EXTRAVERSION = -rc7
NAME = Hurr durr I'ma ninja sloth
# *DOCUMENTATION*


@ -49,7 +49,6 @@ config ARC
select OF
select OF_EARLY_FLATTREE
select PCI_SYSCALL if PCI
select PERF_USE_VMALLOC if ARC_CACHE_VIPT_ALIASING
select HAVE_ARCH_JUMP_LABEL if ISA_ARCV2 && !CPU_ENDIAN_BE32
select TRACE_IRQFLAGS_SUPPORT
@ -232,10 +231,6 @@ config ARC_CACHE_PAGES
Note that Global I/D ENABLE + Per Page DISABLE works but corollary
Global DISABLE + Per Page ENABLE won't work
config ARC_CACHE_VIPT_ALIASING
bool "Support VIPT Aliasing D$"
depends on ARC_HAS_DCACHE && ISA_ARCOMPACT
endif #ARC_CACHE
config ARC_HAS_ICCM


@ -44,31 +44,10 @@ void dma_cache_wback(phys_addr_t start, unsigned long sz);
#define flush_cache_dup_mm(mm) /* called on fork (VIVT only) */
#ifndef CONFIG_ARC_CACHE_VIPT_ALIASING
#define flush_cache_mm(mm) /* called on munmap/exit */
#define flush_cache_range(mm, u_vstart, u_vend)
#define flush_cache_page(vma, u_vaddr, pfn) /* PF handling/COW-break */
#else /* VIPT aliasing dcache */
/* To clear out stale userspace mappings */
void flush_cache_mm(struct mm_struct *mm);
void flush_cache_range(struct vm_area_struct *vma,
unsigned long start,unsigned long end);
void flush_cache_page(struct vm_area_struct *vma,
unsigned long user_addr, unsigned long page);
/*
* To make sure that userspace mapping is flushed to memory before
* get_user_pages() uses a kernel mapping to access the page
*/
#define ARCH_HAS_FLUSH_ANON_PAGE
void flush_anon_page(struct vm_area_struct *vma,
struct page *page, unsigned long u_vaddr);
#endif /* CONFIG_ARC_CACHE_VIPT_ALIASING */
/*
* A new pagecache page has PG_arch_1 clear - thus dcache dirty by default
* This works around some PIO based drivers which don't call flush_dcache_page
@ -76,28 +55,6 @@ void flush_anon_page(struct vm_area_struct *vma,
*/
#define PG_dc_clean PG_arch_1
#define CACHE_COLORS_NUM 4
#define CACHE_COLORS_MSK (CACHE_COLORS_NUM - 1)
#define CACHE_COLOR(addr) (((unsigned long)(addr) >> (PAGE_SHIFT)) & CACHE_COLORS_MSK)
/*
* Simple wrapper over config option
* Bootup code ensures that hardware matches kernel configuration
*/
static inline int cache_is_vipt_aliasing(void)
{
return IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);
}
/*
* checks if two addresses (after page aligning) index into same cache set
*/
#define addr_not_cache_congruent(addr1, addr2) \
({ \
cache_is_vipt_aliasing() ? \
(CACHE_COLOR(addr1) != CACHE_COLOR(addr2)) : 0; \
})
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
memcpy(dst, src, len); \


@ -291,4 +291,36 @@
/* M = 8-1 N = 8 */
.endm
.macro SAVE_ABI_CALLEE_REGS
push r13
push r14
push r15
push r16
push r17
push r18
push r19
push r20
push r21
push r22
push r23
push r24
push r25
.endm
.macro RESTORE_ABI_CALLEE_REGS
pop r25
pop r24
pop r23
pop r22
pop r21
pop r20
pop r19
pop r18
pop r17
pop r16
pop r15
pop r14
pop r13
.endm
#endif


@ -33,6 +33,91 @@
#include <asm/irqflags-compact.h>
#include <asm/thread_info.h> /* For THREAD_SIZE */
/* Note on the LD/ST addr modes with addr reg wback
*
* LD.a same as LD.aw
*
* LD.a reg1, [reg2, x] => Pre Incr
* Eff Addr for load = [reg2 + x]
*
* LD.ab reg1, [reg2, x] => Post Incr
* Eff Addr for load = [reg2]
*/
.macro PUSHAX aux
lr r9, [\aux]
push r9
.endm
.macro POPAX aux
pop r9
sr r9, [\aux]
.endm
.macro SAVE_R0_TO_R12
push r0
push r1
push r2
push r3
push r4
push r5
push r6
push r7
push r8
push r9
push r10
push r11
push r12
.endm
.macro RESTORE_R12_TO_R0
pop r12
pop r11
pop r10
pop r9
pop r8
pop r7
pop r6
pop r5
pop r4
pop r3
pop r2
pop r1
pop r0
.endm
.macro SAVE_ABI_CALLEE_REGS
push r13
push r14
push r15
push r16
push r17
push r18
push r19
push r20
push r21
push r22
push r23
push r24
push r25
.endm
.macro RESTORE_ABI_CALLEE_REGS
pop r25
pop r24
pop r23
pop r22
pop r21
pop r20
pop r19
pop r18
pop r17
pop r16
pop r15
pop r14
pop r13
.endm
/*--------------------------------------------------------------
* Switch to Kernel Mode stack if SP points to User Mode stack
*
@ -235,7 +320,7 @@
SWITCH_TO_KERNEL_STK
PUSH 0x003\LVL\()abcd /* Dummy ECR */
st.a 0x003\LVL\()abcd, [sp, -4] /* Dummy ECR */
sub sp, sp, 8 /* skip orig_r0 (not needed)
skip pt_regs->sp, already saved above */


@ -21,114 +21,12 @@
#include <asm/entry-arcv2.h>
#endif
/* Note on the LD/ST addr modes with addr reg wback
*
* LD.a same as LD.aw
*
* LD.a reg1, [reg2, x] => Pre Incr
* Eff Addr for load = [reg2 + x]
*
* LD.ab reg1, [reg2, x] => Post Incr
* Eff Addr for load = [reg2]
*/
.macro PUSH reg
st.a \reg, [sp, -4]
.endm
.macro PUSHAX aux
lr r9, [\aux]
PUSH r9
.endm
.macro POP reg
ld.ab \reg, [sp, 4]
.endm
.macro POPAX aux
POP r9
sr r9, [\aux]
.endm
/*--------------------------------------------------------------
* Helpers to save/restore Scratch Regs:
* used by Interrupt/Exception Prologue/Epilogue
*-------------------------------------------------------------*/
.macro SAVE_R0_TO_R12
PUSH r0
PUSH r1
PUSH r2
PUSH r3
PUSH r4
PUSH r5
PUSH r6
PUSH r7
PUSH r8
PUSH r9
PUSH r10
PUSH r11
PUSH r12
.endm
.macro RESTORE_R12_TO_R0
POP r12
POP r11
POP r10
POP r9
POP r8
POP r7
POP r6
POP r5
POP r4
POP r3
POP r2
POP r1
POP r0
.endm
/*--------------------------------------------------------------
* Helpers to save/restore callee-saved regs:
* used by several macros below
*-------------------------------------------------------------*/
.macro SAVE_R13_TO_R25
PUSH r13
PUSH r14
PUSH r15
PUSH r16
PUSH r17
PUSH r18
PUSH r19
PUSH r20
PUSH r21
PUSH r22
PUSH r23
PUSH r24
PUSH r25
.endm
.macro RESTORE_R25_TO_R13
POP r25
POP r24
POP r23
POP r22
POP r21
POP r20
POP r19
POP r18
POP r17
POP r16
POP r15
POP r14
POP r13
.endm
/*
* save user mode callee regs as struct callee_regs
* - needed by fork/do_signal/unaligned-access-emulation.
*/
.macro SAVE_CALLEE_SAVED_USER
SAVE_R13_TO_R25
SAVE_ABI_CALLEE_REGS
.endm
/*
@ -136,18 +34,18 @@
* - could have been changed by ptrace tracer or unaligned-access fixup
*/
.macro RESTORE_CALLEE_SAVED_USER
RESTORE_R25_TO_R13
RESTORE_ABI_CALLEE_REGS
.endm
/*
* save/restore kernel mode callee regs at the time of context switch
*/
.macro SAVE_CALLEE_SAVED_KERNEL
SAVE_R13_TO_R25
SAVE_ABI_CALLEE_REGS
.endm
.macro RESTORE_CALLEE_SAVED_KERNEL
RESTORE_R25_TO_R13
RESTORE_ABI_CALLEE_REGS
.endm
/*--------------------------------------------------------------


@ -10,6 +10,13 @@
#include <linux/types.h>
#include <asm-generic/pgtable-nopmd.h>
/*
* Hugetlb definitions.
*/
#define HPAGE_SHIFT PMD_SHIFT
#define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK (~(HPAGE_SIZE - 1))
static inline pte_t pmd_pte(pmd_t pmd)
{
return __pte(pmd_val(pmd));


@ -54,6 +54,10 @@ struct pt_regs {
ecr_reg ecr;
};
struct callee_regs {
unsigned long r25, r24, r23, r22, r21, r20, r19, r18, r17, r16, r15, r14, r13;
};
#define MAX_REG_OFFSET offsetof(struct pt_regs, ecr)
#else
@ -92,16 +96,14 @@ struct pt_regs {
unsigned long status32;
};
#define MAX_REG_OFFSET offsetof(struct pt_regs, status32)
#endif
/* Callee saved registers - need to be saved only when you are scheduled out */
struct callee_regs {
unsigned long r25, r24, r23, r22, r21, r20, r19, r18, r17, r16, r15, r14, r13;
};
#define MAX_REG_OFFSET offsetof(struct pt_regs, status32)
#endif
#define instruction_pointer(regs) ((regs)->ret)
#define profile_pc(regs) instruction_pointer(regs)


@ -153,7 +153,7 @@ static int arcv2_mumbojumbo(int c, struct cpuinfo_arc *info, char *buf, int len)
{
int n = 0;
#ifdef CONFIG_ISA_ARCV2
const char *release, *cpu_nm, *isa_nm = "ARCv2";
const char *release = "", *cpu_nm = "HS38", *isa_nm = "ARCv2";
int dual_issue = 0, dual_enb = 0, mpy_opt, present;
int bpu_full, bpu_cache, bpu_pred, bpu_ret_stk;
char mpy_nm[16], lpb_nm[32];
@ -172,8 +172,6 @@ static int arcv2_mumbojumbo(int c, struct cpuinfo_arc *info, char *buf, int len)
* releases only update it.
*/
cpu_nm = "HS38";
if (info->arcver > 0x50 && info->arcver <= 0x53) {
release = arc_hs_rel[info->arcver - 0x51].str;
} else {


@ -62,7 +62,7 @@ struct rt_sigframe {
unsigned int sigret_magic;
};
static int save_arcv2_regs(struct sigcontext *mctx, struct pt_regs *regs)
static int save_arcv2_regs(struct sigcontext __user *mctx, struct pt_regs *regs)
{
int err = 0;
#ifndef CONFIG_ISA_ARCOMPACT
@ -75,12 +75,12 @@ static int save_arcv2_regs(struct sigcontext *mctx, struct pt_regs *regs)
#else
v2abi.r58 = v2abi.r59 = 0;
#endif
err = __copy_to_user(&mctx->v2abi, &v2abi, sizeof(v2abi));
err = __copy_to_user(&mctx->v2abi, (void const *)&v2abi, sizeof(v2abi));
#endif
return err;
}
static int restore_arcv2_regs(struct sigcontext *mctx, struct pt_regs *regs)
static int restore_arcv2_regs(struct sigcontext __user *mctx, struct pt_regs *regs)
{
int err = 0;
#ifndef CONFIG_ISA_ARCOMPACT


@ -145,10 +145,9 @@ dc_chk:
p_dc->sz_k = 1 << (dbcr.sz - 1);
n += scnprintf(buf + n, len - n,
"D-Cache\t\t: %uK, %dway/set, %uB Line, %s%s%s\n",
"D-Cache\t\t: %uK, %dway/set, %uB Line, %s%s\n",
p_dc->sz_k, assoc, p_dc->line_len,
vipt ? "VIPT" : "PIPT",
p_dc->colors > 1 ? " aliasing" : "",
IS_USED_CFG(CONFIG_ARC_HAS_DCACHE));
slc_chk:
@ -703,51 +702,10 @@ static inline void arc_slc_enable(void)
* Exported APIs
*/
/*
* Handle cache congruency of kernel and userspace mappings of page when kernel
* writes-to/reads-from
*
* The idea is to defer flushing of kernel mapping after a WRITE, possible if:
* -dcache is NOT aliasing, hence any U/K-mappings of page are congruent
* -U-mapping doesn't exist yet for page (finalised in update_mmu_cache)
* -In SMP, if hardware caches are coherent
*
* There's a corollary case, where kernel READs from a userspace mapped page.
* If the U-mapping is not congruent to K-mapping, former needs flushing.
*/
void flush_dcache_folio(struct folio *folio)
{
struct address_space *mapping;
if (!cache_is_vipt_aliasing()) {
clear_bit(PG_dc_clean, &folio->flags);
return;
}
/* don't handle anon pages here */
mapping = folio_flush_mapping(folio);
if (!mapping)
return;
/*
* pagecache page, file not yet mapped to userspace
* Make a note that K-mapping is dirty
*/
if (!mapping_mapped(mapping)) {
clear_bit(PG_dc_clean, &folio->flags);
} else if (folio_mapped(folio)) {
/* kernel reading from page with U-mapping */
phys_addr_t paddr = (unsigned long)folio_address(folio);
unsigned long vaddr = folio_pos(folio);
/*
* vaddr is not actually the virtual address, but is
* congruent to every user mapping.
*/
if (addr_not_cache_congruent(paddr, vaddr))
__flush_dcache_pages(paddr, vaddr,
folio_nr_pages(folio));
}
clear_bit(PG_dc_clean, &folio->flags);
return;
}
EXPORT_SYMBOL(flush_dcache_folio);
@ -921,44 +879,6 @@ noinline void flush_cache_all(void)
}
#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING
void flush_cache_mm(struct mm_struct *mm)
{
flush_cache_all();
}
void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
unsigned long pfn)
{
phys_addr_t paddr = pfn << PAGE_SHIFT;
u_vaddr &= PAGE_MASK;
__flush_dcache_pages(paddr, u_vaddr, 1);
if (vma->vm_flags & VM_EXEC)
__inv_icache_pages(paddr, u_vaddr, 1);
}
void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end)
{
flush_cache_all();
}
void flush_anon_page(struct vm_area_struct *vma, struct page *page,
unsigned long u_vaddr)
{
/* TBD: do we really need to clear the kernel mapping */
__flush_dcache_pages((phys_addr_t)page_address(page), u_vaddr, 1);
__flush_dcache_pages((phys_addr_t)page_address(page),
(phys_addr_t)page_address(page), 1);
}
#endif
void copy_user_highpage(struct page *to, struct page *from,
unsigned long u_vaddr, struct vm_area_struct *vma)
{
@ -966,46 +886,11 @@ void copy_user_highpage(struct page *to, struct page *from,
struct folio *dst = page_folio(to);
void *kfrom = kmap_atomic(from);
void *kto = kmap_atomic(to);
int clean_src_k_mappings = 0;
/*
* If SRC page was already mapped in userspace AND it's U-mapping is
* not congruent with K-mapping, sync former to physical page so that
* K-mapping in memcpy below, sees the right data
*
* Note that while @u_vaddr refers to DST page's userspace vaddr, it is
* equally valid for SRC page as well
*
* For !VIPT cache, all of this gets compiled out as
* addr_not_cache_congruent() is 0
*/
if (page_mapcount(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
__flush_dcache_pages((unsigned long)kfrom, u_vaddr, 1);
clean_src_k_mappings = 1;
}
copy_page(kto, kfrom);
/*
* Mark DST page K-mapping as dirty for a later finalization by
* update_mmu_cache(). Although the finalization could have been done
* here as well (given that both vaddr/paddr are available).
* But update_mmu_cache() already has code to do that for other
* non copied user pages (e.g. read faults which wire in pagecache page
* directly).
*/
clear_bit(PG_dc_clean, &dst->flags);
/*
* if SRC was already usermapped and non-congruent to kernel mapping
* sync the kernel mapping back to physical page
*/
if (clean_src_k_mappings) {
__flush_dcache_pages((unsigned long)kfrom,
(unsigned long)kfrom, 1);
} else {
clear_bit(PG_dc_clean, &src->flags);
}
clear_bit(PG_dc_clean, &src->flags);
kunmap_atomic(kto);
kunmap_atomic(kfrom);
@ -1140,17 +1025,8 @@ static noinline void __init arc_cache_init_master(void)
dc->line_len, L1_CACHE_BYTES);
/* check for D-Cache aliasing on ARCompact: ARCv2 has PIPT */
if (is_isa_arcompact()) {
int handled = IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);
if (dc->colors > 1) {
if (!handled)
panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
if (CACHE_COLORS_NUM != dc->colors)
panic("CACHE_COLORS_NUM not optimized for config\n");
} else if (handled && dc->colors == 1) {
panic("Disable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
}
if (is_isa_arcompact() && dc->colors > 1) {
panic("Aliasing VIPT cache not supported\n");
}
}


@ -14,10 +14,6 @@
#include <asm/cacheflush.h>
#define COLOUR_ALIGN(addr, pgoff) \
((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) + \
(((pgoff) << PAGE_SHIFT) & (SHMLBA - 1)))
/*
* Ensure that shared mappings are correctly aligned to
* avoid aliasing issues with VIPT caches.
@ -31,21 +27,13 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
int do_align = 0;
int aliasing = cache_is_vipt_aliasing();
struct vm_unmapped_area_info info;
/*
* We only need to do colour alignment if D cache aliases.
*/
if (aliasing)
do_align = filp || (flags & MAP_SHARED);
/*
* We enforce the MAP_FIXED case.
*/
if (flags & MAP_FIXED) {
if (aliasing && flags & MAP_SHARED &&
if (flags & MAP_SHARED &&
(addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
return -EINVAL;
return addr;
@ -55,10 +43,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
return -ENOMEM;
if (addr) {
if (do_align)
addr = COLOUR_ALIGN(addr, pgoff);
else
addr = PAGE_ALIGN(addr);
addr = PAGE_ALIGN(addr);
vma = find_vma(mm, addr);
if (TASK_SIZE - len >= addr &&
@ -70,7 +55,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
info.length = len;
info.low_limit = mm->mmap_base;
info.high_limit = TASK_SIZE;
info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
info.align_mask = 0;
info.align_offset = pgoff << PAGE_SHIFT;
return vm_unmapped_area(&info);
}


@ -478,21 +478,15 @@ void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
create_tlb(vma, vaddr, ptep);
if (page == ZERO_PAGE(0)) {
if (page == ZERO_PAGE(0))
return;
}
/*
* Exec page : Independent of aliasing/page-color considerations,
* since icache doesn't snoop dcache on ARC, any dirty
* K-mapping of a code page needs to be wback+inv so that
* icache fetch by userspace sees code correctly.
* !EXEC page: If K-mapping is NOT congruent to U-mapping, flush it
* so userspace sees the right data.
* (Avoids the flush for Non-exec + congruent mapping case)
* For executable pages, since icache doesn't snoop dcache, any
* dirty K-mapping of a code page needs to be wback+inv so that
* icache fetch by userspace sees code correctly.
*/
if ((vma->vm_flags & VM_EXEC) ||
addr_not_cache_congruent(paddr, vaddr)) {
if (vma->vm_flags & VM_EXEC) {
struct folio *folio = page_folio(page);
int dirty = !test_and_set_bit(PG_dc_clean, &folio->flags);
if (dirty) {


@ -359,6 +359,7 @@
<SYSC_IDLE_NO>,
<SYSC_IDLE_SMART>,
<SYSC_IDLE_SMART_WKUP>;
ti,sysc-delay-us = <2>;
clocks = <&l3s_clkctrl AM3_L3S_USB_OTG_HS_CLKCTRL 0>;
clock-names = "fck";
#address-cells = <1>;


@ -147,7 +147,7 @@
l3-noc@44000000 {
compatible = "ti,dra7-l3-noc";
reg = <0x44000000 0x1000>,
reg = <0x44000000 0x1000000>,
<0x45000000 0x1000>;
interrupts-extended = <&crossbar_mpu GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>,
<&wakeupgen GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>;


@ -793,11 +793,16 @@ void __init omap_soc_device_init(void)
soc_dev_attr->machine = soc_name;
soc_dev_attr->family = omap_get_family();
if (!soc_dev_attr->family) {
kfree(soc_dev_attr);
return;
}
soc_dev_attr->revision = soc_rev;
soc_dev_attr->custom_attr_group = omap_soc_groups[0];
soc_dev = soc_device_register(soc_dev_attr);
if (IS_ERR(soc_dev)) {
kfree(soc_dev_attr->family);
kfree(soc_dev_attr);
return;
}


@ -68,10 +68,7 @@
&emac0 {
pinctrl-names = "default";
pinctrl-0 = <&ext_rgmii_pins>;
phy-mode = "rgmii";
phy-handle = <&ext_rgmii_phy>;
allwinner,rx-delay-ps = <3100>;
allwinner,tx-delay-ps = <700>;
status = "okay";
};


@ -13,6 +13,9 @@
};
&emac0 {
allwinner,rx-delay-ps = <3100>;
allwinner,tx-delay-ps = <700>;
phy-mode = "rgmii";
phy-supply = <&reg_dcdce>;
};


@ -13,6 +13,8 @@
};
&emac0 {
allwinner,tx-delay-ps = <700>;
phy-mode = "rgmii-rxid";
phy-supply = <&reg_dldo1>;
};


@ -238,6 +238,7 @@
mt6360: pmic@34 {
compatible = "mediatek,mt6360";
reg = <0x34>;
interrupt-parent = <&pio>;
interrupts = <128 IRQ_TYPE_EDGE_FALLING>;
interrupt-names = "IRQB";
interrupt-controller;


@ -44,9 +44,6 @@
return sys_ni_syscall(); \
}
#define COMPAT_SYS_NI(name) \
SYSCALL_ALIAS(__arm64_compat_sys_##name, sys_ni_posix_timers);
#endif /* CONFIG_COMPAT */
#define __SYSCALL_DEFINEx(x, name, ...) \
@ -81,6 +78,5 @@
}
asmlinkage long __arm64_sys_ni_syscall(const struct pt_regs *__unused);
#define SYS_NI(name) SYSCALL_ALIAS(__arm64_sys_##name, sys_ni_posix_timers);
#endif /* __ASM_SYSCALL_WRAPPER_H */


@ -410,7 +410,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
kvm_timer_vcpu_terminate(vcpu);
kvm_pmu_vcpu_destroy(vcpu);
kvm_vgic_vcpu_destroy(vcpu);
kvm_arm_vcpu_destroy(vcpu);
}


@ -368,7 +368,7 @@ static void kvm_vgic_dist_destroy(struct kvm *kvm)
vgic_v4_teardown(kvm);
}
void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
static void __kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
{
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
@ -379,29 +379,39 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
vgic_flush_pending_lpis(vcpu);
INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF;
if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
vgic_unregister_redist_iodev(vcpu);
vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF;
}
}
static void __kvm_vgic_destroy(struct kvm *kvm)
void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
{
struct kvm_vcpu *vcpu;
unsigned long i;
struct kvm *kvm = vcpu->kvm;
lockdep_assert_held(&kvm->arch.config_lock);
vgic_debug_destroy(kvm);
kvm_for_each_vcpu(i, vcpu, kvm)
kvm_vgic_vcpu_destroy(vcpu);
kvm_vgic_dist_destroy(kvm);
mutex_lock(&kvm->slots_lock);
__kvm_vgic_vcpu_destroy(vcpu);
mutex_unlock(&kvm->slots_lock);
}
void kvm_vgic_destroy(struct kvm *kvm)
{
struct kvm_vcpu *vcpu;
unsigned long i;
mutex_lock(&kvm->slots_lock);
vgic_debug_destroy(kvm);
kvm_for_each_vcpu(i, vcpu, kvm)
__kvm_vgic_vcpu_destroy(vcpu);
mutex_lock(&kvm->arch.config_lock);
__kvm_vgic_destroy(kvm);
kvm_vgic_dist_destroy(kvm);
mutex_unlock(&kvm->arch.config_lock);
mutex_unlock(&kvm->slots_lock);
}
/**
@ -469,25 +479,26 @@ int kvm_vgic_map_resources(struct kvm *kvm)
type = VGIC_V3;
}
if (ret) {
__kvm_vgic_destroy(kvm);
if (ret)
goto out;
}
dist->ready = true;
dist_base = dist->vgic_dist_base;
mutex_unlock(&kvm->arch.config_lock);
ret = vgic_register_dist_iodev(kvm, dist_base, type);
if (ret) {
if (ret)
kvm_err("Unable to register VGIC dist MMIO regions\n");
kvm_vgic_destroy(kvm);
}
mutex_unlock(&kvm->slots_lock);
return ret;
goto out_slots;
out:
mutex_unlock(&kvm->arch.config_lock);
out_slots:
mutex_unlock(&kvm->slots_lock);
if (ret)
kvm_vgic_destroy(kvm);
return ret;
}


@ -820,7 +820,7 @@ out_unlock:
return ret;
}
static void vgic_unregister_redist_iodev(struct kvm_vcpu *vcpu)
void vgic_unregister_redist_iodev(struct kvm_vcpu *vcpu)
{
struct vgic_io_device *rd_dev = &vcpu->arch.vgic_cpu.rd_iodev;
@ -833,6 +833,8 @@ static int vgic_register_all_redist_iodevs(struct kvm *kvm)
unsigned long c;
int ret = 0;
lockdep_assert_held(&kvm->slots_lock);
kvm_for_each_vcpu(c, vcpu, kvm) {
ret = vgic_register_redist_iodev(vcpu);
if (ret)


@ -241,6 +241,7 @@ int vgic_v3_lpi_sync_pending_status(struct kvm *kvm, struct vgic_irq *irq);
int vgic_v3_save_pending_tables(struct kvm *kvm);
int vgic_v3_set_redist_base(struct kvm *kvm, u32 index, u64 addr, u32 count);
int vgic_register_redist_iodev(struct kvm_vcpu *vcpu);
void vgic_unregister_redist_iodev(struct kvm_vcpu *vcpu);
bool vgic_v3_check_base(struct kvm *kvm);
void vgic_v3_load(struct kvm_vcpu *vcpu);


@ -724,6 +724,25 @@ config COMPAT
If you want to execute 32-bit userspace applications, say Y.
config PARAVIRT
bool "Enable paravirtualization code"
depends on RISCV_SBI
help
This changes the kernel so it can modify itself when it is run
under a hypervisor, potentially improving performance significantly
over full virtualization.
config PARAVIRT_TIME_ACCOUNTING
bool "Paravirtual steal time accounting"
depends on PARAVIRT
help
Select this option to enable fine granularity task steal time
accounting. Time spent executing other tasks in parallel with
the current vCPU is discounted from the vCPU power. To account for
that, there can be a small performance impact.
If in doubt, say N here.
config RELOCATABLE
bool "Build a relocatable kernel"
depends on MMU && 64BIT && !XIP_KERNEL


@ -41,6 +41,7 @@
KVM_ARCH_REQ_FLAGS(4, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_HFENCE \
KVM_ARCH_REQ_FLAGS(5, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_STEAL_UPDATE KVM_ARCH_REQ(6)
enum kvm_riscv_hfence_type {
KVM_RISCV_HFENCE_UNKNOWN = 0,
@ -262,6 +263,12 @@ struct kvm_vcpu_arch {
/* 'static' configurations which are set only once */
struct kvm_vcpu_config cfg;
/* SBI steal-time accounting */
struct {
gpa_t shmem;
u64 last_steal;
} sta;
};
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
@ -370,4 +377,7 @@ bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, u64 mask);
void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_sbi_sta_reset(struct kvm_vcpu *vcpu);
void kvm_riscv_vcpu_record_steal_time(struct kvm_vcpu *vcpu);
#endif /* __RISCV_KVM_HOST_H__ */


@ -15,9 +15,10 @@
#define KVM_SBI_VERSION_MINOR 0
enum kvm_riscv_sbi_ext_status {
KVM_RISCV_SBI_EXT_UNINITIALIZED,
KVM_RISCV_SBI_EXT_AVAILABLE,
KVM_RISCV_SBI_EXT_UNAVAILABLE,
KVM_RISCV_SBI_EXT_STATUS_UNINITIALIZED,
KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE,
KVM_RISCV_SBI_EXT_STATUS_ENABLED,
KVM_RISCV_SBI_EXT_STATUS_DISABLED,
};
struct kvm_vcpu_sbi_context {
@ -36,7 +37,7 @@ struct kvm_vcpu_sbi_extension {
unsigned long extid_start;
unsigned long extid_end;
bool default_unavail;
bool default_disabled;
/**
* SBI extension handler. It can be defined for a given extension or group of
@ -59,11 +60,21 @@ int kvm_riscv_vcpu_set_reg_sbi_ext(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg);
int kvm_riscv_vcpu_get_reg_sbi_ext(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg);
int kvm_riscv_vcpu_set_reg_sbi(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg);
int kvm_riscv_vcpu_get_reg_sbi(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg);
const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext(
struct kvm_vcpu *vcpu, unsigned long extid);
bool riscv_vcpu_supports_sbi_ext(struct kvm_vcpu *vcpu, int idx);
int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run);
void kvm_riscv_vcpu_sbi_init(struct kvm_vcpu *vcpu);
int kvm_riscv_vcpu_get_reg_sbi_sta(struct kvm_vcpu *vcpu, unsigned long reg_num,
unsigned long *reg_val);
int kvm_riscv_vcpu_set_reg_sbi_sta(struct kvm_vcpu *vcpu, unsigned long reg_num,
unsigned long reg_val);
#ifdef CONFIG_RISCV_SBI_V01
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01;
#endif
@ -74,6 +85,7 @@ extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_rfence;
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_srst;
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_hsm;
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_dbcn;
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_sta;
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_experimental;
extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_vendor;


@ -0,0 +1,28 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_RISCV_PARAVIRT_H
#define _ASM_RISCV_PARAVIRT_H
#ifdef CONFIG_PARAVIRT
#include <linux/static_call_types.h>
struct static_key;
extern struct static_key paravirt_steal_enabled;
extern struct static_key paravirt_steal_rq_enabled;
u64 dummy_steal_clock(int cpu);
DECLARE_STATIC_CALL(pv_steal_clock, dummy_steal_clock);
static inline u64 paravirt_steal_clock(int cpu)
{
return static_call(pv_steal_clock)(cpu);
}
int __init pv_time_init(void);
#else
#define pv_time_init() do {} while (0)
#endif /* CONFIG_PARAVIRT */
#endif /* _ASM_RISCV_PARAVIRT_H */


@ -0,0 +1 @@
#include <asm/paravirt.h>


@ -31,6 +31,7 @@ enum sbi_ext_id {
SBI_EXT_SRST = 0x53525354,
SBI_EXT_PMU = 0x504D55,
SBI_EXT_DBCN = 0x4442434E,
SBI_EXT_STA = 0x535441,
/* Experimental extensions must lie within this range */
SBI_EXT_EXPERIMENTAL_START = 0x08000000,
@ -243,6 +244,22 @@ enum sbi_ext_dbcn_fid {
SBI_EXT_DBCN_CONSOLE_WRITE_BYTE = 2,
};
/* SBI STA (steal-time accounting) extension */
enum sbi_ext_sta_fid {
SBI_EXT_STA_STEAL_TIME_SET_SHMEM = 0,
};
struct sbi_sta_struct {
__le32 sequence;
__le32 flags;
__le64 steal;
u8 preempted;
u8 pad[47];
} __packed;
#define SBI_STA_SHMEM_DISABLE -1
/* SBI spec version fields */
#define SBI_SPEC_VERSION_DEFAULT 0x1
#define SBI_SPEC_VERSION_MAJOR_SHIFT 24
#define SBI_SPEC_VERSION_MAJOR_MASK 0x7f
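
The sbi_sta_struct layout above is exactly 64 bytes (4 + 4 + 8 + 1 + 47), which is what later lets KVM assume a 64-byte-aligned record never straddles a page. A compile-time check makes that assumption explicit (a sketch only; the kernel does not carry this assertion):

    _Static_assert(sizeof(struct sbi_sta_struct) == 64,
                   "SBI STA shared-memory record must stay 64 bytes");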


@ -46,9 +46,6 @@ asmlinkage long __riscv_sys_ni_syscall(const struct pt_regs *);
return sys_ni_syscall(); \
}
#define COMPAT_SYS_NI(name) \
SYSCALL_ALIAS(__riscv_compat_sys_##name, sys_ni_posix_timers);
#endif /* CONFIG_COMPAT */
#define __SYSCALL_DEFINEx(x, name, ...) \
@ -82,6 +79,4 @@ asmlinkage long __riscv_sys_ni_syscall(const struct pt_regs *);
return sys_ni_syscall(); \
}
#define SYS_NI(name) SYSCALL_ALIAS(__riscv_sys_##name, sys_ni_posix_timers);
#endif /* __ASM_SYSCALL_WRAPPER_H */


@ -157,9 +157,16 @@ enum KVM_RISCV_SBI_EXT_ID {
KVM_RISCV_SBI_EXT_EXPERIMENTAL,
KVM_RISCV_SBI_EXT_VENDOR,
KVM_RISCV_SBI_EXT_DBCN,
KVM_RISCV_SBI_EXT_STA,
KVM_RISCV_SBI_EXT_MAX,
};
/* SBI STA extension registers for KVM_GET_ONE_REG and KVM_SET_ONE_REG */
struct kvm_riscv_sbi_sta {
unsigned long shmem_lo;
unsigned long shmem_hi;
};
/* Possible states for kvm_riscv_timer */
#define KVM_RISCV_TIMER_STATE_OFF 0
#define KVM_RISCV_TIMER_STATE_ON 1
@ -241,6 +248,12 @@ enum KVM_RISCV_SBI_EXT_ID {
#define KVM_REG_RISCV_VECTOR_REG(n) \
((n) + sizeof(struct __riscv_v_ext_state) / sizeof(unsigned long))
/* Registers for specific SBI extensions are mapped as type 10 */
#define KVM_REG_RISCV_SBI_STATE (0x0a << KVM_REG_RISCV_TYPE_SHIFT)
#define KVM_REG_RISCV_SBI_STA (0x0 << KVM_REG_RISCV_SUBTYPE_SHIFT)
#define KVM_REG_RISCV_SBI_STA_REG(name) \
(offsetof(struct kvm_riscv_sbi_sta, name) / sizeof(unsigned long))
/* Device Control API: RISC-V AIA */
#define KVM_DEV_RISCV_APLIC_ALIGN 0x1000
#define KVM_DEV_RISCV_APLIC_SIZE 0x4000
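
A hypothetical userspace sketch of how these defines compose into a KVM_GET_ONE_REG id for the STA shmem_lo word on a 64-bit host (vcpu_fd and the surrounding setup are assumed):

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    unsigned long shmem_lo;
    struct kvm_one_reg one_reg = {
        .id = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
              KVM_REG_RISCV_SBI_STATE | KVM_REG_RISCV_SBI_STA |
              KVM_REG_RISCV_SBI_STA_REG(shmem_lo),
        .addr = (unsigned long)&shmem_lo,
    };
    ioctl(vcpu_fd, KVM_GET_ONE_REG, &one_reg);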


@ -85,6 +85,7 @@ obj-$(CONFIG_SMP) += sbi-ipi.o
obj-$(CONFIG_SMP) += cpu_ops_sbi.o
endif
obj-$(CONFIG_HOTPLUG_CPU) += cpu-hotplug.o
obj-$(CONFIG_PARAVIRT) += paravirt.o
obj-$(CONFIG_KGDB) += kgdb.o
obj-$(CONFIG_KEXEC_CORE) += kexec_relocate.o crash_save_regs.o machine_kexec.o
obj-$(CONFIG_KEXEC_FILE) += elf_kexec.o machine_kexec_file.o


@ -0,0 +1,135 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2023 Ventana Micro Systems Inc.
*/
#define pr_fmt(fmt) "riscv-pv: " fmt
#include <linux/cpuhotplug.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/jump_label.h>
#include <linux/kconfig.h>
#include <linux/kernel.h>
#include <linux/percpu-defs.h>
#include <linux/printk.h>
#include <linux/static_call.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/page.h>
#include <asm/paravirt.h>
#include <asm/sbi.h>
struct static_key paravirt_steal_enabled;
struct static_key paravirt_steal_rq_enabled;
static u64 native_steal_clock(int cpu)
{
return 0;
}
DEFINE_STATIC_CALL(pv_steal_clock, native_steal_clock);
static bool steal_acc = true;
static int __init parse_no_stealacc(char *arg)
{
steal_acc = false;
return 0;
}
early_param("no-steal-acc", parse_no_stealacc);
DEFINE_PER_CPU(struct sbi_sta_struct, steal_time) __aligned(64);
static bool __init has_pv_steal_clock(void)
{
if (sbi_spec_version >= sbi_mk_version(2, 0) &&
sbi_probe_extension(SBI_EXT_STA) > 0) {
pr_info("SBI STA extension detected\n");
return true;
}
return false;
}
static int sbi_sta_steal_time_set_shmem(unsigned long lo, unsigned long hi,
unsigned long flags)
{
struct sbiret ret;
ret = sbi_ecall(SBI_EXT_STA, SBI_EXT_STA_STEAL_TIME_SET_SHMEM,
lo, hi, flags, 0, 0, 0);
if (ret.error) {
if (lo == SBI_STA_SHMEM_DISABLE && hi == SBI_STA_SHMEM_DISABLE)
pr_warn("Failed to disable steal-time shmem");
else
pr_warn("Failed to set steal-time shmem");
return sbi_err_map_linux_errno(ret.error);
}
return 0;
}
static int pv_time_cpu_online(unsigned int cpu)
{
struct sbi_sta_struct *st = this_cpu_ptr(&steal_time);
phys_addr_t pa = __pa(st);
unsigned long lo = (unsigned long)pa;
unsigned long hi = IS_ENABLED(CONFIG_32BIT) ? upper_32_bits((u64)pa) : 0;
return sbi_sta_steal_time_set_shmem(lo, hi, 0);
}
static int pv_time_cpu_down_prepare(unsigned int cpu)
{
return sbi_sta_steal_time_set_shmem(SBI_STA_SHMEM_DISABLE,
SBI_STA_SHMEM_DISABLE, 0);
}
static u64 pv_time_steal_clock(int cpu)
{
struct sbi_sta_struct *st = per_cpu_ptr(&steal_time, cpu);
u32 sequence;
u64 steal;
/*
* Check the sequence field before and after reading the steal
* field. Repeat the read if it is different or odd.
*/
do {
sequence = READ_ONCE(st->sequence);
virt_rmb();
steal = READ_ONCE(st->steal);
virt_rmb();
} while ((le32_to_cpu(sequence) & 1) ||
sequence != READ_ONCE(st->sequence));
return le64_to_cpu(steal);
}
int __init pv_time_init(void)
{
int ret;
if (!has_pv_steal_clock())
return 0;
ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
"riscv/pv_time:online",
pv_time_cpu_online,
pv_time_cpu_down_prepare);
if (ret < 0)
return ret;
static_call_update(pv_steal_clock, pv_time_steal_clock);
static_key_slow_inc(&paravirt_steal_enabled);
if (steal_acc)
static_key_slow_inc(&paravirt_steal_rq_enabled);
pr_info("Computing paravirt steal-time\n");
return 0;
}


@ -12,6 +12,7 @@
#include <asm/sbi.h>
#include <asm/processor.h>
#include <asm/timex.h>
#include <asm/paravirt.h>
unsigned long riscv_timebase __ro_after_init;
EXPORT_SYMBOL_GPL(riscv_timebase);
@ -45,4 +46,6 @@ void __init time_init(void)
timer_probe();
tick_setup_hrtimer_broadcast();
pv_time_init();
}


@ -30,6 +30,7 @@ config KVM
select KVM_XFER_TO_GUEST_WORK
select KVM_GENERIC_MMU_NOTIFIER
select PREEMPT_NOTIFIERS
select SCHED_INFO
help
Support hosting virtualized guest machines.


@ -26,6 +26,7 @@ kvm-$(CONFIG_RISCV_SBI_V01) += vcpu_sbi_v01.o
kvm-y += vcpu_sbi_base.o
kvm-y += vcpu_sbi_replace.o
kvm-y += vcpu_sbi_hsm.o
kvm-y += vcpu_sbi_sta.o
kvm-y += vcpu_timer.o
kvm-$(CONFIG_RISCV_PMU_SBI) += vcpu_pmu.o vcpu_sbi_pmu.o
kvm-y += aia.o


@ -55,6 +55,7 @@ struct imsic {
/* IMSIC SW-file */
struct imsic_mrif *swfile;
phys_addr_t swfile_pa;
spinlock_t swfile_extirq_lock;
};
#define imsic_vs_csr_read(__c) \
@ -613,12 +614,23 @@ static void imsic_swfile_extirq_update(struct kvm_vcpu *vcpu)
{
struct imsic *imsic = vcpu->arch.aia_context.imsic_state;
struct imsic_mrif *mrif = imsic->swfile;
unsigned long flags;
/*
* The critical section is necessary during external interrupt
* updates to avoid the risk of losing interrupts due to potential
* interruptions between reading topei and updating pending status.
*/
spin_lock_irqsave(&imsic->swfile_extirq_lock, flags);
if (imsic_mrif_atomic_read(mrif, &mrif->eidelivery) &&
imsic_mrif_topei(mrif, imsic->nr_eix, imsic->nr_msis))
kvm_riscv_vcpu_set_interrupt(vcpu, IRQ_VS_EXT);
else
kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_EXT);
spin_unlock_irqrestore(&imsic->swfile_extirq_lock, flags);
}
static void imsic_swfile_read(struct kvm_vcpu *vcpu, bool clear,
@ -1039,6 +1051,7 @@ int kvm_riscv_vcpu_aia_imsic_init(struct kvm_vcpu *vcpu)
}
imsic->swfile = page_to_virt(swfile_page);
imsic->swfile_pa = page_to_phys(swfile_page);
spin_lock_init(&imsic->swfile_extirq_lock);
/* Setup IO device */
kvm_iodevice_init(&imsic->iodev, &imsic_iodoev_ops);


@ -83,6 +83,8 @@ static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
vcpu->arch.hfence_tail = 0;
memset(vcpu->arch.hfence_queue, 0, sizeof(vcpu->arch.hfence_queue));
kvm_riscv_vcpu_sbi_sta_reset(vcpu);
/* Reset the guest CSRs for hotplug usecase */
if (loaded)
kvm_arch_vcpu_load(vcpu, smp_processor_id());
@ -541,6 +543,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
kvm_riscv_vcpu_aia_load(vcpu, cpu);
kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
vcpu->cpu = cpu;
}
@ -614,6 +618,9 @@ static void kvm_riscv_check_vcpu_requests(struct kvm_vcpu *vcpu)
if (kvm_check_request(KVM_REQ_HFENCE, vcpu))
kvm_riscv_hfence_process(vcpu);
if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu))
kvm_riscv_vcpu_record_steal_time(vcpu);
}
}
@ -757,8 +764,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
/* Update HVIP CSR for current CPU */
kvm_riscv_update_hvip(vcpu);
if (ret <= 0 ||
kvm_riscv_gstage_vmid_ver_changed(&vcpu->kvm->arch.vmid) ||
if (kvm_riscv_gstage_vmid_ver_changed(&vcpu->kvm->arch.vmid) ||
kvm_request_pending(vcpu) ||
xfer_to_guest_mode_work_pending()) {
vcpu->mode = OUTSIDE_GUEST_MODE;


@ -485,7 +485,7 @@ static int kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu *vcpu,
if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN))
rc = kvm_riscv_vcpu_smstateen_set_csr(vcpu, reg_num,
reg_val);
break;
break;
default:
rc = -ENOENT;
break;
@ -931,50 +931,92 @@ static inline unsigned long num_isa_ext_regs(const struct kvm_vcpu *vcpu)
return copy_isa_ext_reg_indices(vcpu, NULL);;
}
static inline unsigned long num_sbi_ext_regs(void)
static int copy_sbi_ext_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
/*
* number of KVM_REG_RISCV_SBI_SINGLE +
* 2 x (number of KVM_REG_RISCV_SBI_MULTI)
*/
return KVM_RISCV_SBI_EXT_MAX + 2*(KVM_REG_RISCV_SBI_MULTI_REG_LAST+1);
}
unsigned int n = 0;
static int copy_sbi_ext_reg_indices(u64 __user *uindices)
{
int n;
/* copy KVM_REG_RISCV_SBI_SINGLE */
n = KVM_RISCV_SBI_EXT_MAX;
for (int i = 0; i < n; i++) {
for (int i = 0; i < KVM_RISCV_SBI_EXT_MAX; i++) {
u64 size = IS_ENABLED(CONFIG_32BIT) ?
KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_SBI_EXT |
KVM_REG_RISCV_SBI_SINGLE | i;
if (!riscv_vcpu_supports_sbi_ext(vcpu, i))
continue;
if (uindices) {
if (put_user(reg, uindices))
return -EFAULT;
uindices++;
}
n++;
}
/* copy KVM_REG_RISCV_SBI_MULTI */
n = KVM_REG_RISCV_SBI_MULTI_REG_LAST + 1;
for (int i = 0; i < n; i++) {
u64 size = IS_ENABLED(CONFIG_32BIT) ?
KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_SBI_EXT |
KVM_REG_RISCV_SBI_MULTI_EN | i;
return n;
}
if (uindices) {
if (put_user(reg, uindices))
return -EFAULT;
uindices++;
static unsigned long num_sbi_ext_regs(struct kvm_vcpu *vcpu)
{
return copy_sbi_ext_reg_indices(vcpu, NULL);
}
static int copy_sbi_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
int total = 0;
if (scontext->ext_status[KVM_RISCV_SBI_EXT_STA] == KVM_RISCV_SBI_EXT_STATUS_ENABLED) {
u64 size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
int n = sizeof(struct kvm_riscv_sbi_sta) / sizeof(unsigned long);
for (int i = 0; i < n; i++) {
u64 reg = KVM_REG_RISCV | size |
KVM_REG_RISCV_SBI_STATE |
KVM_REG_RISCV_SBI_STA | i;
if (uindices) {
if (put_user(reg, uindices))
return -EFAULT;
uindices++;
}
}
reg = KVM_REG_RISCV | size | KVM_REG_RISCV_SBI_EXT |
KVM_REG_RISCV_SBI_MULTI_DIS | i;
total += n;
}
return total;
}
static inline unsigned long num_sbi_regs(struct kvm_vcpu *vcpu)
{
return copy_sbi_reg_indices(vcpu, NULL);
}
static inline unsigned long num_vector_regs(const struct kvm_vcpu *vcpu)
{
if (!riscv_isa_extension_available(vcpu->arch.isa, v))
return 0;
/* vstart, vl, vtype, vcsr, vlenb and 32 vector regs */
return 37;
}
static int copy_vector_reg_indices(const struct kvm_vcpu *vcpu,
u64 __user *uindices)
{
const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
int n = num_vector_regs(vcpu);
u64 reg, size;
int i;
if (n == 0)
return 0;
/* copy vstart, vl, vtype, vcsr and vlenb */
size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64;
for (i = 0; i < 5; i++) {
reg = KVM_REG_RISCV | size | KVM_REG_RISCV_VECTOR | i;
if (uindices) {
if (put_user(reg, uindices))
@ -983,7 +1025,21 @@ static int copy_sbi_ext_reg_indices(u64 __user *uindices)
}
}
return num_sbi_ext_regs();
/* vector_regs have a variable 'vlenb' size */
size = __builtin_ctzl(cntx->vector.vlenb);
size <<= KVM_REG_SIZE_SHIFT;
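/* Worked example (assuming a host VLEN of 128 bits): vlenb is then
 * 16 bytes, __builtin_ctzl(16) is 4, and 4 << KVM_REG_SIZE_SHIFT
 * encodes KVM_REG_SIZE_U128 into the register id. */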
for (i = 0; i < 32; i++) {
reg = KVM_REG_RISCV | KVM_REG_RISCV_VECTOR | size |
KVM_REG_RISCV_VECTOR_REG(i);
if (uindices) {
if (put_user(reg, uindices))
return -EFAULT;
uindices++;
}
}
return n;
}
/*
@ -1001,8 +1057,10 @@ unsigned long kvm_riscv_vcpu_num_regs(struct kvm_vcpu *vcpu)
res += num_timer_regs();
res += num_fp_f_regs(vcpu);
res += num_fp_d_regs(vcpu);
res += num_vector_regs(vcpu);
res += num_isa_ext_regs(vcpu);
res += num_sbi_ext_regs();
res += num_sbi_ext_regs(vcpu);
res += num_sbi_regs(vcpu);
return res;
}
@ -1045,14 +1103,25 @@ int kvm_riscv_vcpu_copy_reg_indices(struct kvm_vcpu *vcpu,
return ret;
uindices += ret;
ret = copy_vector_reg_indices(vcpu, uindices);
if (ret < 0)
return ret;
uindices += ret;
ret = copy_isa_ext_reg_indices(vcpu, uindices);
if (ret < 0)
return ret;
uindices += ret;
ret = copy_sbi_ext_reg_indices(uindices);
ret = copy_sbi_ext_reg_indices(vcpu, uindices);
if (ret < 0)
return ret;
uindices += ret;
ret = copy_sbi_reg_indices(vcpu, uindices);
if (ret < 0)
return ret;
uindices += ret;
return 0;
}
@ -1075,12 +1144,14 @@ int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
case KVM_REG_RISCV_FP_D:
return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
KVM_REG_RISCV_FP_D);
case KVM_REG_RISCV_VECTOR:
return kvm_riscv_vcpu_set_reg_vector(vcpu, reg);
case KVM_REG_RISCV_ISA_EXT:
return kvm_riscv_vcpu_set_reg_isa_ext(vcpu, reg);
case KVM_REG_RISCV_SBI_EXT:
return kvm_riscv_vcpu_set_reg_sbi_ext(vcpu, reg);
case KVM_REG_RISCV_VECTOR:
return kvm_riscv_vcpu_set_reg_vector(vcpu, reg);
case KVM_REG_RISCV_SBI_STATE:
return kvm_riscv_vcpu_set_reg_sbi(vcpu, reg);
default:
break;
}
@ -1106,12 +1177,14 @@ int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
case KVM_REG_RISCV_FP_D:
return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
KVM_REG_RISCV_FP_D);
case KVM_REG_RISCV_VECTOR:
return kvm_riscv_vcpu_get_reg_vector(vcpu, reg);
case KVM_REG_RISCV_ISA_EXT:
return kvm_riscv_vcpu_get_reg_isa_ext(vcpu, reg);
case KVM_REG_RISCV_SBI_EXT:
return kvm_riscv_vcpu_get_reg_sbi_ext(vcpu, reg);
case KVM_REG_RISCV_VECTOR:
return kvm_riscv_vcpu_get_reg_vector(vcpu, reg);
case KVM_REG_RISCV_SBI_STATE:
return kvm_riscv_vcpu_get_reg_sbi(vcpu, reg);
default:
break;
}


@ -70,6 +70,10 @@ static const struct kvm_riscv_sbi_extension_entry sbi_ext[] = {
.ext_idx = KVM_RISCV_SBI_EXT_DBCN,
.ext_ptr = &vcpu_sbi_ext_dbcn,
},
{
.ext_idx = KVM_RISCV_SBI_EXT_STA,
.ext_ptr = &vcpu_sbi_ext_sta,
},
{
.ext_idx = KVM_RISCV_SBI_EXT_EXPERIMENTAL,
.ext_ptr = &vcpu_sbi_ext_experimental,
@ -80,6 +84,34 @@ static const struct kvm_riscv_sbi_extension_entry sbi_ext[] = {
},
};
static const struct kvm_riscv_sbi_extension_entry *
riscv_vcpu_get_sbi_ext(struct kvm_vcpu *vcpu, unsigned long idx)
{
const struct kvm_riscv_sbi_extension_entry *sext = NULL;
if (idx >= KVM_RISCV_SBI_EXT_MAX)
return NULL;
for (int i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
if (sbi_ext[i].ext_idx == idx) {
sext = &sbi_ext[i];
break;
}
}
return sext;
}
bool riscv_vcpu_supports_sbi_ext(struct kvm_vcpu *vcpu, int idx)
{
struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
const struct kvm_riscv_sbi_extension_entry *sext;
sext = riscv_vcpu_get_sbi_ext(vcpu, idx);
return sext && scontext->ext_status[sext->ext_idx] != KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE;
}
void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
@ -140,28 +172,19 @@ static int riscv_vcpu_set_sbi_ext_single(struct kvm_vcpu *vcpu,
unsigned long reg_num,
unsigned long reg_val)
{
unsigned long i;
const struct kvm_riscv_sbi_extension_entry *sext = NULL;
struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
if (reg_num >= KVM_RISCV_SBI_EXT_MAX)
return -ENOENT;
const struct kvm_riscv_sbi_extension_entry *sext;
if (reg_val != 1 && reg_val != 0)
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
if (sbi_ext[i].ext_idx == reg_num) {
sext = &sbi_ext[i];
break;
}
}
if (!sext)
sext = riscv_vcpu_get_sbi_ext(vcpu, reg_num);
if (!sext || scontext->ext_status[sext->ext_idx] == KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE)
return -ENOENT;
scontext->ext_status[sext->ext_idx] = (reg_val) ?
KVM_RISCV_SBI_EXT_AVAILABLE :
KVM_RISCV_SBI_EXT_UNAVAILABLE;
KVM_RISCV_SBI_EXT_STATUS_ENABLED :
KVM_RISCV_SBI_EXT_STATUS_DISABLED;
return 0;
}
@ -170,24 +193,16 @@ static int riscv_vcpu_get_sbi_ext_single(struct kvm_vcpu *vcpu,
unsigned long reg_num,
unsigned long *reg_val)
{
unsigned long i;
const struct kvm_riscv_sbi_extension_entry *sext = NULL;
struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
const struct kvm_riscv_sbi_extension_entry *sext;
if (reg_num >= KVM_RISCV_SBI_EXT_MAX)
return -ENOENT;
for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
if (sbi_ext[i].ext_idx == reg_num) {
sext = &sbi_ext[i];
break;
}
}
if (!sext)
sext = riscv_vcpu_get_sbi_ext(vcpu, reg_num);
if (!sext || scontext->ext_status[sext->ext_idx] == KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE)
return -ENOENT;
*reg_val = scontext->ext_status[sext->ext_idx] ==
KVM_RISCV_SBI_EXT_AVAILABLE;
KVM_RISCV_SBI_EXT_STATUS_ENABLED;
return 0;
}
@ -310,6 +325,69 @@ int kvm_riscv_vcpu_get_reg_sbi_ext(struct kvm_vcpu *vcpu,
return 0;
}
int kvm_riscv_vcpu_set_reg_sbi(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg)
{
unsigned long __user *uaddr =
(unsigned long __user *)(unsigned long)reg->addr;
unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
KVM_REG_SIZE_MASK |
KVM_REG_RISCV_SBI_STATE);
unsigned long reg_subtype, reg_val;
if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
return -EINVAL;
if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
return -EFAULT;
reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
switch (reg_subtype) {
case KVM_REG_RISCV_SBI_STA:
return kvm_riscv_vcpu_set_reg_sbi_sta(vcpu, reg_num, reg_val);
default:
return -EINVAL;
}
return 0;
}
int kvm_riscv_vcpu_get_reg_sbi(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg)
{
unsigned long __user *uaddr =
(unsigned long __user *)(unsigned long)reg->addr;
unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
KVM_REG_SIZE_MASK |
KVM_REG_RISCV_SBI_STATE);
unsigned long reg_subtype, reg_val;
int ret;
if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
return -EINVAL;
reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;
switch (reg_subtype) {
case KVM_REG_RISCV_SBI_STA:
ret = kvm_riscv_vcpu_get_reg_sbi_sta(vcpu, reg_num, &reg_val);
break;
default:
return -EINVAL;
}
if (ret)
return ret;
if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
return -EFAULT;
return 0;
}
const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext(
struct kvm_vcpu *vcpu, unsigned long extid)
{
@ -325,7 +403,7 @@ const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext(
if (ext->extid_start <= extid && ext->extid_end >= extid) {
if (entry->ext_idx >= KVM_RISCV_SBI_EXT_MAX ||
scontext->ext_status[entry->ext_idx] ==
KVM_RISCV_SBI_EXT_AVAILABLE)
KVM_RISCV_SBI_EXT_STATUS_ENABLED)
return ext;
return NULL;
@ -413,12 +491,12 @@ void kvm_riscv_vcpu_sbi_init(struct kvm_vcpu *vcpu)
if (ext->probe && !ext->probe(vcpu)) {
scontext->ext_status[entry->ext_idx] =
KVM_RISCV_SBI_EXT_UNAVAILABLE;
KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE;
continue;
}
scontext->ext_status[entry->ext_idx] = ext->default_unavail ?
KVM_RISCV_SBI_EXT_UNAVAILABLE :
KVM_RISCV_SBI_EXT_AVAILABLE;
scontext->ext_status[entry->ext_idx] = ext->default_disabled ?
KVM_RISCV_SBI_EXT_STATUS_DISABLED :
KVM_RISCV_SBI_EXT_STATUS_ENABLED;
}
}


@ -204,6 +204,6 @@ static int kvm_sbi_ext_dbcn_handler(struct kvm_vcpu *vcpu,
const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_dbcn = {
.extid_start = SBI_EXT_DBCN,
.extid_end = SBI_EXT_DBCN,
.default_unavail = true,
.default_disabled = true,
.handler = kvm_sbi_ext_dbcn_handler,
};


@ -0,0 +1,208 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2023 Ventana Micro Systems Inc.
*/
#include <linux/kconfig.h>
#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/mm.h>
#include <linux/sizes.h>
#include <asm/bug.h>
#include <asm/current.h>
#include <asm/kvm_vcpu_sbi.h>
#include <asm/page.h>
#include <asm/sbi.h>
#include <asm/uaccess.h>
void kvm_riscv_vcpu_sbi_sta_reset(struct kvm_vcpu *vcpu)
{
vcpu->arch.sta.shmem = INVALID_GPA;
vcpu->arch.sta.last_steal = 0;
}
void kvm_riscv_vcpu_record_steal_time(struct kvm_vcpu *vcpu)
{
gpa_t shmem = vcpu->arch.sta.shmem;
u64 last_steal = vcpu->arch.sta.last_steal;
u32 *sequence_ptr, sequence;
u64 *steal_ptr, steal;
unsigned long hva;
gfn_t gfn;
if (shmem == INVALID_GPA)
return;
/*
* shmem is 64-byte aligned (see the enforcement in
* kvm_sbi_sta_steal_time_set_shmem()) and the size of sbi_sta_struct
* is 64 bytes, so we know all its offsets are in the same page.
*/
gfn = shmem >> PAGE_SHIFT;
hva = kvm_vcpu_gfn_to_hva(vcpu, gfn);
if (WARN_ON(kvm_is_error_hva(hva))) {
vcpu->arch.sta.shmem = INVALID_GPA;
return;
}
sequence_ptr = (u32 *)(hva + offset_in_page(shmem) +
offsetof(struct sbi_sta_struct, sequence));
steal_ptr = (u64 *)(hva + offset_in_page(shmem) +
offsetof(struct sbi_sta_struct, steal));
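/*
 * The update below mirrors a seqcount: the sequence is bumped to an
 * odd value while steal is in flight and back to even afterwards, so
 * a guest reader (see pv_time_steal_clock() earlier in this series)
 * retries rather than consuming a torn value.
 */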
if (WARN_ON(get_user(sequence, sequence_ptr)))
return;
sequence = le32_to_cpu(sequence);
sequence += 1;
if (WARN_ON(put_user(cpu_to_le32(sequence), sequence_ptr)))
return;
if (!WARN_ON(get_user(steal, steal_ptr))) {
steal = le64_to_cpu(steal);
vcpu->arch.sta.last_steal = READ_ONCE(current->sched_info.run_delay);
steal += vcpu->arch.sta.last_steal - last_steal;
WARN_ON(put_user(cpu_to_le64(steal), steal_ptr));
}
sequence += 1;
WARN_ON(put_user(cpu_to_le32(sequence), sequence_ptr));
kvm_vcpu_mark_page_dirty(vcpu, gfn);
}
static int kvm_sbi_sta_steal_time_set_shmem(struct kvm_vcpu *vcpu)
{
struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
unsigned long shmem_phys_lo = cp->a0;
unsigned long shmem_phys_hi = cp->a1;
u32 flags = cp->a2;
struct sbi_sta_struct zero_sta = {0};
unsigned long hva;
bool writable;
gpa_t shmem;
int ret;
if (flags != 0)
return SBI_ERR_INVALID_PARAM;
if (shmem_phys_lo == SBI_STA_SHMEM_DISABLE &&
shmem_phys_hi == SBI_STA_SHMEM_DISABLE) {
vcpu->arch.sta.shmem = INVALID_GPA;
return 0;
}
if (shmem_phys_lo & (SZ_64 - 1))
return SBI_ERR_INVALID_PARAM;
shmem = shmem_phys_lo;
if (shmem_phys_hi != 0) {
if (IS_ENABLED(CONFIG_32BIT))
shmem |= ((gpa_t)shmem_phys_hi << 32);
else
return SBI_ERR_INVALID_ADDRESS;
}
hva = kvm_vcpu_gfn_to_hva_prot(vcpu, shmem >> PAGE_SHIFT, &writable);
if (kvm_is_error_hva(hva) || !writable)
return SBI_ERR_INVALID_ADDRESS;
ret = kvm_vcpu_write_guest(vcpu, shmem, &zero_sta, sizeof(zero_sta));
if (ret)
return SBI_ERR_FAILURE;
vcpu->arch.sta.shmem = shmem;
vcpu->arch.sta.last_steal = current->sched_info.run_delay;
return 0;
}
static int kvm_sbi_ext_sta_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
struct kvm_vcpu_sbi_return *retdata)
{
struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
unsigned long funcid = cp->a6;
int ret;
switch (funcid) {
case SBI_EXT_STA_STEAL_TIME_SET_SHMEM:
ret = kvm_sbi_sta_steal_time_set_shmem(vcpu);
break;
default:
ret = SBI_ERR_NOT_SUPPORTED;
break;
}
retdata->err_val = ret;
return 0;
}
static unsigned long kvm_sbi_ext_sta_probe(struct kvm_vcpu *vcpu)
{
return !!sched_info_on();
}
const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_sta = {
.extid_start = SBI_EXT_STA,
.extid_end = SBI_EXT_STA,
.handler = kvm_sbi_ext_sta_handler,
.probe = kvm_sbi_ext_sta_probe,
};
int kvm_riscv_vcpu_get_reg_sbi_sta(struct kvm_vcpu *vcpu,
unsigned long reg_num,
unsigned long *reg_val)
{
switch (reg_num) {
case KVM_REG_RISCV_SBI_STA_REG(shmem_lo):
*reg_val = (unsigned long)vcpu->arch.sta.shmem;
break;
case KVM_REG_RISCV_SBI_STA_REG(shmem_hi):
if (IS_ENABLED(CONFIG_32BIT))
*reg_val = upper_32_bits(vcpu->arch.sta.shmem);
else
*reg_val = 0;
break;
default:
return -EINVAL;
}
return 0;
}
int kvm_riscv_vcpu_set_reg_sbi_sta(struct kvm_vcpu *vcpu,
unsigned long reg_num,
unsigned long reg_val)
{
switch (reg_num) {
case KVM_REG_RISCV_SBI_STA_REG(shmem_lo):
if (IS_ENABLED(CONFIG_32BIT)) {
gpa_t hi = upper_32_bits(vcpu->arch.sta.shmem);
vcpu->arch.sta.shmem = reg_val;
vcpu->arch.sta.shmem |= hi << 32;
} else {
vcpu->arch.sta.shmem = reg_val;
}
break;
case KVM_REG_RISCV_SBI_STA_REG(shmem_hi):
if (IS_ENABLED(CONFIG_32BIT)) {
gpa_t lo = lower_32_bits(vcpu->arch.sta.shmem);
vcpu->arch.sta.shmem = ((gpa_t)reg_val << 32);
vcpu->arch.sta.shmem |= lo;
} else if (reg_val != 0) {
return -EINVAL;
}
break;
default:
return -EINVAL;
}
return 0;
}


@ -15,7 +15,7 @@
.altmacro
.option norelax
ENTRY(__kvm_riscv_switch_to)
SYM_FUNC_START(__kvm_riscv_switch_to)
/* Save Host GPRs (except A0 and T0-T6) */
REG_S ra, (KVM_ARCH_HOST_RA)(a0)
REG_S sp, (KVM_ARCH_HOST_SP)(a0)
@ -45,7 +45,7 @@ ENTRY(__kvm_riscv_switch_to)
REG_L t0, (KVM_ARCH_GUEST_SSTATUS)(a0)
REG_L t1, (KVM_ARCH_GUEST_HSTATUS)(a0)
REG_L t2, (KVM_ARCH_GUEST_SCOUNTEREN)(a0)
la t4, __kvm_switch_return
la t4, .Lkvm_switch_return
REG_L t5, (KVM_ARCH_GUEST_SEPC)(a0)
/* Save Host and Restore Guest SSTATUS */
@ -113,7 +113,7 @@ ENTRY(__kvm_riscv_switch_to)
/* Back to Host */
.align 2
__kvm_switch_return:
.Lkvm_switch_return:
/* Swap Guest A0 with SSCRATCH */
csrrw a0, CSR_SSCRATCH, a0
@ -208,9 +208,9 @@ __kvm_switch_return:
/* Return to C code */
ret
ENDPROC(__kvm_riscv_switch_to)
SYM_FUNC_END(__kvm_riscv_switch_to)
ENTRY(__kvm_riscv_unpriv_trap)
SYM_CODE_START(__kvm_riscv_unpriv_trap)
/*
* We assume that faulting unpriv load/store instruction is
* 4-byte long and blindly increment SEPC by 4.
@ -231,12 +231,10 @@ ENTRY(__kvm_riscv_unpriv_trap)
csrr a1, CSR_HTINST
REG_S a1, (KVM_ARCH_TRAP_HTINST)(a0)
sret
ENDPROC(__kvm_riscv_unpriv_trap)
SYM_CODE_END(__kvm_riscv_unpriv_trap)
#ifdef CONFIG_FPU
.align 3
.global __kvm_riscv_fp_f_save
__kvm_riscv_fp_f_save:
SYM_FUNC_START(__kvm_riscv_fp_f_save)
csrr t2, CSR_SSTATUS
li t1, SR_FS
csrs CSR_SSTATUS, t1
@ -276,10 +274,9 @@ __kvm_riscv_fp_f_save:
sw t0, KVM_ARCH_FP_F_FCSR(a0)
csrw CSR_SSTATUS, t2
ret
SYM_FUNC_END(__kvm_riscv_fp_f_save)
.align 3
.global __kvm_riscv_fp_d_save
__kvm_riscv_fp_d_save:
SYM_FUNC_START(__kvm_riscv_fp_d_save)
csrr t2, CSR_SSTATUS
li t1, SR_FS
csrs CSR_SSTATUS, t1
@ -319,10 +316,9 @@ __kvm_riscv_fp_d_save:
sw t0, KVM_ARCH_FP_D_FCSR(a0)
csrw CSR_SSTATUS, t2
ret
SYM_FUNC_END(__kvm_riscv_fp_d_save)
.align 3
.global __kvm_riscv_fp_f_restore
__kvm_riscv_fp_f_restore:
SYM_FUNC_START(__kvm_riscv_fp_f_restore)
csrr t2, CSR_SSTATUS
li t1, SR_FS
lw t0, KVM_ARCH_FP_F_FCSR(a0)
@ -362,10 +358,9 @@ __kvm_riscv_fp_f_restore:
fscsr t0
csrw CSR_SSTATUS, t2
ret
SYM_FUNC_END(__kvm_riscv_fp_f_restore)
.align 3
.global __kvm_riscv_fp_d_restore
__kvm_riscv_fp_d_restore:
SYM_FUNC_START(__kvm_riscv_fp_d_restore)
csrr t2, CSR_SSTATUS
li t1, SR_FS
lw t0, KVM_ARCH_FP_D_FCSR(a0)
@ -405,4 +400,5 @@ __kvm_riscv_fp_d_restore:
fscsr t0
csrw CSR_SSTATUS, t2
ret
SYM_FUNC_END(__kvm_riscv_fp_d_restore)
#endif


@ -76,6 +76,7 @@ int kvm_riscv_vcpu_alloc_vector_context(struct kvm_vcpu *vcpu,
cntx->vector.datap = kmalloc(riscv_v_vsize, GFP_KERNEL);
if (!cntx->vector.datap)
return -ENOMEM;
cntx->vector.vlenb = riscv_v_vsize / 32;
vcpu->arch.host_context.vector.datap = kzalloc(riscv_v_vsize, GFP_KERNEL);
if (!vcpu->arch.host_context.vector.datap)
@ -115,6 +116,9 @@ static int kvm_riscv_vcpu_vreg_addr(struct kvm_vcpu *vcpu,
case KVM_REG_RISCV_VECTOR_CSR_REG(vcsr):
*reg_addr = &cntx->vector.vcsr;
break;
case KVM_REG_RISCV_VECTOR_CSR_REG(vlenb):
*reg_addr = &cntx->vector.vlenb;
break;
case KVM_REG_RISCV_VECTOR_CSR_REG(datap):
default:
return -ENOENT;
@ -173,6 +177,18 @@ int kvm_riscv_vcpu_set_reg_vector(struct kvm_vcpu *vcpu,
if (!riscv_isa_extension_available(isa, v))
return -ENOENT;
if (reg_num == KVM_REG_RISCV_VECTOR_CSR_REG(vlenb)) {
struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
unsigned long reg_val;
if (copy_from_user(&reg_val, uaddr, reg_size))
return -EFAULT;
if (reg_val != cntx->vector.vlenb)
return -EINVAL;
return 0;
}
rc = kvm_riscv_vcpu_vreg_addr(vcpu, reg_num, reg_size, &reg_addr);
if (rc)
return rc;
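
The vlenb register is effectively read-only: the set handler above accepts only the value the vCPU already has (riscv_v_vsize covers all 32 vector registers, hence the division by 32 when deriving vlenb). A hedged userspace sketch of that contract, with made-up values:

```c
#include <errno.h>
#include <stdio.h>

/* illustrative only: a "set" that tolerates writes of the current value */
static int set_fixed_reg(unsigned long cur, unsigned long new_val)
{
	if (new_val != cur)
		return -EINVAL;   /* the value is dictated by the host */
	return 0;
}

int main(void)
{
	unsigned long vlenb = 16;   /* e.g. VLEN = 128 bits */

	printf("%d %d\n", set_fixed_reg(vlenb, 16), set_fixed_reg(vlenb, 32));
	return 0;
}
```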


@ -44,8 +44,7 @@ CONFIG_KEXEC_FILE=y
CONFIG_KEXEC_SIG=y
CONFIG_CRASH_DUMP=y
CONFIG_LIVEPATCH=y
CONFIG_MARCH_ZEC12=y
CONFIG_TUNE_ZEC12=y
CONFIG_MARCH_Z13=y
CONFIG_NR_CPUS=512
CONFIG_NUMA=y
CONFIG_HZ_100=y
@ -76,7 +75,6 @@ CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODULE_UNLOAD_TAINT_TRACKING=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
CONFIG_MODULE_SIG_SHA256=y
CONFIG_BLK_DEV_THROTTLING=y
CONFIG_BLK_WBT=y
CONFIG_BLK_CGROUP_IOLATENCY=y
@ -93,6 +91,7 @@ CONFIG_UNIXWARE_DISKLABEL=y
CONFIG_IOSCHED_BFQ=y
CONFIG_BINFMT_MISC=m
CONFIG_ZSWAP=y
CONFIG_ZSWAP_ZPOOL_DEFAULT_ZBUD=y
CONFIG_ZSMALLOC_STAT=y
CONFIG_SLUB_STATS=y
# CONFIG_COMPAT_BRK is not set
@ -619,6 +618,9 @@ CONFIG_BTRFS_FS_POSIX_ACL=y
CONFIG_BTRFS_DEBUG=y
CONFIG_BTRFS_ASSERT=y
CONFIG_NILFS2_FS=m
CONFIG_BCACHEFS_FS=y
CONFIG_BCACHEFS_QUOTA=y
CONFIG_BCACHEFS_POSIX_ACL=y
CONFIG_FS_DAX=y
CONFIG_EXPORTFS_BLOCK_OPS=y
CONFIG_FS_ENCRYPTION=y
@ -691,7 +693,6 @@ CONFIG_PERSISTENT_KEYRINGS=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_KEY_NOTIFICATIONS=y
CONFIG_SECURITY=y
CONFIG_SECURITY_NETWORK=y
CONFIG_HARDENED_USERCOPY=y
CONFIG_FORTIFY_SOURCE=y
CONFIG_SECURITY_SELINUX=y


@ -42,8 +42,7 @@ CONFIG_KEXEC_FILE=y
CONFIG_KEXEC_SIG=y
CONFIG_CRASH_DUMP=y
CONFIG_LIVEPATCH=y
CONFIG_MARCH_ZEC12=y
CONFIG_TUNE_ZEC12=y
CONFIG_MARCH_Z13=y
CONFIG_NR_CPUS=512
CONFIG_NUMA=y
CONFIG_HZ_100=y
@ -71,7 +70,6 @@ CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODULE_UNLOAD_TAINT_TRACKING=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
CONFIG_MODULE_SIG_SHA256=y
CONFIG_BLK_DEV_THROTTLING=y
CONFIG_BLK_WBT=y
CONFIG_BLK_CGROUP_IOLATENCY=y
@ -88,6 +86,7 @@ CONFIG_UNIXWARE_DISKLABEL=y
CONFIG_IOSCHED_BFQ=y
CONFIG_BINFMT_MISC=m
CONFIG_ZSWAP=y
CONFIG_ZSWAP_ZPOOL_DEFAULT_ZBUD=y
CONFIG_ZSMALLOC_STAT=y
# CONFIG_COMPAT_BRK is not set
CONFIG_MEMORY_HOTPLUG=y
@ -605,6 +604,9 @@ CONFIG_OCFS2_FS=m
CONFIG_BTRFS_FS=y
CONFIG_BTRFS_FS_POSIX_ACL=y
CONFIG_NILFS2_FS=m
CONFIG_BCACHEFS_FS=m
CONFIG_BCACHEFS_QUOTA=y
CONFIG_BCACHEFS_POSIX_ACL=y
CONFIG_FS_DAX=y
CONFIG_EXPORTFS_BLOCK_OPS=y
CONFIG_FS_ENCRYPTION=y
@ -677,7 +679,6 @@ CONFIG_PERSISTENT_KEYRINGS=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_KEY_NOTIFICATIONS=y
CONFIG_SECURITY=y
CONFIG_SECURITY_NETWORK=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SELINUX_BOOTPARAM=y
CONFIG_SECURITY_LOCKDOWN_LSM=y


@ -9,8 +9,7 @@ CONFIG_BPF_SYSCALL=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_CRASH_DUMP=y
CONFIG_MARCH_ZEC12=y
CONFIG_TUNE_ZEC12=y
CONFIG_MARCH_Z13=y
# CONFIG_COMPAT is not set
CONFIG_NR_CPUS=2
CONFIG_HZ_100=y


@ -79,7 +79,7 @@ static inline int test_fp_ctl(u32 fpc)
#define KERNEL_VXR_HIGH (KERNEL_VXR_V16V23|KERNEL_VXR_V24V31)
#define KERNEL_VXR (KERNEL_VXR_LOW|KERNEL_VXR_HIGH)
#define KERNEL_FPR (KERNEL_FPC|KERNEL_VXR_V0V7)
#define KERNEL_FPR (KERNEL_FPC|KERNEL_VXR_LOW)
struct kernel_fpu;


@ -63,10 +63,6 @@
cond_syscall(__s390x_sys_##name); \
cond_syscall(__s390_sys_##name)
#define SYS_NI(name) \
SYSCALL_ALIAS(__s390x_sys_##name, sys_ni_posix_timers); \
SYSCALL_ALIAS(__s390_sys_##name, sys_ni_posix_timers)
#define COMPAT_SYSCALL_DEFINEx(x, name, ...) \
long __s390_compat_sys##name(struct pt_regs *regs); \
ALLOW_ERROR_INJECTION(__s390_compat_sys##name, ERRNO); \
@ -85,15 +81,11 @@
/*
* As some compat syscalls may not be implemented, we need to expand
* COND_SYSCALL_COMPAT in kernel/sys_ni.c and COMPAT_SYS_NI in
* kernel/time/posix-stubs.c to cover this case as well.
* COND_SYSCALL_COMPAT in kernel/sys_ni.c to cover this case as well.
*/
#define COND_SYSCALL_COMPAT(name) \
cond_syscall(__s390_compat_sys_##name)
#define COMPAT_SYS_NI(name) \
SYSCALL_ALIAS(__s390_compat_sys_##name, sys_ni_posix_timers)
#define __S390_SYS_STUBx(x, name, ...) \
long __s390_sys##name(struct pt_regs *regs); \
ALLOW_ERROR_INJECTION(__s390_sys##name, ERRNO); \
@ -124,9 +116,6 @@
#define COND_SYSCALL(name) \
cond_syscall(__s390x_sys_##name)
#define SYS_NI(name) \
SYSCALL_ALIAS(__s390x_sys_##name, sys_ni_posix_timers)
#define __S390_SYS_STUBx(x, fullname, name, ...)
#endif /* CONFIG_COMPAT */


@ -86,9 +86,6 @@ extern long __ia32_sys_ni_syscall(const struct pt_regs *regs);
return sys_ni_syscall(); \
}
#define __SYS_NI(abi, name) \
SYSCALL_ALIAS(__##abi##_##name, sys_ni_posix_timers);
#ifdef CONFIG_X86_64
#define __X64_SYS_STUB0(name) \
__SYS_STUB0(x64, sys_##name)
@ -100,13 +97,10 @@ extern long __ia32_sys_ni_syscall(const struct pt_regs *regs);
#define __X64_COND_SYSCALL(name) \
__COND_SYSCALL(x64, sys_##name)
#define __X64_SYS_NI(name) \
__SYS_NI(x64, sys_##name)
#else /* CONFIG_X86_64 */
#define __X64_SYS_STUB0(name)
#define __X64_SYS_STUBx(x, name, ...)
#define __X64_COND_SYSCALL(name)
#define __X64_SYS_NI(name)
#endif /* CONFIG_X86_64 */
#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
@ -120,13 +114,10 @@ extern long __ia32_sys_ni_syscall(const struct pt_regs *regs);
#define __IA32_COND_SYSCALL(name) \
__COND_SYSCALL(ia32, sys_##name)
#define __IA32_SYS_NI(name) \
__SYS_NI(ia32, sys_##name)
#else /* CONFIG_X86_32 || CONFIG_IA32_EMULATION */
#define __IA32_SYS_STUB0(name)
#define __IA32_SYS_STUBx(x, name, ...)
#define __IA32_COND_SYSCALL(name)
#define __IA32_SYS_NI(name)
#endif /* CONFIG_X86_32 || CONFIG_IA32_EMULATION */
#ifdef CONFIG_IA32_EMULATION
@ -135,8 +126,7 @@ extern long __ia32_sys_ni_syscall(const struct pt_regs *regs);
* additional wrappers (aptly named __ia32_sys_xyzzy) which decode the
* ia32 regs in the proper order for shared or "common" syscalls. As some
* syscalls may not be implemented, we need to expand COND_SYSCALL in
* kernel/sys_ni.c and SYS_NI in kernel/time/posix-stubs.c to cover this
* case as well.
* kernel/sys_ni.c to cover this case as well.
*/
#define __IA32_COMPAT_SYS_STUB0(name) \
__SYS_STUB0(ia32, compat_sys_##name)
@ -148,14 +138,10 @@ extern long __ia32_sys_ni_syscall(const struct pt_regs *regs);
#define __IA32_COMPAT_COND_SYSCALL(name) \
__COND_SYSCALL(ia32, compat_sys_##name)
#define __IA32_COMPAT_SYS_NI(name) \
__SYS_NI(ia32, compat_sys_##name)
#else /* CONFIG_IA32_EMULATION */
#define __IA32_COMPAT_SYS_STUB0(name)
#define __IA32_COMPAT_SYS_STUBx(x, name, ...)
#define __IA32_COMPAT_COND_SYSCALL(name)
#define __IA32_COMPAT_SYS_NI(name)
#endif /* CONFIG_IA32_EMULATION */
@ -175,13 +161,10 @@ extern long __ia32_sys_ni_syscall(const struct pt_regs *regs);
#define __X32_COMPAT_COND_SYSCALL(name) \
__COND_SYSCALL(x64, compat_sys_##name)
#define __X32_COMPAT_SYS_NI(name) \
__SYS_NI(x64, compat_sys_##name)
#else /* CONFIG_X86_X32_ABI */
#define __X32_COMPAT_SYS_STUB0(name)
#define __X32_COMPAT_SYS_STUBx(x, name, ...)
#define __X32_COMPAT_COND_SYSCALL(name)
#define __X32_COMPAT_SYS_NI(name)
#endif /* CONFIG_X86_X32_ABI */
@ -212,17 +195,12 @@ extern long __ia32_sys_ni_syscall(const struct pt_regs *regs);
/*
* As some compat syscalls may not be implemented, we need to expand
* COND_SYSCALL_COMPAT in kernel/sys_ni.c and COMPAT_SYS_NI in
* kernel/time/posix-stubs.c to cover this case as well.
* COND_SYSCALL_COMPAT in kernel/sys_ni.c to cover this case as well.
*/
#define COND_SYSCALL_COMPAT(name) \
__IA32_COMPAT_COND_SYSCALL(name) \
__X32_COMPAT_COND_SYSCALL(name)
#define COMPAT_SYS_NI(name) \
__IA32_COMPAT_SYS_NI(name) \
__X32_COMPAT_SYS_NI(name)
#endif /* CONFIG_COMPAT */
#define __SYSCALL_DEFINEx(x, name, ...) \
@ -243,8 +221,8 @@ extern long __ia32_sys_ni_syscall(const struct pt_regs *regs);
* As the generic SYSCALL_DEFINE0() macro does not decode any parameters for
* obvious reasons, and passing struct pt_regs *regs to it in %rdi does not
* hurt, we only need to re-define it here to keep the naming congruent to
* SYSCALL_DEFINEx() -- which is essential for the COND_SYSCALL() and SYS_NI()
* macros to work correctly.
* SYSCALL_DEFINEx() -- which is essential for the COND_SYSCALL() macro
* to work correctly.
*/
#define SYSCALL_DEFINE0(sname) \
SYSCALL_METADATA(_##sname, 0); \
@ -257,10 +235,6 @@ extern long __ia32_sys_ni_syscall(const struct pt_regs *regs);
__X64_COND_SYSCALL(name) \
__IA32_COND_SYSCALL(name)
#define SYS_NI(name) \
__X64_SYS_NI(name) \
__IA32_SYS_NI(name)
/*
* For VSYSCALLS, we need to declare these three syscalls with the new


@ -293,6 +293,7 @@ acpi_parse_lapic(union acpi_subtable_headers * header, const unsigned long end)
processor->processor_id, /* ACPI ID */
processor->lapic_flags & ACPI_MADT_ENABLED);
has_lapic_cpus = true;
return 0;
}
@ -1134,7 +1135,6 @@ static int __init acpi_parse_madt_lapic_entries(void)
if (!count) {
count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC,
acpi_parse_lapic, MAX_LOCAL_APIC);
has_lapic_cpus = count > 0;
x2count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_X2APIC,
acpi_parse_x2apic, MAX_LOCAL_APIC);
}


@ -255,6 +255,16 @@ static void __init_or_module noinline optimize_nops(u8 *instr, size_t len)
}
}
static void __init_or_module noinline optimize_nops_inplace(u8 *instr, size_t len)
{
unsigned long flags;
local_irq_save(flags);
optimize_nops(instr, len);
sync_core();
local_irq_restore(flags);
}
/*
* In this context, "source" is where the instructions are placed in the
* section .altinstr_replacement, for example during kernel build by the
@ -438,7 +448,7 @@ void __init_or_module noinline apply_alternatives(struct alt_instr *start,
* patch if feature is *NOT* present.
*/
if (!boot_cpu_has(a->cpuid) == !(a->flags & ALT_FLAG_NOT)) {
optimize_nops(instr, a->instrlen);
optimize_nops_inplace(instr, a->instrlen);
continue;
}
@ -1685,8 +1695,8 @@ void __init_or_module text_poke_early(void *addr, const void *opcode,
} else {
local_irq_save(flags);
memcpy(addr, opcode, len);
local_irq_restore(flags);
sync_core();
local_irq_restore(flags);
/*
* Could also do a CLFLUSH here to speed up CPU recovery; but


@ -255,6 +255,22 @@ SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
testl $X2APIC_ENABLE, %eax
jnz .Lread_apicid_msr
#ifdef CONFIG_X86_X2APIC
/*
* If system is in X2APIC mode then MMIO base might not be
* mapped causing the MMIO read below to fault. Faults can't
* be handled at that point.
*/
cmpl $0, x2apic_mode(%rip)
jz .Lread_apicid_mmio
/* Force the AP into X2APIC mode. */
orl $X2APIC_ENABLE, %eax
wrmsr
jmp .Lread_apicid_msr
#endif
.Lread_apicid_mmio:
/* Read the APIC ID from the fix-mapped MMIO space. */
movq apic_mmio_base(%rip), %rcx
addq $APIC_ID, %rcx


@ -2972,6 +2972,25 @@ static void sev_es_vcpu_after_set_cpuid(struct vcpu_svm *svm)
set_msr_interception(vcpu, svm->msrpm, MSR_TSC_AUX, v_tsc_aux, v_tsc_aux);
}
/*
* For SEV-ES, accesses to MSR_IA32_XSS should not be intercepted if
* the host/guest supports its use.
*
* guest_can_use() checks a number of requirements on the host/guest to
* ensure that MSR_IA32_XSS is available, but it might report true even
* if X86_FEATURE_XSAVES isn't configured in the guest to ensure host
* MSR_IA32_XSS is always properly restored. For SEV-ES, it is better
* to further check that the guest CPUID actually supports
* X86_FEATURE_XSAVES so that accesses to MSR_IA32_XSS by misbehaved
* guests will still get intercepted and caught in the normal
* kvm_emulate_rdmsr()/kvm_emulated_wrmsr() paths.
*/
if (guest_can_use(vcpu, X86_FEATURE_XSAVES) &&
guest_cpuid_has(vcpu, X86_FEATURE_XSAVES))
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_XSS, 1, 1);
else
set_msr_interception(vcpu, svm->msrpm, MSR_IA32_XSS, 0, 0);
}
void sev_vcpu_after_set_cpuid(struct vcpu_svm *svm)


@ -103,6 +103,7 @@ static const struct svm_direct_access_msrs {
{ .index = MSR_IA32_LASTBRANCHTOIP, .always = false },
{ .index = MSR_IA32_LASTINTFROMIP, .always = false },
{ .index = MSR_IA32_LASTINTTOIP, .always = false },
{ .index = MSR_IA32_XSS, .always = false },
{ .index = MSR_EFER, .always = false },
{ .index = MSR_IA32_CR_PAT, .always = false },
{ .index = MSR_AMD64_SEV_ES_GHCB, .always = true },


@ -30,7 +30,7 @@
#define IOPM_SIZE PAGE_SIZE * 3
#define MSRPM_SIZE PAGE_SIZE * 2
#define MAX_DIRECT_ACCESS_MSRS 46
#define MAX_DIRECT_ACCESS_MSRS 47
#define MSRPM_OFFSETS 32
extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
extern bool npt_enabled;


@ -9,6 +9,7 @@ config XEN
select PARAVIRT_CLOCK
select X86_HV_CALLBACK_VECTOR
depends on X86_64 || (X86_32 && X86_PAE)
depends on X86_64 || (X86_GENERIC || MPENTIUM4 || MCORE2 || MATOM || MK8)
depends on X86_LOCAL_APIC && X86_TSC
help
This is the Linux Xen port. Enabling this will allow the


@ -11,6 +11,7 @@
#include <linux/module.h>
#include <asm/unaligned.h>
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
@ -44,6 +45,7 @@ struct vhci_data {
bool wakeup;
__u16 msft_opcode;
bool aosp_capable;
atomic_t initialized;
};
static int vhci_open_dev(struct hci_dev *hdev)
@ -75,11 +77,10 @@ static int vhci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1);
mutex_lock(&data->open_mutex);
skb_queue_tail(&data->readq, skb);
mutex_unlock(&data->open_mutex);
wake_up_interruptible(&data->read_wait);
if (atomic_read(&data->initialized))
wake_up_interruptible(&data->read_wait);
return 0;
}
@ -464,7 +465,8 @@ static int __vhci_create_device(struct vhci_data *data, __u8 opcode)
skb_put_u8(skb, 0xff);
skb_put_u8(skb, opcode);
put_unaligned_le16(hdev->id, skb_put(skb, 2));
skb_queue_tail(&data->readq, skb);
skb_queue_head(&data->readq, skb);
atomic_inc(&data->initialized);
wake_up_interruptible(&data->read_wait);
return 0;
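
A toy model of the race fix above, with the skb queue and waitqueue elided: readers are only woken once the device-announcement packet sits at the head of the queue and the flag is set, so they can never observe frames before the setup packet.

```c
#include <stdatomic.h>
#include <stdio.h>

static atomic_int initialized;

static void send_frame(void)
{
	/* skb_queue_tail(&readq, skb); */
	if (atomic_load(&initialized))
		puts("wake reader");
}

static void create_device(void)
{
	/* skb_queue_head(&readq, setup_skb); -- read first */
	atomic_fetch_add(&initialized, 1);
	puts("wake reader");
}

int main(void)
{
	send_frame();      /* no wakeup: setup packet not queued yet */
	create_device();
	send_frame();      /* wakeups delivered from here on */
	return 0;
}
```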


@ -2158,13 +2158,23 @@ static int sysc_reset(struct sysc *ddata)
sysc_val = sysc_read_sysconfig(ddata);
sysc_val |= sysc_mask;
sysc_write(ddata, sysc_offset, sysc_val);
/* Flush posted write */
/*
* Some devices need a delay before reading registers
* after reset. Presumably a srst_udelay is not needed
* for devices that use a rstctrl register reset.
*/
if (ddata->cfg.srst_udelay)
fsleep(ddata->cfg.srst_udelay);
/*
* Flush posted write. For devices needing srst_udelay
* this should trigger an interconnect error if the
* srst_udelay value is needed but not configured.
*/
sysc_val = sysc_read_sysconfig(ddata);
}
if (ddata->cfg.srst_udelay)
fsleep(ddata->cfg.srst_udelay);
if (ddata->post_reset_quirk)
ddata->post_reset_quirk(ddata);


@ -282,13 +282,15 @@ static void dwapb_irq_enable(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct dwapb_gpio *gpio = to_dwapb_gpio(gc);
irq_hw_number_t hwirq = irqd_to_hwirq(d);
unsigned long flags;
u32 val;
raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
val = dwapb_read(gpio, GPIO_INTEN);
val |= BIT(irqd_to_hwirq(d));
val = dwapb_read(gpio, GPIO_INTEN) | BIT(hwirq);
dwapb_write(gpio, GPIO_INTEN, val);
val = dwapb_read(gpio, GPIO_INTMASK) & ~BIT(hwirq);
dwapb_write(gpio, GPIO_INTMASK, val);
raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
}
@ -296,12 +298,14 @@ static void dwapb_irq_disable(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct dwapb_gpio *gpio = to_dwapb_gpio(gc);
irq_hw_number_t hwirq = irqd_to_hwirq(d);
unsigned long flags;
u32 val;
raw_spin_lock_irqsave(&gc->bgpio_lock, flags);
val = dwapb_read(gpio, GPIO_INTEN);
val &= ~BIT(irqd_to_hwirq(d));
val = dwapb_read(gpio, GPIO_INTMASK) | BIT(hwirq);
dwapb_write(gpio, GPIO_INTMASK, val);
val = dwapb_read(gpio, GPIO_INTEN) & ~BIT(hwirq);
dwapb_write(gpio, GPIO_INTEN, val);
raw_spin_unlock_irqrestore(&gc->bgpio_lock, flags);
}
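
The ordering matters: enable before unmask, and mask before disable, so the line is never live while only half-configured. A register-free sketch of the same sequencing (bit positions and register names invented for illustration):

```c
#include <stdio.h>

static unsigned int inten, intmask = 0xffffffffu;  /* toy registers */

static void irq_enable(unsigned int hwirq)
{
	inten |= 1u << hwirq;          /* enable first ... */
	intmask &= ~(1u << hwirq);     /* ... then unmask */
}

static void irq_disable(unsigned int hwirq)
{
	intmask |= 1u << hwirq;        /* mask first ... */
	inten &= ~(1u << hwirq);       /* ... then disable */
}

int main(void)
{
	irq_enable(3);
	printf("inten=%08x mask=%08x\n", inten, intmask);
	irq_disable(3);
	printf("inten=%08x mask=%08x\n", inten, intmask);
	return 0;
}
```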


@ -2481,10 +2481,7 @@ static int lineinfo_unwatch(struct gpio_chardev_data *cdev, void __user *ip)
return 0;
}
/*
* gpio_ioctl() - ioctl handler for the GPIO chardev
*/
static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
static long gpio_ioctl_unlocked(struct file *file, unsigned int cmd, unsigned long arg)
{
struct gpio_chardev_data *cdev = file->private_data;
struct gpio_device *gdev = cdev->gdev;
@ -2521,6 +2518,17 @@ static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
}
}
/*
* gpio_ioctl() - ioctl handler for the GPIO chardev
*/
static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct gpio_chardev_data *cdev = file->private_data;
return call_ioctl_locked(file, cmd, arg, cdev->gdev,
gpio_ioctl_unlocked);
}
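
The refactor splits the ioctl into an unlocked worker and a thin wrapper that takes the device lock via call_ioctl_locked(). The same shape in plain C, with a pthread mutex standing in for the kernel lock (all names here are illustrative):

```c
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;

static long my_ioctl_unlocked(unsigned int cmd, unsigned long arg)
{
	return (long)(cmd + arg);   /* stand-in for the real work */
}

static long my_ioctl(unsigned int cmd, unsigned long arg)
{
	long ret;

	pthread_mutex_lock(&dev_lock);
	ret = my_ioctl_unlocked(cmd, arg);
	pthread_mutex_unlock(&dev_lock);
	return ret;
}

int main(void)
{
	printf("%ld\n", my_ioctl(1, 2));
	return 0;
}
```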
#ifdef CONFIG_COMPAT
static long gpio_ioctl_compat(struct file *file, unsigned int cmd,
unsigned long arg)


@ -285,6 +285,7 @@ static void amdgpu_vm_bo_reset_state_machine(struct amdgpu_vm *vm)
list_for_each_entry_safe(vm_bo, tmp, &vm->idle, vm_status) {
struct amdgpu_bo *bo = vm_bo->bo;
vm_bo->moved = true;
if (!bo || bo->tbo.type != ttm_bo_type_kernel)
list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
else if (bo->parent)


@ -1653,18 +1653,24 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
if (test_bit(gpuidx, prange->bitmap_access))
bitmap_set(ctx->bitmap, gpuidx, 1);
}
/*
* If prange is already mapped or has the always-mapped flag set,
* update the mapping on GPUs with the ACCESS attribute
*/
if (bitmap_empty(ctx->bitmap, MAX_GPU_INSTANCE)) {
if (prange->mapped_to_gpu ||
prange->flags & KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED)
bitmap_copy(ctx->bitmap, prange->bitmap_access, MAX_GPU_INSTANCE);
}
} else {
bitmap_or(ctx->bitmap, prange->bitmap_access,
prange->bitmap_aip, MAX_GPU_INSTANCE);
}
if (bitmap_empty(ctx->bitmap, MAX_GPU_INSTANCE)) {
bitmap_copy(ctx->bitmap, prange->bitmap_access, MAX_GPU_INSTANCE);
if (!prange->mapped_to_gpu ||
bitmap_empty(ctx->bitmap, MAX_GPU_INSTANCE)) {
r = 0;
goto free_ctx;
}
r = 0;
goto free_ctx;
}
if (prange->actual_loc && !prange->ttm_res) {


@ -1014,13 +1014,20 @@ static enum bp_result get_ss_info_v4_5(
DC_LOG_BIOS("AS_SIGNAL_TYPE_HDMI ss_percentage: %d\n", ss_info->spread_spectrum_percentage);
break;
case AS_SIGNAL_TYPE_DISPLAY_PORT:
ss_info->spread_spectrum_percentage =
if (bp->base.integrated_info) {
DC_LOG_BIOS("gpuclk_ss_percentage (unit of 0.001 percent): %d\n", bp->base.integrated_info->gpuclk_ss_percentage);
ss_info->spread_spectrum_percentage =
bp->base.integrated_info->gpuclk_ss_percentage;
ss_info->type.CENTER_MODE =
bp->base.integrated_info->gpuclk_ss_type;
} else {
ss_info->spread_spectrum_percentage =
disp_cntl_tbl->dp_ss_percentage;
ss_info->spread_spectrum_range =
ss_info->spread_spectrum_range =
disp_cntl_tbl->dp_ss_rate_10hz * 10;
if (disp_cntl_tbl->dp_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
ss_info->type.CENTER_MODE = true;
if (disp_cntl_tbl->dp_ss_mode & ATOM_SS_CENTRE_SPREAD_MODE)
ss_info->type.CENTER_MODE = true;
}
DC_LOG_BIOS("AS_SIGNAL_TYPE_DISPLAY_PORT ss_percentage: %d\n", ss_info->spread_spectrum_percentage);
break;
case AS_SIGNAL_TYPE_GPU_PLL:
@ -2386,13 +2393,7 @@ static enum bp_result get_vram_info_v30(
return BP_RESULT_BADBIOSTABLE;
info->num_chans = info_v30->channel_num;
/* As suggested by VBIOS we should always use
* dram_channel_width_bytes = 2 when using VRAM
* table version 3.0. This is because the channel_width
* param in the VRAM info table is changed in 7000 series and
* no longer represents the memory channel width.
*/
info->dram_channel_width_bytes = 2;
info->dram_channel_width_bytes = (1 << info_v30->channel_width) / 8;
return result;
}
@ -2820,6 +2821,8 @@ static enum bp_result get_integrated_info_v2_2(
info->ma_channel_number = info_v2_2->umachannelnumber;
info->dp_ss_control =
le16_to_cpu(info_v2_2->reserved1);
info->gpuclk_ss_percentage = info_v2_2->gpuclk_ss_percentage;
info->gpuclk_ss_type = info_v2_2->gpuclk_ss_type;
for (i = 0; i < NUMBER_OF_UCHAR_FOR_GUID; ++i) {
info->ext_disp_conn_info.gu_id[i] =


@ -5095,18 +5095,28 @@ void dc_mclk_switch_using_fw_based_vblank_stretch_shut_down(struct dc *dc)
*/
bool dc_is_dmub_outbox_supported(struct dc *dc)
{
/* DCN31 B0 USB4 DPIA needs dmub notifications for interrupts */
if (dc->ctx->asic_id.chip_family == FAMILY_YELLOW_CARP &&
dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0 &&
!dc->debug.dpia_debug.bits.disable_dpia)
return true;
switch (dc->ctx->asic_id.chip_family) {
if (dc->ctx->asic_id.chip_family == AMDGPU_FAMILY_GC_11_0_1 &&
!dc->debug.dpia_debug.bits.disable_dpia)
return true;
case FAMILY_YELLOW_CARP:
/* DCN31 B0 USB4 DPIA needs dmub notifications for interrupts */
if (dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0 &&
!dc->debug.dpia_debug.bits.disable_dpia)
return true;
break;
case AMDGPU_FAMILY_GC_11_0_1:
case AMDGPU_FAMILY_GC_11_5_0:
if (!dc->debug.dpia_debug.bits.disable_dpia)
return true;
break;
default:
break;
}
/* dmub aux needs dmub notifications to be enabled */
return dc->debug.enable_dmub_aux_for_legacy_ddc;
}
/**


@ -5420,7 +5420,7 @@ static void CalculateOutputLink(
*OutBpp = TruncToValidBPP((1 - Downspreading / 100) * 13500, OutputLinkDPLanes, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output,
OutputFormat, DSCInputBitPerComponent, NumberOfDSCSlices, (dml_uint_t)AudioSampleRate, AudioSampleLayout, ODMModeNoDSC, ODMModeDSC, RequiredSlots);
if (OutBpp == 0 && PHYCLKD32PerState < 20000 / 32 && DSCEnable == dml_dsc_enable_if_necessary && ForcedOutputLinkBPP == 0) {
if (*OutBpp == 0 && PHYCLKD32PerState < 20000 / 32 && DSCEnable == dml_dsc_enable_if_necessary && ForcedOutputLinkBPP == 0) {
*RequiresDSC = true;
LinkDSCEnable = true;
*OutBpp = TruncToValidBPP((1 - Downspreading / 100) * 13500, OutputLinkDPLanes, HTotal, HActive, PixelClockBackEnd, ForcedOutputLinkBPP, LinkDSCEnable, Output,

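The one-character fix above is worth dwelling on: the old condition compared the pointer OutBpp against 0, which is never true for a valid argument, so the DSC retry path was dead code. A standalone reproduction:

```c
#include <stdio.h>

static void pick_bpp(unsigned int *out)
{
	*out = 0;          /* pretend the link could not fit the stream */

	if (out == 0)      /* old check: compares the pointer, never true */
		puts("retry (never reached)");
	if (*out == 0)     /* fixed check: compares the value */
		puts("retry with DSC enabled");
}

int main(void)
{
	unsigned int bpp;

	pick_bpp(&bpp);
	return 0;
}
```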

@ -960,6 +960,12 @@ void dcn32_init_hw(struct dc *dc)
dc->caps.dmub_caps.subvp_psr = dc->ctx->dmub_srv->dmub->feature_caps.subvp_psr_support;
dc->caps.dmub_caps.gecc_enable = dc->ctx->dmub_srv->dmub->feature_caps.gecc_enable;
dc->caps.dmub_caps.mclk_sw = dc->ctx->dmub_srv->dmub->feature_caps.fw_assisted_mclk_switch;
if (dc->ctx->dmub_srv->dmub->fw_version <
DMUB_FW_VERSION(7, 0, 35)) {
dc->debug.force_disable_subvp = true;
dc->debug.disable_fpo_optimizations = true;
}
}
}


@ -417,6 +417,8 @@ struct integrated_info {
/* V2.1 */
struct edp_info edp1_info;
struct edp_info edp2_info;
uint32_t gpuclk_ss_percentage;
uint32_t gpuclk_ss_type;
};
/*


@ -2465,7 +2465,8 @@ static void intel_program_port_clock_ctl(struct intel_encoder *encoder,
val |= XELPDP_FORWARD_CLOCK_UNGATE;
if (is_hdmi_frl(crtc_state->port_clock))
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) &&
is_hdmi_frl(crtc_state->port_clock))
val |= XELPDP_DDI_CLOCK_SELECT(XELPDP_DDI_CLOCK_SELECT_DIV18CLK);
else
val |= XELPDP_DDI_CLOCK_SELECT(XELPDP_DDI_CLOCK_SELECT_MAXPCLK);


@ -3747,8 +3747,8 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
if (!active)
goto out;
intel_dsc_get_config(pipe_config);
intel_bigjoiner_get_config(pipe_config);
intel_dsc_get_config(pipe_config);
if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
DISPLAY_VER(dev_priv) >= 11)
@ -6033,6 +6033,17 @@ static int intel_async_flip_check_uapi(struct intel_atomic_state *state,
return -EINVAL;
}
/*
* FIXME: Bigjoiner+async flip is busted currently.
* Remove this check once the issues are fixed.
*/
if (new_crtc_state->bigjoiner_pipes) {
drm_dbg_kms(&i915->drm,
"[CRTC:%d:%s] async flip disallowed with bigjoiner\n",
crtc->base.base.id, crtc->base.name);
return -EINVAL;
}
for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
new_plane_state, i) {
if (plane->pipe != crtc->pipe)


@ -389,7 +389,7 @@ disable_all_flip_queue_events(struct drm_i915_private *i915)
enum intel_dmc_id dmc_id;
/* TODO: check if the following applies to all D13+ platforms. */
if (!IS_DG2(i915) && !IS_TIGERLAKE(i915))
if (!IS_TIGERLAKE(i915))
return;
for_each_dmc_id(dmc_id) {
@ -493,6 +493,45 @@ void intel_dmc_disable_pipe(struct drm_i915_private *i915, enum pipe pipe)
intel_de_rmw(i915, PIPEDMC_CONTROL(pipe), PIPEDMC_ENABLE, 0);
}
static bool is_dmc_evt_ctl_reg(struct drm_i915_private *i915,
enum intel_dmc_id dmc_id, i915_reg_t reg)
{
u32 offset = i915_mmio_reg_offset(reg);
u32 start = i915_mmio_reg_offset(DMC_EVT_CTL(i915, dmc_id, 0));
u32 end = i915_mmio_reg_offset(DMC_EVT_CTL(i915, dmc_id, DMC_EVENT_HANDLER_COUNT_GEN12));
return offset >= start && offset < end;
}
static bool disable_dmc_evt(struct drm_i915_private *i915,
enum intel_dmc_id dmc_id,
i915_reg_t reg, u32 data)
{
if (!is_dmc_evt_ctl_reg(i915, dmc_id, reg))
return false;
/* keep all pipe DMC events disabled by default */
if (dmc_id != DMC_FW_MAIN)
return true;
return false;
}
static u32 dmc_mmiodata(struct drm_i915_private *i915,
struct intel_dmc *dmc,
enum intel_dmc_id dmc_id, int i)
{
if (disable_dmc_evt(i915, dmc_id,
dmc->dmc_info[dmc_id].mmioaddr[i],
dmc->dmc_info[dmc_id].mmiodata[i]))
return REG_FIELD_PREP(DMC_EVT_CTL_TYPE_MASK,
DMC_EVT_CTL_TYPE_EDGE_0_1) |
REG_FIELD_PREP(DMC_EVT_CTL_EVENT_ID_MASK,
DMC_EVT_CTL_EVENT_ID_FALSE);
else
return dmc->dmc_info[dmc_id].mmiodata[i];
}
/**
* intel_dmc_load_program() - write the firmware from memory to register.
* @i915: i915 drm device.
@ -532,7 +571,7 @@ void intel_dmc_load_program(struct drm_i915_private *i915)
for_each_dmc_id(dmc_id) {
for (i = 0; i < dmc->dmc_info[dmc_id].mmio_count; i++) {
intel_de_write(i915, dmc->dmc_info[dmc_id].mmioaddr[i],
dmc->dmc_info[dmc_id].mmiodata[i]);
dmc_mmiodata(i915, dmc, dmc_id, i));
}
}


@ -175,7 +175,7 @@ hwm_power1_max_interval_show(struct device *dev, struct device_attribute *attr,
* tau4 = (4 | x) << y
* but add 2 when doing the final right shift to account for units
*/
tau4 = ((1 << x_w) | x) << y;
tau4 = (u64)((1 << x_w) | x) << y;
/* val in hwmon interface units (millisec) */
out = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time + x_w);
@ -211,7 +211,7 @@ hwm_power1_max_interval_store(struct device *dev,
r = FIELD_PREP(PKG_MAX_WIN, PKG_MAX_WIN_DEFAULT);
x = REG_FIELD_GET(PKG_MAX_WIN_X, r);
y = REG_FIELD_GET(PKG_MAX_WIN_Y, r);
tau4 = ((1 << x_w) | x) << y;
tau4 = (u64)((1 << x_w) | x) << y;
max_win = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time + x_w);
if (val > max_win)

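Both hunks fix the same C pitfall: (1 << x_w) | x is a 32-bit int, so shifting it by a large y truncates before the result is widened for the u64 assignment; casting first keeps all the bits. A standalone demonstration with made-up field values:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int x_w = 2, x = 3, y = 30;   /* illustrative values */

	/* the shift happens in 32 bits, then the result is widened */
	uint64_t truncated = ((1u << x_w) | x) << y;
	/* the fix: widen to 64 bits first, then shift */
	uint64_t correct = (uint64_t)((1u << x_w) | x) << y;

	printf("truncated=%llu correct=%llu\n",
	       (unsigned long long)truncated, (unsigned long long)correct);
	return 0;
}
```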

@ -325,28 +325,28 @@ struct joycon_imu_cal {
* All the controller's button values are stored in a u32.
* They can be accessed with bitwise ANDs.
*/
static const u32 JC_BTN_Y = BIT(0);
static const u32 JC_BTN_X = BIT(1);
static const u32 JC_BTN_B = BIT(2);
static const u32 JC_BTN_A = BIT(3);
static const u32 JC_BTN_SR_R = BIT(4);
static const u32 JC_BTN_SL_R = BIT(5);
static const u32 JC_BTN_R = BIT(6);
static const u32 JC_BTN_ZR = BIT(7);
static const u32 JC_BTN_MINUS = BIT(8);
static const u32 JC_BTN_PLUS = BIT(9);
static const u32 JC_BTN_RSTICK = BIT(10);
static const u32 JC_BTN_LSTICK = BIT(11);
static const u32 JC_BTN_HOME = BIT(12);
static const u32 JC_BTN_CAP = BIT(13); /* capture button */
static const u32 JC_BTN_DOWN = BIT(16);
static const u32 JC_BTN_UP = BIT(17);
static const u32 JC_BTN_RIGHT = BIT(18);
static const u32 JC_BTN_LEFT = BIT(19);
static const u32 JC_BTN_SR_L = BIT(20);
static const u32 JC_BTN_SL_L = BIT(21);
static const u32 JC_BTN_L = BIT(22);
static const u32 JC_BTN_ZL = BIT(23);
#define JC_BTN_Y BIT(0)
#define JC_BTN_X BIT(1)
#define JC_BTN_B BIT(2)
#define JC_BTN_A BIT(3)
#define JC_BTN_SR_R BIT(4)
#define JC_BTN_SL_R BIT(5)
#define JC_BTN_R BIT(6)
#define JC_BTN_ZR BIT(7)
#define JC_BTN_MINUS BIT(8)
#define JC_BTN_PLUS BIT(9)
#define JC_BTN_RSTICK BIT(10)
#define JC_BTN_LSTICK BIT(11)
#define JC_BTN_HOME BIT(12)
#define JC_BTN_CAP BIT(13) /* capture button */
#define JC_BTN_DOWN BIT(16)
#define JC_BTN_UP BIT(17)
#define JC_BTN_RIGHT BIT(18)
#define JC_BTN_LEFT BIT(19)
#define JC_BTN_SR_L BIT(20)
#define JC_BTN_SL_L BIT(21)
#define JC_BTN_L BIT(22)
#define JC_BTN_ZL BIT(23)
enum joycon_msg_type {
JOYCON_MSG_TYPE_NONE,
@ -927,14 +927,27 @@ static int joycon_request_calibration(struct joycon_ctlr *ctlr)
*/
static void joycon_calc_imu_cal_divisors(struct joycon_ctlr *ctlr)
{
int i;
int i, divz = 0;
for (i = 0; i < 3; i++) {
ctlr->imu_cal_accel_divisor[i] = ctlr->accel_cal.scale[i] -
ctlr->accel_cal.offset[i];
ctlr->imu_cal_gyro_divisor[i] = ctlr->gyro_cal.scale[i] -
ctlr->gyro_cal.offset[i];
if (ctlr->imu_cal_accel_divisor[i] == 0) {
ctlr->imu_cal_accel_divisor[i] = 1;
divz++;
}
if (ctlr->imu_cal_gyro_divisor[i] == 0) {
ctlr->imu_cal_gyro_divisor[i] = 1;
divz++;
}
}
if (divz)
hid_warn(ctlr->hdev, "inaccurate IMU divisors (%d)\n", divz);
}
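
Calibration scale and offset can be equal (for instance with all-zero cal data), which would leave a zero divisor for later sensor math. A sketch of the clamping, with invented values:

```c
#include <stdio.h>

static int cal_divisor(int scale, int offset, int *divz)
{
	int d = scale - offset;

	if (d == 0) {
		d = 1;        /* never divide by zero later */
		(*divz)++;
	}
	return d;
}

int main(void)
{
	int divz = 0;

	printf("%d %d\n", cal_divisor(16384, 0, &divz), cal_divisor(0, 0, &divz));
	if (divz)
		printf("inaccurate IMU divisors (%d)\n", divz);
	return 0;
}
```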
static const s16 DFLT_ACCEL_OFFSET /*= 0*/;
@ -1163,16 +1176,16 @@ static void joycon_parse_imu_report(struct joycon_ctlr *ctlr,
JC_IMU_SAMPLES_PER_DELTA_AVG) {
ctlr->imu_avg_delta_ms = ctlr->imu_delta_samples_sum /
ctlr->imu_delta_samples_count;
/* don't ever want divide by zero shenanigans */
if (ctlr->imu_avg_delta_ms == 0) {
ctlr->imu_avg_delta_ms = 1;
hid_warn(ctlr->hdev,
"calculated avg imu delta of 0\n");
}
ctlr->imu_delta_samples_count = 0;
ctlr->imu_delta_samples_sum = 0;
}
/* don't ever want divide by zero shenanigans */
if (ctlr->imu_avg_delta_ms == 0) {
ctlr->imu_avg_delta_ms = 1;
hid_warn(ctlr->hdev, "calculated avg imu delta of 0\n");
}
/* useful for debugging IMU sample rate */
hid_dbg(ctlr->hdev,
"imu_report: ms=%u last_ms=%u delta=%u avg_delta=%u\n",


@ -249,18 +249,46 @@ static u32 aspeed_i2c_slave_irq(struct aspeed_i2c_bus *bus, u32 irq_status)
if (!slave)
return 0;
command = readl(bus->base + ASPEED_I2C_CMD_REG);
/*
* Handle stop conditions early, prior to SLAVE_MATCH. Some masters may drive
* transfers with low enough latency between the nak/stop phase of the current
* command and the start/address phase of the following command that the
* interrupts are coalesced by the time we process them.
*/
if (irq_status & ASPEED_I2CD_INTR_NORMAL_STOP) {
irq_handled |= ASPEED_I2CD_INTR_NORMAL_STOP;
bus->slave_state = ASPEED_I2C_SLAVE_STOP;
}
/* Slave was requested, restart state machine. */
if (irq_status & ASPEED_I2CD_INTR_TX_NAK &&
bus->slave_state == ASPEED_I2C_SLAVE_READ_PROCESSED) {
irq_handled |= ASPEED_I2CD_INTR_TX_NAK;
bus->slave_state = ASPEED_I2C_SLAVE_STOP;
}
/* Propagate any stop conditions to the slave implementation. */
if (bus->slave_state == ASPEED_I2C_SLAVE_STOP) {
i2c_slave_event(slave, I2C_SLAVE_STOP, &value);
bus->slave_state = ASPEED_I2C_SLAVE_INACTIVE;
}
/*
* Now that we've dealt with any potentially coalesced stop conditions,
* address any start conditions.
*/
if (irq_status & ASPEED_I2CD_INTR_SLAVE_MATCH) {
irq_handled |= ASPEED_I2CD_INTR_SLAVE_MATCH;
bus->slave_state = ASPEED_I2C_SLAVE_START;
}
/* Slave is not currently active, irq was for someone else. */
/*
* If the slave has been stopped and not started then slave interrupt
* handling is complete.
*/
if (bus->slave_state == ASPEED_I2C_SLAVE_INACTIVE)
return irq_handled;
command = readl(bus->base + ASPEED_I2C_CMD_REG);
dev_dbg(bus->dev, "slave irq status 0x%08x, cmd 0x%08x\n",
irq_status, command);
@ -279,17 +307,6 @@ static u32 aspeed_i2c_slave_irq(struct aspeed_i2c_bus *bus, u32 irq_status)
irq_handled |= ASPEED_I2CD_INTR_RX_DONE;
}
/* Slave was asked to stop. */
if (irq_status & ASPEED_I2CD_INTR_NORMAL_STOP) {
irq_handled |= ASPEED_I2CD_INTR_NORMAL_STOP;
bus->slave_state = ASPEED_I2C_SLAVE_STOP;
}
if (irq_status & ASPEED_I2CD_INTR_TX_NAK &&
bus->slave_state == ASPEED_I2C_SLAVE_READ_PROCESSED) {
irq_handled |= ASPEED_I2CD_INTR_TX_NAK;
bus->slave_state = ASPEED_I2C_SLAVE_STOP;
}
switch (bus->slave_state) {
case ASPEED_I2C_SLAVE_READ_REQUESTED:
if (unlikely(irq_status & ASPEED_I2CD_INTR_TX_ACK))
@ -324,8 +341,7 @@ static u32 aspeed_i2c_slave_irq(struct aspeed_i2c_bus *bus, u32 irq_status)
i2c_slave_event(slave, I2C_SLAVE_WRITE_RECEIVED, &value);
break;
case ASPEED_I2C_SLAVE_STOP:
i2c_slave_event(slave, I2C_SLAVE_STOP, &value);
bus->slave_state = ASPEED_I2C_SLAVE_INACTIVE;
/* Stop event handling is done early. Unreachable. */
break;
case ASPEED_I2C_SLAVE_START:
/* Slave was just started. Waiting for the next event. */;

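A toy state machine showing why stop handling has to run before start handling when one coalesced status word carries both events (bit positions are invented, not the ASPEED register layout):

```c
#include <stdio.h>

#define INTR_NORMAL_STOP  (1u << 0)
#define INTR_SLAVE_MATCH  (1u << 1)

enum slave_state { INACTIVE, START, STOP };

static enum slave_state handle_irq(enum slave_state s, unsigned int status)
{
	if (status & INTR_NORMAL_STOP)
		s = STOP;                 /* finish the previous transfer */
	if (s == STOP) {
		/* i2c_slave_event(slave, I2C_SLAVE_STOP, ...); */
		s = INACTIVE;
	}
	if (status & INTR_SLAVE_MATCH)
		s = START;                /* only then begin the next one */
	return s;
}

int main(void)
{
	enum slave_state s = INACTIVE;

	/* both events arrive in a single coalesced status read */
	s = handle_irq(s, INTR_NORMAL_STOP | INTR_SLAVE_MATCH);
	printf("state=%d\n", s);          /* 1 == START */
	return 0;
}
```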

@ -858,6 +858,7 @@ static int geni_i2c_probe(struct platform_device *pdev)
ret = geni_se_resources_on(&gi2c->se);
if (ret) {
dev_err(dev, "Error turning on resources %d\n", ret);
clk_disable_unprepare(gi2c->core_clk);
return ret;
}
proto = geni_se_read_proto(&gi2c->se);
@ -877,8 +878,11 @@ static int geni_i2c_probe(struct platform_device *pdev)
/* FIFO is disabled, so we can only use GPI DMA */
gi2c->gpi_mode = true;
ret = setup_gpi_dma(gi2c);
if (ret)
if (ret) {
geni_se_resources_off(&gi2c->se);
clk_disable_unprepare(gi2c->core_clk);
return dev_err_probe(dev, ret, "Failed to setup GPI DMA mode\n");
}
dev_dbg(dev, "Using GPI DMA mode for I2C\n");
} else {
@ -891,6 +895,8 @@ static int geni_i2c_probe(struct platform_device *pdev)
if (!tx_depth) {
dev_err(dev, "Invalid TX FIFO depth\n");
geni_se_resources_off(&gi2c->se);
clk_disable_unprepare(gi2c->core_clk);
return -EINVAL;
}


@ -178,6 +178,7 @@ struct rk3x_i2c_soc_data {
* @clk: function clk for rk3399 or function & Bus clks for others
* @pclk: Bus clk for rk3399
* @clk_rate_nb: i2c clk rate change notify
* @irq: irq number
* @t: I2C known timing information
* @lock: spinlock for the i2c bus
* @wait: the waitqueue to wait for i2c transfer
@ -200,6 +201,7 @@ struct rk3x_i2c {
struct clk *clk;
struct clk *pclk;
struct notifier_block clk_rate_nb;
int irq;
/* Settings */
struct i2c_timings t;
@ -1087,13 +1089,18 @@ static int rk3x_i2c_xfer_common(struct i2c_adapter *adap,
spin_unlock_irqrestore(&i2c->lock, flags);
rk3x_i2c_start(i2c);
if (!polling) {
rk3x_i2c_start(i2c);
timeout = wait_event_timeout(i2c->wait, !i2c->busy,
msecs_to_jiffies(WAIT_TIMEOUT));
} else {
disable_irq(i2c->irq);
rk3x_i2c_start(i2c);
timeout = rk3x_i2c_wait_xfer_poll(i2c);
enable_irq(i2c->irq);
}
spin_lock_irqsave(&i2c->lock, flags);
@ -1310,6 +1317,8 @@ static int rk3x_i2c_probe(struct platform_device *pdev)
return ret;
}
i2c->irq = irq;
platform_set_drvdata(pdev, i2c);
if (i2c->soc_data->calc_timings == rk3x_i2c_v0_calc_timings) {


@ -393,17 +393,17 @@ static const unsigned int kx022a_odrs[] = {
* (range / 2^bits) * g = (range / 2^bits) * 9.80665 m/s^2
* => KX022A uses 16 bit (HiRes mode - assume the low 8 bits are zeroed
* in low-power mode(?) )
* => +/-2G => 4 / 2^16 * 9,80665 * 10^6 (to scale to micro)
* => +/-2G - 598.550415
* +/-4G - 1197.10083
* +/-8G - 2394.20166
* +/-16G - 4788.40332
* => +/-2G => 4 / 2^16 * 9,80665
* => +/-2G - 0.000598550415
* +/-4G - 0.00119710083
* +/-8G - 0.00239420166
* +/-16G - 0.00478840332
*/
static const int kx022a_scale_table[][2] = {
{ 598, 550415 },
{ 1197, 100830 },
{ 2394, 201660 },
{ 4788, 403320 },
{ 0, 598550 },
{ 0, 1197101 },
{ 0, 2394202 },
{ 0, 4788403 },
};
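
The table rewrite follows directly from the comment's arithmetic: at +/-2G the per-LSB step is 4 / 2^16 * 9.80665 ≈ 0.000598550415 m/s², which only fits the IIO_VAL_INT_PLUS_NANO format. A quick check:

```c
#include <stdio.h>

int main(void)
{
	/* 16-bit output, +/-2G full scale => 4 g spread over 2^16 codes */
	double per_lsb = 4.0 / 65536.0 * 9.80665;   /* m/s^2 per LSB */

	printf("%.12f\n", per_lsb);                 /* 0.000598550415 */
	return 0;
}
```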
static int kx022a_read_avail(struct iio_dev *indio_dev,
@ -422,7 +422,7 @@ static int kx022a_read_avail(struct iio_dev *indio_dev,
*vals = (const int *)kx022a_scale_table;
*length = ARRAY_SIZE(kx022a_scale_table) *
ARRAY_SIZE(kx022a_scale_table[0]);
*type = IIO_VAL_INT_PLUS_MICRO;
*type = IIO_VAL_INT_PLUS_NANO;
return IIO_AVAIL_LIST;
default:
return -EINVAL;
@ -485,6 +485,20 @@ static int kx022a_turn_on_unlock(struct kx022a_data *data)
return ret;
}
static int kx022a_write_raw_get_fmt(struct iio_dev *idev,
struct iio_chan_spec const *chan,
long mask)
{
switch (mask) {
case IIO_CHAN_INFO_SCALE:
return IIO_VAL_INT_PLUS_NANO;
case IIO_CHAN_INFO_SAMP_FREQ:
return IIO_VAL_INT_PLUS_MICRO;
default:
return -EINVAL;
}
}
static int kx022a_write_raw(struct iio_dev *idev,
struct iio_chan_spec const *chan,
int val, int val2, long mask)
@ -629,7 +643,7 @@ static int kx022a_read_raw(struct iio_dev *idev,
kx022a_reg2scale(regval, val, val2);
return IIO_VAL_INT_PLUS_MICRO;
return IIO_VAL_INT_PLUS_NANO;
}
return -EINVAL;
@ -856,6 +870,7 @@ static int kx022a_fifo_flush(struct iio_dev *idev, unsigned int samples)
static const struct iio_info kx022a_info = {
.read_raw = &kx022a_read_raw,
.write_raw = &kx022a_write_raw,
.write_raw_get_fmt = &kx022a_write_raw_get_fmt,
.read_avail = &kx022a_read_avail,
.validate_trigger = iio_validate_own_trigger,


@ -93,6 +93,10 @@ static const struct iio_chan_spec imx93_adc_iio_channels[] = {
IMX93_ADC_CHAN(1),
IMX93_ADC_CHAN(2),
IMX93_ADC_CHAN(3),
IMX93_ADC_CHAN(4),
IMX93_ADC_CHAN(5),
IMX93_ADC_CHAN(6),
IMX93_ADC_CHAN(7),
};
static void imx93_adc_power_down(struct imx93_adc *adc)


@ -918,7 +918,7 @@ static int mcp3564_write_raw(struct iio_dev *indio_dev,
mutex_unlock(&adc->lock);
return ret;
case IIO_CHAN_INFO_CALIBBIAS:
if (val < mcp3564_calib_bias[0] && val > mcp3564_calib_bias[2])
if (val < mcp3564_calib_bias[0] || val > mcp3564_calib_bias[2])
return -EINVAL;
mutex_lock(&adc->lock);
@ -928,7 +928,7 @@ static int mcp3564_write_raw(struct iio_dev *indio_dev,
mutex_unlock(&adc->lock);
return ret;
case IIO_CHAN_INFO_CALIBSCALE:
if (val < mcp3564_calib_scale[0] && val > mcp3564_calib_scale[2])
if (val < mcp3564_calib_scale[0] || val > mcp3564_calib_scale[2])
return -EINVAL;
if (adc->calib_scale == val)
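
Both range checks above had && where || belongs: a value can never be simultaneously below the minimum and above the maximum, so the old test rejected nothing. In isolation:

```c
#include <stdio.h>

static int in_range_bad(int val, int min, int max)
{
	return !(val < min && val > max);   /* condition is always false */
}

static int in_range_good(int val, int min, int max)
{
	return !(val < min || val > max);
}

int main(void)
{
	printf("bad:  %d\n", in_range_bad(999, 0, 10));   /* 1 -- wrongly accepted */
	printf("good: %d\n", in_range_good(999, 0, 10));  /* 0 -- rejected */
	return 0;
}
```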
@ -1122,7 +1122,7 @@ static int mcp3564_config(struct iio_dev *indio_dev)
enum mcp3564_ids ids;
int ret = 0;
unsigned int tmp = 0x01;
bool err = true;
bool err = false;
/*
* The address is set on a per-device basis by fuses in the factory,
@ -1509,5 +1509,5 @@ static struct spi_driver mcp3564_driver = {
module_spi_driver(mcp3564_driver);
MODULE_AUTHOR("Marius Cristea <marius.cristea@microchip.com>");
MODULE_DESCRIPTION("Microchip MCP346x/MCP346xR and MCP356x/MCP346xR ADCs");
MODULE_DESCRIPTION("Microchip MCP346x/MCP346xR and MCP356x/MCP356xR ADCs");
MODULE_LICENSE("GPL v2");


@ -1241,6 +1241,20 @@ static const struct meson_sar_adc_param meson_sar_adc_gxl_param = {
.cmv_select = 1,
};
static const struct meson_sar_adc_param meson_sar_adc_axg_param = {
.has_bl30_integration = true,
.clock_rate = 1200000,
.bandgap_reg = MESON_SAR_ADC_REG11,
.regmap_config = &meson_sar_adc_regmap_config_gxbb,
.resolution = 12,
.disable_ring_counter = 1,
.has_reg11 = true,
.vref_volatge = 1,
.has_vref_select = true,
.vref_select = VREF_VDDA,
.cmv_select = 1,
};
static const struct meson_sar_adc_param meson_sar_adc_g12a_param = {
.has_bl30_integration = false,
.clock_rate = 1200000,
@ -1285,7 +1299,7 @@ static const struct meson_sar_adc_data meson_sar_adc_gxm_data = {
};
static const struct meson_sar_adc_data meson_sar_adc_axg_data = {
.param = &meson_sar_adc_gxl_param,
.param = &meson_sar_adc_axg_param,
.name = "meson-axg-saradc",
};


@ -670,8 +670,10 @@ static int tiadc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, indio_dev);
err = tiadc_request_dma(pdev, adc_dev);
if (err && err == -EPROBE_DEFER)
if (err && err != -ENODEV) {
dev_err_probe(&pdev->dev, err, "DMA request failed\n");
goto err_dma;
}
return 0;


@ -46,6 +46,16 @@ int iio_triggered_buffer_setup_ext(struct iio_dev *indio_dev,
struct iio_buffer *buffer;
int ret;
/*
* iio_triggered_buffer_cleanup() assumes that the buffer allocated here
* is assigned to indio_dev->buffer but this is only the case if this
* function is the first caller to iio_device_attach_buffer(). If
* indio_dev->buffer is already set then we can't proceed otherwise the
* cleanup function will try to free a buffer that was not allocated here.
*/
if (indio_dev->buffer)
return -EADDRINUSE;
buffer = iio_kfifo_allocate();
if (!buffer) {
ret = -ENOMEM;


@ -15,8 +15,8 @@
/* Conversion times in us */
static const u16 ms_sensors_ht_t_conversion_time[] = { 50000, 25000,
13000, 7000 };
static const u16 ms_sensors_ht_h_conversion_time[] = { 16000, 3000,
5000, 8000 };
static const u16 ms_sensors_ht_h_conversion_time[] = { 16000, 5000,
3000, 8000 };
static const u16 ms_sensors_tp_conversion_time[] = { 500, 1100, 2100,
4100, 8220, 16440 };


@ -70,8 +70,8 @@
#define ADIS16475_MAX_SCAN_DATA 20
/* spi max speed in burst mode */
#define ADIS16475_BURST_MAX_SPEED 1000000
#define ADIS16475_LSB_DEC_MASK BIT(0)
#define ADIS16475_LSB_FIR_MASK BIT(1)
#define ADIS16475_LSB_DEC_MASK 0
#define ADIS16475_LSB_FIR_MASK 1
#define ADIS16500_BURST_DATA_SEL_0_CHN_MASK GENMASK(5, 0)
#define ADIS16500_BURST_DATA_SEL_1_CHN_MASK GENMASK(12, 7)
@ -1406,6 +1406,59 @@ static int adis16475_config_irq_pin(struct adis16475 *st)
return 0;
}
static int adis16475_probe(struct spi_device *spi)
{
struct iio_dev *indio_dev;
struct adis16475 *st;
int ret;
indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
if (!indio_dev)
return -ENOMEM;
st = iio_priv(indio_dev);
st->info = spi_get_device_match_data(spi);
if (!st->info)
return -EINVAL;
ret = adis_init(&st->adis, indio_dev, spi, &st->info->adis_data);
if (ret)
return ret;
indio_dev->name = st->info->name;
indio_dev->channels = st->info->channels;
indio_dev->num_channels = st->info->num_channels;
indio_dev->info = &adis16475_info;
indio_dev->modes = INDIO_DIRECT_MODE;
ret = __adis_initial_startup(&st->adis);
if (ret)
return ret;
ret = adis16475_config_irq_pin(st);
if (ret)
return ret;
ret = adis16475_config_sync_mode(st);
if (ret)
return ret;
ret = devm_adis_setup_buffer_and_trigger(&st->adis, indio_dev,
adis16475_trigger_handler);
if (ret)
return ret;
ret = devm_iio_device_register(&spi->dev, indio_dev);
if (ret)
return ret;
adis16475_debugfs_init(indio_dev);
return 0;
}
static const struct of_device_id adis16475_of_match[] = {
{ .compatible = "adi,adis16470",
.data = &adis16475_chip_info[ADIS16470] },
@ -1451,57 +1504,30 @@ static const struct of_device_id adis16475_of_match[] = {
};
MODULE_DEVICE_TABLE(of, adis16475_of_match);
static int adis16475_probe(struct spi_device *spi)
{
struct iio_dev *indio_dev;
struct adis16475 *st;
int ret;
indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
if (!indio_dev)
return -ENOMEM;
st = iio_priv(indio_dev);
st->info = device_get_match_data(&spi->dev);
if (!st->info)
return -EINVAL;
ret = adis_init(&st->adis, indio_dev, spi, &st->info->adis_data);
if (ret)
return ret;
indio_dev->name = st->info->name;
indio_dev->channels = st->info->channels;
indio_dev->num_channels = st->info->num_channels;
indio_dev->info = &adis16475_info;
indio_dev->modes = INDIO_DIRECT_MODE;
ret = __adis_initial_startup(&st->adis);
if (ret)
return ret;
ret = adis16475_config_irq_pin(st);
if (ret)
return ret;
ret = adis16475_config_sync_mode(st);
if (ret)
return ret;
ret = devm_adis_setup_buffer_and_trigger(&st->adis, indio_dev,
adis16475_trigger_handler);
if (ret)
return ret;
ret = devm_iio_device_register(&spi->dev, indio_dev);
if (ret)
return ret;
adis16475_debugfs_init(indio_dev);
return 0;
}
static const struct spi_device_id adis16475_ids[] = {
{ "adis16470", (kernel_ulong_t)&adis16475_chip_info[ADIS16470] },
{ "adis16475-1", (kernel_ulong_t)&adis16475_chip_info[ADIS16475_1] },
{ "adis16475-2", (kernel_ulong_t)&adis16475_chip_info[ADIS16475_2] },
{ "adis16475-3", (kernel_ulong_t)&adis16475_chip_info[ADIS16475_3] },
{ "adis16477-1", (kernel_ulong_t)&adis16475_chip_info[ADIS16477_1] },
{ "adis16477-2", (kernel_ulong_t)&adis16475_chip_info[ADIS16477_2] },
{ "adis16477-3", (kernel_ulong_t)&adis16475_chip_info[ADIS16477_3] },
{ "adis16465-1", (kernel_ulong_t)&adis16475_chip_info[ADIS16465_1] },
{ "adis16465-2", (kernel_ulong_t)&adis16475_chip_info[ADIS16465_2] },
{ "adis16465-3", (kernel_ulong_t)&adis16475_chip_info[ADIS16465_3] },
{ "adis16467-1", (kernel_ulong_t)&adis16475_chip_info[ADIS16467_1] },
{ "adis16467-2", (kernel_ulong_t)&adis16475_chip_info[ADIS16467_2] },
{ "adis16467-3", (kernel_ulong_t)&adis16475_chip_info[ADIS16467_3] },
{ "adis16500", (kernel_ulong_t)&adis16475_chip_info[ADIS16500] },
{ "adis16505-1", (kernel_ulong_t)&adis16475_chip_info[ADIS16505_1] },
{ "adis16505-2", (kernel_ulong_t)&adis16475_chip_info[ADIS16505_2] },
{ "adis16505-3", (kernel_ulong_t)&adis16475_chip_info[ADIS16505_3] },
{ "adis16507-1", (kernel_ulong_t)&adis16475_chip_info[ADIS16507_1] },
{ "adis16507-2", (kernel_ulong_t)&adis16475_chip_info[ADIS16507_2] },
{ "adis16507-3", (kernel_ulong_t)&adis16475_chip_info[ADIS16507_3] },
{ }
};
MODULE_DEVICE_TABLE(spi, adis16475_ids);
static struct spi_driver adis16475_driver = {
.driver = {
@ -1509,6 +1535,7 @@ static struct spi_driver adis16475_driver = {
.of_match_table = adis16475_of_match,
},
.probe = adis16475_probe,
.id_table = adis16475_ids,
};
module_spi_driver(adis16475_driver);


@ -750,13 +750,13 @@ inv_mpu6050_read_raw(struct iio_dev *indio_dev,
ret = inv_mpu6050_sensor_show(st, st->reg->gyro_offset,
chan->channel2, val);
mutex_unlock(&st->lock);
return IIO_VAL_INT;
return ret;
case IIO_ACCEL:
mutex_lock(&st->lock);
ret = inv_mpu6050_sensor_show(st, st->reg->accl_offset,
chan->channel2, val);
mutex_unlock(&st->lock);
return IIO_VAL_INT;
return ret;
default:
return -EINVAL;


@ -14,11 +14,8 @@
#include "../common/hid-sensors/hid-sensor-trigger.h"
enum {
CHANNEL_SCAN_INDEX_INTENSITY,
CHANNEL_SCAN_INDEX_ILLUM,
CHANNEL_SCAN_INDEX_COLOR_TEMP,
CHANNEL_SCAN_INDEX_CHROMATICITY_X,
CHANNEL_SCAN_INDEX_CHROMATICITY_Y,
CHANNEL_SCAN_INDEX_INTENSITY = 0,
CHANNEL_SCAN_INDEX_ILLUM = 1,
CHANNEL_SCAN_INDEX_MAX
};
@ -68,40 +65,6 @@ static const struct iio_chan_spec als_channels[] = {
BIT(IIO_CHAN_INFO_HYSTERESIS_RELATIVE),
.scan_index = CHANNEL_SCAN_INDEX_ILLUM,
},
{
.type = IIO_COLORTEMP,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) |
BIT(IIO_CHAN_INFO_SCALE) |
BIT(IIO_CHAN_INFO_SAMP_FREQ) |
BIT(IIO_CHAN_INFO_HYSTERESIS) |
BIT(IIO_CHAN_INFO_HYSTERESIS_RELATIVE),
.scan_index = CHANNEL_SCAN_INDEX_COLOR_TEMP,
},
{
.type = IIO_CHROMATICITY,
.modified = 1,
.channel2 = IIO_MOD_X,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) |
BIT(IIO_CHAN_INFO_SCALE) |
BIT(IIO_CHAN_INFO_SAMP_FREQ) |
BIT(IIO_CHAN_INFO_HYSTERESIS) |
BIT(IIO_CHAN_INFO_HYSTERESIS_RELATIVE),
.scan_index = CHANNEL_SCAN_INDEX_CHROMATICITY_X,
},
{
.type = IIO_CHROMATICITY,
.modified = 1,
.channel2 = IIO_MOD_Y,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) |
BIT(IIO_CHAN_INFO_SCALE) |
BIT(IIO_CHAN_INFO_SAMP_FREQ) |
BIT(IIO_CHAN_INFO_HYSTERESIS) |
BIT(IIO_CHAN_INFO_HYSTERESIS_RELATIVE),
.scan_index = CHANNEL_SCAN_INDEX_CHROMATICITY_Y,
},
IIO_CHAN_SOFT_TIMESTAMP(CHANNEL_SCAN_INDEX_TIMESTAMP)
};
@ -140,21 +103,6 @@ static int als_read_raw(struct iio_dev *indio_dev,
min = als_state->als[chan->scan_index].logical_minimum;
address = HID_USAGE_SENSOR_LIGHT_ILLUM;
break;
case CHANNEL_SCAN_INDEX_COLOR_TEMP:
report_id = als_state->als[chan->scan_index].report_id;
min = als_state->als[chan->scan_index].logical_minimum;
address = HID_USAGE_SENSOR_LIGHT_COLOR_TEMPERATURE;
break;
case CHANNEL_SCAN_INDEX_CHROMATICITY_X:
report_id = als_state->als[chan->scan_index].report_id;
min = als_state->als[chan->scan_index].logical_minimum;
address = HID_USAGE_SENSOR_LIGHT_CHROMATICITY_X;
break;
case CHANNEL_SCAN_INDEX_CHROMATICITY_Y:
report_id = als_state->als[chan->scan_index].report_id;
min = als_state->als[chan->scan_index].logical_minimum;
address = HID_USAGE_SENSOR_LIGHT_CHROMATICITY_Y;
break;
default:
report_id = -1;
break;
@ -275,18 +223,6 @@ static int als_capture_sample(struct hid_sensor_hub_device *hsdev,
als_state->scan.illum[CHANNEL_SCAN_INDEX_ILLUM] = sample_data;
ret = 0;
break;
case HID_USAGE_SENSOR_LIGHT_COLOR_TEMPERATURE:
als_state->scan.illum[CHANNEL_SCAN_INDEX_COLOR_TEMP] = sample_data;
ret = 0;
break;
case HID_USAGE_SENSOR_LIGHT_CHROMATICITY_X:
als_state->scan.illum[CHANNEL_SCAN_INDEX_CHROMATICITY_X] = sample_data;
ret = 0;
break;
case HID_USAGE_SENSOR_LIGHT_CHROMATICITY_Y:
als_state->scan.illum[CHANNEL_SCAN_INDEX_CHROMATICITY_Y] = sample_data;
ret = 0;
break;
case HID_USAGE_SENSOR_TIME_TIMESTAMP:
als_state->timestamp = hid_sensor_convert_timestamp(&als_state->common_attributes,
*(s64 *)raw_data);
@ -322,38 +258,6 @@ static int als_parse_report(struct platform_device *pdev,
st->als[i].report_id);
}
ret = sensor_hub_input_get_attribute_info(hsdev, HID_INPUT_REPORT,
usage_id,
HID_USAGE_SENSOR_LIGHT_COLOR_TEMPERATURE,
&st->als[CHANNEL_SCAN_INDEX_COLOR_TEMP]);
if (ret < 0)
return ret;
als_adjust_channel_bit_mask(channels, CHANNEL_SCAN_INDEX_COLOR_TEMP,
st->als[CHANNEL_SCAN_INDEX_COLOR_TEMP].size);
dev_dbg(&pdev->dev, "als %x:%x\n",
st->als[CHANNEL_SCAN_INDEX_COLOR_TEMP].index,
st->als[CHANNEL_SCAN_INDEX_COLOR_TEMP].report_id);
for (i = 0; i < 2; i++) {
int next_scan_index = CHANNEL_SCAN_INDEX_CHROMATICITY_X + i;
ret = sensor_hub_input_get_attribute_info(hsdev,
HID_INPUT_REPORT, usage_id,
HID_USAGE_SENSOR_LIGHT_CHROMATICITY_X + i,
&st->als[next_scan_index]);
if (ret < 0)
return ret;
als_adjust_channel_bit_mask(channels,
CHANNEL_SCAN_INDEX_CHROMATICITY_X + i,
st->als[next_scan_index].size);
dev_dbg(&pdev->dev, "als %x:%x\n",
st->als[next_scan_index].index,
st->als[next_scan_index].report_id);
}
st->scale_precision = hid_sensor_format_scale(usage_id,
&st->als[CHANNEL_SCAN_INDEX_INTENSITY],
&st->scale_pre_decml, &st->scale_post_decml);


@ -356,7 +356,7 @@ static int tmag5273_read_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_OFFSET:
switch (chan->type) {
case IIO_TEMP:
*val = -266314;
*val = -16005;
return IIO_VAL_INT;
default:
return -EINVAL;


@ -286,6 +286,7 @@ static const struct xpad_device {
{ 0x146b, 0x0604, "Bigben Interactive DAIJA Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
{ 0x1532, 0x0a00, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
{ 0x1532, 0x0a03, "Razer Wildcat", 0, XTYPE_XBOXONE },
{ 0x1532, 0x0a29, "Razer Wolverine V2", 0, XTYPE_XBOXONE },
{ 0x15e4, 0x3f00, "Power A Mini Pro Elite", 0, XTYPE_XBOX360 },
{ 0x15e4, 0x3f0a, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 },
{ 0x15e4, 0x3f10, "Batarang Xbox 360 controller", 0, XTYPE_XBOX360 },


@ -765,6 +765,44 @@ static void atkbd_deactivate(struct atkbd *atkbd)
ps2dev->serio->phys);
}
#ifdef CONFIG_X86
static bool atkbd_is_portable_device(void)
{
static const char * const chassis_types[] = {
"8", /* Portable */
"9", /* Laptop */
"10", /* Notebook */
"14", /* Sub-Notebook */
"31", /* Convertible */
"32", /* Detachable */
};
int i;
for (i = 0; i < ARRAY_SIZE(chassis_types); i++)
if (dmi_match(DMI_CHASSIS_TYPE, chassis_types[i]))
return true;
return false;
}
/*
* On many modern laptops ATKBD_CMD_GETID may cause problems; on these laptops
* the controller is always in translated mode. In this mode mice/touchpads will
* not work. So in this case simply assume a keyboard is connected to avoid
* confusing some laptop keyboards.
*
* Skipping ATKBD_CMD_GETID ends up using a fake keyboard id. Using a fake id is
* ok in translated mode, only atkbd_select_set() checks atkbd->id and in
* translated mode that is a no-op.
*/
static bool atkbd_skip_getid(struct atkbd *atkbd)
{
return atkbd->translated && atkbd_is_portable_device();
}
#else
static inline bool atkbd_skip_getid(struct atkbd *atkbd) { return false; }
#endif
/*
* atkbd_probe() probes for an AT keyboard on a serio port.
*/
@ -794,12 +832,12 @@ static int atkbd_probe(struct atkbd *atkbd)
*/
param[0] = param[1] = 0xa5; /* initialize with invalid values */
if (ps2_command(ps2dev, param, ATKBD_CMD_GETID)) {
if (atkbd_skip_getid(atkbd) || ps2_command(ps2dev, param, ATKBD_CMD_GETID)) {
/*
* If the get ID command failed, we check if we can at least set the LEDs on
* the keyboard. This should work on every keyboard out there. It also turns
* the LEDs off, which we want anyway.
* If the get ID command was skipped or failed, we check if we can at least set
* the LEDs on the keyboard. This should work on every keyboard out there.
* It also turns the LEDs off, which we want anyway.
*/
param[0] = 0;
if (ps2_command(ps2dev, param, ATKBD_CMD_SETLEDS))


@ -105,6 +105,9 @@ static int micro_key_probe(struct platform_device *pdev)
keys->codes = devm_kmemdup(&pdev->dev, micro_keycodes,
keys->input->keycodesize * keys->input->keycodemax,
GFP_KERNEL);
if (!keys->codes)
return -ENOMEM;
keys->input->keycode = keys->codes;
__set_bit(EV_KEY, keys->input->evbit);


@ -299,6 +299,11 @@ static int soc_button_parse_btn_desc(struct device *dev,
info->name = "power";
info->event_code = KEY_POWER;
info->wakeup = true;
} else if (upage == 0x01 && usage == 0xc6) {
info->name = "airplane mode switch";
info->event_type = EV_SW;
info->event_code = SW_RFKILL_ALL;
info->active_low = false;
} else if (upage == 0x01 && usage == 0xca) {
info->name = "rotation lock switch";
info->event_type = EV_SW;

View File

@ -125,16 +125,15 @@ static int __init amimouse_probe(struct platform_device *pdev)
return 0;
}
static int __exit amimouse_remove(struct platform_device *pdev)
static void __exit amimouse_remove(struct platform_device *pdev)
{
struct input_dev *dev = platform_get_drvdata(pdev);
input_unregister_device(dev);
return 0;
}
static struct platform_driver amimouse_driver = {
.remove = __exit_p(amimouse_remove),
.remove_new = __exit_p(amimouse_remove),
.driver = {
.name = "amiga-mouse",
},


@ -183,6 +183,7 @@ static const char * const smbus_pnp_ids[] = {
"LEN009b", /* T580 */
"LEN0402", /* X1 Extreme Gen 2 / P1 Gen 2 */
"LEN040f", /* P1 Gen 3 */
"LEN0411", /* L14 Gen 1 */
"LEN200f", /* T450s */
"LEN2044", /* L470 */
"LEN2054", /* E480 */


@ -360,6 +360,14 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
},
.driver_data = (void *)(SERIO_QUIRK_DRITEK)
},
{
/* Acer TravelMate P459-G2-M */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate P459-G2-M"),
},
.driver_data = (void *)(SERIO_QUIRK_NOMUX)
},
{
/* Amoi M636/A737 */
.matches = {
