Merge branch 'i2c/for-current' into i2c/for-mergewindow
commit c55526a1c1
@ -17,9 +17,10 @@ properties:
oneOf:
- enum:
- fsl,imx7ulp-lpi2c
- fsl,imx8qm-lpi2c
- items:
- const: fsl,imx8qxp-lpi2c
- enum:
- fsl,imx8qxp-lpi2c
- fsl,imx8qm-lpi2c
- const: fsl,imx7ulp-lpi2c

reg:
@ -50,11 +50,11 @@ ksmbd.mountd (user space daemon)
--------------------------------

ksmbd.mountd is userspace process to, transfer user account and password that
are registered using ksmbd.adduser(part of utils for user space). Further it
are registered using ksmbd.adduser (part of utils for user space). Further it
allows sharing information parameters that parsed from smb.conf to ksmbd in
kernel. For the execution part it has a daemon which is continuously running
and connected to the kernel interface using netlink socket, it waits for the
requests(dcerpc and share/user info). It handles RPC calls (at a minimum few
requests (dcerpc and share/user info). It handles RPC calls (at a minimum few
dozen) that are most important for file server from NetShareEnum and
NetServerGetInfo. Complete DCE/RPC response is prepared from the user space
and passed over to the associated kernel thread for the client.
@ -154,11 +154,11 @@ Each layer
1. Enable all component prints
# sudo ksmbd.control -d "all"

2. Enable one of components(smb, auth, vfs, oplock, ipc, conn, rdma)
2. Enable one of components (smb, auth, vfs, oplock, ipc, conn, rdma)
# sudo ksmbd.control -d "smb"

3. Show what prints are enable.
# cat/sys/class/ksmbd-control/debug
3. Show what prints are enabled.
# cat /sys/class/ksmbd-control/debug
[smb] auth vfs oplock ipc conn [rdma]

4. Disable prints:
@ -36,6 +36,8 @@ Key to symbols

=============== =============================================================
S Start condition
Sr Repeated start condition, used to switch from write to
read mode.
P Stop condition
Rd/Wr (1 bit) Read/Write bit. Rd equals 1, Wr equals 0.
A, NA (1 bit) Acknowledge (ACK) and Not Acknowledge (NACK) bit
@ -100,7 +102,7 @@ Implemented by i2c_smbus_read_byte_data()
This reads a single byte from a device, from a designated register.
The register is specified through the Comm byte::

S Addr Wr [A] Comm [A] S Addr Rd [A] [Data] NA P
S Addr Wr [A] Comm [A] Sr Addr Rd [A] [Data] NA P

Functionality flag: I2C_FUNC_SMBUS_READ_BYTE_DATA
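For illustration only (this is not part of the commit above): a minimal sketch of a client driver issuing this transaction through the kernel's SMBus helper; the client handle and the EXAMPLE_REG register offset are assumptions::

    #include <linux/i2c.h>

    #define EXAMPLE_REG 0x01    /* hypothetical device register */

    /* Read one byte from EXAMPLE_REG using the Read Byte Data protocol. */
    static int example_read_reg(struct i2c_client *client)
    {
            s32 val = i2c_smbus_read_byte_data(client, EXAMPLE_REG);

            if (val < 0)
                    return val;     /* negative errno from the adapter */
            return val & 0xff;      /* the byte returned in [Data] */
    }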
@ -114,7 +116,7 @@ This operation is very like Read Byte; again, data is read from a
device, from a designated register that is specified through the Comm
byte. But this time, the data is a complete word (16 bits)::

S Addr Wr [A] Comm [A] S Addr Rd [A] [DataLow] A [DataHigh] NA P
S Addr Wr [A] Comm [A] Sr Addr Rd [A] [DataLow] A [DataHigh] NA P

Functionality flag: I2C_FUNC_SMBUS_READ_WORD_DATA
@ -164,7 +166,7 @@ This command selects a device register (through the Comm byte), sends
16 bits of data to it, and reads 16 bits of data in return::

S Addr Wr [A] Comm [A] DataLow [A] DataHigh [A]
S Addr Rd [A] [DataLow] A [DataHigh] NA P
Sr Addr Rd [A] [DataLow] A [DataHigh] NA P

Functionality flag: I2C_FUNC_SMBUS_PROC_CALL
@ -181,7 +183,7 @@ of data is specified by the device in the Count byte.
::

S Addr Wr [A] Comm [A]
S Addr Rd [A] [Count] A [Data] A [Data] A ... A [Data] NA P
Sr Addr Rd [A] [Count] A [Data] A [Data] A ... A [Data] NA P

Functionality flag: I2C_FUNC_SMBUS_READ_BLOCK_DATA
@ -212,7 +214,7 @@ This command selects a device register (through the Comm byte), sends
1 to 31 bytes of data to it, and reads 1 to 31 bytes of data in return::

S Addr Wr [A] Comm [A] Count [A] Data [A] ...
S Addr Rd [A] [Count] A [Data] ... A P
Sr Addr Rd [A] [Count] A [Data] ... A P

Functionality flag: I2C_FUNC_SMBUS_BLOCK_PROC_CALL
@ -300,7 +302,7 @@ This command reads a block of bytes from a device, from a
designated register that is specified through the Comm byte::

S Addr Wr [A] Comm [A]
S Addr Rd [A] [Data] A [Data] A ... A [Data] NA P
Sr Addr Rd [A] [Data] A [Data] A ... A [Data] NA P

Functionality flag: I2C_FUNC_SMBUS_READ_I2C_BLOCK
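Again for illustration only (not from this commit): reading such a register block with the corresponding kernel helper; the register offset and length below are assumptions::

    #include <linux/i2c.h>

    #define EXAMPLE_BLOCK_REG 0x10  /* hypothetical starting register */
    #define EXAMPLE_BLOCK_LEN 8     /* bytes to read, at most I2C_SMBUS_BLOCK_MAX */

    /* Read EXAMPLE_BLOCK_LEN bytes starting at EXAMPLE_BLOCK_REG. */
    static int example_read_block(struct i2c_client *client, u8 *buf)
    {
            s32 n = i2c_smbus_read_i2c_block_data(client, EXAMPLE_BLOCK_REG,
                                                  EXAMPLE_BLOCK_LEN, buf);

            return n;   /* negative errno on failure, else bytes actually read */
    }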
@ -37,8 +37,7 @@ conn_reuse_mode - INTEGER

0: disable any special handling on port reuse. The new
connection will be delivered to the same real server that was
servicing the previous connection. This will effectively
disable expire_nodest_conn.
servicing the previous connection.

bit 1: enable rescheduling of new connections when it is safe.
That is, whenever expire_nodest_conn and for TCP sockets, when
@ -486,8 +486,8 @@ of packets.
Drivers are free to use a more permissive configuration than the requested
configuration. It is expected that drivers should only implement directly the
most generic mode that can be supported. For example if the hardware can
support HWTSTAMP_FILTER_V2_EVENT, then it should generally always upscale
HWTSTAMP_FILTER_V2_L2_SYNC_MESSAGE, and so forth, as HWTSTAMP_FILTER_V2_EVENT
support HWTSTAMP_FILTER_PTP_V2_EVENT, then it should generally always upscale
HWTSTAMP_FILTER_PTP_V2_L2_SYNC, and so forth, as HWTSTAMP_FILTER_PTP_V2_EVENT
is more generic (and more useful to applications).

A driver which supports hardware time stamping shall update the struct
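Purely as an illustration of the upscaling rule described in the hunk above (not code from this commit), a driver's timestamping configuration handler might map the more specific PTP v2 filters onto the generic event filter it actually implements; the function and its context are assumptions::

    #include <linux/net_tstamp.h>
    #include <linux/errno.h>

    /* Sketch: upscale specific PTP v2 filter requests to the generic
     * HWTSTAMP_FILTER_PTP_V2_EVENT mode the hardware (hypothetically)
     * supports; reject anything else.
     */
    static int example_upscale_rx_filter(struct hwtstamp_config *config)
    {
            switch (config->rx_filter) {
            case HWTSTAMP_FILTER_NONE:
                    break;
            case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
            case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
            case HWTSTAMP_FILTER_PTP_V2_EVENT:
                    config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
                    break;
            default:
                    return -ERANGE;
            }
            return 0;
    }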
MAINTAINERS | 19
@ -2263,6 +2263,15 @@ L: linux-iio@vger.kernel.org
S: Maintained
F: drivers/counter/microchip-tcb-capture.c

ARM/MILBEAUT ARCHITECTURE
M: Taichi Sugaya <sugaya.taichi@socionext.com>
M: Takao Orito <orito.takao@socionext.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/boot/dts/milbeaut*
F: arch/arm/mach-milbeaut/
N: milbeaut

ARM/MIOA701 MACHINE SUPPORT
M: Robert Jarzmik <robert.jarzmik@free.fr>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@ -2729,10 +2738,11 @@ S: Maintained
F: drivers/memory/*emif*

ARM/TEXAS INSTRUMENT KEYSTONE ARCHITECTURE
M: Nishanth Menon <nm@ti.com>
M: Santosh Shilimkar <ssantosh@kernel.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/ssantosh/linux-keystone.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/ti/linux.git
F: arch/arm/boot/dts/keystone-*
F: arch/arm/mach-keystone/
@ -3570,13 +3580,14 @@ L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/broadcom/b44.*

BROADCOM B53 ETHERNET SWITCH DRIVER
BROADCOM B53/SF2 ETHERNET SWITCH DRIVER
M: Florian Fainelli <f.fainelli@gmail.com>
L: netdev@vger.kernel.org
L: openwrt-devel@lists.openwrt.org (subscribers-only)
S: Supported
F: Documentation/devicetree/bindings/net/dsa/brcm,b53.yaml
F: drivers/net/dsa/b53/*
F: drivers/net/dsa/bcm_sf2*
F: include/linux/dsa/brcm.h
F: include/linux/platform_data/b53.h
@ -18483,6 +18494,7 @@ F: include/uapi/linux/pkt_sched.h
F: include/uapi/linux/tc_act/
F: include/uapi/linux/tc_ematch/
F: net/sched/
F: tools/testing/selftests/tc-testing

TC90522 MEDIA DRIVER
M: Akihiro Tsukada <tskd08@gmail.com>
@ -19031,11 +19043,12 @@ F: drivers/mmc/host/tifm_sd.c
F: include/linux/tifm.h

TI KEYSTONE MULTICORE NAVIGATOR DRIVERS
M: Nishanth Menon <nm@ti.com>
M: Santosh Shilimkar <ssantosh@kernel.org>
L: linux-kernel@vger.kernel.org
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/ssantosh/linux-keystone.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/ti/linux.git
F: drivers/soc/ti/*

TI LM49xxx FAMILY ASoC CODEC DRIVERS
Makefile | 4
@ -2,8 +2,8 @@
VERSION = 5
PATCHLEVEL = 16
SUBLEVEL = 0
EXTRAVERSION = -rc2
NAME = Trick or Treat
EXTRAVERSION = -rc3
NAME = Gobble Gobble

# *DOCUMENTATION*
# To see a list of typical targets execute "make help"
arch/Kconfig | 10
@ -991,6 +991,16 @@ config HAVE_ARCH_COMPAT_MMAP_BASES
and vice-versa 32-bit applications to call 64-bit mmap().
Required for applications doing different bitness syscalls.

config PAGE_SIZE_LESS_THAN_64KB
def_bool y
depends on !ARM64_64K_PAGES
depends on !IA64_PAGE_SIZE_64KB
depends on !PAGE_SIZE_64KB
depends on !PARISC_PAGE_SIZE_64KB
depends on !PPC_64K_PAGES
depends on !PPC_256K_PAGES
depends on !PAGE_SIZE_256KB

# This allows to use a set of generic functions to determine mmap base
# address by giving priority to top-down scheme only if the process
# is not in legacy mode (compat task, unlimited stack size or
@ -488,3 +488,4 @@
|
||||
556 common landlock_restrict_self sys_landlock_restrict_self
|
||||
# 557 reserved for memfd_secret
|
||||
558 common process_mrelease sys_process_mrelease
|
||||
559 common futex_waitv sys_futex_waitv
|
||||
|
@ -36,7 +36,6 @@ void __flush_dcache_page(phys_addr_t paddr, unsigned long vaddr);
|
||||
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
|
||||
|
||||
void flush_dcache_page(struct page *page);
|
||||
void flush_dcache_folio(struct folio *folio);
|
||||
|
||||
void dma_cache_wback_inv(phys_addr_t start, unsigned long sz);
|
||||
void dma_cache_inv(phys_addr_t start, unsigned long sz);
|
||||
|
@ -506,11 +506,17 @@
|
||||
#address-cells = <3>;
|
||||
#interrupt-cells = <1>;
|
||||
#size-cells = <2>;
|
||||
interrupts = <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>,
|
||||
interrupts = <GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>,
|
||||
<GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
|
||||
interrupt-names = "pcie", "msi";
|
||||
interrupt-map-mask = <0x0 0x0 0x0 0x7>;
|
||||
interrupt-map = <0 0 0 1 &gicv2 GIC_SPI 143
|
||||
IRQ_TYPE_LEVEL_HIGH>,
|
||||
<0 0 0 2 &gicv2 GIC_SPI 144
|
||||
IRQ_TYPE_LEVEL_HIGH>,
|
||||
<0 0 0 3 &gicv2 GIC_SPI 145
|
||||
IRQ_TYPE_LEVEL_HIGH>,
|
||||
<0 0 0 4 &gicv2 GIC_SPI 146
|
||||
IRQ_TYPE_LEVEL_HIGH>;
|
||||
msi-controller;
|
||||
msi-parent = <&pcie0>;
|
||||
|
@ -242,6 +242,8 @@
|
||||
|
||||
gpio-controller;
|
||||
#gpio-cells = <2>;
|
||||
interrupt-controller;
|
||||
#interrupt-cells = <2>;
|
||||
};
|
||||
|
||||
pcie0: pcie@12000 {
|
||||
@ -408,7 +410,7 @@
|
||||
i2c0: i2c@18009000 {
|
||||
compatible = "brcm,iproc-i2c";
|
||||
reg = <0x18009000 0x50>;
|
||||
interrupts = <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>;
|
||||
interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
clock-frequency = <100000>;
|
||||
|
@ -290,7 +290,6 @@ extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr
|
||||
*/
|
||||
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
|
||||
extern void flush_dcache_page(struct page *);
|
||||
void flush_dcache_folio(struct folio *folio);
|
||||
|
||||
#define ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE 1
|
||||
static inline void flush_kernel_vmap_range(void *addr, int size)
|
||||
|
@ -33,7 +33,7 @@ extern void __iomem *sdr_ctl_base_addr;
|
||||
u32 socfpga_sdram_self_refresh(u32 sdr_base);
|
||||
extern unsigned int socfpga_sdram_self_refresh_sz;
|
||||
|
||||
extern char secondary_trampoline, secondary_trampoline_end;
|
||||
extern char secondary_trampoline[], secondary_trampoline_end[];
|
||||
|
||||
extern unsigned long socfpga_cpu1start_addr;
|
||||
|
||||
|
@ -20,14 +20,14 @@
|
||||
|
||||
static int socfpga_boot_secondary(unsigned int cpu, struct task_struct *idle)
|
||||
{
|
||||
int trampoline_size = &secondary_trampoline_end - &secondary_trampoline;
|
||||
int trampoline_size = secondary_trampoline_end - secondary_trampoline;
|
||||
|
||||
if (socfpga_cpu1start_addr) {
|
||||
/* This will put CPU #1 into reset. */
|
||||
writel(RSTMGR_MPUMODRST_CPU1,
|
||||
rst_manager_base_addr + SOCFPGA_RSTMGR_MODMPURST);
|
||||
|
||||
memcpy(phys_to_virt(0), &secondary_trampoline, trampoline_size);
|
||||
memcpy(phys_to_virt(0), secondary_trampoline, trampoline_size);
|
||||
|
||||
writel(__pa_symbol(secondary_startup),
|
||||
sys_manager_base_addr + (socfpga_cpu1start_addr & 0x000000ff));
|
||||
@ -45,12 +45,12 @@ static int socfpga_boot_secondary(unsigned int cpu, struct task_struct *idle)
|
||||
|
||||
static int socfpga_a10_boot_secondary(unsigned int cpu, struct task_struct *idle)
|
||||
{
|
||||
int trampoline_size = &secondary_trampoline_end - &secondary_trampoline;
|
||||
int trampoline_size = secondary_trampoline_end - secondary_trampoline;
|
||||
|
||||
if (socfpga_cpu1start_addr) {
|
||||
writel(RSTMGR_MPUMODRST_CPU1, rst_manager_base_addr +
|
||||
SOCFPGA_A10_RSTMGR_MODMPURST);
|
||||
memcpy(phys_to_virt(0), &secondary_trampoline, trampoline_size);
|
||||
memcpy(phys_to_virt(0), secondary_trampoline, trampoline_size);
|
||||
|
||||
writel(__pa_symbol(secondary_startup),
|
||||
sys_manager_base_addr + (socfpga_cpu1start_addr & 0x00000fff));
|
||||
|
@ -296,8 +296,7 @@
|
||||
pinctrl-0 = <&ufs_rst_n &ufs_refclk_out>;
|
||||
phys = <&ufs_0_phy>;
|
||||
phy-names = "ufs-phy";
|
||||
samsung,sysreg = <&syscon_fsys2>;
|
||||
samsung,ufs-shareability-reg-offset = <0x710>;
|
||||
samsung,sysreg = <&syscon_fsys2 0x710>;
|
||||
status = "disabled";
|
||||
};
|
||||
};
|
||||
|
@ -12,6 +12,17 @@
|
||||
|
||||
#define HAVE_FUNCTION_GRAPH_FP_TEST
|
||||
|
||||
/*
|
||||
* HAVE_FUNCTION_GRAPH_RET_ADDR_PTR means that the architecture can provide a
|
||||
* "return address pointer" which can be used to uniquely identify a return
|
||||
* address which has been overwritten.
|
||||
*
|
||||
* On arm64 we use the address of the caller's frame record, which remains the
|
||||
* same for the lifetime of the instrumented function, unlike the return
|
||||
* address in the LR.
|
||||
*/
|
||||
#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
|
||||
|
||||
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
|
||||
#define ARCH_SUPPORTS_FTRACE_OPS 1
|
||||
#else
|
||||
|
@ -76,7 +76,7 @@ static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t ptep,
|
||||
static inline void
|
||||
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
|
||||
{
|
||||
VM_BUG_ON(mm != &init_mm);
|
||||
VM_BUG_ON(mm && mm != &init_mm);
|
||||
__pmd_populate(pmdp, __pa(ptep), PMD_TYPE_TABLE | PMD_TABLE_UXN);
|
||||
}
|
||||
|
||||
|
@ -47,9 +47,6 @@ struct stack_info {
|
||||
* @prev_type: The type of stack this frame record was on, or a synthetic
|
||||
* value of STACK_TYPE_UNKNOWN. This is used to detect a
|
||||
* transition from one stack to another.
|
||||
*
|
||||
* @graph: When FUNCTION_GRAPH_TRACER is selected, holds the index of a
|
||||
* replacement lr value in the ftrace graph stack.
|
||||
*/
|
||||
struct stackframe {
|
||||
unsigned long fp;
|
||||
@ -57,9 +54,6 @@ struct stackframe {
|
||||
DECLARE_BITMAP(stacks_done, __NR_STACK_TYPES);
|
||||
unsigned long prev_fp;
|
||||
enum stack_type prev_type;
|
||||
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
|
||||
int graph;
|
||||
#endif
|
||||
#ifdef CONFIG_KRETPROBES
|
||||
struct llist_node *kr_cur;
|
||||
#endif
|
||||
|
@ -281,12 +281,22 @@ do { \
|
||||
(x) = (__force __typeof__(*(ptr)))__gu_val; \
|
||||
} while (0)
|
||||
|
||||
/*
|
||||
* We must not call into the scheduler between uaccess_ttbr0_enable() and
|
||||
* uaccess_ttbr0_disable(). As `x` and `ptr` could contain blocking functions,
|
||||
* we must evaluate these outside of the critical section.
|
||||
*/
|
||||
#define __raw_get_user(x, ptr, err) \
|
||||
do { \
|
||||
__typeof__(*(ptr)) __user *__rgu_ptr = (ptr); \
|
||||
__typeof__(x) __rgu_val; \
|
||||
__chk_user_ptr(ptr); \
|
||||
\
|
||||
uaccess_ttbr0_enable(); \
|
||||
__raw_get_mem("ldtr", x, ptr, err); \
|
||||
__raw_get_mem("ldtr", __rgu_val, __rgu_ptr, err); \
|
||||
uaccess_ttbr0_disable(); \
|
||||
\
|
||||
(x) = __rgu_val; \
|
||||
} while (0)
|
||||
|
||||
#define __get_user_error(x, ptr, err) \
|
||||
@ -310,14 +320,22 @@ do { \
|
||||
|
||||
#define get_user __get_user
|
||||
|
||||
/*
|
||||
* We must not call into the scheduler between __uaccess_enable_tco_async() and
|
||||
* __uaccess_disable_tco_async(). As `dst` and `src` may contain blocking
|
||||
* functions, we must evaluate these outside of the critical section.
|
||||
*/
|
||||
#define __get_kernel_nofault(dst, src, type, err_label) \
|
||||
do { \
|
||||
__typeof__(dst) __gkn_dst = (dst); \
|
||||
__typeof__(src) __gkn_src = (src); \
|
||||
int __gkn_err = 0; \
|
||||
\
|
||||
__uaccess_enable_tco_async(); \
|
||||
__raw_get_mem("ldr", *((type *)(dst)), \
|
||||
(__force type *)(src), __gkn_err); \
|
||||
__raw_get_mem("ldr", *((type *)(__gkn_dst)), \
|
||||
(__force type *)(__gkn_src), __gkn_err); \
|
||||
__uaccess_disable_tco_async(); \
|
||||
\
|
||||
if (unlikely(__gkn_err)) \
|
||||
goto err_label; \
|
||||
} while (0)
|
||||
@ -351,11 +369,19 @@ do { \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
/*
|
||||
* We must not call into the scheduler between uaccess_ttbr0_enable() and
|
||||
* uaccess_ttbr0_disable(). As `x` and `ptr` could contain blocking functions,
|
||||
* we must evaluate these outside of the critical section.
|
||||
*/
|
||||
#define __raw_put_user(x, ptr, err) \
|
||||
do { \
|
||||
__chk_user_ptr(ptr); \
|
||||
__typeof__(*(ptr)) __user *__rpu_ptr = (ptr); \
|
||||
__typeof__(*(ptr)) __rpu_val = (x); \
|
||||
__chk_user_ptr(__rpu_ptr); \
|
||||
\
|
||||
uaccess_ttbr0_enable(); \
|
||||
__raw_put_mem("sttr", x, ptr, err); \
|
||||
__raw_put_mem("sttr", __rpu_val, __rpu_ptr, err); \
|
||||
uaccess_ttbr0_disable(); \
|
||||
} while (0)
|
||||
|
||||
@ -380,14 +406,22 @@ do { \
|
||||
|
||||
#define put_user __put_user
|
||||
|
||||
/*
|
||||
* We must not call into the scheduler between __uaccess_enable_tco_async() and
|
||||
* __uaccess_disable_tco_async(). As `dst` and `src` may contain blocking
|
||||
* functions, we must evaluate these outside of the critical section.
|
||||
*/
|
||||
#define __put_kernel_nofault(dst, src, type, err_label) \
|
||||
do { \
|
||||
__typeof__(dst) __pkn_dst = (dst); \
|
||||
__typeof__(src) __pkn_src = (src); \
|
||||
int __pkn_err = 0; \
|
||||
\
|
||||
__uaccess_enable_tco_async(); \
|
||||
__raw_put_mem("str", *((type *)(src)), \
|
||||
(__force type *)(dst), __pkn_err); \
|
||||
__raw_put_mem("str", *((type *)(__pkn_src)), \
|
||||
(__force type *)(__pkn_dst), __pkn_err); \
|
||||
__uaccess_disable_tco_async(); \
|
||||
\
|
||||
if (unlikely(__pkn_err)) \
|
||||
goto err_label; \
|
||||
} while(0)
|
||||
|
@ -244,8 +244,6 @@ void arch_ftrace_update_code(int command)
|
||||
* on the way back to parent. For this purpose, this function is called
|
||||
* in _mcount() or ftrace_caller() to replace return address (*parent) on
|
||||
* the call stack to return_to_handler.
|
||||
*
|
||||
* Note that @frame_pointer is used only for sanity check later.
|
||||
*/
|
||||
void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
|
||||
unsigned long frame_pointer)
|
||||
@ -263,8 +261,10 @@ void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
|
||||
*/
|
||||
old = *parent;
|
||||
|
||||
if (!function_graph_enter(old, self_addr, frame_pointer, NULL))
|
||||
if (!function_graph_enter(old, self_addr, frame_pointer,
|
||||
(void *)frame_pointer)) {
|
||||
*parent = return_hooker;
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef CONFIG_DYNAMIC_FTRACE
|
||||
|
@ -38,9 +38,6 @@ void start_backtrace(struct stackframe *frame, unsigned long fp,
|
||||
{
|
||||
frame->fp = fp;
|
||||
frame->pc = pc;
|
||||
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
|
||||
frame->graph = 0;
|
||||
#endif
|
||||
#ifdef CONFIG_KRETPROBES
|
||||
frame->kr_cur = NULL;
|
||||
#endif
|
||||
@ -116,20 +113,23 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
|
||||
frame->prev_fp = fp;
|
||||
frame->prev_type = info.type;
|
||||
|
||||
frame->pc = ptrauth_strip_insn_pac(frame->pc);
|
||||
|
||||
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
|
||||
if (tsk->ret_stack &&
|
||||
(ptrauth_strip_insn_pac(frame->pc) == (unsigned long)return_to_handler)) {
|
||||
struct ftrace_ret_stack *ret_stack;
|
||||
(frame->pc == (unsigned long)return_to_handler)) {
|
||||
unsigned long orig_pc;
|
||||
/*
|
||||
* This is a case where function graph tracer has
|
||||
* modified a return address (LR) in a stack frame
|
||||
* to hook a function return.
|
||||
* So replace it to an original value.
|
||||
*/
|
||||
ret_stack = ftrace_graph_get_ret_stack(tsk, frame->graph++);
|
||||
if (WARN_ON_ONCE(!ret_stack))
|
||||
orig_pc = ftrace_graph_ret_addr(tsk, NULL, frame->pc,
|
||||
(void *)frame->fp);
|
||||
if (WARN_ON_ONCE(frame->pc == orig_pc))
|
||||
return -EINVAL;
|
||||
frame->pc = ret_stack->ret;
|
||||
frame->pc = orig_pc;
|
||||
}
|
||||
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
|
||||
#ifdef CONFIG_KRETPROBES
|
||||
@ -137,8 +137,6 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
|
||||
frame->pc = kretprobe_find_ret_addr(tsk, (void *)frame->fp, &frame->kr_cur);
|
||||
#endif
|
||||
|
||||
frame->pc = ptrauth_strip_insn_pac(frame->pc);
|
||||
|
||||
return 0;
|
||||
}
|
||||
NOKPROBE_SYMBOL(unwind_frame);
|
||||
|
@ -369,3 +369,4 @@
|
||||
446 common landlock_restrict_self sys_landlock_restrict_self
|
||||
# 447 reserved for memfd_secret
|
||||
448 common process_mrelease sys_process_mrelease
|
||||
449 common futex_waitv sys_futex_waitv
|
||||
|
@ -250,7 +250,6 @@ static inline void __flush_page_to_ram(void *vaddr)
|
||||
|
||||
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
|
||||
#define flush_dcache_page(page) __flush_page_to_ram(page_address(page))
|
||||
void flush_dcache_folio(struct folio *folio);
|
||||
#define flush_dcache_mmap_lock(mapping) do { } while (0)
|
||||
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
|
||||
#define flush_icache_page(vma, page) __flush_page_to_ram(page_address(page))
|
||||
|
@ -448,3 +448,4 @@
|
||||
446 common landlock_restrict_self sys_landlock_restrict_self
|
||||
# 447 reserved for memfd_secret
|
||||
448 common process_mrelease sys_process_mrelease
|
||||
449 common futex_waitv sys_futex_waitv
|
||||
|
@ -454,3 +454,4 @@
|
||||
446 common landlock_restrict_self sys_landlock_restrict_self
|
||||
# 447 reserved for memfd_secret
|
||||
448 common process_mrelease sys_process_mrelease
|
||||
449 common futex_waitv sys_futex_waitv
|
||||
|
@ -3097,7 +3097,7 @@ config STACKTRACE_SUPPORT
|
||||
config PGTABLE_LEVELS
|
||||
int
|
||||
default 4 if PAGE_SIZE_4KB && MIPS_VA_BITS_48
|
||||
default 3 if 64BIT && !PAGE_SIZE_64KB
|
||||
default 3 if 64BIT && (!PAGE_SIZE_64KB || MIPS_VA_BITS_48)
|
||||
default 2
|
||||
|
||||
config MIPS_AUTO_PFN_OFFSET
|
||||
|
@ -52,7 +52,7 @@ endif
|
||||
|
||||
vmlinuzobjs-$(CONFIG_KERNEL_XZ) += $(obj)/ashldi3.o
|
||||
|
||||
vmlinuzobjs-$(CONFIG_KERNEL_ZSTD) += $(obj)/bswapdi.o
|
||||
vmlinuzobjs-$(CONFIG_KERNEL_ZSTD) += $(obj)/bswapdi.o $(obj)/ashldi3.o
|
||||
|
||||
targets := $(notdir $(vmlinuzobjs-y))
|
||||
|
||||
|
@ -61,8 +61,6 @@ static inline void flush_dcache_page(struct page *page)
|
||||
SetPageDcacheDirty(page);
|
||||
}
|
||||
|
||||
void flush_dcache_folio(struct folio *folio);
|
||||
|
||||
#define flush_dcache_mmap_lock(mapping) do { } while (0)
|
||||
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
|
||||
|
||||
|
@ -1734,8 +1734,6 @@ static inline void decode_cpucfg(struct cpuinfo_mips *c)
|
||||
|
||||
static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
|
||||
{
|
||||
decode_configs(c);
|
||||
|
||||
/* All Loongson processors covered here define ExcCode 16 as GSExc. */
|
||||
c->options |= MIPS_CPU_GSEXCEX;
|
||||
|
||||
@ -1796,6 +1794,8 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
|
||||
panic("Unknown Loongson Processor ID!");
|
||||
break;
|
||||
}
|
||||
|
||||
decode_configs(c);
|
||||
}
|
||||
#else
|
||||
static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu) { }
|
||||
|
@ -185,7 +185,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
|
||||
seq_puts(m, " tx39_cache");
|
||||
if (cpu_has_octeon_cache)
|
||||
seq_puts(m, " octeon_cache");
|
||||
if (cpu_has_fpu)
|
||||
if (raw_cpu_has_fpu)
|
||||
seq_puts(m, " fpu");
|
||||
if (cpu_has_32fpr)
|
||||
seq_puts(m, " 32fpr");
|
||||
|
@ -27,7 +27,6 @@ void flush_cache_vunmap(unsigned long start, unsigned long end);
|
||||
|
||||
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
|
||||
void flush_dcache_page(struct page *page);
|
||||
void flush_dcache_folio(struct folio *folio);
|
||||
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
|
||||
unsigned long vaddr, void *dst, void *src, int len);
|
||||
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
|
||||
|
@ -29,7 +29,6 @@ extern void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
|
||||
unsigned long pfn);
|
||||
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
|
||||
void flush_dcache_page(struct page *page);
|
||||
void flush_dcache_folio(struct folio *folio);
|
||||
|
||||
extern void flush_icache_range(unsigned long start, unsigned long end);
|
||||
extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);
|
||||
|
@ -147,6 +147,17 @@
|
||||
extrd,u \r, 63-(\sa), 64-(\sa), \t
|
||||
.endm
|
||||
|
||||
/* Extract unsigned for 32- and 64-bit
|
||||
* The extru instruction leaves the most significant 32 bits of the
|
||||
* target register in an undefined state on PA 2.0 systems. */
|
||||
.macro extru_safe r, p, len, t
|
||||
#ifdef CONFIG_64BIT
|
||||
extrd,u \r, 32+(\p), \len, \t
|
||||
#else
|
||||
extru \r, \p, \len, \t
|
||||
#endif
|
||||
.endm
|
||||
|
||||
/* load 32-bit 'value' into 'reg' compensating for the ldil
|
||||
* sign-extension when running in wide mode.
|
||||
* WARNING!! neither 'value' nor 'reg' can be expressions
|
||||
|
@ -50,7 +50,6 @@ void invalidate_kernel_vmap_range(void *vaddr, int size);
|
||||
|
||||
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
|
||||
void flush_dcache_page(struct page *page);
|
||||
void flush_dcache_folio(struct folio *folio);
|
||||
|
||||
#define flush_dcache_mmap_lock(mapping) xa_lock_irq(&mapping->i_pages)
|
||||
#define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&mapping->i_pages)
|
||||
|
@ -366,17 +366,9 @@
|
||||
*/
|
||||
.macro L2_ptep pmd,pte,index,va,fault
|
||||
#if CONFIG_PGTABLE_LEVELS == 3
|
||||
extru \va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
|
||||
extru_safe \va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
|
||||
#else
|
||||
# if defined(CONFIG_64BIT)
|
||||
extrd,u \va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
|
||||
#else
|
||||
# if PAGE_SIZE > 4096
|
||||
extru \va,31-ASM_PGDIR_SHIFT,32-ASM_PGDIR_SHIFT,\index
|
||||
# else
|
||||
extru \va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
|
||||
# endif
|
||||
# endif
|
||||
extru_safe \va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
|
||||
#endif
|
||||
dep %r0,31,PAGE_SHIFT,\pmd /* clear offset */
|
||||
#if CONFIG_PGTABLE_LEVELS < 3
|
||||
@ -386,7 +378,7 @@
|
||||
bb,>=,n \pmd,_PxD_PRESENT_BIT,\fault
|
||||
dep %r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
|
||||
SHLREG \pmd,PxD_VALUE_SHIFT,\pmd
|
||||
extru \va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
|
||||
extru_safe \va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
|
||||
dep %r0,31,PAGE_SHIFT,\pmd /* clear offset */
|
||||
shladd \index,BITS_PER_PTE_ENTRY,\pmd,\pmd /* pmd is now pte */
|
||||
.endm
|
||||
|
@ -566,7 +566,7 @@ lws_compare_and_swap:
|
||||
ldo R%lws_lock_start(%r20), %r28
|
||||
|
||||
/* Extract eight bits from r26 and hash lock (Bits 3-11) */
|
||||
extru %r26, 28, 8, %r20
|
||||
extru_safe %r26, 28, 8, %r20
|
||||
|
||||
/* Find lock to use, the hash is either one of 0 to
|
||||
15, multiplied by 16 (keep it 16-byte aligned)
|
||||
@ -751,7 +751,7 @@ cas2_lock_start:
|
||||
ldo R%lws_lock_start(%r20), %r28
|
||||
|
||||
/* Extract eight bits from r26 and hash lock (Bits 3-11) */
|
||||
extru %r26, 28, 8, %r20
|
||||
extru_safe %r26, 28, 8, %r20
|
||||
|
||||
/* Find lock to use, the hash is either one of 0 to
|
||||
15, multiplied by 16 (keep it 16-byte aligned)
|
||||
|
@ -57,8 +57,6 @@ SECTIONS
|
||||
{
|
||||
. = KERNEL_BINARY_TEXT_START;
|
||||
|
||||
_stext = .; /* start of kernel text, includes init code & data */
|
||||
|
||||
__init_begin = .;
|
||||
HEAD_TEXT_SECTION
|
||||
MLONGCALL_DISCARD(INIT_TEXT_SECTION(8))
|
||||
@ -82,6 +80,7 @@ SECTIONS
|
||||
/* freed after init ends here */
|
||||
|
||||
_text = .; /* Text and read-only data */
|
||||
_stext = .;
|
||||
MLONGCALL_KEEP(INIT_TEXT_SECTION(8))
|
||||
.text ALIGN(PAGE_SIZE) : {
|
||||
TEXT_TEXT
|
||||
|
@ -202,11 +202,11 @@ vmap_stack_overflow:
|
||||
mfspr r1, SPRN_SPRG_THREAD
|
||||
lwz r1, TASK_CPU - THREAD(r1)
|
||||
slwi r1, r1, 3
|
||||
addis r1, r1, emergency_ctx@ha
|
||||
addis r1, r1, emergency_ctx-PAGE_OFFSET@ha
|
||||
#else
|
||||
lis r1, emergency_ctx@ha
|
||||
lis r1, emergency_ctx-PAGE_OFFSET@ha
|
||||
#endif
|
||||
lwz r1, emergency_ctx@l(r1)
|
||||
lwz r1, emergency_ctx-PAGE_OFFSET@l(r1)
|
||||
addi r1, r1, THREAD_SIZE - INT_FRAME_SIZE
|
||||
EXCEPTION_PROLOG_2 0 vmap_stack_overflow
|
||||
prepare_transfer_to_handler
|
||||
|
@ -528,3 +528,4 @@
|
||||
446 common landlock_restrict_self sys_landlock_restrict_self
|
||||
# 447 reserved for memfd_secret
|
||||
448 common process_mrelease sys_process_mrelease
|
||||
449 common futex_waitv sys_futex_waitv
|
||||
|
@ -695,6 +695,7 @@ static void flush_guest_tlb(struct kvm *kvm)
|
||||
"r" (0) : "memory");
|
||||
}
|
||||
asm volatile("ptesync": : :"memory");
|
||||
// POWER9 congruence-class TLBIEL leaves ERAT. Flush it now.
|
||||
asm volatile(PPC_RADIX_INVALIDATE_ERAT_GUEST : : :"memory");
|
||||
} else {
|
||||
for (set = 0; set < kvm->arch.tlb_sets; ++set) {
|
||||
@ -705,7 +706,9 @@ static void flush_guest_tlb(struct kvm *kvm)
|
||||
rb += PPC_BIT(51); /* increment set number */
|
||||
}
|
||||
asm volatile("ptesync": : :"memory");
|
||||
asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT : : :"memory");
|
||||
// POWER9 congruence-class TLBIEL leaves ERAT. Flush it now.
|
||||
if (cpu_has_feature(CPU_FTR_ARCH_300))
|
||||
asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT : : :"memory");
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -43,7 +43,6 @@ extern void flush_cache_range(struct vm_area_struct *vma,
|
||||
unsigned long start, unsigned long end);
|
||||
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
|
||||
void flush_dcache_page(struct page *page);
|
||||
void flush_dcache_folio(struct folio *folio);
|
||||
extern void flush_icache_range(unsigned long start, unsigned long end);
|
||||
#define flush_icache_user_range flush_icache_range
|
||||
extern void flush_icache_page(struct vm_area_struct *vma,
|
||||
|
@ -451,3 +451,4 @@
|
||||
446 common landlock_restrict_self sys_landlock_restrict_self
|
||||
# 447 reserved for memfd_secret
|
||||
448 common process_mrelease sys_process_mrelease
|
||||
449 common futex_waitv sys_futex_waitv
|
||||
|
@ -494,3 +494,4 @@
|
||||
446 common landlock_restrict_self sys_landlock_restrict_self
|
||||
# 447 reserved for memfd_secret
|
||||
448 common process_mrelease sys_process_mrelease
|
||||
449 common futex_waitv sys_futex_waitv
|
||||
|
@ -102,12 +102,6 @@ extern void switch_fpu_return(void);
|
||||
*/
|
||||
extern int cpu_has_xfeatures(u64 xfeatures_mask, const char **feature_name);
|
||||
|
||||
/*
|
||||
* Tasks that are not using SVA have mm->pasid set to zero to note that they
|
||||
* will not have the valid bit set in MSR_IA32_PASID while they are running.
|
||||
*/
|
||||
#define PASID_DISABLED 0
|
||||
|
||||
/* Trap handling */
|
||||
extern int fpu__exception_code(struct fpu *fpu, int trap_nr);
|
||||
extern void fpu_sync_fpstate(struct fpu *fpu);
|
||||
|
@ -281,13 +281,13 @@ HYPERVISOR_callback_op(int cmd, void *arg)
|
||||
return _hypercall2(int, callback_op, cmd, arg);
|
||||
}
|
||||
|
||||
static inline int
|
||||
static __always_inline int
|
||||
HYPERVISOR_set_debugreg(int reg, unsigned long value)
|
||||
{
|
||||
return _hypercall2(int, set_debugreg, reg, value);
|
||||
}
|
||||
|
||||
static inline unsigned long
|
||||
static __always_inline unsigned long
|
||||
HYPERVISOR_get_debugreg(int reg)
|
||||
{
|
||||
return _hypercall1(unsigned long, get_debugreg, reg);
|
||||
|
@ -64,6 +64,7 @@ void xen_arch_unregister_cpu(int num);
|
||||
|
||||
#ifdef CONFIG_PVH
|
||||
void __init xen_pvh_init(struct boot_params *boot_params);
|
||||
void __init mem_map_via_hcall(struct boot_params *boot_params_p);
|
||||
#endif
|
||||
|
||||
#endif /* _ASM_X86_XEN_HYPERVISOR_H */
|
||||
|
@ -742,7 +742,7 @@ dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static char *prepare_command_line(void)
|
||||
static char * __init prepare_command_line(void)
|
||||
{
|
||||
#ifdef CONFIG_CMDLINE_BOOL
|
||||
#ifdef CONFIG_CMDLINE_OVERRIDE
|
||||
|
@ -121,7 +121,6 @@ void flush_cache_page(struct vm_area_struct*,
|
||||
|
||||
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
|
||||
void flush_dcache_page(struct page *);
|
||||
void flush_dcache_folio(struct folio *);
|
||||
|
||||
void local_flush_cache_range(struct vm_area_struct *vma,
|
||||
unsigned long start, unsigned long end);
|
||||
@ -138,9 +137,7 @@ void local_flush_cache_page(struct vm_area_struct *vma,
|
||||
#define flush_cache_vunmap(start,end) do { } while (0)
|
||||
|
||||
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
|
||||
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_FOLIO
|
||||
#define flush_dcache_page(page) do { } while (0)
|
||||
static inline void flush_dcache_folio(struct folio *folio) { }
|
||||
|
||||
#define flush_icache_range local_flush_icache_range
|
||||
#define flush_cache_page(vma, addr, pfn) do { } while (0)
|
||||
|
@ -419,3 +419,4 @@
|
||||
446 common landlock_restrict_self sys_landlock_restrict_self
|
||||
# 447 reserved for memfd_secret
|
||||
448 common process_mrelease sys_process_mrelease
|
||||
449 common futex_waitv sys_futex_waitv
|
||||
|
12
block/bdev.c
12
block/bdev.c
@ -753,8 +753,7 @@ struct block_device *blkdev_get_no_open(dev_t dev)
|
||||
|
||||
if (!bdev)
|
||||
return NULL;
|
||||
if ((bdev->bd_disk->flags & GENHD_FL_HIDDEN) ||
|
||||
!try_module_get(bdev->bd_disk->fops->owner)) {
|
||||
if ((bdev->bd_disk->flags & GENHD_FL_HIDDEN)) {
|
||||
put_device(&bdev->bd_device);
|
||||
return NULL;
|
||||
}
|
||||
@ -764,7 +763,6 @@ struct block_device *blkdev_get_no_open(dev_t dev)
|
||||
|
||||
void blkdev_put_no_open(struct block_device *bdev)
|
||||
{
|
||||
module_put(bdev->bd_disk->fops->owner);
|
||||
put_device(&bdev->bd_device);
|
||||
}
|
||||
|
||||
@ -820,12 +818,14 @@ struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder)
|
||||
ret = -ENXIO;
|
||||
if (!disk_live(disk))
|
||||
goto abort_claiming;
|
||||
if (!try_module_get(disk->fops->owner))
|
||||
goto abort_claiming;
|
||||
if (bdev_is_partition(bdev))
|
||||
ret = blkdev_get_part(bdev, mode);
|
||||
else
|
||||
ret = blkdev_get_whole(bdev, mode);
|
||||
if (ret)
|
||||
goto abort_claiming;
|
||||
goto put_module;
|
||||
if (mode & FMODE_EXCL) {
|
||||
bd_finish_claiming(bdev, holder);
|
||||
|
||||
@ -847,7 +847,8 @@ struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder)
|
||||
if (unblock_events)
|
||||
disk_unblock_events(disk);
|
||||
return bdev;
|
||||
|
||||
put_module:
|
||||
module_put(disk->fops->owner);
|
||||
abort_claiming:
|
||||
if (mode & FMODE_EXCL)
|
||||
bd_abort_claiming(bdev, holder);
|
||||
@ -956,6 +957,7 @@ void blkdev_put(struct block_device *bdev, fmode_t mode)
|
||||
blkdev_put_whole(bdev, mode);
|
||||
mutex_unlock(&disk->open_mutex);
|
||||
|
||||
module_put(disk->fops->owner);
|
||||
blkdev_put_no_open(bdev);
|
||||
}
|
||||
EXPORT_SYMBOL(blkdev_put);
|
||||
|
@ -1017,6 +1017,7 @@ EXPORT_SYMBOL(submit_bio);
|
||||
/**
|
||||
* bio_poll - poll for BIO completions
|
||||
* @bio: bio to poll for
|
||||
* @iob: batches of IO
|
||||
* @flags: BLK_POLL_* flags that control the behavior
|
||||
*
|
||||
* Poll for completions on queue associated with the bio. Returns number of
|
||||
|
@ -860,13 +860,14 @@ void blk_mq_end_request_batch(struct io_comp_batch *iob)
|
||||
if (iob->need_ts)
|
||||
__blk_mq_end_request_acct(rq, now);
|
||||
|
||||
rq_qos_done(rq->q, rq);
|
||||
|
||||
WRITE_ONCE(rq->state, MQ_RQ_IDLE);
|
||||
if (!refcount_dec_and_test(&rq->ref))
|
||||
continue;
|
||||
|
||||
blk_crypto_free_request(rq);
|
||||
blk_pm_mark_last_busy(rq);
|
||||
rq_qos_done(rq->q, rq);
|
||||
|
||||
if (nr_tags == TAG_COMP_BATCH || cur_hctx != rq->mq_hctx) {
|
||||
if (cur_hctx)
|
||||
|
@ -998,7 +998,14 @@ static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
|
||||
static int cppc_get_perf(int cpunum, enum cppc_regs reg_idx, u64 *perf)
|
||||
{
|
||||
struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
|
||||
struct cpc_register_resource *reg = &cpc_desc->cpc_regs[reg_idx];
|
||||
struct cpc_register_resource *reg;
|
||||
|
||||
if (!cpc_desc) {
|
||||
pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
reg = &cpc_desc->cpc_regs[reg_idx];
|
||||
|
||||
if (CPC_IN_PCC(reg)) {
|
||||
int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
|
||||
|
@ -1084,21 +1084,17 @@ struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode,
|
||||
* Returns parent node of an ACPI device or data firmware node or %NULL if
|
||||
* not available.
|
||||
*/
|
||||
struct fwnode_handle *acpi_node_get_parent(const struct fwnode_handle *fwnode)
|
||||
static struct fwnode_handle *
|
||||
acpi_node_get_parent(const struct fwnode_handle *fwnode)
|
||||
{
|
||||
if (is_acpi_data_node(fwnode)) {
|
||||
/* All data nodes have parent pointer so just return that */
|
||||
return to_acpi_data_node(fwnode)->parent;
|
||||
} else if (is_acpi_device_node(fwnode)) {
|
||||
acpi_handle handle, parent_handle;
|
||||
struct device *dev = to_acpi_device_node(fwnode)->dev.parent;
|
||||
|
||||
handle = to_acpi_device_node(fwnode)->handle;
|
||||
if (ACPI_SUCCESS(acpi_get_parent(handle, &parent_handle))) {
|
||||
struct acpi_device *adev;
|
||||
|
||||
if (!acpi_bus_get_device(parent_handle, &adev))
|
||||
return acpi_fwnode_handle(adev);
|
||||
}
|
||||
if (dev)
|
||||
return acpi_fwnode_handle(to_acpi_device(dev));
|
||||
}
|
||||
|
||||
return NULL;
|
||||
|
@ -2710,7 +2710,7 @@ static void binder_transaction(struct binder_proc *proc,
|
||||
t->from = thread;
|
||||
else
|
||||
t->from = NULL;
|
||||
t->sender_euid = proc->cred->euid;
|
||||
t->sender_euid = task_euid(proc->tsk);
|
||||
t->to_proc = target_proc;
|
||||
t->to_thread = target_thread;
|
||||
t->code = tr->code;
|
||||
|
@ -316,7 +316,7 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
|
||||
struct request *req = bd->rq;
|
||||
struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
|
||||
unsigned long flags;
|
||||
unsigned int num;
|
||||
int num;
|
||||
int qid = hctx->queue_num;
|
||||
bool notify = false;
|
||||
blk_status_t status;
|
||||
@ -1049,7 +1049,6 @@ static struct virtio_driver virtio_blk = {
|
||||
.feature_table_size = ARRAY_SIZE(features),
|
||||
.feature_table_legacy = features_legacy,
|
||||
.feature_table_size_legacy = ARRAY_SIZE(features_legacy),
|
||||
.suppress_used_validation = true,
|
||||
.driver.name = KBUILD_MODNAME,
|
||||
.driver.owner = THIS_MODULE,
|
||||
.id_table = id_table,
|
||||
|
@ -1853,12 +1853,14 @@ static const struct block_device_operations zram_devops = {
|
||||
.owner = THIS_MODULE
|
||||
};
|
||||
|
||||
#ifdef CONFIG_ZRAM_WRITEBACK
|
||||
static const struct block_device_operations zram_wb_devops = {
|
||||
.open = zram_open,
|
||||
.submit_bio = zram_submit_bio,
|
||||
.swap_slot_free_notify = zram_slot_free_notify,
|
||||
.owner = THIS_MODULE
|
||||
};
|
||||
#endif
|
||||
|
||||
static DEVICE_ATTR_WO(compact);
|
||||
static DEVICE_ATTR_RW(disksize);
|
||||
|
@ -338,6 +338,8 @@ static void intel_pstste_sched_itmt_work_fn(struct work_struct *work)
|
||||
|
||||
static DECLARE_WORK(sched_itmt_work, intel_pstste_sched_itmt_work_fn);
|
||||
|
||||
#define CPPC_MAX_PERF U8_MAX
|
||||
|
||||
static void intel_pstate_set_itmt_prio(int cpu)
|
||||
{
|
||||
struct cppc_perf_caps cppc_perf;
|
||||
@ -348,6 +350,14 @@ static void intel_pstate_set_itmt_prio(int cpu)
|
||||
if (ret)
|
||||
return;
|
||||
|
||||
/*
|
||||
* On some systems with overclocking enabled, CPPC.highest_perf is hardcoded to 0xff.
|
||||
* In this case we can't use CPPC.highest_perf to enable ITMT.
|
||||
* In this case we can look at MSR_HWP_CAPABILITIES bits [8:0] to decide.
|
||||
*/
|
||||
if (cppc_perf.highest_perf == CPPC_MAX_PERF)
|
||||
cppc_perf.highest_perf = HWP_HIGHEST_PERF(READ_ONCE(all_cpu_data[cpu]->hwp_cap_cached));
|
||||
|
||||
/*
|
||||
* The priorities can be set regardless of whether or not
|
||||
* sched_set_itmt_support(true) has been called and it is valid to
|
||||
@ -1006,6 +1016,12 @@ static void intel_pstate_hwp_offline(struct cpudata *cpu)
|
||||
*/
|
||||
value &= ~GENMASK_ULL(31, 24);
|
||||
value |= HWP_ENERGY_PERF_PREFERENCE(cpu->epp_cached);
|
||||
/*
|
||||
* However, make sure that EPP will be set to "performance" when
|
||||
* the CPU is brought back online again and the "performance"
|
||||
* scaling algorithm is still in effect.
|
||||
*/
|
||||
cpu->epp_policy = CPUFREQ_POLICY_UNKNOWN;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -2353,6 +2369,7 @@ static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
|
||||
X86_MATCH(BROADWELL_D, core_funcs),
|
||||
X86_MATCH(BROADWELL_X, core_funcs),
|
||||
X86_MATCH(SKYLAKE_X, core_funcs),
|
||||
X86_MATCH(ICELAKE_X, core_funcs),
|
||||
{}
|
||||
};
|
||||
|
||||
|
@ -34,6 +34,12 @@ struct scmi_msg_resp_base_attributes {
|
||||
__le16 reserved;
|
||||
};
|
||||
|
||||
struct scmi_msg_resp_base_discover_agent {
|
||||
__le32 agent_id;
|
||||
u8 name[SCMI_MAX_STR_SIZE];
|
||||
};
|
||||
|
||||
|
||||
struct scmi_msg_base_error_notify {
|
||||
__le32 event_control;
|
||||
#define BASE_TP_NOTIFY_ALL BIT(0)
|
||||
@ -225,18 +231,21 @@ static int scmi_base_discover_agent_get(const struct scmi_protocol_handle *ph,
|
||||
int id, char *name)
|
||||
{
|
||||
int ret;
|
||||
struct scmi_msg_resp_base_discover_agent *agent_info;
|
||||
struct scmi_xfer *t;
|
||||
|
||||
ret = ph->xops->xfer_get_init(ph, BASE_DISCOVER_AGENT,
|
||||
sizeof(__le32), SCMI_MAX_STR_SIZE, &t);
|
||||
sizeof(__le32), sizeof(*agent_info), &t);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
put_unaligned_le32(id, t->tx.buf);
|
||||
|
||||
ret = ph->xops->do_xfer(ph, t);
|
||||
if (!ret)
|
||||
strlcpy(name, t->rx.buf, SCMI_MAX_STR_SIZE);
|
||||
if (!ret) {
|
||||
agent_info = t->rx.buf;
|
||||
strlcpy(name, agent_info->name, SCMI_MAX_STR_SIZE);
|
||||
}
|
||||
|
||||
ph->xops->xfer_put(ph, t);
|
||||
|
||||
|
@ -138,9 +138,7 @@ static int scmi_pm_domain_probe(struct scmi_device *sdev)
|
||||
scmi_pd_data->domains = domains;
|
||||
scmi_pd_data->num_domains = num_domains;
|
||||
|
||||
of_genpd_add_provider_onecell(np, scmi_pd_data);
|
||||
|
||||
return 0;
|
||||
return of_genpd_add_provider_onecell(np, scmi_pd_data);
|
||||
}
|
||||
|
||||
static const struct scmi_device_id scmi_id_table[] = {
|
||||
|
@ -637,7 +637,7 @@ static int scmi_sensor_config_get(const struct scmi_protocol_handle *ph,
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
put_unaligned_le32(cpu_to_le32(sensor_id), t->tx.buf);
|
||||
put_unaligned_le32(sensor_id, t->tx.buf);
|
||||
ret = ph->xops->do_xfer(ph, t);
|
||||
if (!ret) {
|
||||
struct sensors_info *si = ph->get_priv(ph);
|
||||
|
@ -82,7 +82,8 @@ static bool scmi_vio_have_vq_rx(struct virtio_device *vdev)
|
||||
}
|
||||
|
||||
static int scmi_vio_feed_vq_rx(struct scmi_vio_channel *vioch,
|
||||
struct scmi_vio_msg *msg)
|
||||
struct scmi_vio_msg *msg,
|
||||
struct device *dev)
|
||||
{
|
||||
struct scatterlist sg_in;
|
||||
int rc;
|
||||
@ -94,8 +95,7 @@ static int scmi_vio_feed_vq_rx(struct scmi_vio_channel *vioch,
|
||||
|
||||
rc = virtqueue_add_inbuf(vioch->vqueue, &sg_in, 1, msg, GFP_ATOMIC);
|
||||
if (rc)
|
||||
dev_err_once(vioch->cinfo->dev,
|
||||
"failed to add to virtqueue (%d)\n", rc);
|
||||
dev_err_once(dev, "failed to add to virtqueue (%d)\n", rc);
|
||||
else
|
||||
virtqueue_kick(vioch->vqueue);
|
||||
|
||||
@ -108,7 +108,7 @@ static void scmi_finalize_message(struct scmi_vio_channel *vioch,
|
||||
struct scmi_vio_msg *msg)
|
||||
{
|
||||
if (vioch->is_rx) {
|
||||
scmi_vio_feed_vq_rx(vioch, msg);
|
||||
scmi_vio_feed_vq_rx(vioch, msg, vioch->cinfo->dev);
|
||||
} else {
|
||||
/* Here IRQs are assumed to be already disabled by the caller */
|
||||
spin_lock(&vioch->lock);
|
||||
@ -269,7 +269,7 @@ static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
|
||||
list_add_tail(&msg->list, &vioch->free_list);
|
||||
spin_unlock_irqrestore(&vioch->lock, flags);
|
||||
} else {
|
||||
scmi_vio_feed_vq_rx(vioch, msg);
|
||||
scmi_vio_feed_vq_rx(vioch, msg, cinfo->dev);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -156,7 +156,7 @@ static int scmi_voltage_descriptors_get(const struct scmi_protocol_handle *ph,
|
||||
int cnt;
|
||||
|
||||
cmd->domain_id = cpu_to_le32(v->id);
|
||||
cmd->level_index = desc_index;
|
||||
cmd->level_index = cpu_to_le32(desc_index);
|
||||
ret = ph->xops->do_xfer(ph, tl);
|
||||
if (ret)
|
||||
break;
|
||||
|
@ -50,7 +50,7 @@ static int __init smccc_soc_init(void)
|
||||
arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
|
||||
ARM_SMCCC_ARCH_SOC_ID, &res);
|
||||
|
||||
if (res.a0 == SMCCC_RET_NOT_SUPPORTED) {
|
||||
if ((int)res.a0 == SMCCC_RET_NOT_SUPPORTED) {
|
||||
pr_info("ARCH_SOC_ID not implemented, skipping ....\n");
|
||||
return 0;
|
||||
}
|
||||
|
@ -646,12 +646,6 @@ kfd_mem_attach_dmabuf(struct amdgpu_device *adev, struct kgd_mem *mem,
|
||||
if (IS_ERR(gobj))
|
||||
return PTR_ERR(gobj);
|
||||
|
||||
/* Import takes an extra reference on the dmabuf. Drop it now to
|
||||
* avoid leaking it. We only need the one reference in
|
||||
* kgd_mem->dmabuf.
|
||||
*/
|
||||
dma_buf_put(mem->dmabuf);
|
||||
|
||||
*bo = gem_to_amdgpu_bo(gobj);
|
||||
(*bo)->flags |= AMDGPU_GEM_CREATE_PREEMPTIBLE;
|
||||
(*bo)->parent = amdgpu_bo_ref(mem->bo);
|
||||
|
@ -1569,6 +1569,18 @@ void amdgpu_atombios_scratch_regs_engine_hung(struct amdgpu_device *adev,
|
||||
WREG32(adev->bios_scratch_reg_offset + 3, tmp);
|
||||
}
|
||||
|
||||
void amdgpu_atombios_scratch_regs_set_backlight_level(struct amdgpu_device *adev,
|
||||
u32 backlight_level)
|
||||
{
|
||||
u32 tmp = RREG32(adev->bios_scratch_reg_offset + 2);
|
||||
|
||||
tmp &= ~ATOM_S2_CURRENT_BL_LEVEL_MASK;
|
||||
tmp |= (backlight_level << ATOM_S2_CURRENT_BL_LEVEL_SHIFT) &
|
||||
ATOM_S2_CURRENT_BL_LEVEL_MASK;
|
||||
|
||||
WREG32(adev->bios_scratch_reg_offset + 2, tmp);
|
||||
}
|
||||
|
||||
bool amdgpu_atombios_scratch_need_asic_init(struct amdgpu_device *adev)
|
||||
{
|
||||
u32 tmp = RREG32(adev->bios_scratch_reg_offset + 7);
|
||||
|
@ -185,6 +185,8 @@ bool amdgpu_atombios_has_gpu_virtualization_table(struct amdgpu_device *adev);
|
||||
void amdgpu_atombios_scratch_regs_lock(struct amdgpu_device *adev, bool lock);
|
||||
void amdgpu_atombios_scratch_regs_engine_hung(struct amdgpu_device *adev,
|
||||
bool hung);
|
||||
void amdgpu_atombios_scratch_regs_set_backlight_level(struct amdgpu_device *adev,
|
||||
u32 backlight_level);
|
||||
bool amdgpu_atombios_scratch_need_asic_init(struct amdgpu_device *adev);
|
||||
|
||||
void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le);
|
||||
|
@ -4316,7 +4316,6 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
|
||||
|
||||
amdgpu_irq_gpu_reset_resume_helper(adev);
|
||||
r = amdgpu_ib_ring_tests(adev);
|
||||
amdgpu_amdkfd_post_reset(adev);
|
||||
|
||||
error:
|
||||
if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
|
||||
@ -5089,7 +5088,7 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */
|
||||
|
||||
tmp_vram_lost_counter = atomic_read(&((adev)->vram_lost_counter));
|
||||
/* Actual ASIC resets if needed.*/
|
||||
/* TODO Implement XGMI hive reset logic for SRIOV */
|
||||
/* Host driver will handle XGMI hive reset for SRIOV */
|
||||
if (amdgpu_sriov_vf(adev)) {
|
||||
r = amdgpu_device_reset_sriov(adev, job ? false : true);
|
||||
if (r)
|
||||
@ -5149,8 +5148,8 @@ skip_hw_reset:
|
||||
|
||||
skip_sched_resume:
|
||||
list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
|
||||
/* unlock kfd: SRIOV would do it separately */
|
||||
if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
|
||||
/* unlock kfd */
|
||||
if (!need_emergency_restart)
|
||||
amdgpu_amdkfd_post_reset(tmp_adev);
|
||||
|
||||
/* kfd_post_reset will do nothing if kfd device is not initialized,
|
||||
|
@ -248,8 +248,8 @@ get_from_vram:
|
||||
|
||||
offset = offsetof(struct binary_header, binary_checksum) +
|
||||
sizeof(bhdr->binary_checksum);
|
||||
size = bhdr->binary_size - offset;
|
||||
checksum = bhdr->binary_checksum;
|
||||
size = le16_to_cpu(bhdr->binary_size) - offset;
|
||||
checksum = le16_to_cpu(bhdr->binary_checksum);
|
||||
|
||||
if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
|
||||
size, checksum)) {
|
||||
@ -270,7 +270,7 @@ get_from_vram:
|
||||
}
|
||||
|
||||
if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
|
||||
ihdr->size, checksum)) {
|
||||
le16_to_cpu(ihdr->size), checksum)) {
|
||||
DRM_ERROR("invalid ip discovery data table checksum\n");
|
||||
r = -EINVAL;
|
||||
goto out;
|
||||
@ -282,7 +282,7 @@ get_from_vram:
|
||||
ghdr = (struct gpu_info_header *)(adev->mman.discovery_bin + offset);
|
||||
|
||||
if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
|
||||
ghdr->size, checksum)) {
|
||||
le32_to_cpu(ghdr->size), checksum)) {
|
||||
DRM_ERROR("invalid gc data table checksum\n");
|
||||
r = -EINVAL;
|
||||
goto out;
|
||||
@ -489,10 +489,10 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
|
||||
le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset));
|
||||
|
||||
for (i = 0; i < 32; i++) {
|
||||
if (le32_to_cpu(harvest_info->list[i].hw_id) == 0)
|
||||
if (le16_to_cpu(harvest_info->list[i].hw_id) == 0)
|
||||
break;
|
||||
|
||||
switch (le32_to_cpu(harvest_info->list[i].hw_id)) {
|
||||
switch (le16_to_cpu(harvest_info->list[i].hw_id)) {
|
||||
case VCN_HWID:
|
||||
vcn_harvest_count++;
|
||||
if (harvest_info->list[i].number_instance == 0)
|
||||
|
@ -223,7 +223,7 @@ int amdgpu_ih_wait_on_checkpoint_process(struct amdgpu_device *adev,
|
||||
*/
|
||||
int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih)
|
||||
{
|
||||
unsigned int count = AMDGPU_IH_MAX_NUM_IVS;
|
||||
unsigned int count;
|
||||
u32 wptr;
|
||||
|
||||
if (!ih->enabled || adev->shutdown)
|
||||
@ -232,6 +232,7 @@ int amdgpu_ih_process(struct amdgpu_device *adev, struct amdgpu_ih_ring *ih)
|
||||
wptr = amdgpu_ih_get_wptr(adev, ih);
|
||||
|
||||
restart_ih:
|
||||
count = AMDGPU_IH_MAX_NUM_IVS;
|
||||
DRM_DEBUG("%s: rptr %d, wptr %d\n", __func__, ih->rptr, wptr);
|
||||
|
||||
/* Order reading of wptr vs. reading of IH ring data */
|
||||
|
@ -7707,8 +7707,19 @@ static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev)
|
||||
switch (adev->ip_versions[GC_HWIP][0]) {
|
||||
case IP_VERSION(10, 3, 1):
|
||||
case IP_VERSION(10, 3, 3):
|
||||
clock = (uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Vangogh) |
|
||||
((uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Vangogh) << 32ULL);
|
||||
preempt_disable();
|
||||
clock_hi = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Vangogh);
|
||||
clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Vangogh);
|
||||
hi_check = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Vangogh);
|
||||
/* The SMUIO TSC clock frequency is 100MHz, which sets 32-bit carry over
|
||||
* roughly every 42 seconds.
|
||||
*/
|
||||
if (hi_check != clock_hi) {
|
||||
clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Vangogh);
|
||||
clock_hi = hi_check;
|
||||
}
|
||||
preempt_enable();
|
||||
clock = clock_lo | (clock_hi << 32ULL);
|
||||
break;
|
||||
default:
|
||||
preempt_disable();
|
||||
|
@ -140,6 +140,11 @@ MODULE_FIRMWARE("amdgpu/aldebaran_rlc.bin");
|
||||
#define mmTCP_CHAN_STEER_5_ARCT 0x0b0c
|
||||
#define mmTCP_CHAN_STEER_5_ARCT_BASE_IDX 0
|
||||
|
||||
#define mmGOLDEN_TSC_COUNT_UPPER_Renoir 0x0025
|
||||
#define mmGOLDEN_TSC_COUNT_UPPER_Renoir_BASE_IDX 1
|
||||
#define mmGOLDEN_TSC_COUNT_LOWER_Renoir 0x0026
|
||||
#define mmGOLDEN_TSC_COUNT_LOWER_Renoir_BASE_IDX 1
|
||||
|
||||
enum ta_ras_gfx_subblock {
|
||||
/*CPC*/
|
||||
TA_RAS_BLOCK__GFX_CPC_INDEX_START = 0,
|
||||
@ -4238,19 +4243,38 @@ failed_kiq_read:
|
||||
|
||||
static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
|
||||
{
|
||||
uint64_t clock;
|
||||
uint64_t clock, clock_lo, clock_hi, hi_check;
|
||||
|
||||
amdgpu_gfx_off_ctrl(adev, false);
|
||||
mutex_lock(&adev->gfx.gpu_clock_mutex);
|
||||
if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 0, 1) && amdgpu_sriov_runtime(adev)) {
|
||||
clock = gfx_v9_0_kiq_read_clock(adev);
|
||||
} else {
|
||||
WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
|
||||
clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
|
||||
((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
|
||||
switch (adev->ip_versions[GC_HWIP][0]) {
|
||||
case IP_VERSION(9, 3, 0):
|
||||
preempt_disable();
|
||||
clock_hi = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Renoir);
|
||||
clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Renoir);
|
||||
hi_check = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Renoir);
|
||||
/* The SMUIO TSC clock frequency is 100MHz, which sets 32-bit carry over
|
||||
* roughly every 42 seconds.
|
||||
*/
|
||||
if (hi_check != clock_hi) {
|
||||
clock_lo = RREG32_SOC15_NO_KIQ(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Renoir);
|
||||
clock_hi = hi_check;
|
||||
}
|
||||
preempt_enable();
|
||||
clock = clock_lo | (clock_hi << 32ULL);
|
||||
break;
|
||||
default:
|
||||
amdgpu_gfx_off_ctrl(adev, false);
|
||||
mutex_lock(&adev->gfx.gpu_clock_mutex);
|
||||
if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 0, 1) && amdgpu_sriov_runtime(adev)) {
|
||||
clock = gfx_v9_0_kiq_read_clock(adev);
|
||||
} else {
|
||||
WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
|
||||
clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
|
||||
((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
|
||||
}
|
||||
mutex_unlock(&adev->gfx.gpu_clock_mutex);
|
||||
amdgpu_gfx_off_ctrl(adev, true);
|
||||
break;
|
||||
}
|
||||
mutex_unlock(&adev->gfx.gpu_clock_mutex);
|
||||
amdgpu_gfx_off_ctrl(adev, true);
|
||||
return clock;
|
||||
}
|
||||
|
||||
|
@ -160,6 +160,7 @@ static int navi10_ih_toggle_ring_interrupts(struct amdgpu_device *adev,

tmp = RREG32(ih_regs->ih_rb_cntl);
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_ENABLE, (enable ? 1 : 0));
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_GPU_TS_ENABLE, 1);
/* enable_intr field is only valid in ring0 */
if (ih == &adev->irq.ih)
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, ENABLE_INTR, (enable ? 1 : 0));
@ -275,10 +276,8 @@ static int navi10_ih_enable_ring(struct amdgpu_device *adev,
tmp = navi10_ih_rb_cntl(ih, tmp);
if (ih == &adev->irq.ih)
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RPTR_REARM, !!adev->irq.msi_enabled);
if (ih == &adev->irq.ih1) {
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_ENABLE, 0);
if (ih == &adev->irq.ih1)
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, RB_FULL_DRAIN_ENABLE, 1);
}

if (amdgpu_sriov_vf(adev) && amdgpu_sriov_reg_indirect_ih(adev)) {
if (psp_reg_program(&adev->psp, ih_regs->psp_reg_id, tmp)) {
@ -319,7 +318,6 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev)
{
struct amdgpu_ih_ring *ih[] = {&adev->irq.ih, &adev->irq.ih1, &adev->irq.ih2};
u32 ih_chicken;
u32 tmp;
int ret;
int i;

@ -363,15 +361,6 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev)
adev->nbio.funcs->ih_doorbell_range(adev, ih[0]->use_doorbell,
ih[0]->doorbell_index);

tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL);
tmp = REG_SET_FIELD(tmp, IH_STORM_CLIENT_LIST_CNTL,
CLIENT18_IS_STORM_CLIENT, 1);
WREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL, tmp);

tmp = RREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL);
tmp = REG_SET_FIELD(tmp, IH_INT_FLOOD_CNTL, FLOOD_CNTL_ENABLE, 1);
WREG32_SOC15(OSSSYS, 0, mmIH_INT_FLOOD_CNTL, tmp);

pci_set_master(adev->pdev);

/* enable interrupts */
@ -420,12 +409,19 @@ static u32 navi10_ih_get_wptr(struct amdgpu_device *adev,
u32 wptr, tmp;
struct amdgpu_ih_regs *ih_regs;

wptr = le32_to_cpu(*ih->wptr_cpu);
if (ih == &adev->irq.ih) {
/* Only ring0 supports writeback. On other rings fall back
* to register-based code with overflow checking below.
*/
wptr = le32_to_cpu(*ih->wptr_cpu);

if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
goto out;
}

ih_regs = &ih->ih_regs;

if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
goto out;

/* Double check that the overflow wasn't already cleared. */
wptr = RREG32_NO_KIQ(ih_regs->ih_rb_wptr);
if (!REG_GET_FIELD(wptr, IH_RB_WPTR, RB_OVERFLOW))
goto out;
@ -513,15 +509,11 @@ static int navi10_ih_self_irq(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
uint32_t wptr = cpu_to_le32(entry->src_data[0]);

switch (entry->ring_id) {
case 1:
*adev->irq.ih1.wptr_cpu = wptr;
schedule_work(&adev->irq.ih1_work);
break;
case 2:
*adev->irq.ih2.wptr_cpu = wptr;
schedule_work(&adev->irq.ih2_work);
break;
default: break;

@ -359,6 +359,10 @@ static void nbio_v2_3_init_registers(struct amdgpu_device *adev)

if (def != data)
WREG32_PCIE(smnPCIE_CONFIG_CNTL, data);

if (amdgpu_sriov_vf(adev))
adev->rmmio_remap.reg_offset = SOC15_REG_OFFSET(NBIO, 0,
mmBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2;
}

#define NAVI10_PCIE__LC_L0S_INACTIVITY_DEFAULT 0x00000000 // off by default, no gains over L1

@ -276,6 +276,10 @@ static void nbio_v6_1_init_registers(struct amdgpu_device *adev)

if (def != data)
WREG32_PCIE(smnPCIE_CI_CNTL, data);

if (amdgpu_sriov_vf(adev))
adev->rmmio_remap.reg_offset = SOC15_REG_OFFSET(NBIO, 0,
mmBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2;
}

static void nbio_v6_1_program_ltr(struct amdgpu_device *adev)

@ -273,7 +273,9 @@ const struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg = {

static void nbio_v7_0_init_registers(struct amdgpu_device *adev)
{

if (amdgpu_sriov_vf(adev))
adev->rmmio_remap.reg_offset =
SOC15_REG_OFFSET(NBIO, 0, mmHDP_MEM_COHERENCY_FLUSH_CNTL) << 2;
}

const struct amdgpu_nbio_funcs nbio_v7_0_funcs = {

@ -371,6 +371,10 @@ static void nbio_v7_2_init_registers(struct amdgpu_device *adev)
if (def != data)
WREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regPCIE_CONFIG_CNTL), data);
}

if (amdgpu_sriov_vf(adev))
adev->rmmio_remap.reg_offset = SOC15_REG_OFFSET(NBIO, 0,
regBIF_BX_PF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2;
}

const struct amdgpu_nbio_funcs nbio_v7_2_funcs = {

@ -362,7 +362,9 @@ const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg_ald = {

static void nbio_v7_4_init_registers(struct amdgpu_device *adev)
{

if (amdgpu_sriov_vf(adev))
adev->rmmio_remap.reg_offset = SOC15_REG_OFFSET(NBIO, 0,
mmBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2;
}

static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device *adev)
@ -692,6 +694,9 @@ static void nbio_v7_4_program_aspm(struct amdgpu_device *adev)
{
uint32_t def, data;

if (adev->ip_versions[NBIO_HWIP][0] == IP_VERSION(7, 4, 4))
return;

def = data = RREG32_PCIE(smnPCIE_LC_CNTL);
data &= ~PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK;
data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK;

@ -731,8 +731,10 @@ static int nv_common_early_init(void *handle)
#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;

adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
if (!amdgpu_sriov_vf(adev)) {
adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
}
adev->smc_rreg = NULL;
adev->smc_wreg = NULL;
adev->pcie_rreg = &nv_pcie_rreg;
@ -1032,7 +1034,7 @@ static int nv_common_hw_init(void *handle)
* for the purpose of expose those registers
* to process space
*/
if (adev->nbio.funcs->remap_hdp_registers)
if (adev->nbio.funcs->remap_hdp_registers && !amdgpu_sriov_vf(adev))
adev->nbio.funcs->remap_hdp_registers(adev);
/* enable the doorbell aperture */
nv_enable_doorbell_aperture(adev, true);

@ -971,8 +971,10 @@ static int soc15_common_early_init(void *handle)
#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;

adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
if (!amdgpu_sriov_vf(adev)) {
adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET;
adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET;
}
adev->smc_rreg = NULL;
adev->smc_wreg = NULL;
adev->pcie_rreg = &soc15_pcie_rreg;
@ -1285,7 +1287,7 @@ static int soc15_common_hw_init(void *handle)
* for the purpose of expose those registers
* to process space
*/
if (adev->nbio.funcs->remap_hdp_registers)
if (adev->nbio.funcs->remap_hdp_registers && !amdgpu_sriov_vf(adev))
adev->nbio.funcs->remap_hdp_registers(adev);

/* enable the doorbell aperture */

@ -766,7 +766,7 @@ struct svm_range_list {
struct list_head deferred_range_list;
spinlock_t deferred_list_lock;
atomic_t evicted_ranges;
bool drain_pagefaults;
atomic_t drain_pagefaults;
struct delayed_work restore_work;
DECLARE_BITMAP(bitmap_supported, MAX_GPU_INSTANCE);
struct task_struct *faulting_task;

@ -1968,10 +1968,16 @@ static void svm_range_drain_retry_fault(struct svm_range_list *svms)
struct kfd_process_device *pdd;
struct amdgpu_device *adev;
struct kfd_process *p;
int drain;
uint32_t i;

p = container_of(svms, struct kfd_process, svms);

restart:
drain = atomic_read(&svms->drain_pagefaults);
if (!drain)
return;

for_each_set_bit(i, svms->bitmap_supported, p->n_pdds) {
pdd = p->pdds[i];
if (!pdd)
@ -1983,6 +1989,8 @@ static void svm_range_drain_retry_fault(struct svm_range_list *svms)
amdgpu_ih_wait_on_checkpoint_process(adev, &adev->irq.ih1);
pr_debug("drain retry fault gpu %d svms 0x%p done\n", i, svms);
}
if (atomic_cmpxchg(&svms->drain_pagefaults, drain, 0) != drain)
goto restart;
}

static void svm_range_deferred_list_work(struct work_struct *work)
@ -1990,43 +1998,41 @@ static void svm_range_deferred_list_work(struct work_struct *work)
struct svm_range_list *svms;
struct svm_range *prange;
struct mm_struct *mm;
struct kfd_process *p;

svms = container_of(work, struct svm_range_list, deferred_list_work);
pr_debug("enter svms 0x%p\n", svms);

p = container_of(svms, struct kfd_process, svms);
/* Avoid mm is gone when inserting mmu notifier */
mm = get_task_mm(p->lead_thread);
if (!mm) {
pr_debug("svms 0x%p process mm gone\n", svms);
return;
}
retry:
mmap_write_lock(mm);

/* Checking for the need to drain retry faults must be inside
* mmap write lock to serialize with munmap notifiers.
*/
if (unlikely(atomic_read(&svms->drain_pagefaults))) {
mmap_write_unlock(mm);
svm_range_drain_retry_fault(svms);
goto retry;
}

spin_lock(&svms->deferred_list_lock);
while (!list_empty(&svms->deferred_range_list)) {
prange = list_first_entry(&svms->deferred_range_list,
struct svm_range, deferred_list);
spin_unlock(&svms->deferred_list_lock);
pr_debug("prange 0x%p [0x%lx 0x%lx] op %d\n", prange,
prange->start, prange->last, prange->work_item.op);

mm = prange->work_item.mm;
retry:
mmap_write_lock(mm);
mutex_lock(&svms->lock);

/* Checking for the need to drain retry faults must be in
* mmap write lock to serialize with munmap notifiers.
*
* Remove from deferred_list must be inside mmap write lock,
* otherwise, svm_range_list_lock_and_flush_work may hold mmap
* write lock, and continue because deferred_list is empty, then
* deferred_list handle is blocked by mmap write lock.
*/
spin_lock(&svms->deferred_list_lock);
if (unlikely(svms->drain_pagefaults)) {
svms->drain_pagefaults = false;
spin_unlock(&svms->deferred_list_lock);
mutex_unlock(&svms->lock);
mmap_write_unlock(mm);
svm_range_drain_retry_fault(svms);
goto retry;
}
list_del_init(&prange->deferred_list);
spin_unlock(&svms->deferred_list_lock);

pr_debug("prange 0x%p [0x%lx 0x%lx] op %d\n", prange,
prange->start, prange->last, prange->work_item.op);

mutex_lock(&svms->lock);
mutex_lock(&prange->migrate_mutex);
while (!list_empty(&prange->child_list)) {
struct svm_range *pchild;
@ -2042,12 +2048,13 @@ retry:

svm_range_handle_list_op(svms, prange);
mutex_unlock(&svms->lock);
mmap_write_unlock(mm);

spin_lock(&svms->deferred_list_lock);
}
spin_unlock(&svms->deferred_list_lock);

mmap_write_unlock(mm);
mmput(mm);
pr_debug("exit svms 0x%p\n", svms);
}

@ -2056,12 +2063,6 @@ svm_range_add_list_work(struct svm_range_list *svms, struct svm_range *prange,
struct mm_struct *mm, enum svm_work_list_ops op)
{
spin_lock(&svms->deferred_list_lock);
/* Make sure pending page faults are drained in the deferred worker
* before the range is freed to avoid straggler interrupts on
* unmapped memory causing "phantom faults".
*/
if (op == SVM_OP_UNMAP_RANGE)
svms->drain_pagefaults = true;
/* if prange is on the deferred list */
if (!list_empty(&prange->deferred_list)) {
pr_debug("update exist prange 0x%p work op %d\n", prange, op);
@ -2140,6 +2141,12 @@ svm_range_unmap_from_cpu(struct mm_struct *mm, struct svm_range *prange,
pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] [0x%lx 0x%lx]\n", svms,
prange, prange->start, prange->last, start, last);

/* Make sure pending page faults are drained in the deferred worker
* before the range is freed to avoid straggler interrupts on
* unmapped memory causing "phantom faults".
*/
atomic_inc(&svms->drain_pagefaults);

unmap_parent = start <= prange->start && last >= prange->last;

list_for_each_entry(pchild, &prange->child_list, child_list) {
@ -2559,20 +2566,13 @@ svm_range_count_fault(struct amdgpu_device *adev, struct kfd_process *p,
}

static bool
svm_fault_allowed(struct mm_struct *mm, uint64_t addr, bool write_fault)
svm_fault_allowed(struct vm_area_struct *vma, bool write_fault)
{
unsigned long requested = VM_READ;
struct vm_area_struct *vma;

if (write_fault)
requested |= VM_WRITE;

vma = find_vma(mm, addr << PAGE_SHIFT);
if (!vma || (addr << PAGE_SHIFT) < vma->vm_start) {
pr_debug("address 0x%llx VMA is removed\n", addr);
return true;
}

pr_debug("requested 0x%lx, vma permission flags 0x%lx\n", requested,
vma->vm_flags);
return (vma->vm_flags & requested) == requested;
@ -2590,6 +2590,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
int32_t best_loc;
int32_t gpuidx = MAX_GPU_INSTANCE;
bool write_locked = false;
struct vm_area_struct *vma;
int r = 0;

if (!KFD_IS_SVM_API_SUPPORTED(adev->kfd.dev)) {
@ -2600,7 +2601,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
p = kfd_lookup_process_by_pasid(pasid);
if (!p) {
pr_debug("kfd process not founded pasid 0x%x\n", pasid);
return -ESRCH;
return 0;
}
if (!p->xnack_enabled) {
pr_debug("XNACK not enabled for pasid 0x%x\n", pasid);
@ -2611,10 +2612,17 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,

pr_debug("restoring svms 0x%p fault address 0x%llx\n", svms, addr);

if (atomic_read(&svms->drain_pagefaults)) {
pr_debug("draining retry fault, drop fault 0x%llx\n", addr);
goto out;
}

/* p->lead_thread is available as kfd_process_wq_release flush the work
* before releasing task ref.
*/
mm = get_task_mm(p->lead_thread);
if (!mm) {
pr_debug("svms 0x%p failed to get mm\n", svms);
r = -ESRCH;
goto out;
}

@ -2663,7 +2671,17 @@ retry_write_locked:
goto out_unlock_range;
}

if (!svm_fault_allowed(mm, addr, write_fault)) {
/* __do_munmap removed VMA, return success as we are handling stale
* retry fault.
*/
vma = find_vma(mm, addr << PAGE_SHIFT);
if (!vma || (addr << PAGE_SHIFT) < vma->vm_start) {
pr_debug("address 0x%llx VMA is removed\n", addr);
r = 0;
goto out_unlock_range;
}

if (!svm_fault_allowed(vma, write_fault)) {
pr_debug("fault addr 0x%llx no %s permission\n", addr,
write_fault ? "write" : "read");
r = -EPERM;
@ -2741,6 +2759,14 @@ void svm_range_list_fini(struct kfd_process *p)
/* Ensure list work is finished before process is destroyed */
flush_work(&p->svms.deferred_list_work);

/*
* Ensure no retry fault comes in afterwards, as page fault handler will
* not find kfd process and take mm lock to recover fault.
*/
atomic_inc(&p->svms.drain_pagefaults);
svm_range_drain_retry_fault(&p->svms);

list_for_each_entry_safe(prange, next, &p->svms.list, list) {
svm_range_unlink(prange);
svm_range_remove_notifier(prange);
@ -2761,6 +2787,7 @@ int svm_range_list_init(struct kfd_process *p)
mutex_init(&svms->lock);
INIT_LIST_HEAD(&svms->list);
atomic_set(&svms->evicted_ranges, 0);
atomic_set(&svms->drain_pagefaults, 0);
INIT_DELAYED_WORK(&svms->restore_work, svm_range_restore_work);
INIT_WORK(&svms->deferred_list_work, svm_range_deferred_list_work);
INIT_LIST_HEAD(&svms->deferred_range_list);

@ -51,6 +51,7 @@
#include <drm/drm_hdcp.h>
#endif
#include "amdgpu_pm.h"
#include "amdgpu_atombios.h"

#include "amd_shared.h"
#include "amdgpu_dm_irq.h"
@ -2561,6 +2562,22 @@ static int dm_resume(void *handle)
if (amdgpu_in_reset(adev)) {
dc_state = dm->cached_dc_state;

/*
* The dc->current_state is backed up into dm->cached_dc_state
* before we commit 0 streams.
*
* DC will clear link encoder assignments on the real state
* but the changes won't propagate over to the copy we made
* before the 0 streams commit.
*
* DC expects that link encoder assignments are *not* valid
* when committing a state, so as a workaround it needs to be
* cleared here.
*/
link_enc_cfg_init(dm->dc, dc_state);

amdgpu_dm_outbox_init(adev);

r = dm_dmub_hw_init(adev);
if (r)
DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
@ -2572,8 +2589,8 @@ static int dm_resume(void *handle)

for (i = 0; i < dc_state->stream_count; i++) {
dc_state->streams[i]->mode_changed = true;
for (j = 0; j < dc_state->stream_status->plane_count; j++) {
dc_state->stream_status->plane_states[j]->update_flags.raw
for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
dc_state->stream_status[i].plane_states[j]->update_flags.raw
= 0xffffffff;
}
}
@ -3909,6 +3926,9 @@ static int amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
caps = dm->backlight_caps[bl_idx];

dm->brightness[bl_idx] = user_brightness;
/* update scratch register */
if (bl_idx == 0)
amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]);
brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
link = (struct dc_link *)dm->backlight_link[bl_idx];

@ -1637,7 +1637,7 @@ void dcn10_reset_hw_ctx_wrap(

dcn10_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
if (hws->funcs.enable_stream_gating)
hws->funcs.enable_stream_gating(dc, pipe_ctx);
hws->funcs.enable_stream_gating(dc, pipe_ctx_old);
if (old_clk)
old_clk->funcs->cs_power_down(old_clk);
}

@ -2270,7 +2270,7 @@ void dcn20_reset_hw_ctx_wrap(

dcn20_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
if (hws->funcs.enable_stream_gating)
hws->funcs.enable_stream_gating(dc, pipe_ctx);
hws->funcs.enable_stream_gating(dc, pipe_ctx_old);
if (old_clk)
old_clk->funcs->cs_power_down(old_clk);
}

@ -602,7 +602,7 @@ void dcn31_reset_hw_ctx_wrap(

dcn31_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
if (hws->funcs.enable_stream_gating)
hws->funcs.enable_stream_gating(dc, pipe_ctx);
hws->funcs.enable_stream_gating(dc, pipe_ctx_old);
if (old_clk)
old_clk->funcs->cs_power_down(old_clk);
}

@ -1024,8 +1024,6 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
uint32_t min_freq, max_freq = 0;
uint32_t ret = 0;

phm_get_sysfs_buf(&buf, &size);

switch (type) {
case PP_SCLK:
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetGfxclkFrequency, &now);
@ -1038,13 +1036,13 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
else
i = 1;

size += sysfs_emit_at(buf, size, "0: %uMhz %s\n",
size += sprintf(buf + size, "0: %uMhz %s\n",
data->gfx_min_freq_limit/100,
i == 0 ? "*" : "");
size += sysfs_emit_at(buf, size, "1: %uMhz %s\n",
size += sprintf(buf + size, "1: %uMhz %s\n",
i == 1 ? now : SMU10_UMD_PSTATE_GFXCLK,
i == 1 ? "*" : "");
size += sysfs_emit_at(buf, size, "2: %uMhz %s\n",
size += sprintf(buf + size, "2: %uMhz %s\n",
data->gfx_max_freq_limit/100,
i == 2 ? "*" : "");
break;
@ -1052,7 +1050,7 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetFclkFrequency, &now);

for (i = 0; i < mclk_table->count; i++)
size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
size += sprintf(buf + size, "%d: %uMhz %s\n",
i,
mclk_table->entries[i].clk / 100,
((mclk_table->entries[i].clk / 100)
@ -1067,10 +1065,10 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
if (ret)
return ret;

size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
size += sprintf(buf + size, "%s:\n", "OD_SCLK");
size += sprintf(buf + size, "0: %10uMhz\n",
(data->gfx_actual_soft_min_freq > 0) ? data->gfx_actual_soft_min_freq : min_freq);
size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
size += sprintf(buf + size, "1: %10uMhz\n",
(data->gfx_actual_soft_max_freq > 0) ? data->gfx_actual_soft_max_freq : max_freq);
}
break;
@ -1083,8 +1081,8 @@ static int smu10_print_clock_levels(struct pp_hwmgr *hwmgr,
if (ret)
return ret;

size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
size += sysfs_emit_at(buf, size, "SCLK: %7uMHz %10uMHz\n",
size += sprintf(buf + size, "%s:\n", "OD_RANGE");
size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n",
min_freq, max_freq);
}
break;

@ -4914,8 +4914,6 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
int size = 0;
uint32_t i, now, clock, pcie_speed;

phm_get_sysfs_buf(&buf, &size);

switch (type) {
case PP_SCLK:
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_API_GetSclkFrequency, &clock);
@ -4928,7 +4926,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
now = i;

for (i = 0; i < sclk_table->count; i++)
size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
size += sprintf(buf + size, "%d: %uMhz %s\n",
i, sclk_table->dpm_levels[i].value / 100,
(i == now) ? "*" : "");
break;
@ -4943,7 +4941,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
now = i;

for (i = 0; i < mclk_table->count; i++)
size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
size += sprintf(buf + size, "%d: %uMhz %s\n",
i, mclk_table->dpm_levels[i].value / 100,
(i == now) ? "*" : "");
break;
@ -4957,7 +4955,7 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
now = i;

for (i = 0; i < pcie_table->count; i++)
size += sysfs_emit_at(buf, size, "%d: %s %s\n", i,
size += sprintf(buf + size, "%d: %s %s\n", i,
(pcie_table->dpm_levels[i].value == 0) ? "2.5GT/s, x8" :
(pcie_table->dpm_levels[i].value == 1) ? "5.0GT/s, x16" :
(pcie_table->dpm_levels[i].value == 2) ? "8.0GT/s, x16" : "",
@ -4965,32 +4963,32 @@ static int smu7_print_clock_levels(struct pp_hwmgr *hwmgr,
break;
case OD_SCLK:
if (hwmgr->od_enabled) {
size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
size += sprintf(buf + size, "%s:\n", "OD_SCLK");
for (i = 0; i < odn_sclk_table->num_of_pl; i++)
size += sysfs_emit_at(buf, size, "%d: %10uMHz %10umV\n",
size += sprintf(buf + size, "%d: %10uMHz %10umV\n",
i, odn_sclk_table->entries[i].clock/100,
odn_sclk_table->entries[i].vddc);
}
break;
case OD_MCLK:
if (hwmgr->od_enabled) {
size += sysfs_emit_at(buf, size, "%s:\n", "OD_MCLK");
size += sprintf(buf + size, "%s:\n", "OD_MCLK");
for (i = 0; i < odn_mclk_table->num_of_pl; i++)
size += sysfs_emit_at(buf, size, "%d: %10uMHz %10umV\n",
size += sprintf(buf + size, "%d: %10uMHz %10umV\n",
i, odn_mclk_table->entries[i].clock/100,
odn_mclk_table->entries[i].vddc);
}
break;
case OD_RANGE:
if (hwmgr->od_enabled) {
size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
size += sysfs_emit_at(buf, size, "SCLK: %7uMHz %10uMHz\n",
size += sprintf(buf + size, "%s:\n", "OD_RANGE");
size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n",
data->golden_dpm_table.sclk_table.dpm_levels[0].value/100,
hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
size += sysfs_emit_at(buf, size, "MCLK: %7uMHz %10uMHz\n",
size += sprintf(buf + size, "MCLK: %7uMHz %10uMHz\n",
data->golden_dpm_table.mclk_table.dpm_levels[0].value/100,
hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
size += sysfs_emit_at(buf, size, "VDDC: %7umV %11umV\n",
size += sprintf(buf + size, "VDDC: %7umV %11umV\n",
data->odn_dpm_table.min_vddc,
data->odn_dpm_table.max_vddc);
}

@ -1550,8 +1550,6 @@ static int smu8_print_clock_levels(struct pp_hwmgr *hwmgr,
uint32_t i, now;
int size = 0;

phm_get_sysfs_buf(&buf, &size);

switch (type) {
case PP_SCLK:
now = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device,
@ -1561,7 +1559,7 @@ static int smu8_print_clock_levels(struct pp_hwmgr *hwmgr,
CURR_SCLK_INDEX);

for (i = 0; i < sclk_table->count; i++)
size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
size += sprintf(buf + size, "%d: %uMhz %s\n",
i, sclk_table->entries[i].clk / 100,
(i == now) ? "*" : "");
break;
@ -1573,7 +1571,7 @@ static int smu8_print_clock_levels(struct pp_hwmgr *hwmgr,
CURR_MCLK_INDEX);

for (i = SMU8_NUM_NBPMEMORYCLOCK; i > 0; i--)
size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
size += sprintf(buf + size, "%d: %uMhz %s\n",
SMU8_NUM_NBPMEMORYCLOCK-i, data->sys_info.nbp_memory_clock[i-1] / 100,
(SMU8_NUM_NBPMEMORYCLOCK-i == now) ? "*" : "");
break;

@ -4639,8 +4639,6 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,

int i, now, size = 0, count = 0;

phm_get_sysfs_buf(&buf, &size);

switch (type) {
case PP_SCLK:
if (data->registry_data.sclk_dpm_key_disabled)
@ -4654,7 +4652,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
else
count = sclk_table->count;
for (i = 0; i < count; i++)
size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
size += sprintf(buf + size, "%d: %uMhz %s\n",
i, sclk_table->dpm_levels[i].value / 100,
(i == now) ? "*" : "");
break;
@ -4665,7 +4663,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex, &now);

for (i = 0; i < mclk_table->count; i++)
size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
size += sprintf(buf + size, "%d: %uMhz %s\n",
i, mclk_table->dpm_levels[i].value / 100,
(i == now) ? "*" : "");
break;
@ -4676,7 +4674,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentSocclkIndex, &now);

for (i = 0; i < soc_table->count; i++)
size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
size += sprintf(buf + size, "%d: %uMhz %s\n",
i, soc_table->dpm_levels[i].value / 100,
(i == now) ? "*" : "");
break;
@ -4688,7 +4686,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
PPSMC_MSG_GetClockFreqMHz, CLK_DCEFCLK, &now);

for (i = 0; i < dcef_table->count; i++)
size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
size += sprintf(buf + size, "%d: %uMhz %s\n",
i, dcef_table->dpm_levels[i].value / 100,
(dcef_table->dpm_levels[i].value / 100 == now) ?
"*" : "");
@ -4702,7 +4700,7 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
gen_speed = pptable->PcieGenSpeed[i];
lane_width = pptable->PcieLaneCount[i];

size += sysfs_emit_at(buf, size, "%d: %s %s %s\n", i,
size += sprintf(buf + size, "%d: %s %s %s\n", i,
(gen_speed == 0) ? "2.5GT/s," :
(gen_speed == 1) ? "5.0GT/s," :
(gen_speed == 2) ? "8.0GT/s," :
@ -4721,34 +4719,34 @@ static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,

case OD_SCLK:
if (hwmgr->od_enabled) {
size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
size += sprintf(buf + size, "%s:\n", "OD_SCLK");
podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_sclk;
for (i = 0; i < podn_vdd_dep->count; i++)
size += sysfs_emit_at(buf, size, "%d: %10uMhz %10umV\n",
size += sprintf(buf + size, "%d: %10uMhz %10umV\n",
i, podn_vdd_dep->entries[i].clk / 100,
podn_vdd_dep->entries[i].vddc);
}
break;
case OD_MCLK:
if (hwmgr->od_enabled) {
size += sysfs_emit_at(buf, size, "%s:\n", "OD_MCLK");
size += sprintf(buf + size, "%s:\n", "OD_MCLK");
podn_vdd_dep = &data->odn_dpm_table.vdd_dep_on_mclk;
for (i = 0; i < podn_vdd_dep->count; i++)
size += sysfs_emit_at(buf, size, "%d: %10uMhz %10umV\n",
size += sprintf(buf + size, "%d: %10uMhz %10umV\n",
i, podn_vdd_dep->entries[i].clk/100,
podn_vdd_dep->entries[i].vddc);
}
break;
case OD_RANGE:
if (hwmgr->od_enabled) {
size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
size += sysfs_emit_at(buf, size, "SCLK: %7uMHz %10uMHz\n",
size += sprintf(buf + size, "%s:\n", "OD_RANGE");
size += sprintf(buf + size, "SCLK: %7uMHz %10uMHz\n",
data->golden_dpm_table.gfx_table.dpm_levels[0].value/100,
hwmgr->platform_descriptor.overdriveLimit.engineClock/100);
size += sysfs_emit_at(buf, size, "MCLK: %7uMHz %10uMHz\n",
size += sprintf(buf + size, "MCLK: %7uMHz %10uMHz\n",
data->golden_dpm_table.mem_table.dpm_levels[0].value/100,
hwmgr->platform_descriptor.overdriveLimit.memoryClock/100);
size += sysfs_emit_at(buf, size, "VDDC: %7umV %11umV\n",
size += sprintf(buf + size, "VDDC: %7umV %11umV\n",
data->odn_dpm_table.min_vddc,
data->odn_dpm_table.max_vddc);
}

@ -2246,8 +2246,6 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
int i, now, size = 0;
struct pp_clock_levels_with_latency clocks;

phm_get_sysfs_buf(&buf, &size);

switch (type) {
case PP_SCLK:
PP_ASSERT_WITH_CODE(
@ -2260,7 +2258,7 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
"Attempt to get gfx clk levels Failed!",
return -1);
for (i = 0; i < clocks.num_levels; i++)
size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
size += sprintf(buf + size, "%d: %uMhz %s\n",
i, clocks.data[i].clocks_in_khz / 1000,
(clocks.data[i].clocks_in_khz / 1000 == now / 100) ? "*" : "");
break;
@ -2276,7 +2274,7 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
"Attempt to get memory clk levels Failed!",
return -1);
for (i = 0; i < clocks.num_levels; i++)
size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
size += sprintf(buf + size, "%d: %uMhz %s\n",
i, clocks.data[i].clocks_in_khz / 1000,
(clocks.data[i].clocks_in_khz / 1000 == now / 100) ? "*" : "");
break;
@ -2294,7 +2292,7 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
"Attempt to get soc clk levels Failed!",
return -1);
for (i = 0; i < clocks.num_levels; i++)
size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
size += sprintf(buf + size, "%d: %uMhz %s\n",
i, clocks.data[i].clocks_in_khz / 1000,
(clocks.data[i].clocks_in_khz / 1000 == now) ? "*" : "");
break;
@ -2312,7 +2310,7 @@ static int vega12_print_clock_levels(struct pp_hwmgr *hwmgr,
"Attempt to get dcef clk levels Failed!",
return -1);
for (i = 0; i < clocks.num_levels; i++)
size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
size += sprintf(buf + size, "%d: %uMhz %s\n",
i, clocks.data[i].clocks_in_khz / 1000,
(clocks.data[i].clocks_in_khz / 1000 == now) ? "*" : "");
break;

@ -3366,8 +3366,6 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
int ret = 0;
uint32_t gen_speed, lane_width, current_gen_speed, current_lane_width;

phm_get_sysfs_buf(&buf, &size);

switch (type) {
case PP_SCLK:
ret = vega20_get_current_clk_freq(hwmgr, PPCLK_GFXCLK, &now);
@ -3376,13 +3374,13 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
return ret);

if (vega20_get_sclks(hwmgr, &clocks)) {
size += sysfs_emit_at(buf, size, "0: %uMhz * (DPM disabled)\n",
size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n",
now / 100);
break;
}

for (i = 0; i < clocks.num_levels; i++)
size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
size += sprintf(buf + size, "%d: %uMhz %s\n",
i, clocks.data[i].clocks_in_khz / 1000,
(clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
break;
@ -3394,13 +3392,13 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
return ret);

if (vega20_get_memclocks(hwmgr, &clocks)) {
size += sysfs_emit_at(buf, size, "0: %uMhz * (DPM disabled)\n",
size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n",
now / 100);
break;
}

for (i = 0; i < clocks.num_levels; i++)
size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
size += sprintf(buf + size, "%d: %uMhz %s\n",
i, clocks.data[i].clocks_in_khz / 1000,
(clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
break;
@ -3412,13 +3410,13 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
return ret);

if (vega20_get_socclocks(hwmgr, &clocks)) {
size += sysfs_emit_at(buf, size, "0: %uMhz * (DPM disabled)\n",
size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n",
now / 100);
break;
}

for (i = 0; i < clocks.num_levels; i++)
size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
size += sprintf(buf + size, "%d: %uMhz %s\n",
i, clocks.data[i].clocks_in_khz / 1000,
(clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
break;
@ -3430,7 +3428,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
return ret);

for (i = 0; i < fclk_dpm_table->count; i++)
size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
size += sprintf(buf + size, "%d: %uMhz %s\n",
i, fclk_dpm_table->dpm_levels[i].value,
fclk_dpm_table->dpm_levels[i].value == (now / 100) ? "*" : "");
break;
@ -3442,13 +3440,13 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
return ret);

if (vega20_get_dcefclocks(hwmgr, &clocks)) {
size += sysfs_emit_at(buf, size, "0: %uMhz * (DPM disabled)\n",
size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n",
now / 100);
break;
}

for (i = 0; i < clocks.num_levels; i++)
size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n",
size += sprintf(buf + size, "%d: %uMhz %s\n",
i, clocks.data[i].clocks_in_khz / 1000,
(clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
break;
@ -3462,7 +3460,7 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
gen_speed = pptable->PcieGenSpeed[i];
lane_width = pptable->PcieLaneCount[i];

size += sysfs_emit_at(buf, size, "%d: %s %s %dMhz %s\n", i,
size += sprintf(buf + size, "%d: %s %s %dMhz %s\n", i,
(gen_speed == 0) ? "2.5GT/s," :
(gen_speed == 1) ? "5.0GT/s," :
(gen_speed == 2) ? "8.0GT/s," :
@ -3483,18 +3481,18 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
case OD_SCLK:
if (od8_settings[OD8_SETTING_GFXCLK_FMIN].feature_id &&
od8_settings[OD8_SETTING_GFXCLK_FMAX].feature_id) {
size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
size += sprintf(buf + size, "%s:\n", "OD_SCLK");
size += sprintf(buf + size, "0: %10uMhz\n",
od_table->GfxclkFmin);
size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
size += sprintf(buf + size, "1: %10uMhz\n",
od_table->GfxclkFmax);
}
break;

case OD_MCLK:
if (od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) {
size += sysfs_emit_at(buf, size, "%s:\n", "OD_MCLK");
size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
size += sprintf(buf + size, "%s:\n", "OD_MCLK");
size += sprintf(buf + size, "1: %10uMhz\n",
od_table->UclkFmax);
}

@ -3507,14 +3505,14 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id &&
od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id &&
od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id) {
size += sysfs_emit_at(buf, size, "%s:\n", "OD_VDDC_CURVE");
size += sysfs_emit_at(buf, size, "0: %10uMhz %10dmV\n",
size += sprintf(buf + size, "%s:\n", "OD_VDDC_CURVE");
size += sprintf(buf + size, "0: %10uMhz %10dmV\n",
od_table->GfxclkFreq1,
od_table->GfxclkVolt1 / VOLTAGE_SCALE);
size += sysfs_emit_at(buf, size, "1: %10uMhz %10dmV\n",
size += sprintf(buf + size, "1: %10uMhz %10dmV\n",
od_table->GfxclkFreq2,
od_table->GfxclkVolt2 / VOLTAGE_SCALE);
size += sysfs_emit_at(buf, size, "2: %10uMhz %10dmV\n",
size += sprintf(buf + size, "2: %10uMhz %10dmV\n",
od_table->GfxclkFreq3,
od_table->GfxclkVolt3 / VOLTAGE_SCALE);
}
@ -3522,17 +3520,17 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
break;

case OD_RANGE:
size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
size += sprintf(buf + size, "%s:\n", "OD_RANGE");

if (od8_settings[OD8_SETTING_GFXCLK_FMIN].feature_id &&
od8_settings[OD8_SETTING_GFXCLK_FMAX].feature_id) {
size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
size += sprintf(buf + size, "SCLK: %7uMhz %10uMhz\n",
od8_settings[OD8_SETTING_GFXCLK_FMIN].min_value,
od8_settings[OD8_SETTING_GFXCLK_FMAX].max_value);
}

if (od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) {
size += sysfs_emit_at(buf, size, "MCLK: %7uMhz %10uMhz\n",
size += sprintf(buf + size, "MCLK: %7uMhz %10uMhz\n",
od8_settings[OD8_SETTING_UCLK_FMAX].min_value,
od8_settings[OD8_SETTING_UCLK_FMAX].max_value);
}
@ -3543,22 +3541,22 @@ static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id &&
od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id &&
od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id) {
size += sysfs_emit_at(buf, size, "VDDC_CURVE_SCLK[0]: %7uMhz %10uMhz\n",
size += sprintf(buf + size, "VDDC_CURVE_SCLK[0]: %7uMhz %10uMhz\n",
od8_settings[OD8_SETTING_GFXCLK_FREQ1].min_value,
od8_settings[OD8_SETTING_GFXCLK_FREQ1].max_value);
size += sysfs_emit_at(buf, size, "VDDC_CURVE_VOLT[0]: %7dmV %11dmV\n",
size += sprintf(buf + size, "VDDC_CURVE_VOLT[0]: %7dmV %11dmV\n",
od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].min_value,
od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].max_value);
size += sysfs_emit_at(buf, size, "VDDC_CURVE_SCLK[1]: %7uMhz %10uMhz\n",
size += sprintf(buf + size, "VDDC_CURVE_SCLK[1]: %7uMhz %10uMhz\n",
od8_settings[OD8_SETTING_GFXCLK_FREQ2].min_value,
od8_settings[OD8_SETTING_GFXCLK_FREQ2].max_value);
size += sysfs_emit_at(buf, size, "VDDC_CURVE_VOLT[1]: %7dmV %11dmV\n",
size += sprintf(buf + size, "VDDC_CURVE_VOLT[1]: %7dmV %11dmV\n",
od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].min_value,
od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].max_value);
size += sysfs_emit_at(buf, size, "VDDC_CURVE_SCLK[2]: %7uMhz %10uMhz\n",
size += sprintf(buf + size, "VDDC_CURVE_SCLK[2]: %7uMhz %10uMhz\n",
od8_settings[OD8_SETTING_GFXCLK_FREQ3].min_value,
od8_settings[OD8_SETTING_GFXCLK_FREQ3].max_value);
size += sysfs_emit_at(buf, size, "VDDC_CURVE_VOLT[2]: %7dmV %11dmV\n",
size += sprintf(buf + size, "VDDC_CURVE_VOLT[2]: %7dmV %11dmV\n",
od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].min_value,
od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].max_value);
}

@ -291,7 +291,7 @@ vga_pw_show(struct device *dev, struct device_attribute *attr, char *buf)
if (rc)
return rc;

return sprintf(buf, "%u\n", reg & 1);
return sprintf(buf, "%u\n", reg);
}
static DEVICE_ATTR_RO(vga_pw);

@ -225,12 +225,29 @@ static int hyperv_vmbus_remove(struct hv_device *hdev)
{
struct drm_device *dev = hv_get_drvdata(hdev);
struct hyperv_drm_device *hv = to_hv(dev);
struct pci_dev *pdev;

drm_dev_unplug(dev);
drm_atomic_helper_shutdown(dev);
vmbus_close(hdev->channel);
hv_set_drvdata(hdev, NULL);
vmbus_free_mmio(hv->mem->start, hv->fb_size);

/*
* Free allocated MMIO memory only on Gen2 VMs.
* On Gen1 VMs, release the PCI device
*/
if (efi_enabled(EFI_BOOT)) {
vmbus_free_mmio(hv->mem->start, hv->fb_size);
} else {
pdev = pci_get_device(PCI_VENDOR_ID_MICROSOFT,
PCI_DEVICE_ID_HYPERV_VIDEO, NULL);
if (!pdev) {
drm_err(dev, "Unable to find PCI Hyper-V video\n");
return -ENODEV;
}
pci_release_region(pdev, 0);
pci_dev_put(pdev);
}

return 0;
}

@ -301,7 +301,7 @@ void intel_gt_suspend_prepare(struct intel_gt *gt)
user_forcewake(gt, true);
wait_for_suspend(gt);

intel_pxp_suspend(&gt->pxp, false);
intel_pxp_suspend_prepare(&gt->pxp);
}

static suspend_state_t pm_suspend_target(void)
@ -326,6 +326,7 @@ void intel_gt_suspend_late(struct intel_gt *gt)
GEM_BUG_ON(gt->awake);

intel_uc_suspend(&gt->uc);
intel_pxp_suspend(&gt->pxp);

/*
* On disabling the device, we want to turn off HW access to memory
@ -353,7 +354,7 @@ void intel_gt_suspend_late(struct intel_gt *gt)

void intel_gt_runtime_suspend(struct intel_gt *gt)
{
intel_pxp_suspend(&gt->pxp, true);
intel_pxp_runtime_suspend(&gt->pxp);
intel_uc_runtime_suspend(&gt->uc);

GT_TRACE(gt, "\n");
@ -371,7 +372,7 @@ int intel_gt_runtime_resume(struct intel_gt *gt)
if (ret)
return ret;

intel_pxp_resume(&gt->pxp);
intel_pxp_runtime_resume(&gt->pxp);

return 0;
}

@ -7,26 +7,29 @@
#include "intel_pxp_irq.h"
#include "intel_pxp_pm.h"
#include "intel_pxp_session.h"
#include "i915_drv.h"

void intel_pxp_suspend(struct intel_pxp *pxp, bool runtime)
void intel_pxp_suspend_prepare(struct intel_pxp *pxp)
{
if (!intel_pxp_is_enabled(pxp))
return;

pxp->arb_is_valid = false;

/*
* Contexts using protected objects keep a runtime PM reference, so we
* can only runtime suspend when all of them have been either closed
* or banned. Therefore, there is no need to invalidate in that
* scenario.
*/
if (!runtime)
intel_pxp_invalidate(pxp);
intel_pxp_invalidate(pxp);
}

intel_pxp_fini_hw(pxp);
void intel_pxp_suspend(struct intel_pxp *pxp)
{
intel_wakeref_t wakeref;

pxp->hw_state_invalidated = false;
if (!intel_pxp_is_enabled(pxp))
return;

with_intel_runtime_pm(&pxp_to_gt(pxp)->i915->runtime_pm, wakeref) {
intel_pxp_fini_hw(pxp);
pxp->hw_state_invalidated = false;
}
}

void intel_pxp_resume(struct intel_pxp *pxp)
@ -44,3 +47,15 @@ void intel_pxp_resume(struct intel_pxp *pxp)

intel_pxp_init_hw(pxp);
}

void intel_pxp_runtime_suspend(struct intel_pxp *pxp)
{
if (!intel_pxp_is_enabled(pxp))
return;

pxp->arb_is_valid = false;

intel_pxp_fini_hw(pxp);

pxp->hw_state_invalidated = false;
}

@ -9,16 +9,29 @@
#include "intel_pxp_types.h"

#ifdef CONFIG_DRM_I915_PXP
void intel_pxp_suspend(struct intel_pxp *pxp, bool runtime);
void intel_pxp_suspend_prepare(struct intel_pxp *pxp);
void intel_pxp_suspend(struct intel_pxp *pxp);
void intel_pxp_resume(struct intel_pxp *pxp);
void intel_pxp_runtime_suspend(struct intel_pxp *pxp);
#else
static inline void intel_pxp_suspend(struct intel_pxp *pxp, bool runtime)
static inline void intel_pxp_suspend_prepare(struct intel_pxp *pxp)
{
}

static inline void intel_pxp_suspend(struct intel_pxp *pxp)
{
}

static inline void intel_pxp_resume(struct intel_pxp *pxp)
{
}
#endif

static inline void intel_pxp_runtime_suspend(struct intel_pxp *pxp)
{
}
#endif
static inline void intel_pxp_runtime_resume(struct intel_pxp *pxp)
{
intel_pxp_resume(pxp);
}
#endif /* __INTEL_PXP_PM_H__ */

@ -2626,6 +2626,27 @@ nv174_chipset = {
.fifo = { 0x00000001, ga102_fifo_new },
};

static const struct nvkm_device_chip
nv176_chipset = {
.name = "GA106",
.bar = { 0x00000001, tu102_bar_new },
.bios = { 0x00000001, nvkm_bios_new },
.devinit = { 0x00000001, ga100_devinit_new },
.fb = { 0x00000001, ga102_fb_new },
.gpio = { 0x00000001, ga102_gpio_new },
.i2c = { 0x00000001, gm200_i2c_new },
.imem = { 0x00000001, nv50_instmem_new },
.mc = { 0x00000001, ga100_mc_new },
.mmu = { 0x00000001, tu102_mmu_new },
.pci = { 0x00000001, gp100_pci_new },
.privring = { 0x00000001, gm200_privring_new },
.timer = { 0x00000001, gk20a_timer_new },
.top = { 0x00000001, ga100_top_new },
.disp = { 0x00000001, ga102_disp_new },
.dma = { 0x00000001, gv100_dma_new },
.fifo = { 0x00000001, ga102_fifo_new },
};

static const struct nvkm_device_chip
nv177_chipset = {
.name = "GA107",
@ -3072,6 +3093,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func,
case 0x168: device->chip = &nv168_chipset; break;
case 0x172: device->chip = &nv172_chipset; break;
case 0x174: device->chip = &nv174_chipset; break;
case 0x176: device->chip = &nv176_chipset; break;
case 0x177: device->chip = &nv177_chipset; break;
default:
if (nvkm_boolopt(device->cfgopt, "NvEnableUnsupportedChipsets", false)) {

@ -207,11 +207,13 @@ int
gm200_acr_wpr_parse(struct nvkm_acr *acr)
{
const struct wpr_header *hdr = (void *)acr->wpr_fw->data;
struct nvkm_acr_lsfw *lsfw;

while (hdr->falcon_id != WPR_HEADER_V0_FALCON_ID_INVALID) {
wpr_header_dump(&acr->subdev, hdr);
if (!nvkm_acr_lsfw_add(NULL, acr, NULL, (hdr++)->falcon_id))
return -ENOMEM;
lsfw = nvkm_acr_lsfw_add(NULL, acr, NULL, (hdr++)->falcon_id);
if (IS_ERR(lsfw))
return PTR_ERR(lsfw);
}

return 0;