commit fbeb229a66

Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

No conflicts.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
@@ -10,7 +10,7 @@ Description:	A collection of all the memory tiers allocated.
 
 What:		/sys/devices/virtual/memory_tiering/memory_tierN/
-		/sys/devices/virtual/memory_tiering/memory_tierN/nodes
+		/sys/devices/virtual/memory_tiering/memory_tierN/nodelist
 Date:		August 2022
 Contact:	Linux memory management mailing list <linux-mm@kvack.org>
 Description:	Directory with details of a specific memory tier
@@ -21,5 +21,5 @@ Description:	Directory with details of a specific memory tier
 		A smaller value of N implies a higher (faster) memory tier in the
 		hierarchy.
 
-		nodes: NUMA nodes that are part of this memory tier.
+		nodelist: NUMA nodes that are part of this memory tier.
@@ -120,7 +120,7 @@ You can tell you are in a softirq (or tasklet) using the
 .. warning::
 
     Beware that this will return a false positive if a
-    :ref:`botton half lock <local_bh_disable>` is held.
+    :ref:`bottom half lock <local_bh_disable>` is held.
 
 Some Basic Rules
 ================
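A hedged aside on the warning this hunk fixes (illustrative only, not part of the patch; names are from include/linux/preempt.h and include/linux/bottom_half.h): in_softirq() tests the SOFTIRQ bits of preempt_count(), and local_bh_disable() raises those same bits, which is exactly why holding a bottom half lock produces the false positive. in_serving_softirq() distinguishes the two:

    /* Illustrative sketch: why in_softirq() misfires under a BH lock. */
    #include <linux/bottom_half.h>
    #include <linux/preempt.h>

    static void bh_lock_demo(void)
    {
            local_bh_disable();                 /* take a "bottom half lock" */
            WARN_ON(!in_softirq());             /* no warning: counted as softirq context */
            WARN_ON(in_serving_softirq());      /* no warning: not actually serving one */
            local_bh_enable();
    }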
@@ -126,17 +126,10 @@ than one development cycle past their initial release. So, for example, the
 5.2.21 was the final stable update of the 5.2 release.
 
 Some kernels are designated "long term" kernels; they will receive support
-for a longer period.  As of this writing, the current long term kernels
-and their maintainers are:
+for a longer period.  Please refer to the following link for the list of active
+long term kernel versions and their maintainers:
 
-	====== ================================ =======================
-	3.16   Ben Hutchings                    (very long-term kernel)
-	4.4    Greg Kroah-Hartman & Sasha Levin (very long-term kernel)
-	4.9    Greg Kroah-Hartman & Sasha Levin
-	4.14   Greg Kroah-Hartman & Sasha Levin
-	4.19   Greg Kroah-Hartman & Sasha Levin
-	5.4    Greg Kroah-Hartman & Sasha Levin
-	====== ================================ =======================
+	https://www.kernel.org/category/releases.html
 
 The selection of a kernel for long-term support is purely a matter of a
 maintainer having the need and the time to maintain that release.  There
@@ -36,7 +36,7 @@ experience, the following books are good for, if anything, reference:
  - "C:  A Reference Manual" by Harbison and Steele [Prentice Hall]
 
 The kernel is written using GNU C and the GNU toolchain.  While it
-adheres to the ISO C89 standard, it uses a number of extensions that are
+adheres to the ISO C11 standard, it uses a number of extensions that are
 not featured in the standard.  The kernel is a freestanding C
 environment, with no reliance on the standard C library, so some
 portions of the C standard are not supported.  Arbitrary long long
@@ -39,7 +39,7 @@ Documentation written by Tom Zanussi
   will use the event's kernel stacktrace as the key.  The keywords
   'keys' or 'key' can be used to specify keys, and the keywords
   'values', 'vals', or 'val' can be used to specify values.  Compound
-  keys consisting of up to two fields can be specified by the 'keys'
+  keys consisting of up to three fields can be specified by the 'keys'
   keyword.  Hashing a compound key produces a unique entry in the
   table for each unique combination of component keys, and can be
   useful for providing more fine-grained summaries of event data.
@@ -44,7 +44,7 @@ among others, useful references:
  - "C:  A Reference Manual" by Harbison and Steele [Prentice Hall]
 
 The kernel was written using GNU C and the GNU toolchain.
-While it adheres to the ISO C89 standard, it uses a number of
+While it adheres to the ISO C11 standard, it uses a number of
 extensions that are not provided by that standard. The kernel is a
 freestanding C environment, with no dependency on the standard C
 libraries, so some parts of the C standard are not supported.
@@ -65,7 +65,7 @@ How to do Linux kernel development
  - "New, Detailed C Language: H&S Reference" (Samuel P. Harbison and Guy L. Steele; Japanese translation supervised by Nobuo Saito) [SoftBank]
 
 The kernel is written in GNU C using the GNU toolchain. While it is
-written to conform to the ISO C89 specification, it uses many language
+written to conform to the ISO C11 specification, it uses many language
 extensions that are not in the standard. The kernel is a freestanding C
 environment with no dependency on the standard C library, so some parts of
 the C standard, notably arbitrary long long division and floating point, cannot be used.
@@ -62,7 +62,7 @@ Documentation/process/howto.rst
  - "Practical C Programming" by Steve Oualline [O'Reilly]
  - "C:  A Reference Manual" by Harbison and Steele [Prentice Hall]
 
-The kernel is written in GNU C with the GNU toolchain. These tools follow the ISO C89 standard
+The kernel is written in GNU C with the GNU toolchain. These tools follow the ISO C11 standard
 while also providing many extensions that are not in the standard. The kernel is a
 freestanding C environment with no ties to the standard C library, so parts of the
 C standard are not supported. Arbitrary long long division and floating point are not supported.
@@ -45,7 +45,7 @@ Most of the Linux kernel is written in the C language; some architecture-specific code
  - "C:  A Reference Manual" by Harbison and Steele [Prentice Hall]
    "C Reference Manual (5th edition)" (translated by Qiu Zhongpan et al.) [China Machine Press]
 
-The Linux kernel is developed with GNU C and the GNU toolchain. While it follows the ISO C89 standard, it also uses some
+The Linux kernel is developed with GNU C and the GNU toolchain. While it follows the ISO C11 standard, it also uses some
 extensions not defined in the standard. The kernel is a self-contained C environment with no reliance on the
 standard C library, so parts of the C standard are not supported. For example, large long long division and
 floating-point arithmetic are not allowed. It can at times be hard to work out the kernel's toolchain requirements and the extensions it uses; unfortunately,
@@ -48,7 +48,7 @@ Most of the Linux kernel is written in the C language; some architecture-specific code
  - "C:  A Reference Manual" by Harbison and Steele [Prentice Hall]
    "C Reference Manual (5th edition)" (translated by Qiu Zhongpan et al.) [China Machine Press]
 
-The Linux kernel is developed with GNU C and the GNU toolchain. While it follows the ISO C89 standard, it also uses some
+The Linux kernel is developed with GNU C and the GNU toolchain. While it follows the ISO C11 standard, it also uses some
 extensions not defined in the standard. The kernel is a self-contained C environment with no reliance on the
 standard C library, so parts of the C standard are not supported. For example, large long long division and
 floating-point arithmetic are not allowed. It can at times be hard to work out the kernel's toolchain requirements and the extensions it uses; unfortunately,
MAINTAINERS
@@ -4102,6 +4102,7 @@ N:	bcm7038
 N:	bcm7120
 
 BROADCOM BDC DRIVER
+M:	Justin Chen <justinpopo6@gmail.com>
 M:	Al Cooper <alcooperx@gmail.com>
 L:	linux-usb@vger.kernel.org
 R:	Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com>
@@ -4208,6 +4209,7 @@ F:	Documentation/devicetree/bindings/serial/brcm,bcm7271-uart.yaml
 F:	drivers/tty/serial/8250/8250_bcm7271.c
 
 BROADCOM BRCMSTB USB EHCI DRIVER
+M:	Justin Chen <justinpopo6@gmail.com>
 M:	Al Cooper <alcooperx@gmail.com>
 R:	Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com>
 L:	linux-usb@vger.kernel.org
@@ -4224,6 +4226,7 @@ F:	Documentation/devicetree/bindings/usb/brcm,usb-pinmap.yaml
 F:	drivers/usb/misc/brcmstb-usb-pinmap.c
 
 BROADCOM BRCMSTB USB2 and USB3 PHY DRIVER
+M:	Justin Chen <justinpopo6@gmail.com>
 M:	Al Cooper <alcooperx@gmail.com>
 R:	Broadcom internal kernel review list <bcm-kernel-feedback-list@broadcom.com>
 L:	linux-kernel@vger.kernel.org
@@ -5039,7 +5042,7 @@ F:	drivers/scsi/snic/
 
 CISCO VIC ETHERNET NIC DRIVER
 M:	Christian Benvenuti <benve@cisco.com>
-M:	Govindarajulu Varadarajan <_govind@gmx.com>
+M:	Satish Kharat <satishkh@cisco.com>
 S:	Supported
 F:	drivers/net/ethernet/cisco/enic/
 
@@ -9777,7 +9780,10 @@ S:	Supported
 F:	drivers/pci/hotplug/rpaphp*
 
 IBM Power SRIOV Virtual NIC Device Driver
-M:	Dany Madden <drt@linux.ibm.com>
+M:	Haren Myneni <haren@linux.ibm.com>
+M:	Rick Lindsley <ricklind@linux.ibm.com>
+R:	Nick Child <nnac123@linux.ibm.com>
+R:	Dany Madden <danymadden@us.ibm.com>
 R:	Thomas Falcon <tlfalcon@linux.ibm.com>
 L:	netdev@vger.kernel.org
 S:	Supported
@@ -11247,7 +11253,7 @@ L:	kvm@vger.kernel.org
 L:	kvm-riscv@lists.infradead.org
 L:	linux-riscv@lists.infradead.org
 S:	Maintained
-T:	git git://github.com/kvm-riscv/linux.git
+T:	git https://github.com/kvm-riscv/linux.git
 F:	arch/riscv/include/asm/kvm*
 F:	arch/riscv/include/uapi/asm/kvm*
 F:	arch/riscv/kvm/
@@ -11260,7 +11266,6 @@ M:	Claudio Imbrenda <imbrenda@linux.ibm.com>
 R:	David Hildenbrand <david@redhat.com>
 L:	kvm@vger.kernel.org
 S:	Supported
-W:	http://www.ibm.com/developerworks/linux/linux390/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux.git
 F:	Documentation/virt/kvm/s390*
 F:	arch/s390/include/asm/gmap.h
@@ -14527,7 +14532,7 @@ L:	linux-nilfs@vger.kernel.org
 S:	Supported
-W:	https://nilfs.sourceforge.io/
+W:	https://nilfs.osdn.jp/
-T:	git git://github.com/konis/nilfs2.git
+T:	git https://github.com/konis/nilfs2.git
 F:	Documentation/filesystems/nilfs2.rst
 F:	fs/nilfs2/
 F:	include/trace/events/nilfs2.h
@@ -15631,7 +15636,7 @@ F:	drivers/input/serio/gscps2.c
 F:	drivers/input/serio/hp_sdc*
 F:	drivers/parisc/
 F:	drivers/parport/parport_gsc.*
-F:	drivers/tty/serial/8250/8250_gsc.c
+F:	drivers/tty/serial/8250/8250_parisc.c
 F:	drivers/video/console/sti*
 F:	drivers/video/fbdev/sti*
 F:	drivers/video/logo/logo_parisc*
@@ -17816,7 +17821,7 @@ S:	Odd Fixes
 F:	drivers/tty/serial/rp2.*
 
 ROHM BD99954 CHARGER IC
-R:	Matti Vaittinen <mazziesaccount@gmail.com>
+M:	Matti Vaittinen <mazziesaccount@gmail.com>
 S:	Supported
 F:	drivers/power/supply/bd99954-charger.c
 F:	drivers/power/supply/bd99954-charger.h
@@ -17839,7 +17844,7 @@ F:	drivers/regulator/bd9571mwv-regulator.c
 F:	include/linux/mfd/bd9571mwv.h
 
 ROHM POWER MANAGEMENT IC DEVICE DRIVERS
-R:	Matti Vaittinen <mazziesaccount@gmail.com>
+M:	Matti Vaittinen <mazziesaccount@gmail.com>
 S:	Supported
 F:	drivers/clk/clk-bd718x7.c
 F:	drivers/gpio/gpio-bd71815.c
@@ -18001,7 +18006,6 @@ R:	Christian Borntraeger <borntraeger@linux.ibm.com>
 R:	Sven Schnelle <svens@linux.ibm.com>
 L:	linux-s390@vger.kernel.org
 S:	Supported
-W:	http://www.ibm.com/developerworks/linux/linux390/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux.git
 F:	Documentation/driver-api/s390-drivers.rst
 F:	Documentation/s390/
@@ -18013,7 +18017,6 @@ M:	Vineeth Vijayan <vneethv@linux.ibm.com>
 M:	Peter Oberparleiter <oberpar@linux.ibm.com>
 L:	linux-s390@vger.kernel.org
 S:	Supported
-W:	http://www.ibm.com/developerworks/linux/linux390/
 F:	drivers/s390/cio/
 
 S390 DASD DRIVER
@@ -18021,7 +18024,6 @@ M:	Stefan Haberland <sth@linux.ibm.com>
 M:	Jan Hoeppner <hoeppner@linux.ibm.com>
 L:	linux-s390@vger.kernel.org
 S:	Supported
-W:	http://www.ibm.com/developerworks/linux/linux390/
 F:	block/partitions/ibm.c
 F:	drivers/s390/block/dasd*
 F:	include/linux/dasd_mod.h
@@ -18031,7 +18033,6 @@ M:	Matthew Rosato <mjrosato@linux.ibm.com>
 M:	Gerald Schaefer <gerald.schaefer@linux.ibm.com>
 L:	linux-s390@vger.kernel.org
 S:	Supported
-W:	http://www.ibm.com/developerworks/linux/linux390/
 F:	drivers/iommu/s390-iommu.c
 
 S390 IUCV NETWORK LAYER
@@ -18040,7 +18041,6 @@ M:	Wenjia Zhang <wenjia@linux.ibm.com>
 L:	linux-s390@vger.kernel.org
 L:	netdev@vger.kernel.org
 S:	Supported
-W:	http://www.ibm.com/developerworks/linux/linux390/
 F:	drivers/s390/net/*iucv*
 F:	include/net/iucv/
 F:	net/iucv/
@@ -18051,7 +18051,6 @@ M:	Wenjia Zhang <wenjia@linux.ibm.com>
 L:	linux-s390@vger.kernel.org
 L:	netdev@vger.kernel.org
 S:	Supported
-W:	http://www.ibm.com/developerworks/linux/linux390/
 F:	drivers/s390/net/
 
 S390 PCI SUBSYSTEM
@@ -18059,7 +18058,6 @@ M:	Niklas Schnelle <schnelle@linux.ibm.com>
 M:	Gerald Schaefer <gerald.schaefer@linux.ibm.com>
 L:	linux-s390@vger.kernel.org
 S:	Supported
-W:	http://www.ibm.com/developerworks/linux/linux390/
 F:	arch/s390/pci/
 F:	drivers/pci/hotplug/s390_pci_hpc.c
 F:	Documentation/s390/pci.rst
@@ -18070,7 +18068,6 @@ M:	Halil Pasic <pasic@linux.ibm.com>
 M:	Jason Herne <jjherne@linux.ibm.com>
 L:	linux-s390@vger.kernel.org
 S:	Supported
-W:	http://www.ibm.com/developerworks/linux/linux390/
 F:	Documentation/s390/vfio-ap*
 F:	drivers/s390/crypto/vfio_ap*
@@ -18099,7 +18096,6 @@ S390 ZCRYPT DRIVER
 M:	Harald Freudenberger <freude@linux.ibm.com>
 L:	linux-s390@vger.kernel.org
 S:	Supported
-W:	http://www.ibm.com/developerworks/linux/linux390/
 F:	drivers/s390/crypto/
 
 S390 ZFCP DRIVER
@@ -18107,7 +18103,6 @@ M:	Steffen Maier <maier@linux.ibm.com>
 M:	Benjamin Block <bblock@linux.ibm.com>
 L:	linux-s390@vger.kernel.org
 S:	Supported
-W:	http://www.ibm.com/developerworks/linux/linux390/
 F:	drivers/s390/scsi/zfcp_*
 
 S3C ADC BATTERY DRIVER
@@ -18679,7 +18674,6 @@ M:	Wenjia Zhang <wenjia@linux.ibm.com>
 M:	Jan Karcher <jaka@linux.ibm.com>
 L:	linux-s390@vger.kernel.org
 S:	Supported
-W:	http://www.ibm.com/developerworks/linux/linux390/
 F:	net/smc/
 
 SHARP GP2AP002A00F/GP2AP002S00F SENSOR DRIVER
@@ -18790,7 +18784,7 @@ M:	Palmer Dabbelt <palmer@dabbelt.com>
 M:	Paul Walmsley <paul.walmsley@sifive.com>
 L:	linux-riscv@lists.infradead.org
 S:	Supported
-T:	git git://github.com/sifive/riscv-linux.git
+T:	git https://github.com/sifive/riscv-linux.git
 N:	sifive
 K:	[^@]sifive
@@ -21194,15 +21188,6 @@ S:	Maintained
 F:	Documentation/usb/ehci.rst
 F:	drivers/usb/host/ehci*
 
-USB GADGET/PERIPHERAL SUBSYSTEM
-M:	Felipe Balbi <balbi@kernel.org>
-L:	linux-usb@vger.kernel.org
-S:	Maintained
-W:	http://www.linux-usb.org/gadget
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git
-F:	drivers/usb/gadget/
-F:	include/linux/usb/gadget*
-
 USB HID/HIDBP DRIVERS (USB KEYBOARDS, MICE, REMOTE CONTROLS, ...)
 M:	Jiri Kosina <jikos@kernel.org>
 M:	Benjamin Tissoires <benjamin.tissoires@redhat.com>
@@ -21309,13 +21294,6 @@ W:	https://github.com/petkan/pegasus
 T:	git https://github.com/petkan/pegasus.git
 F:	drivers/net/usb/pegasus.*
 
-USB PHY LAYER
-M:	Felipe Balbi <balbi@kernel.org>
-L:	linux-usb@vger.kernel.org
-S:	Maintained
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git
-F:	drivers/usb/phy/
-
 USB PRINTER DRIVER (usblp)
 M:	Pete Zaitcev <zaitcev@redhat.com>
 L:	linux-usb@vger.kernel.org
Makefile
@@ -2,7 +2,7 @@
 VERSION = 6
 PATCHLEVEL = 1
 SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc3
 NAME = Hurr durr I'ma ninja sloth
 
 # *DOCUMENTATION*
@@ -191,7 +191,7 @@ static inline void flush_thread(void)
 unsigned long __get_wchan(struct task_struct *p);
 
 #define __KSTK_TOS(tsk) ((unsigned long)task_stack_page(tsk) + \
-			 THREAD_SIZE - 32 - sizeof(struct pt_regs))
+			 THREAD_SIZE - sizeof(struct pt_regs))
 #define task_pt_regs(tsk) ((struct pt_regs *)__KSTK_TOS(tsk))
 #define KSTK_EIP(tsk) (task_pt_regs(tsk)->csr_era)
 #define KSTK_ESP(tsk) (task_pt_regs(tsk)->regs[3])
@@ -29,7 +29,7 @@ struct pt_regs {
 	unsigned long csr_euen;
 	unsigned long csr_ecfg;
 	unsigned long csr_estat;
-	unsigned long __last[0];
+	unsigned long __last[];
 } __aligned(8);
 
 static inline int regs_irqs_disabled(struct pt_regs *regs)
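For context (an illustrative fragment, not from the patch): the C99 flexible array member is the standard spelling of the old GNU zero-length-array idiom. sizeof still excludes the trailing member either way, but [] lets the compiler and fortified bounds checks reason about it correctly:

    /* Illustrative only: [] is the standard form of the [0] idiom. */
    struct demo_regs {
            unsigned long csr_estat;
            unsigned long __last[];   /* must be the final member */
    } __attribute__((aligned(8)));
    /* sizeof(struct demo_regs) == 8: __last contributes no size */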
@@ -133,7 +133,7 @@ static inline void die_if_kernel(const char *str, struct pt_regs *regs)
 #define current_pt_regs()						\
 ({									\
 	unsigned long sp = (unsigned long)__builtin_frame_address(0);	\
-	(struct pt_regs *)((sp | (THREAD_SIZE - 1)) + 1 - 32) - 1;	\
+	(struct pt_regs *)((sp | (THREAD_SIZE - 1)) + 1) - 1;		\
 })
 
 /* Helpers for working with the user stack pointer */
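A hedged, userspace-runnable sketch of the arithmetic above (the THREAD_SIZE value and struct layout are assumptions for illustration): with the MIPS-style 32-byte pad gone, pt_regs sit flush against the stack top, so rounding the frame address up to the next THREAD_SIZE boundary and stepping back one struct finds them:

    #include <stdio.h>

    #define THREAD_SIZE 16384UL   /* assumed for the demo */

    struct pt_regs { unsigned long regs[32]; unsigned long csr_era; };

    static struct pt_regs *pt_regs_from_sp(unsigned long sp)
    {
            /* round sp up to the end of the enclosing kernel stack */
            unsigned long stack_top = (sp | (THREAD_SIZE - 1)) + 1;
            return (struct pt_regs *)stack_top - 1;  /* pt_regs just below the top */
    }

    int main(void)
    {
            unsigned long sp = 0x12340ff0UL;  /* arbitrary in-stack address */
            printf("pt_regs at %p\n", (void *)pt_regs_from_sp(sp));
            return 0;
    }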
@@ -84,10 +84,9 @@ SYM_CODE_START(kernel_entry)			# kernel entry point
 
 	la.pcrel	tp, init_thread_union
 	/* Set the SP after an empty pt_regs.  */
-	PTR_LI		sp, (_THREAD_SIZE - 32 - PT_SIZE)
+	PTR_LI		sp, (_THREAD_SIZE - PT_SIZE)
 	PTR_ADD		sp, sp, tp
 	set_saved_sp	sp, t0, t1
-	PTR_ADDI	sp, sp, -4 * SZREG	# init stack pointer
 
 	bl		start_kernel
 	ASM_BUG()
@@ -129,7 +129,7 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 	unsigned long clone_flags = args->flags;
 	struct pt_regs *childregs, *regs = current_pt_regs();
 
-	childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;
+	childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE;
 
 	/* set up new TSS. */
 	childregs = (struct pt_regs *) childksp - 1;
@@ -236,7 +236,7 @@ bool in_task_stack(unsigned long stack, struct task_struct *task,
 			  struct stack_info *info)
 {
 	unsigned long begin = (unsigned long)task_stack_page(task);
-	unsigned long end = begin + THREAD_SIZE - 32;
+	unsigned long end = begin + THREAD_SIZE;
 
 	if (stack < begin || stack >= end)
 		return false;
@@ -26,7 +26,7 @@ SYM_FUNC_START(__switch_to)
 	move	tp, a2
 	cpu_restore_nonscratch a1
 
-	li.w	t0, _THREAD_SIZE - 32
+	li.w	t0, _THREAD_SIZE
 	PTR_ADD	t0, t0, tp
 	set_saved_sp	t0, t1, t2
@@ -279,6 +279,7 @@ static void emit_atomic(const struct bpf_insn *insn, struct jit_ctx *ctx)
 	const u8 t1 = LOONGARCH_GPR_T1;
 	const u8 t2 = LOONGARCH_GPR_T2;
 	const u8 t3 = LOONGARCH_GPR_T3;
+	const u8 r0 = regmap[BPF_REG_0];
 	const u8 src = regmap[insn->src_reg];
 	const u8 dst = regmap[insn->dst_reg];
 	const s16 off = insn->off;
@@ -359,8 +360,6 @@ static void emit_atomic(const struct bpf_insn *insn, struct jit_ctx *ctx)
 		break;
 	/* r0 = atomic_cmpxchg(dst + off, r0, src); */
 	case BPF_CMPXCHG:
-		u8 r0 = regmap[BPF_REG_0];
-
 		move_reg(ctx, t2, r0);
 		if (isdw) {
 			emit_insn(ctx, lld, r0, t1, 0);
@@ -390,8 +389,11 @@ static bool is_signed_bpf_cond(u8 cond)
 
 static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool extra_pass)
 {
-	const bool is32 = BPF_CLASS(insn->code) == BPF_ALU ||
-			  BPF_CLASS(insn->code) == BPF_JMP32;
+	u8 tm = -1;
+	u64 func_addr;
+	bool func_addr_fixed;
+	int i = insn - ctx->prog->insnsi;
+	int ret, jmp_offset;
 	const u8 code = insn->code;
 	const u8 cond = BPF_OP(code);
 	const u8 t1 = LOONGARCH_GPR_T1;
@@ -400,8 +402,8 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
 	const u8 dst = regmap[insn->dst_reg];
 	const s16 off = insn->off;
 	const s32 imm = insn->imm;
-	int jmp_offset;
-	int i = insn - ctx->prog->insnsi;
+	const u64 imm64 = (u64)(insn + 1)->imm << 32 | (u32)insn->imm;
+	const bool is32 = BPF_CLASS(insn->code) == BPF_ALU || BPF_CLASS(insn->code) == BPF_JMP32;
 
 	switch (code) {
 	/* dst = src */
@@ -724,24 +726,23 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
 	case BPF_JMP32 | BPF_JSGE | BPF_K:
 	case BPF_JMP32 | BPF_JSLT | BPF_K:
 	case BPF_JMP32 | BPF_JSLE | BPF_K:
-		u8 t7 = -1;
-
 		jmp_offset = bpf2la_offset(i, off, ctx);
 		if (imm) {
 			move_imm(ctx, t1, imm, false);
-			t7 = t1;
+			tm = t1;
 		} else {
 			/* If imm is 0, simply use zero register. */
-			t7 = LOONGARCH_GPR_ZERO;
+			tm = LOONGARCH_GPR_ZERO;
 		}
 		move_reg(ctx, t2, dst);
 		if (is_signed_bpf_cond(BPF_OP(code))) {
-			emit_sext_32(ctx, t7, is32);
+			emit_sext_32(ctx, tm, is32);
 			emit_sext_32(ctx, t2, is32);
 		} else {
-			emit_zext_32(ctx, t7, is32);
+			emit_zext_32(ctx, tm, is32);
 			emit_zext_32(ctx, t2, is32);
 		}
-		if (emit_cond_jmp(ctx, cond, t2, t7, jmp_offset) < 0)
+		if (emit_cond_jmp(ctx, cond, t2, tm, jmp_offset) < 0)
 			goto toofar;
 		break;
 
@@ -775,10 +776,6 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
 
 	/* function call */
 	case BPF_JMP | BPF_CALL:
-		int ret;
-		u64 func_addr;
-		bool func_addr_fixed;
-
 		mark_call(ctx);
 		ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass,
 					    &func_addr, &func_addr_fixed);
@@ -811,8 +808,6 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool ext
 
 	/* dst = imm64 */
 	case BPF_LD | BPF_IMM | BPF_DW:
-		u64 imm64 = (u64)(insn + 1)->imm << 32 | (u32)insn->imm;
-
 		move_imm(ctx, dst, imm64, is32);
 		return 1;
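The build_insn churn above is mechanical: ISO C (before C23) does not allow a declaration to sit directly after a case label without a new block, so the per-case locals (r0, tm, ret, func_addr, imm64) move to function scope. A minimal illustration of the rule (not kernel code):

    switch (op) {
    case 1:
            int x = 0;   /* rejected by strict/older compilers: label needs a statement */
            break;
    case 2: {
            int y = 0;   /* fine: the brace opens a new block scope */
            break;
    }
    }

Hoisting the declarations, as the patch does, is the other conventional fix and avoids adding braces to every case.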
@@ -10,12 +10,12 @@
 #define SVERSION_ANY_ID		PA_SVERSION_ANY_ID
 
 struct hp_hardware {
-	unsigned short	hw_type:5;	/* HPHW_xxx */
-	unsigned short	hversion;
-	unsigned long	sversion:28;
-	unsigned short	opt;
-	const char	name[80];	/* The hardware description */
-};
+	unsigned int	hw_type:8;	/* HPHW_xxx */
+	unsigned int	hversion:12;
+	unsigned int	sversion:12;
+	unsigned char	opt;
+	unsigned char	name[59];	/* The hardware description */
+} __packed;
 
 struct parisc_device;
@@ -363,20 +363,25 @@
 
 #if !defined(__ASSEMBLY__)
 
-/* flags of the device_path */
+/* flags for hardware_path */
 #define	PF_AUTOBOOT	0x80
 #define	PF_AUTOSEARCH	0x40
 #define	PF_TIMER	0x0F
 
-struct device_path {		/* page 1-69 */
-	unsigned char flags;	/* flags see above! */
-	unsigned char bc[6];	/* bus converter routing info */
-	unsigned char mod;
-	unsigned int  layers[6];/* device-specific layer-info */
-} __attribute__((aligned(8))) ;
+struct hardware_path {
+	unsigned char flags;	/* see bit definitions below */
+	signed   char bc[6];	/* Bus Converter routing info to a specific */
+				/* I/O adaptor (< 0 means none, > 63 resvd) */
+	signed   char mod;	/* fixed field of specified module */
+};
+
+struct pdc_module_path {	/* page 1-69 */
+	struct hardware_path path;
+	unsigned int layers[6];	/* device-specific info (ctlr #, unit # ...) */
+} __attribute__((aligned(8)));
 
 struct pz_device {
-	struct device_path dp;	/* see above */
+	struct pdc_module_path dp;	/* see above */
 	/* struct	iomod *hpa; */
 	unsigned int	hpa;	/* HPA base address */
 	/* char	*spa; */
@@ -611,21 +616,6 @@ struct pdc_initiator { /* PDC_INITIATOR */
 	int mode;
 };
 
-struct hardware_path {
-	char  flags;	/* see bit definitions below */
-	char  bc[6];	/* Bus Converter routing info to a specific */
-			/* I/O adaptor (< 0 means none, > 63 resvd) */
-	char  mod;	/* fixed field of specified module */
-};
-
-/*
- * Device path specifications used by PDC.
- */
-struct pdc_module_path {
-	struct hardware_path path;
-	unsigned int layers[6]; /* device-specific info (ctlr #, unit # ...) */
-};
-
 /* Only used on some pre-PA2.0 boxes */
 struct pdc_memory_map {		/* PDC_MEMORY_MAP */
 	unsigned long hpa;	/* mod's register set address */
@@ -882,15 +882,13 @@ void __init walk_central_bus(void)
 			&root);
 }
 
-static void print_parisc_device(struct parisc_device *dev)
+static __init void print_parisc_device(struct parisc_device *dev)
 {
-	char hw_path[64];
-	static int count;
+	static int count __initdata;
 
-	print_pa_hwpath(dev, hw_path);
-	pr_info("%d. %s at %pap [%s] { %d, 0x%x, 0x%.3x, 0x%.5x }",
-		++count, dev->name, &(dev->hpa.start), hw_path, dev->id.hw_type,
-		dev->id.hversion_rev, dev->id.hversion, dev->id.sversion);
+	pr_info("%d. %s at %pap { type:%d, hv:%#x, sv:%#x, rev:%#x }",
+		++count, dev->name, &(dev->hpa.start), dev->id.hw_type,
+		dev->id.hversion, dev->id.sversion, dev->id.hversion_rev);
 
 	if (dev->num_addrs) {
 		int k;
@@ -1079,7 +1077,7 @@ static __init int qemu_print_iodc_data(struct device *lin_dev, void *data)
 
 
 
-static int print_one_device(struct device * dev, void * data)
+static __init int print_one_device(struct device * dev, void * data)
 {
 	struct parisc_device * pdev = to_parisc_device(dev);
 
@@ -147,6 +147,7 @@ config PPC
 	select ARCH_MIGHT_HAVE_PC_SERIO
 	select ARCH_OPTIONAL_KERNEL_RWX		if ARCH_HAS_STRICT_KERNEL_RWX
 	select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT
+	select ARCH_SPLIT_ARG64			if PPC32
 	select ARCH_STACKWALK
 	select ARCH_SUPPORTS_ATOMIC_RMW
 	select ARCH_SUPPORTS_DEBUG_PAGEALLOC	if PPC_BOOK3S || PPC_8xx || 40x
@@ -285,7 +286,7 @@ config PPC
 	#
 
 config PPC_LONG_DOUBLE_128
-	depends on PPC64
+	depends on PPC64 && ALTIVEC
 	def_bool $(success,test "$(shell,echo __LONG_DOUBLE_128__ | $(CC) -E -P -)" = 1)
 
 config PPC_BARRIER_NOSPEC
|
@ -32,6 +32,11 @@ static inline void arch_enter_lazy_mmu_mode(void)
|
||||
|
||||
if (radix_enabled())
|
||||
return;
|
||||
/*
|
||||
* apply_to_page_range can call us this preempt enabled when
|
||||
* operating on kernel page tables.
|
||||
*/
|
||||
preempt_disable();
|
||||
batch = this_cpu_ptr(&ppc64_tlb_batch);
|
||||
batch->active = 1;
|
||||
}
|
||||
@ -47,6 +52,7 @@ static inline void arch_leave_lazy_mmu_mode(void)
|
||||
if (batch->index)
|
||||
__flush_tlb_pending(batch);
|
||||
batch->active = 0;
|
||||
preempt_enable();
|
||||
}
|
||||
|
||||
#define arch_flush_lazy_mmu_mode() do {} while (0)
|
||||
|
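Why the preempt_disable() above matters (an illustrative sketch using the names from this hunk, not a definitive restatement of the patch): a per-CPU pointer such as the ppc64_tlb_batch is only stable while the task cannot migrate, so the lazy-MMU window must pin the CPU for its whole lifetime:

    /* Illustrative only: per-CPU state needs migration disabled. */
    static void lazy_mmu_demo(void)
    {
            struct ppc64_tlb_batch *batch;

            preempt_disable();                        /* pin this CPU */
            batch = this_cpu_ptr(&ppc64_tlb_batch);   /* safe: no migration now */
            batch->active = 1;
            /* ... queue hash PTE invalidations ... */
            batch->active = 0;
            preempt_enable();                         /* matches arch_leave_lazy_mmu_mode */
    }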
@@ -104,6 +104,13 @@ long sys_ppc_ftruncate64(unsigned int fd, u32 reg4,
 			 unsigned long len1, unsigned long len2);
 long sys_ppc32_fadvise64(int fd, u32 unused, u32 offset1, u32 offset2,
 			 size_t len, int advice);
+long sys_ppc_sync_file_range2(int fd, unsigned int flags,
+			      unsigned int offset1,
+			      unsigned int offset2,
+			      unsigned int nbytes1,
+			      unsigned int nbytes2);
+long sys_ppc_fallocate(int fd, int mode, u32 offset1, u32 offset2,
+		       u32 len1, u32 len2);
 #endif
 #ifdef CONFIG_COMPAT
 long compat_sys_mmap2(unsigned long addr, size_t len,
|
@ -813,6 +813,13 @@ kernel_dbg_exc:
|
||||
EXCEPTION_COMMON(0x260)
|
||||
CHECK_NAPPING()
|
||||
addi r3,r1,STACK_FRAME_OVERHEAD
|
||||
/*
|
||||
* XXX: Returning from performance_monitor_exception taken as a
|
||||
* soft-NMI (Linux irqs disabled) may be risky to use interrupt_return
|
||||
* and could cause bugs in return or elsewhere. That case should just
|
||||
* restore registers and return. There is a workaround for one known
|
||||
* problem in interrupt_exit_kernel_prepare().
|
||||
*/
|
||||
bl performance_monitor_exception
|
||||
b interrupt_return
|
||||
|
||||
|
@@ -2357,9 +2357,21 @@ EXC_VIRT_END(performance_monitor, 0x4f00, 0x20)
 EXC_COMMON_BEGIN(performance_monitor_common)
 	GEN_COMMON performance_monitor
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	performance_monitor_exception
+	lbz	r4,PACAIRQSOFTMASK(r13)
+	cmpdi	r4,IRQS_ENABLED
+	bne	1f
+	bl	performance_monitor_exception_async
 	b	interrupt_return_srr
+1:
+	bl	performance_monitor_exception_nmi
+	/* Clear MSR_RI before setting SRR0 and SRR1. */
+	li	r9,0
+	mtmsrd	r9,1
+
+	kuap_kernel_restore r9, r10
+
+	EXCEPTION_RESTORE_REGS hsrr=0
+	RFI_TO_KERNEL
 
 /**
  * Interrupt 0xf20 - Vector Unavailable Interrupt.
@@ -374,10 +374,18 @@ notrace unsigned long interrupt_exit_kernel_prepare(struct pt_regs *regs)
 	if (regs_is_unrecoverable(regs))
 		unrecoverable_exception(regs);
 	/*
-	 * CT_WARN_ON comes here via program_check_exception,
-	 * so avoid recursion.
+	 * CT_WARN_ON comes here via program_check_exception, so avoid
+	 * recursion.
+	 *
+	 * Skip the assertion on PMIs on 64e to work around a problem caused
+	 * by NMI PMIs incorrectly taking this interrupt return path, it's
+	 * possible for this to hit after interrupt exit to user switches
+	 * context to user. See also the comment in the performance monitor
+	 * handler in exceptions-64e.S
 	 */
-	if (TRAP(regs) != INTERRUPT_PROGRAM)
+	if (!IS_ENABLED(CONFIG_PPC_BOOK3E_64) &&
+	    TRAP(regs) != INTERRUPT_PROGRAM &&
+	    TRAP(regs) != INTERRUPT_PERFMON)
 		CT_WARN_ON(ct_state() == CONTEXT_USER);
 
 	kuap = kuap_get_and_assert_locked();
@@ -532,15 +532,24 @@ _ASM_NOKPROBE_SYMBOL(interrupt_return_\srr\()_kernel)
 	 * Returning to soft-disabled context.
 	 * Check if a MUST_HARD_MASK interrupt has become pending, in which
 	 * case we need to disable MSR[EE] in the return context.
+	 *
+	 * The MSR[EE] check catches among other things the short incoherency
+	 * in hard_irq_disable() between clearing MSR[EE] and setting
+	 * PACA_IRQ_HARD_DIS.
 	 */
 	ld	r12,_MSR(r1)
 	andi.	r10,r12,MSR_EE
 	beq	.Lfast_kernel_interrupt_return_\srr\() // EE already disabled
 	lbz	r11,PACAIRQHAPPENED(r13)
 	andi.	r10,r11,PACA_IRQ_MUST_HARD_MASK
-	beq	.Lfast_kernel_interrupt_return_\srr\() // No HARD_MASK pending
+	bne	1f // HARD_MASK is pending
+	// No HARD_MASK pending, clear possible HARD_DIS set by interrupt
+	andi.	r11,r11,(~PACA_IRQ_HARD_DIS)@l
+	stb	r11,PACAIRQHAPPENED(r13)
+	b	.Lfast_kernel_interrupt_return_\srr\()
 
-	/* Must clear MSR_EE from _MSR */
+
+1: /* Must clear MSR_EE from _MSR */
 #ifdef CONFIG_PPC_BOOK3S
 	li	r10,0
 	/* Clear valid before changing _MSR */
@@ -112,7 +112,7 @@ PPC32_SYSCALL_DEFINE6(ppc32_fadvise64,
 				 advice);
 }
 
-COMPAT_SYSCALL_DEFINE6(ppc_sync_file_range2,
+PPC32_SYSCALL_DEFINE6(ppc_sync_file_range2,
 		       int, fd, unsigned int, flags,
 		       unsigned int, offset1, unsigned int, offset2,
 		       unsigned int, nbytes1, unsigned int, nbytes2)
@@ -122,3 +122,14 @@ COMPAT_SYSCALL_DEFINE6(ppc_sync_file_range2,
 
 	return ksys_sync_file_range(fd, offset, nbytes, flags);
 }
+
+#ifdef CONFIG_PPC32
+SYSCALL_DEFINE6(ppc_fallocate,
+		int, fd, int, mode,
+		u32, offset1, u32, offset2, u32, len1, u32, len2)
+{
+	return ksys_fallocate(fd, mode,
+			      merge_64(offset1, offset2),
+			      merge_64(len1, len2));
+}
+#endif
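For readers unfamiliar with the merge_64() calls above, a hedged userspace sketch of the 32-bit syscall ABI trick they implement: a 64-bit argument arrives split across two registers and is stitched back together (the high word first, matching the kernel helper's semantics):

    #include <stdint.h>
    #include <stdio.h>

    /* illustrative re-implementation of the kernel's merge_64() */
    static uint64_t merge_64(uint32_t high, uint32_t low)
    {
            return ((uint64_t)high << 32) | low;
    }

    int main(void)
    {
            /* offset 0x180000000 split into two 32-bit syscall arguments */
            printf("%#llx\n", (unsigned long long)merge_64(0x1, 0x80000000u));
            return 0;
    }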
|
@ -394,8 +394,11 @@
|
||||
305 common signalfd sys_signalfd compat_sys_signalfd
|
||||
306 common timerfd_create sys_timerfd_create
|
||||
307 common eventfd sys_eventfd
|
||||
308 common sync_file_range2 sys_sync_file_range2 compat_sys_ppc_sync_file_range2
|
||||
309 nospu fallocate sys_fallocate compat_sys_fallocate
|
||||
308 32 sync_file_range2 sys_ppc_sync_file_range2 compat_sys_ppc_sync_file_range2
|
||||
308 64 sync_file_range2 sys_sync_file_range2
|
||||
308 spu sync_file_range2 sys_sync_file_range2
|
||||
309 32 fallocate sys_ppc_fallocate compat_sys_fallocate
|
||||
309 64 fallocate sys_fallocate
|
||||
310 nospu subpage_prot sys_subpage_prot
|
||||
311 32 timerfd_settime sys_timerfd_settime32
|
||||
311 64 timerfd_settime sys_timerfd_settime
|
||||
|
@@ -51,6 +51,7 @@ config KVM_BOOK3S_HV_POSSIBLE
 config KVM_BOOK3S_32
 	tristate "KVM support for PowerPC book3s_32 processors"
 	depends on PPC_BOOK3S_32 && !SMP && !PTE_64BIT
+	depends on !CONTEXT_TRACKING_USER
 	select KVM
 	select KVM_BOOK3S_32_HANDLER
 	select KVM_BOOK3S_PR_POSSIBLE
@@ -105,6 +106,7 @@ config KVM_BOOK3S_64_HV
 config KVM_BOOK3S_64_PR
 	tristate "KVM support without using hypervisor mode in host"
 	depends on KVM_BOOK3S_64
+	depends on !CONTEXT_TRACKING_USER
 	select KVM_BOOK3S_PR_POSSIBLE
 	help
 	  Support running guest kernels in virtual machines on processors
@@ -190,6 +192,7 @@ config KVM_EXIT_TIMING
 config KVM_E500V2
 	bool "KVM support for PowerPC E500v2 processors"
 	depends on PPC_E500 && !PPC_E500MC
+	depends on !CONTEXT_TRACKING_USER
 	select KVM
 	select KVM_MMIO
 	select MMU_NOTIFIER
@@ -205,6 +208,7 @@ config KVM_E500V2
 config KVM_E500MC
 	bool "KVM support for PowerPC E500MC/E5500/E6500 processors"
 	depends on PPC_E500MC
+	depends on !CONTEXT_TRACKING_USER
 	select KVM
 	select KVM_MMIO
 	select KVM_BOOKE_HV
@@ -36,7 +36,17 @@ int exit_vmx_usercopy(void)
 {
 	disable_kernel_altivec();
 	pagefault_enable();
-	preempt_enable();
+	preempt_enable_no_resched();
+	/*
+	 * Must never explicitly call schedule (including preempt_enable())
+	 * while in a kuap-unlocked user copy, because the AMR register will
+	 * not be saved and restored across context switch. However preempt
+	 * kernels need to be preempted as soon as possible if need_resched is
+	 * set and we are preemptible. The hack here is to schedule a
+	 * decrementer to fire here and reschedule for us if necessary.
+	 */
+	if (IS_ENABLED(CONFIG_PREEMPT) && need_resched())
+		set_dec(1);
 	return 0;
 }
@@ -43,6 +43,29 @@
 
 static DEFINE_RAW_SPINLOCK(native_tlbie_lock);
 
+#ifdef CONFIG_LOCKDEP
+static struct lockdep_map hpte_lock_map =
+	STATIC_LOCKDEP_MAP_INIT("hpte_lock", &hpte_lock_map);
+
+static void acquire_hpte_lock(void)
+{
+	lock_map_acquire(&hpte_lock_map);
+}
+
+static void release_hpte_lock(void)
+{
+	lock_map_release(&hpte_lock_map);
+}
+#else
+static void acquire_hpte_lock(void)
+{
+}
+
+static void release_hpte_lock(void)
+{
+}
+#endif
+
 static inline unsigned long  ___tlbie(unsigned long vpn, int psize,
 						int apsize, int ssize)
 {
@@ -220,6 +243,7 @@ static inline void native_lock_hpte(struct hash_pte *hptep)
 {
 	unsigned long *word = (unsigned long *)&hptep->v;
 
+	acquire_hpte_lock();
 	while (1) {
 		if (!test_and_set_bit_lock(HPTE_LOCK_BIT, word))
 			break;
@@ -234,6 +258,7 @@ static inline void native_unlock_hpte(struct hash_pte *hptep)
 {
 	unsigned long *word = (unsigned long *)&hptep->v;
 
+	release_hpte_lock();
 	clear_bit_unlock(HPTE_LOCK_BIT, word);
 }
 
@@ -243,8 +268,11 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
 {
 	struct hash_pte *hptep = htab_address + hpte_group;
 	unsigned long hpte_v, hpte_r;
+	unsigned long flags;
 	int i;
 
+	local_irq_save(flags);
+
 	if (!(vflags & HPTE_V_BOLTED)) {
 		DBG_LOW("    insert(group=%lx, vpn=%016lx, pa=%016lx,"
 			" rflags=%lx, vflags=%lx, psize=%d)\n",
@@ -263,8 +291,10 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
 		hptep++;
 	}
 
-	if (i == HPTES_PER_GROUP)
+	if (i == HPTES_PER_GROUP) {
+		local_irq_restore(flags);
 		return -1;
+	}
 
 	hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
 	hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;
@@ -286,10 +316,13 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
 	 * Now set the first dword including the valid bit
 	 * NOTE: this also unlocks the hpte
 	 */
+	release_hpte_lock();
 	hptep->v = cpu_to_be64(hpte_v);
 
 	__asm__ __volatile__ ("ptesync" : : : "memory");
 
+	local_irq_restore(flags);
+
 	return i | (!!(vflags & HPTE_V_SECONDARY) << 3);
 }
 
@@ -327,6 +360,7 @@ static long native_hpte_remove(unsigned long hpte_group)
 		return -1;
 
 	/* Invalidate the hpte. NOTE: this also unlocks it */
+	release_hpte_lock();
 	hptep->v = 0;
 
 	return i;
@@ -339,6 +373,9 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
 	struct hash_pte *hptep = htab_address + slot;
 	unsigned long hpte_v, want_v;
 	int ret = 0, local = 0;
+	unsigned long irqflags;
+
+	local_irq_save(irqflags);
 
 	want_v = hpte_encode_avpn(vpn, bpsize, ssize);
 
@@ -382,6 +419,8 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
 	if (!(flags & HPTE_NOHPTE_UPDATE))
 		tlbie(vpn, bpsize, apsize, ssize, local);
 
+	local_irq_restore(irqflags);
+
 	return ret;
 }
 
@@ -445,6 +484,9 @@ static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
 	unsigned long vsid;
 	long slot;
 	struct hash_pte *hptep;
+	unsigned long flags;
+
+	local_irq_save(flags);
 
 	vsid = get_kernel_vsid(ea, ssize);
 	vpn = hpt_vpn(ea, vsid, ssize);
@@ -463,6 +505,8 @@ static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
 	 * actual page size will be same.
 	 */
 	tlbie(vpn, psize, psize, ssize, 0);
+
+	local_irq_restore(flags);
 }
 
 /*
@@ -476,6 +520,9 @@ static int native_hpte_removebolted(unsigned long ea, int psize, int ssize)
 	unsigned long vsid;
 	long slot;
 	struct hash_pte *hptep;
+	unsigned long flags;
+
+	local_irq_save(flags);
 
 	vsid = get_kernel_vsid(ea, ssize);
 	vpn = hpt_vpn(ea, vsid, ssize);
@@ -493,6 +540,9 @@ static int native_hpte_removebolted(unsigned long ea, int psize, int ssize)
 
 	/* Invalidate the TLB */
 	tlbie(vpn, psize, psize, ssize, 0);
+
+	local_irq_restore(flags);
+
 	return 0;
 }
 
@@ -517,10 +567,11 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
 		/* recheck with locks held */
 		hpte_v = hpte_get_old_v(hptep);
 
-		if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
+		if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) {
 			/* Invalidate the hpte. NOTE: this also unlocks it */
+			release_hpte_lock();
 			hptep->v = 0;
-		else
+		} else
 			native_unlock_hpte(hptep);
 	}
 	/*
@@ -580,10 +631,8 @@ static void native_hugepage_invalidate(unsigned long vsid,
 		hpte_v = hpte_get_old_v(hptep);
 
 		if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) {
-			/*
-			 * Invalidate the hpte. NOTE: this also unlocks it
-			 */
-
+			/* Invalidate the hpte. NOTE: this also unlocks it */
+			release_hpte_lock();
 			hptep->v = 0;
 		} else
 			native_unlock_hpte(hptep);
@@ -765,8 +814,10 @@ static void native_flush_hash_range(unsigned long number, int local)
 
 			if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
 				native_unlock_hpte(hptep);
-			else
+			else {
+				release_hpte_lock();
 				hptep->v = 0;
+			}
 
 		} pte_iterate_hashed_end();
 	}
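The lockdep map added at the top of this file is a general pattern worth noting. A hedged sketch of the same technique applied to any hand-rolled bit-spinlock (illustrative names; the kernel APIs used — STATIC_LOCKDEP_MAP_INIT, lock_map_acquire/release, test_and_set_bit_lock — are real):

    #include <linux/bitops.h>
    #include <linux/lockdep.h>

    static struct lockdep_map demo_lock_map =
            STATIC_LOCKDEP_MAP_INIT("demo_lock", &demo_lock_map);

    static void demo_lock(unsigned long *word)
    {
            lock_map_acquire(&demo_lock_map);   /* tell lockdep we now hold it */
            while (test_and_set_bit_lock(0, word))
                    cpu_relax();
    }

    static void demo_unlock(unsigned long *word)
    {
            lock_map_release(&demo_lock_map);   /* must pair with the acquire */
            clear_bit_unlock(0, word);
    }

With the map in place, lockdep can flag misuse (e.g. taking the HPTE lock with interrupts enabled) even though the lock itself is just a bit in the PTE word.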
@@ -404,7 +404,8 @@ EXPORT_SYMBOL_GPL(hash__has_transparent_hugepage);
 
 struct change_memory_parms {
 	unsigned long start, end, newpp;
-	unsigned int step, nr_cpus, master_cpu;
+	unsigned int step, nr_cpus;
+	atomic_t master_cpu;
 	atomic_t cpu_counter;
 };
 
@@ -478,7 +479,8 @@ static int change_memory_range_fn(void *data)
 {
 	struct change_memory_parms *parms = data;
 
-	if (parms->master_cpu != smp_processor_id())
+	// First CPU goes through, all others wait.
+	if (atomic_xchg(&parms->master_cpu, 1) == 1)
 		return chmem_secondary_loop(parms);
 
 	// Wait for all but one CPU (this one) to call-in
@@ -516,7 +518,7 @@ static bool hash__change_memory_range(unsigned long start, unsigned long end,
 	chmem_parms.end = end;
 	chmem_parms.step = step;
 	chmem_parms.newpp = newpp;
-	chmem_parms.master_cpu = smp_processor_id();
+	atomic_set(&chmem_parms.master_cpu, 0);
 
 	cpus_read_lock();
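A hedged sketch of the leader-election idiom the hunk above switches to (illustrative only; atomic_xchg() is the real kernel API): the first CPU to swap in 1 observes the old value 0 and becomes the master, every later CPU observes 1 and takes the secondary path — with no dependence on which CPU id happened to start the operation:

    #include <linux/atomic.h>

    static atomic_t master = ATOMIC_INIT(0);

    static int per_cpu_fn(void *data)
    {
            if (atomic_xchg(&master, 1) == 1)
                    return 0;       /* someone else already won: be a secondary */

            /* exactly one CPU ever reaches this point */
            return 1;
    }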
@@ -1981,7 +1981,7 @@ repeat:
 }
 
 #if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
-static DEFINE_SPINLOCK(linear_map_hash_lock);
+static DEFINE_RAW_SPINLOCK(linear_map_hash_lock);
 
 static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
 {
@@ -2005,10 +2005,10 @@ static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
 				    mmu_linear_psize, mmu_kernel_ssize);
 
 	BUG_ON (ret < 0);
-	spin_lock(&linear_map_hash_lock);
+	raw_spin_lock(&linear_map_hash_lock);
 	BUG_ON(linear_map_hash_slots[lmi] & 0x80);
 	linear_map_hash_slots[lmi] = ret | 0x80;
-	spin_unlock(&linear_map_hash_lock);
+	raw_spin_unlock(&linear_map_hash_lock);
 }
 
 static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
@@ -2018,14 +2018,14 @@ static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
 	unsigned long vpn = hpt_vpn(vaddr, vsid, mmu_kernel_ssize);
 
 	hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize);
-	spin_lock(&linear_map_hash_lock);
+	raw_spin_lock(&linear_map_hash_lock);
 	if (!(linear_map_hash_slots[lmi] & 0x80)) {
-		spin_unlock(&linear_map_hash_lock);
+		raw_spin_unlock(&linear_map_hash_lock);
 		return;
 	}
 	hidx = linear_map_hash_slots[lmi] & 0x7f;
 	linear_map_hash_slots[lmi] = 0;
-	spin_unlock(&linear_map_hash_lock);
+	raw_spin_unlock(&linear_map_hash_lock);
 	if (hidx & _PTEIDX_SECONDARY)
 		hash = ~hash;
 	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
@@ -35,6 +35,7 @@
 #include <asm/drmem.h>
 
 #include "pseries.h"
+#include "vas.h"	/* pseries_vas_dlpar_cpu() */
 
 /*
  * This isn't a module but we expose that to userspace
@@ -748,6 +749,16 @@ static ssize_t lparcfg_write(struct file *file, const char __user * buf,
 			return -EINVAL;
 
 		retval = update_ppp(new_entitled_ptr, NULL);
+
+		if (retval == H_SUCCESS || retval == H_CONSTRAINED) {
+			/*
+			 * The hypervisor assigns VAS resources based
+			 * on entitled capacity for shared mode.
+			 * Reconfig VAS windows based on DLPAR CPU events.
+			 */
+			if (pseries_vas_dlpar_cpu() != 0)
+				retval = H_HARDWARE;
+		}
 	} else if (!strcmp(kbuf, "capacity_weight")) {
 		char *endp;
 		*new_weight_ptr = (u8) simple_strtoul(tmp, &endp, 10);
@@ -200,16 +200,41 @@ static irqreturn_t pseries_vas_fault_thread_fn(int irq, void *data)
 	struct vas_user_win_ref *tsk_ref;
 	int rc;
 
-	rc = h_get_nx_fault(txwin->vas_win.winid, (u64)virt_to_phys(&crb));
-	if (!rc) {
-		tsk_ref = &txwin->vas_win.task_ref;
-		vas_dump_crb(&crb);
-		vas_update_csb(&crb, tsk_ref);
+	while (atomic_read(&txwin->pending_faults)) {
+		rc = h_get_nx_fault(txwin->vas_win.winid, (u64)virt_to_phys(&crb));
+		if (!rc) {
+			tsk_ref = &txwin->vas_win.task_ref;
+			vas_dump_crb(&crb);
+			vas_update_csb(&crb, tsk_ref);
+		}
+		atomic_dec(&txwin->pending_faults);
 	}
 
 	return IRQ_HANDLED;
 }
 
+/*
+ * irq_default_primary_handler() can be used only with IRQF_ONESHOT
+ * which disables IRQ before executing the thread handler and enables
+ * it after. But this disabling interrupt sets the VAS IRQ OFF
+ * state in the hypervisor. If the NX generates fault interrupt
+ * during this window, the hypervisor will not deliver this
+ * interrupt to the LPAR. So use VAS specific IRQ handler instead
+ * of calling the default primary handler.
+ */
+static irqreturn_t pseries_vas_irq_handler(int irq, void *data)
+{
+	struct pseries_vas_window *txwin = data;
+
+	/*
+	 * The thread hanlder will process this interrupt if it is
+	 * already running.
+	 */
+	atomic_inc(&txwin->pending_faults);
+
+	return IRQ_WAKE_THREAD;
+}
+
 /*
  * Allocate window and setup IRQ mapping.
  */
@@ -240,8 +265,9 @@ static int allocate_setup_window(struct pseries_vas_window *txwin,
 		goto out_irq;
 	}
 
-	rc = request_threaded_irq(txwin->fault_virq, NULL,
-				  pseries_vas_fault_thread_fn, IRQF_ONESHOT,
+	rc = request_threaded_irq(txwin->fault_virq,
+				  pseries_vas_irq_handler,
+				  pseries_vas_fault_thread_fn, 0,
 				  txwin->name, txwin);
 	if (rc) {
 		pr_err("VAS-Window[%d]: Request IRQ(%u) failed with %d\n",
@@ -826,6 +852,25 @@ int vas_reconfig_capabilties(u8 type, int new_nr_creds)
 	mutex_unlock(&vas_pseries_mutex);
 	return rc;
 }
+
+int pseries_vas_dlpar_cpu(void)
+{
+	int new_nr_creds, rc;
+
+	rc = h_query_vas_capabilities(H_QUERY_VAS_CAPABILITIES,
+				      vascaps[VAS_GZIP_DEF_FEAT_TYPE].feat,
+				      (u64)virt_to_phys(&hv_cop_caps));
+	if (!rc) {
+		new_nr_creds = be16_to_cpu(hv_cop_caps.target_lpar_creds);
+		rc = vas_reconfig_capabilties(VAS_GZIP_DEF_FEAT_TYPE, new_nr_creds);
+	}
+
+	if (rc)
+		pr_err("Failed reconfig VAS capabilities with DLPAR\n");
+
+	return rc;
+}
 
 /*
  * Total number of default credits available (target_credits)
 * in LPAR depends on number of cores configured. It varies based on
@@ -840,7 +885,15 @@ static int pseries_vas_notifier(struct notifier_block *nb,
 	struct of_reconfig_data *rd = data;
 	struct device_node *dn = rd->dn;
 	const __be32 *intserv = NULL;
-	int new_nr_creds, len, rc = 0;
+	int len;
+
+	/*
+	 * For shared CPU partition, the hypervisor assigns total credits
+	 * based on entitled core capacity. So updating VAS windows will
+	 * be called from lparcfg_write().
+	 */
+	if (is_shared_processor())
+		return NOTIFY_OK;
 
 	if ((action == OF_RECONFIG_ATTACH_NODE) ||
 	    (action == OF_RECONFIG_DETACH_NODE))
@@ -852,19 +905,7 @@ static int pseries_vas_notifier(struct notifier_block *nb,
 	if (!intserv)
 		return NOTIFY_OK;
 
-	rc = h_query_vas_capabilities(H_QUERY_VAS_CAPABILITIES,
-				      vascaps[VAS_GZIP_DEF_FEAT_TYPE].feat,
-				      (u64)virt_to_phys(&hv_cop_caps));
-	if (!rc) {
-		new_nr_creds = be16_to_cpu(hv_cop_caps.target_lpar_creds);
-		rc = vas_reconfig_capabilties(VAS_GZIP_DEF_FEAT_TYPE,
-					      new_nr_creds);
-	}
-
-	if (rc)
-		pr_err("Failed reconfig VAS capabilities with DLPAR\n");
-
-	return rc;
+	return pseries_vas_dlpar_cpu();
 }
 
 static struct notifier_block pseries_vas_nb = {
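The comment block in the hunk above carries the key design point: with IRQF_ONESHOT the line is masked while the thread runs, and on pseries that mask state reaches the hypervisor, which then drops fault interrupts. A hedged generic sketch of the replacement pattern (illustrative struct and names; request_threaded_irq(), IRQ_WAKE_THREAD and IRQ_HANDLED are the real APIs):

    /* Illustrative: count events in a cheap primary handler, drain in the thread. */
    struct demo_win { atomic_t pending; };

    static irqreturn_t demo_primary(int irq, void *data)
    {
            struct demo_win *w = data;

            atomic_inc(&w->pending);    /* note the event for the thread */
            return IRQ_WAKE_THREAD;     /* line stays enabled: no IRQF_ONESHOT */
    }

    static irqreturn_t demo_thread(int irq, void *data)
    {
            struct demo_win *w = data;

            while (atomic_read(&w->pending)) {
                    /* ... fetch and handle one queued event ... */
                    atomic_dec(&w->pending);
            }
            return IRQ_HANDLED;
    }

    /* registration, mirroring the patch:
     * request_threaded_irq(virq, demo_primary, demo_thread, 0, "demo", w);
     */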
@@ -132,6 +132,7 @@ struct pseries_vas_window {
 	u64 flags;
 	char *name;
 	int fault_virq;
+	atomic_t pending_faults;	/* Number of pending faults */
 };
 
 int sysfs_add_vas_caps(struct vas_cop_feat_caps *caps);
@@ -140,10 +141,15 @@ int __init sysfs_pseries_vas_init(struct vas_all_caps *vas_caps);
 
 #ifdef CONFIG_PPC_VAS
 int vas_migration_handler(int action);
+int pseries_vas_dlpar_cpu(void);
 #else
 static inline int vas_migration_handler(int action)
 {
 	return 0;
 }
+static inline int pseries_vas_dlpar_cpu(void)
+{
+	return 0;
+}
 #endif
 #endif /* _VAS_H */
@@ -411,14 +411,16 @@ config RISCV_ISA_SVPBMT
 
 	   If you don't know what to do here, say Y.
 
-config CC_HAS_ZICBOM
+config TOOLCHAIN_HAS_ZICBOM
 	bool
-	default y if 64BIT && $(cc-option,-mabi=lp64 -march=rv64ima_zicbom)
-	default y if 32BIT && $(cc-option,-mabi=ilp32 -march=rv32ima_zicbom)
+	default y
+	depends on !64BIT || $(cc-option,-mabi=lp64 -march=rv64ima_zicbom)
+	depends on !32BIT || $(cc-option,-mabi=ilp32 -march=rv32ima_zicbom)
+	depends on LLD_VERSION >= 150000 || LD_VERSION >= 23800
 
 config RISCV_ISA_ZICBOM
 	bool "Zicbom extension support for non-coherent DMA operation"
-	depends on CC_HAS_ZICBOM
+	depends on TOOLCHAIN_HAS_ZICBOM
 	depends on !XIP_KERNEL && MMU
 	select RISCV_DMA_NONCOHERENT
 	select RISCV_ALTERNATIVE
@@ -433,6 +435,13 @@ config RISCV_ISA_ZICBOM
 
 	   If you don't know what to do here, say Y.
 
+config TOOLCHAIN_HAS_ZIHINTPAUSE
+	bool
+	default y
+	depends on !64BIT || $(cc-option,-mabi=lp64 -march=rv64ima_zihintpause)
+	depends on !32BIT || $(cc-option,-mabi=ilp32 -march=rv32ima_zihintpause)
+	depends on LLD_VERSION >= 150000 || LD_VERSION >= 23600
+
 config FPU
 	bool "FPU support"
 	default y
@@ -59,12 +59,10 @@ toolchain-need-zicsr-zifencei := $(call cc-option-yn, -march=$(riscv-march-y)_zi
 riscv-march-$(toolchain-need-zicsr-zifencei) := $(riscv-march-y)_zicsr_zifencei
 
 # Check if the toolchain supports Zicbom extension
-toolchain-supports-zicbom := $(call cc-option-yn, -march=$(riscv-march-y)_zicbom)
-riscv-march-$(toolchain-supports-zicbom) := $(riscv-march-y)_zicbom
+riscv-march-$(CONFIG_TOOLCHAIN_HAS_ZICBOM) := $(riscv-march-y)_zicbom
 
 # Check if the toolchain supports Zihintpause extension
-toolchain-supports-zihintpause := $(call cc-option-yn, -march=$(riscv-march-y)_zihintpause)
-riscv-march-$(toolchain-supports-zihintpause) := $(riscv-march-y)_zihintpause
+riscv-march-$(CONFIG_TOOLCHAIN_HAS_ZIHINTPAUSE) := $(riscv-march-y)_zihintpause
 
 KBUILD_CFLAGS += -march=$(subst fd,,$(riscv-march-y))
 KBUILD_AFLAGS += -march=$(riscv-march-y)
@@ -14,8 +14,8 @@
 
 #define JUMP_LABEL_NOP_SIZE 4
 
-static __always_inline bool arch_static_branch(struct static_key *key,
-					       bool branch)
+static __always_inline bool arch_static_branch(struct static_key * const key,
+					       const bool branch)
 {
 	asm_volatile_goto(
 		"	.option push				\n\t"
@@ -35,8 +35,8 @@ label:
 	return true;
 }
 
-static __always_inline bool arch_static_branch_jump(struct static_key *key,
-						    bool branch)
+static __always_inline bool arch_static_branch_jump(struct static_key * const key,
+						    const bool branch)
 {
 	asm_volatile_goto(
 		"	.option push				\n\t"
@@ -21,7 +21,7 @@ static inline void cpu_relax(void)
 	 * Reduce instruction retirement.
 	 * This assumes the PC changes.
 	 */
-#ifdef __riscv_zihintpause
+#ifdef CONFIG_TOOLCHAIN_HAS_ZIHINTPAUSE
 	__asm__ __volatile__ ("pause");
 #else
 	/* Encoding of the pause instruction */
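A hedged note on the fallback branch, whose body is truncated in this view: PAUSE is defined as a FENCE hint with pred=W and all other fields zero, i.e. the 32-bit word 0x0100000f, so it can be emitted as a raw constant when the assembler predates Zihintpause. A sketch of the shape (the Kconfig symbol is from this patch; the encoding is the architectural one):

    /* Illustrative: emit PAUSE even without assembler support. */
    static inline void relax(void)
    {
    #ifdef CONFIG_TOOLCHAIN_HAS_ZIHINTPAUSE
            __asm__ __volatile__ ("pause");
    #else
            /* PAUSE == FENCE w,0 == 0x0100000f */
            __asm__ __volatile__ (".4byte 0x100000f");
    #endif
    }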
@@ -213,6 +213,9 @@ static void print_mmu(struct seq_file *f)
 
 static void *c_start(struct seq_file *m, loff_t *pos)
 {
+	if (*pos == nr_cpu_ids)
+		return NULL;
+
 	*pos = cpumask_next(*pos - 1, cpu_online_mask);
 	if ((*pos) < nr_cpu_ids)
 		return (void *)(uintptr_t)(1 + *pos);
@@ -113,6 +113,8 @@ static void __init kasan_populate_pud(pgd_t *pgd,
 		base_pud = pt_ops.get_pud_virt(pfn_to_phys(_pgd_pfn(*pgd)));
 	} else if (pgd_none(*pgd)) {
 		base_pud = memblock_alloc(PTRS_PER_PUD * sizeof(pud_t), PAGE_SIZE);
+		memcpy(base_pud, (void *)kasan_early_shadow_pud,
+		       sizeof(pud_t) * PTRS_PER_PUD);
 	} else {
 		base_pud = (pud_t *)pgd_page_vaddr(*pgd);
 		if (base_pud == lm_alias(kasan_early_shadow_pud)) {
@@ -173,8 +175,11 @@ static void __init kasan_populate_p4d(pgd_t *pgd,
 		base_p4d = pt_ops.get_p4d_virt(pfn_to_phys(_pgd_pfn(*pgd)));
 	} else {
 		base_p4d = (p4d_t *)pgd_page_vaddr(*pgd);
-		if (base_p4d == lm_alias(kasan_early_shadow_p4d))
+		if (base_p4d == lm_alias(kasan_early_shadow_p4d)) {
 			base_p4d = memblock_alloc(PTRS_PER_PUD * sizeof(p4d_t), PAGE_SIZE);
+			memcpy(base_p4d, (void *)kasan_early_shadow_p4d,
+			       sizeof(p4d_t) * PTRS_PER_P4D);
+		}
 	}
 
 	p4dp = base_p4d + p4d_index(vaddr);
@@ -102,8 +102,17 @@ SECTIONS
 		_compressed_start = .;
 		*(.vmlinux.bin.compressed)
 		_compressed_end = .;
-		FILL(0xff);
-		. = ALIGN(4096);
 	}
+
+#define SB_TRAILER_SIZE 32
+	/* Trailer needed for Secure Boot */
+	. += SB_TRAILER_SIZE; /* make sure .sb.trailer does not overwrite the previous section */
+	. = ALIGN(4096) - SB_TRAILER_SIZE;
+	.sb.trailer : {
+		QUAD(0)
+		QUAD(0)
+		QUAD(0)
+		QUAD(0x000000207a49504c)
+	}
 	_end = .;
@@ -17,7 +17,8 @@
 		"3: jl    1b\n"						\
 		"   lhi   %0,0\n"					\
 		"4: sacf  768\n"					\
-		EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b)		\
+		EX_TABLE(0b,4b) EX_TABLE(1b,4b)				\
+		EX_TABLE(2b,4b) EX_TABLE(3b,4b)				\
 		: "=d" (ret), "=&d" (oldval), "=&d" (newval),		\
 		  "=m" (*uaddr)						\
 		: "0" (-EFAULT), "d" (oparg), "a" (uaddr),		\
@@ -459,6 +459,7 @@ static int paiext_push_sample(void)
 		raw.frag.data = cpump->save;
 		raw.size = raw.frag.size;
 		data.raw = &raw;
+		data.sample_flags |= PERF_SAMPLE_RAW;
 	}
 
 	overflow = perf_event_overflow(event, &data, &regs);
@@ -157,7 +157,7 @@ unsigned long __clear_user(void __user *to, unsigned long size)
 	asm volatile(
 		"   lr	  0,%[spec]\n"
 		"0: mvcos 0(%1),0(%4),%0\n"
-		"   jz	  4f\n"
+		"6: jz	  4f\n"
 		"1: algr  %0,%2\n"
 		"   slgr  %1,%2\n"
 		"   j	  0b\n"
@@ -167,11 +167,11 @@ unsigned long __clear_user(void __user *to, unsigned long size)
 		"   clgr  %0,%3\n"	/* copy crosses next page boundary? */
 		"   jnh	  5f\n"
 		"3: mvcos 0(%1),0(%4),%3\n"
-		"   slgr  %0,%3\n"
+		"7: slgr  %0,%3\n"
 		"   j	  5f\n"
 		"4: slgr  %0,%0\n"
 		"5:\n"
-		EX_TABLE(0b,2b) EX_TABLE(3b,5b)
+		EX_TABLE(0b,2b) EX_TABLE(6b,2b) EX_TABLE(3b,5b) EX_TABLE(7b,5b)
 		: "+a" (size), "+a" (to), "+a" (tmp1), "=a" (tmp2)
 		: "a" (empty_zero_page), [spec] "d" (spec.val)
 		: "cc", "memory", "0");
@@ -64,7 +64,7 @@ static inline int __pcistg_mio_inuser(
 	asm volatile (
 		"       sacf    256\n"
 		"0:     llgc    %[tmp],0(%[src])\n"
-		"       sllg    %[val],%[val],8\n"
+		"4:     sllg    %[val],%[val],8\n"
 		"       aghi    %[src],1\n"
 		"       ogr     %[val],%[tmp]\n"
 		"       brctg   %[cnt],0b\n"
@@ -72,7 +72,7 @@ static inline int __pcistg_mio_inuser(
 		"2:     ipm     %[cc]\n"
 		"       srl     %[cc],28\n"
 		"3:     sacf    768\n"
-		EX_TABLE(0b, 3b) EX_TABLE(1b, 3b) EX_TABLE(2b, 3b)
+		EX_TABLE(0b, 3b) EX_TABLE(4b, 3b) EX_TABLE(1b, 3b) EX_TABLE(2b, 3b)
 		:
 		[src] "+a" (src), [cnt] "+d" (cnt),
 		[val] "+d" (val), [tmp] "=d" (tmp),
@@ -215,10 +215,10 @@ static inline int __pcilg_mio_inuser(
 		"2:     ahi     %[shift],-8\n"
 		"       srlg    %[tmp],%[val],0(%[shift])\n"
 		"3:     stc     %[tmp],0(%[dst])\n"
-		"       aghi    %[dst],1\n"
+		"5:     aghi    %[dst],1\n"
 		"       brctg   %[cnt],2b\n"
 		"4:     sacf    768\n"
-		EX_TABLE(0b, 4b) EX_TABLE(1b, 4b) EX_TABLE(3b, 4b)
+		EX_TABLE(0b, 4b) EX_TABLE(1b, 4b) EX_TABLE(3b, 4b) EX_TABLE(5b, 4b)
 		:
 		[ioaddr_len] "+&d" (ioaddr_len.pair),
 		[cc] "+d" (cc), [val] "=d" (val),
@@ -27,13 +27,17 @@
 #include <asm/cpu_device_id.h>
 #include <asm/simd.h>
 
+#define POLYVAL_ALIGN	16
+#define POLYVAL_ALIGN_ATTR __aligned(POLYVAL_ALIGN)
+#define POLYVAL_ALIGN_EXTRA ((POLYVAL_ALIGN - 1) & ~(CRYPTO_MINALIGN - 1))
+#define POLYVAL_CTX_SIZE (sizeof(struct polyval_tfm_ctx) + POLYVAL_ALIGN_EXTRA)
 #define NUM_KEY_POWERS	8
 
 struct polyval_tfm_ctx {
 	/*
 	 * These powers must be in the order h^8, ..., h^1.
 	 */
-	u8 key_powers[NUM_KEY_POWERS][POLYVAL_BLOCK_SIZE];
+	u8 key_powers[NUM_KEY_POWERS][POLYVAL_BLOCK_SIZE] POLYVAL_ALIGN_ATTR;
 };
 
 struct polyval_desc_ctx {
@@ -45,6 +49,11 @@ asmlinkage void clmul_polyval_update(const struct polyval_tfm_ctx *keys,
 	const u8 *in, size_t nblocks, u8 *accumulator);
 asmlinkage void clmul_polyval_mul(u8 *op1, const u8 *op2);
 
+static inline struct polyval_tfm_ctx *polyval_tfm_ctx(struct crypto_shash *tfm)
+{
+	return PTR_ALIGN(crypto_shash_ctx(tfm), POLYVAL_ALIGN);
+}
+
 static void internal_polyval_update(const struct polyval_tfm_ctx *keys,
 	const u8 *in, size_t nblocks, u8 *accumulator)
 {
@@ -72,7 +81,7 @@ static void internal_polyval_mul(u8 *op1, const u8 *op2)
 static int polyval_x86_setkey(struct crypto_shash *tfm,
 			const u8 *key, unsigned int keylen)
 {
-	struct polyval_tfm_ctx *tctx = crypto_shash_ctx(tfm);
+	struct polyval_tfm_ctx *tctx = polyval_tfm_ctx(tfm);
 	int i;
 
 	if (keylen != POLYVAL_BLOCK_SIZE)
@@ -102,7 +111,7 @@ static int polyval_x86_update(struct shash_desc *desc,
 			 const u8 *src, unsigned int srclen)
 {
 	struct polyval_desc_ctx *dctx = shash_desc_ctx(desc);
-	const struct polyval_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
+	const struct polyval_tfm_ctx *tctx = polyval_tfm_ctx(desc->tfm);
 	u8 *pos;
 	unsigned int nblocks;
 	unsigned int n;
@@ -143,7 +152,7 @@ static int polyval_x86_update(struct shash_desc *desc,
 static int polyval_x86_final(struct shash_desc *desc, u8 *dst)
 {
 	struct polyval_desc_ctx *dctx = shash_desc_ctx(desc);
-	const struct polyval_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
+	const struct polyval_tfm_ctx *tctx = polyval_tfm_ctx(desc->tfm);
 
 	if (dctx->bytes) {
 		internal_polyval_mul(dctx->buffer,
@@ -167,7 +176,7 @@ static struct shash_alg polyval_alg = {
 		.cra_driver_name	= "polyval-clmulni",
 		.cra_priority		= 200,
 		.cra_blocksize		= POLYVAL_BLOCK_SIZE,
-		.cra_ctxsize		= sizeof(struct polyval_tfm_ctx),
+		.cra_ctxsize		= POLYVAL_CTX_SIZE,
 		.cra_module		= THIS_MODULE,
 	},
 };
@ -801,7 +801,7 @@ static void perf_ibs_get_mem_lvl(union ibs_op_data2 *op_data2,
	/* Extension Memory */
	if (ibs_caps & IBS_CAPS_ZEN4 &&
	    ibs_data_src == IBS_DATA_SRC_EXT_EXT_MEM) {
		data_src->mem_lvl_num = PERF_MEM_LVLNUM_EXTN_MEM;
		data_src->mem_lvl_num = PERF_MEM_LVLNUM_CXL;
		if (op_data2->rmt_node) {
			data_src->mem_remote = PERF_MEM_REMOTE_REMOTE;
			/* IBS doesn't provide Remote socket detail */

@ -806,7 +806,11 @@ static const struct x86_cpu_id rapl_model_match[] __initconst = {
	X86_MATCH_INTEL_FAM6_MODEL(COMETLAKE, &model_skl),
	X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, &model_skl),
	X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, &model_skl),
	X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_N, &model_skl),
	X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &model_spr),
	X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, &model_skl),
	X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, &model_skl),
	X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S, &model_skl),
	{},
};
MODULE_DEVICE_TABLE(x86cpu, rapl_model_match);

@ -10,10 +10,13 @@
/* Even with __builtin_ the compiler may decide to use the out of line
   function. */

#if defined(__SANITIZE_MEMORY__) && defined(__NO_FORTIFY)
#include <linux/kmsan_string.h>
#endif

#define __HAVE_ARCH_MEMCPY 1
#if defined(__SANITIZE_MEMORY__)
#if defined(__SANITIZE_MEMORY__) && defined(__NO_FORTIFY)
#undef memcpy
void *__msan_memcpy(void *dst, const void *src, size_t size);
#define memcpy __msan_memcpy
#else
extern void *memcpy(void *to, const void *from, size_t len);
@ -21,7 +24,7 @@ extern void *memcpy(void *to, const void *from, size_t len);
extern void *__memcpy(void *to, const void *from, size_t len);

#define __HAVE_ARCH_MEMSET
#if defined(__SANITIZE_MEMORY__)
#if defined(__SANITIZE_MEMORY__) && defined(__NO_FORTIFY)
extern void *__msan_memset(void *s, int c, size_t n);
#undef memset
#define memset __msan_memset
@ -67,7 +70,7 @@ static inline void *memset64(uint64_t *s, uint64_t v, size_t n)
}

#define __HAVE_ARCH_MEMMOVE
#if defined(__SANITIZE_MEMORY__)
#if defined(__SANITIZE_MEMORY__) && defined(__NO_FORTIFY)
#undef memmove
void *__msan_memmove(void *dest, const void *src, size_t len);
#define memmove __msan_memmove

@ -254,24 +254,25 @@ extern void __put_user_nocheck_8(void);
#define __put_user_size(x, ptr, size, label) \
do { \
	__typeof__(*(ptr)) __x = (x); /* eval x once */ \
	__chk_user_ptr(ptr); \
	__typeof__(ptr) __ptr = (ptr); /* eval ptr once */ \
	__chk_user_ptr(__ptr); \
	switch (size) { \
	case 1: \
		__put_user_goto(__x, ptr, "b", "iq", label); \
		__put_user_goto(__x, __ptr, "b", "iq", label); \
		break; \
	case 2: \
		__put_user_goto(__x, ptr, "w", "ir", label); \
		__put_user_goto(__x, __ptr, "w", "ir", label); \
		break; \
	case 4: \
		__put_user_goto(__x, ptr, "l", "ir", label); \
		__put_user_goto(__x, __ptr, "l", "ir", label); \
		break; \
	case 8: \
		__put_user_goto_u64(__x, ptr, label); \
		__put_user_goto_u64(__x, __ptr, label); \
		break; \
	default: \
		__put_user_bad(); \
	} \
	instrument_put_user(__x, ptr, size); \
	instrument_put_user(__x, __ptr, size); \
} while (0)

#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT

@ -1133,11 +1133,13 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
		entry->eax = max(entry->eax, 0x80000021);
		break;
	case 0x80000001:
		entry->ebx &= ~GENMASK(27, 16);
		cpuid_entry_override(entry, CPUID_8000_0001_EDX);
		cpuid_entry_override(entry, CPUID_8000_0001_ECX);
		break;
	case 0x80000006:
		/* L2 cache and TLB: pass through host info. */
		/* Drop reserved bits, pass host L2 cache and TLB info. */
		entry->edx &= ~GENMASK(17, 16);
		break;
	case 0x80000007: /* Advanced power management */
		/* invariant TSC is CPUID.80000007H:EDX[8] */
@ -1167,6 +1169,7 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
			g_phys_as = phys_as;

		entry->eax = g_phys_as | (virt_as << 8);
		entry->ecx &= ~(GENMASK(31, 16) | GENMASK(11, 8));
		entry->edx = 0;
		cpuid_entry_override(entry, CPUID_8000_0008_EBX);
		break;
@ -1186,6 +1189,9 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
		entry->ecx = entry->edx = 0;
		break;
	case 0x8000001a:
		entry->eax &= GENMASK(2, 0);
		entry->ebx = entry->ecx = entry->edx = 0;
		break;
	case 0x8000001e:
		break;
	case 0x8000001F:
@ -1193,7 +1199,8 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
		entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
	} else {
		cpuid_entry_override(entry, CPUID_8000_001F_EAX);

		/* Clear NumVMPL since KVM does not support VMPL. */
		entry->ebx &= ~GENMASK(31, 12);
		/*
		 * Enumerate '0' for "PA bits reduction", the adjusted
		 * MAXPHYADDR is enumerated directly (see 0x80000008).

@ -158,11 +158,16 @@ out:
static int kvm_mmu_rmaps_stat_open(struct inode *inode, struct file *file)
{
	struct kvm *kvm = inode->i_private;
	int r;

	if (!kvm_get_kvm_safe(kvm))
		return -ENOENT;

	return single_open(file, kvm_mmu_rmaps_stat_show, kvm);
	r = single_open(file, kvm_mmu_rmaps_stat_show, kvm);
	if (r < 0)
		kvm_put_kvm(kvm);

	return r;
}

static int kvm_mmu_rmaps_stat_release(struct inode *inode, struct file *file)

@ -791,8 +791,7 @@ static int linearize(struct x86_emulate_ctxt *ctxt,
			ctxt->mode, linear);
}

static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
			     enum x86emul_mode mode)
static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst)
{
	ulong linear;
	int rc;
@ -802,41 +801,71 @@ static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,

	if (ctxt->op_bytes != sizeof(unsigned long))
		addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
	rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
	rc = __linearize(ctxt, addr, &max_size, 1, false, true, ctxt->mode, &linear);
	if (rc == X86EMUL_CONTINUE)
		ctxt->_eip = addr.ea;
	return rc;
}

static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
static inline int emulator_recalc_and_set_mode(struct x86_emulate_ctxt *ctxt)
{
	return assign_eip(ctxt, dst, ctxt->mode);
	u64 efer;
	struct desc_struct cs;
	u16 selector;
	u32 base3;

	ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);

	if (!(ctxt->ops->get_cr(ctxt, 0) & X86_CR0_PE)) {
		/* Real mode. cpu must not have long mode active */
		if (efer & EFER_LMA)
			return X86EMUL_UNHANDLEABLE;
		ctxt->mode = X86EMUL_MODE_REAL;
		return X86EMUL_CONTINUE;
	}

	if (ctxt->eflags & X86_EFLAGS_VM) {
		/* Protected/VM86 mode. cpu must not have long mode active */
		if (efer & EFER_LMA)
			return X86EMUL_UNHANDLEABLE;
		ctxt->mode = X86EMUL_MODE_VM86;
		return X86EMUL_CONTINUE;
	}

	if (!ctxt->ops->get_segment(ctxt, &selector, &cs, &base3, VCPU_SREG_CS))
		return X86EMUL_UNHANDLEABLE;

	if (efer & EFER_LMA) {
		if (cs.l) {
			/* Proper long mode */
			ctxt->mode = X86EMUL_MODE_PROT64;
		} else if (cs.d) {
			/* 32 bit compatibility mode */
			ctxt->mode = X86EMUL_MODE_PROT32;
		} else {
			ctxt->mode = X86EMUL_MODE_PROT16;
		}
	} else {
		/* Legacy 32 bit / 16 bit mode */
		ctxt->mode = cs.d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
	}

	return X86EMUL_CONTINUE;
}

static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
			  const struct desc_struct *cs_desc)
static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
{
	enum x86emul_mode mode = ctxt->mode;
	int rc;
	return assign_eip(ctxt, dst);
}

#ifdef CONFIG_X86_64
	if (ctxt->mode >= X86EMUL_MODE_PROT16) {
		if (cs_desc->l) {
			u64 efer = 0;
static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst)
{
	int rc = emulator_recalc_and_set_mode(ctxt);

			ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
			if (efer & EFER_LMA)
				mode = X86EMUL_MODE_PROT64;
		} else
			mode = X86EMUL_MODE_PROT32; /* temporary value */
	}
#endif
	if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
		mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
	rc = assign_eip(ctxt, dst, mode);
	if (rc == X86EMUL_CONTINUE)
		ctxt->mode = mode;
	return rc;
	if (rc != X86EMUL_CONTINUE)
		return rc;

	return assign_eip(ctxt, dst);
}

static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)

@ -2172,7 +2201,7 @@ static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
	rc = assign_eip_far(ctxt, ctxt->src.val);
	/* Error handling is not implemented. */
	if (rc != X86EMUL_CONTINUE)
		return X86EMUL_UNHANDLEABLE;
@ -2250,7 +2279,7 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt)
		       &new_desc);
	if (rc != X86EMUL_CONTINUE)
		return rc;
	rc = assign_eip_far(ctxt, eip, &new_desc);
	rc = assign_eip_far(ctxt, eip);
	/* Error handling is not implemented. */
	if (rc != X86EMUL_CONTINUE)
		return X86EMUL_UNHANDLEABLE;
@ -2432,7 +2461,7 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt,
	ctxt->eflags = GET_SMSTATE(u32, smstate, 0x7ff4) | X86_EFLAGS_FIXED;
	ctxt->_eip = GET_SMSTATE(u32, smstate, 0x7ff0);

	for (i = 0; i < NR_EMULATOR_GPRS; i++)
	for (i = 0; i < 8; i++)
		*reg_write(ctxt, i) = GET_SMSTATE(u32, smstate, 0x7fd0 + i * 4);

	val = GET_SMSTATE(u32, smstate, 0x7fcc);
@ -2489,7 +2518,7 @@ static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt,
	u16 selector;
	int i, r;

	for (i = 0; i < NR_EMULATOR_GPRS; i++)
	for (i = 0; i < 16; i++)
		*reg_write(ctxt, i) = GET_SMSTATE(u64, smstate, 0x7ff8 - i * 8);

	ctxt->_eip = GET_SMSTATE(u64, smstate, 0x7f78);
@ -2633,7 +2662,7 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
	 * those side effects need to be explicitly handled for both success
	 * and shutdown.
	 */
	return X86EMUL_CONTINUE;
	return emulator_recalc_and_set_mode(ctxt);

emulate_shutdown:
	ctxt->ops->triple_fault(ctxt);
@ -2876,6 +2905,7 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

	ctxt->_eip = rdx;
	ctxt->mode = usermode;
	*reg_write(ctxt, VCPU_REGS_RSP) = rcx;

	return X86EMUL_CONTINUE;
@ -3469,7 +3499,7 @@ static int em_call_far(struct x86_emulate_ctxt *ctxt)
	if (rc != X86EMUL_CONTINUE)
		return rc;

	rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
	rc = assign_eip_far(ctxt, ctxt->src.val);
	if (rc != X86EMUL_CONTINUE)
		goto fail;

@ -3611,11 +3641,25 @@ static int em_movbe(struct x86_emulate_ctxt *ctxt)

static int em_cr_write(struct x86_emulate_ctxt *ctxt)
{
	if (ctxt->ops->set_cr(ctxt, ctxt->modrm_reg, ctxt->src.val))
	int cr_num = ctxt->modrm_reg;
	int r;

	if (ctxt->ops->set_cr(ctxt, cr_num, ctxt->src.val))
		return emulate_gp(ctxt, 0);

	/* Disable writeback. */
	ctxt->dst.type = OP_NONE;

	if (cr_num == 0) {
		/*
		 * CR0 write might have updated CR0.PE and/or CR0.PG
		 * which can affect the cpu's execution mode.
		 */
		r = emulator_recalc_and_set_mode(ctxt);
		if (r != X86EMUL_CONTINUE)
			return r;
	}

	return X86EMUL_CONTINUE;
}

@ -8263,6 +8263,11 @@ static __init int hardware_setup(void)
	if (!cpu_has_virtual_nmis())
		enable_vnmi = 0;

#ifdef CONFIG_X86_SGX_KVM
	if (!cpu_has_vmx_encls_vmexit())
		enable_sgx = false;
#endif

	/*
	 * set_apic_access_page_addr() is used to reload apic access
	 * page upon invalidation. No need to do anything if not

@ -2315,11 +2315,11 @@ static void kvm_write_system_time(struct kvm_vcpu *vcpu, gpa_t system_time,

	/* we verify if the enable bit is set... */
	if (system_time & 1) {
		kvm_gfn_to_pfn_cache_init(vcpu->kvm, &vcpu->arch.pv_time, vcpu,
					  KVM_HOST_USES_PFN, system_time & ~1ULL,
					  sizeof(struct pvclock_vcpu_time_info));
		kvm_gpc_activate(vcpu->kvm, &vcpu->arch.pv_time, vcpu,
				 KVM_HOST_USES_PFN, system_time & ~1ULL,
				 sizeof(struct pvclock_vcpu_time_info));
	} else {
		kvm_gfn_to_pfn_cache_destroy(vcpu->kvm, &vcpu->arch.pv_time);
		kvm_gpc_deactivate(vcpu->kvm, &vcpu->arch.pv_time);
	}

	return;
@ -3388,7 +3388,7 @@ static int kvm_pv_enable_async_pf_int(struct kvm_vcpu *vcpu, u64 data)

static void kvmclock_reset(struct kvm_vcpu *vcpu)
{
	kvm_gfn_to_pfn_cache_destroy(vcpu->kvm, &vcpu->arch.pv_time);
	kvm_gpc_deactivate(vcpu->kvm, &vcpu->arch.pv_time);
	vcpu->arch.time = 0;
}

@ -10044,7 +10044,20 @@ static int kvm_check_and_inject_events(struct kvm_vcpu *vcpu,
	    kvm_x86_ops.nested_ops->has_events(vcpu))
		*req_immediate_exit = true;

	WARN_ON(kvm_is_exception_pending(vcpu));
	/*
	 * KVM must never queue a new exception while injecting an event; KVM
	 * is done emulating and should only propagate the to-be-injected event
	 * to the VMCS/VMCB. Queueing a new exception can put the vCPU into an
	 * infinite loop as KVM will bail from VM-Enter to inject the pending
	 * exception and start the cycle all over.
	 *
	 * Exempt triple faults as they have special handling and won't put the
	 * vCPU into an infinite loop. Triple fault can be queued when running
	 * VMX without unrestricted guest, as that requires KVM to emulate Real
	 * Mode events (see kvm_inject_realmode_interrupt()).
	 */
	WARN_ON_ONCE(vcpu->arch.exception.pending ||
		     vcpu->arch.exception_vmexit.pending);
	return 0;

out:
@ -11816,6 +11829,8 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
	vcpu->arch.regs_avail = ~0;
	vcpu->arch.regs_dirty = ~0;

	kvm_gpc_init(&vcpu->arch.pv_time);

	if (!irqchip_in_kernel(vcpu->kvm) || kvm_vcpu_is_reset_bsp(vcpu))
		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
	else

@ -42,13 +42,13 @@ static int kvm_xen_shared_info_init(struct kvm *kvm, gfn_t gfn)
	int idx = srcu_read_lock(&kvm->srcu);

	if (gfn == GPA_INVALID) {
		kvm_gfn_to_pfn_cache_destroy(kvm, gpc);
		kvm_gpc_deactivate(kvm, gpc);
		goto out;
	}

	do {
		ret = kvm_gfn_to_pfn_cache_init(kvm, gpc, NULL, KVM_HOST_USES_PFN,
						gpa, PAGE_SIZE);
		ret = kvm_gpc_activate(kvm, gpc, NULL, KVM_HOST_USES_PFN, gpa,
				       PAGE_SIZE);
		if (ret)
			goto out;

@ -554,15 +554,15 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
			     offsetof(struct compat_vcpu_info, time));

		if (data->u.gpa == GPA_INVALID) {
			kvm_gfn_to_pfn_cache_destroy(vcpu->kvm, &vcpu->arch.xen.vcpu_info_cache);
			kvm_gpc_deactivate(vcpu->kvm, &vcpu->arch.xen.vcpu_info_cache);
			r = 0;
			break;
		}

		r = kvm_gfn_to_pfn_cache_init(vcpu->kvm,
					      &vcpu->arch.xen.vcpu_info_cache,
					      NULL, KVM_HOST_USES_PFN, data->u.gpa,
					      sizeof(struct vcpu_info));
		r = kvm_gpc_activate(vcpu->kvm,
				     &vcpu->arch.xen.vcpu_info_cache, NULL,
				     KVM_HOST_USES_PFN, data->u.gpa,
				     sizeof(struct vcpu_info));
		if (!r)
			kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);

@ -570,16 +570,16 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)

	case KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO:
		if (data->u.gpa == GPA_INVALID) {
			kvm_gfn_to_pfn_cache_destroy(vcpu->kvm,
						     &vcpu->arch.xen.vcpu_time_info_cache);
			kvm_gpc_deactivate(vcpu->kvm,
					   &vcpu->arch.xen.vcpu_time_info_cache);
			r = 0;
			break;
		}

		r = kvm_gfn_to_pfn_cache_init(vcpu->kvm,
					      &vcpu->arch.xen.vcpu_time_info_cache,
					      NULL, KVM_HOST_USES_PFN, data->u.gpa,
					      sizeof(struct pvclock_vcpu_time_info));
		r = kvm_gpc_activate(vcpu->kvm,
				     &vcpu->arch.xen.vcpu_time_info_cache,
				     NULL, KVM_HOST_USES_PFN, data->u.gpa,
				     sizeof(struct pvclock_vcpu_time_info));
		if (!r)
			kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
		break;
@ -590,16 +590,15 @@ int kvm_xen_vcpu_set_attr(struct kvm_vcpu *vcpu, struct kvm_xen_vcpu_attr *data)
			break;
		}
		if (data->u.gpa == GPA_INVALID) {
			kvm_gfn_to_pfn_cache_destroy(vcpu->kvm,
						     &vcpu->arch.xen.runstate_cache);
			kvm_gpc_deactivate(vcpu->kvm,
					   &vcpu->arch.xen.runstate_cache);
			r = 0;
			break;
		}

		r = kvm_gfn_to_pfn_cache_init(vcpu->kvm,
					      &vcpu->arch.xen.runstate_cache,
					      NULL, KVM_HOST_USES_PFN, data->u.gpa,
					      sizeof(struct vcpu_runstate_info));
		r = kvm_gpc_activate(vcpu->kvm, &vcpu->arch.xen.runstate_cache,
				     NULL, KVM_HOST_USES_PFN, data->u.gpa,
				     sizeof(struct vcpu_runstate_info));
		break;

	case KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT:
@ -1667,18 +1666,18 @@ static int kvm_xen_eventfd_assign(struct kvm *kvm,
	case EVTCHNSTAT_ipi:
		/* IPI must map back to the same port# */
		if (data->u.evtchn.deliver.port.port != data->u.evtchn.send_port)
			goto out; /* -EINVAL */
			goto out_noeventfd; /* -EINVAL */
		break;

	case EVTCHNSTAT_interdomain:
		if (data->u.evtchn.deliver.port.port) {
			if (data->u.evtchn.deliver.port.port >= max_evtchn_port(kvm))
				goto out; /* -EINVAL */
				goto out_noeventfd; /* -EINVAL */
		} else {
			eventfd = eventfd_ctx_fdget(data->u.evtchn.deliver.eventfd.fd);
			if (IS_ERR(eventfd)) {
				ret = PTR_ERR(eventfd);
				goto out;
				goto out_noeventfd;
			}
		}
		break;
@ -1718,6 +1717,7 @@ static int kvm_xen_eventfd_assign(struct kvm *kvm,
out:
	if (eventfd)
		eventfd_ctx_put(eventfd);
out_noeventfd:
	kfree(evtchnfd);
	return ret;
}
@ -1816,7 +1816,12 @@ void kvm_xen_init_vcpu(struct kvm_vcpu *vcpu)
{
	vcpu->arch.xen.vcpu_id = vcpu->vcpu_idx;
	vcpu->arch.xen.poll_evtchn = 0;

	timer_setup(&vcpu->arch.xen.poll_timer, cancel_evtchn_poll, 0);

	kvm_gpc_init(&vcpu->arch.xen.runstate_cache);
	kvm_gpc_init(&vcpu->arch.xen.vcpu_info_cache);
	kvm_gpc_init(&vcpu->arch.xen.vcpu_time_info_cache);
}

void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu)
@ -1824,18 +1829,17 @@ void kvm_xen_destroy_vcpu(struct kvm_vcpu *vcpu)
	if (kvm_xen_timer_enabled(vcpu))
		kvm_xen_stop_timer(vcpu);

	kvm_gfn_to_pfn_cache_destroy(vcpu->kvm,
				     &vcpu->arch.xen.runstate_cache);
	kvm_gfn_to_pfn_cache_destroy(vcpu->kvm,
				     &vcpu->arch.xen.vcpu_info_cache);
	kvm_gfn_to_pfn_cache_destroy(vcpu->kvm,
				     &vcpu->arch.xen.vcpu_time_info_cache);
	kvm_gpc_deactivate(vcpu->kvm, &vcpu->arch.xen.runstate_cache);
	kvm_gpc_deactivate(vcpu->kvm, &vcpu->arch.xen.vcpu_info_cache);
	kvm_gpc_deactivate(vcpu->kvm, &vcpu->arch.xen.vcpu_time_info_cache);

	del_timer_sync(&vcpu->arch.xen.poll_timer);
}

void kvm_xen_init_vm(struct kvm *kvm)
{
	idr_init(&kvm->arch.xen.evtchn_ports);
	kvm_gpc_init(&kvm->arch.xen.shinfo_cache);
}

void kvm_xen_destroy_vm(struct kvm *kvm)
@ -1843,7 +1847,7 @@ void kvm_xen_destroy_vm(struct kvm *kvm)
	struct evtchnfd *evtchnfd;
	int i;

	kvm_gfn_to_pfn_cache_destroy(kvm, &kvm->arch.xen.shinfo_cache);
	kvm_gpc_deactivate(kvm, &kvm->arch.xen.shinfo_cache);

	idr_for_each_entry(&kvm->arch.xen.evtchn_ports, evtchnfd, i) {
		if (!evtchnfd->deliver.port.port)

@ -26,6 +26,7 @@ GCOV_PROFILE := n
KASAN_SANITIZE := n
UBSAN_SANITIZE := n
KCSAN_SANITIZE := n
KMSAN_SANITIZE := n
KCOV_INSTRUMENT := n

# These are adjustments to the compiler flags used for objects that

@ -611,6 +611,7 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
		.nr_tags = 1,
	};
	u64 alloc_time_ns = 0;
	struct request *rq;
	unsigned int cpu;
	unsigned int tag;
	int ret;
@ -660,8 +661,12 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q,
	tag = blk_mq_get_tag(&data);
	if (tag == BLK_MQ_NO_TAG)
		goto out_queue_exit;
	return blk_mq_rq_ctx_init(&data, blk_mq_tags_from_data(&data), tag,
	rq = blk_mq_rq_ctx_init(&data, blk_mq_tags_from_data(&data), tag,
					alloc_time_ns);
	rq->__data_len = 0;
	rq->__sector = (sector_t) -1;
	rq->bio = rq->biotail = NULL;
	return rq;

out_queue_exit:
	blk_queue_exit(q);

@ -410,9 +410,10 @@ int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
	 * Otherwise just allocate the device numbers for both the whole device
	 * and all partitions from the extended dev_t space.
	 */
	ret = -EINVAL;
	if (disk->major) {
		if (WARN_ON(!disk->minors))
			return -EINVAL;
			goto out_exit_elevator;

		if (disk->minors > DISK_MAX_PARTS) {
			pr_err("block: can't allocate more than %d partitions\n",
@ -420,14 +421,14 @@ int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
			disk->minors = DISK_MAX_PARTS;
		}
		if (disk->first_minor + disk->minors > MINORMASK + 1)
			return -EINVAL;
			goto out_exit_elevator;
	} else {
		if (WARN_ON(disk->minors))
			return -EINVAL;
			goto out_exit_elevator;

		ret = blk_alloc_ext_minor();
		if (ret < 0)
			return ret;
			goto out_exit_elevator;
		disk->major = BLOCK_EXT_MAJOR;
		disk->first_minor = ret;
	}
@ -540,6 +541,9 @@ out_device_del:
out_free_ext_minor:
	if (disk->major == BLOCK_EXT_MAJOR)
		blk_free_ext_minor(disk->first_minor);
out_exit_elevator:
	if (disk->queue->elevator)
		elevator_exit(disk->queue);
	return ret;
}
EXPORT_SYMBOL(device_add_disk);

@ -27,7 +27,7 @@
 * Arbitrary retries in case the remote processor is slow to respond
 * to PCC commands
 */
#define PCC_CMD_WAIT_RETRIES_NUM 500
#define PCC_CMD_WAIT_RETRIES_NUM 500ULL

struct pcc_data {
	struct pcc_mbox_chan *pcc_chan;

@ -425,6 +425,13 @@ static const struct dmi_system_id asus_laptop[] = {
			DMI_MATCH(DMI_BOARD_NAME, "S5402ZA"),
		},
	},
	{
		.ident = "Asus Vivobook S5602ZA",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
			DMI_MATCH(DMI_BOARD_NAME, "S5602ZA"),
		},
	},
	{ }
};

@ -789,6 +789,7 @@ static bool acpi_info_matches_ids(struct acpi_device_info *info,
static const char * const acpi_ignore_dep_ids[] = {
	"PNP0D80", /* Windows-compatible System Power Management Controller */
	"INT33BD", /* Intel Baytrail Mailbox Device */
	"LATT2021", /* Lattice FW Update Client Driver */
	NULL
};

@ -2952,6 +2952,10 @@ static int genpd_iterate_idle_states(struct device_node *dn,
		np = it.node;
		if (!of_match_node(idle_state_match, np))
			continue;

		if (!of_device_is_available(np))
			continue;

		if (states) {
			ret = genpd_parse_state(&states[i], np);
			if (ret) {

@ -229,7 +229,7 @@ EXPORT_SYMBOL_GPL(device_property_read_string);
 * Find a given string in a string array and if it is found return the
 * index back.
 *
 * Return: %0 if the property was found (success),
 * Return: index, starting from %0, if the property was found (success),
 *	   %-EINVAL if given arguments are not valid,
 *	   %-ENODATA if the property does not have a value,
 *	   %-EPROTO if the property is not an array of strings,
@ -450,7 +450,7 @@ EXPORT_SYMBOL_GPL(fwnode_property_read_string);
 * Find a given string in a string array and if it is found return the
 * index back.
 *
 * Return: %0 if the property was found (success),
 * Return: index, starting from %0, if the property was found (success),
 *	   %-EINVAL if given arguments are not valid,
 *	   %-ENODATA if the property does not have a value,
 *	   %-EPROTO if the property is not an array of strings,

@ -7222,8 +7222,10 @@ static int __init rbd_sysfs_init(void)
	int ret;

	ret = device_register(&rbd_root_dev);
	if (ret < 0)
	if (ret < 0) {
		put_device(&rbd_root_dev);
		return ret;
	}

	ret = bus_register(&rbd_bus_type);
	if (ret < 0)

@ -219,7 +219,7 @@ static void virtbt_rx_work(struct work_struct *work)
	if (!skb)
		return;

	skb->len = len;
	skb_put(skb, len);
	virtbt_rx_handle(vbt, skb);

	if (virtbt_add_inbuf(vbt) < 0)

@ -791,13 +791,13 @@ void __init random_init_early(const char *command_line)
#endif

	for (i = 0, arch_bits = sizeof(entropy) * 8; i < ARRAY_SIZE(entropy);) {
		longs = arch_get_random_seed_longs(entropy, ARRAY_SIZE(entropy) - i);
		longs = arch_get_random_seed_longs_early(entropy, ARRAY_SIZE(entropy) - i);
		if (longs) {
			_mix_pool_bytes(entropy, sizeof(*entropy) * longs);
			i += longs;
			continue;
		}
		longs = arch_get_random_longs(entropy, ARRAY_SIZE(entropy) - i);
		longs = arch_get_random_longs_early(entropy, ARRAY_SIZE(entropy) - i);
		if (longs) {
			_mix_pool_bytes(entropy, sizeof(*entropy) * longs);
			i += longs;

@ -232,34 +232,45 @@ static const enum counter_function quad8_count_functions_list[] = {
	COUNTER_FUNCTION_QUADRATURE_X4,
};

static int quad8_function_get(const struct quad8 *const priv, const size_t id,
			      enum counter_function *const function)
{
	if (!priv->quadrature_mode[id]) {
		*function = COUNTER_FUNCTION_PULSE_DIRECTION;
		return 0;
	}

	switch (priv->quadrature_scale[id]) {
	case 0:
		*function = COUNTER_FUNCTION_QUADRATURE_X1_A;
		return 0;
	case 1:
		*function = COUNTER_FUNCTION_QUADRATURE_X2_A;
		return 0;
	case 2:
		*function = COUNTER_FUNCTION_QUADRATURE_X4;
		return 0;
	default:
		/* should never reach this path */
		return -EINVAL;
	}
}

static int quad8_function_read(struct counter_device *counter,
			       struct counter_count *count,
			       enum counter_function *function)
{
	struct quad8 *const priv = counter_priv(counter);
	const int id = count->id;
	unsigned long irqflags;
	int retval;

	spin_lock_irqsave(&priv->lock, irqflags);

	if (priv->quadrature_mode[id])
		switch (priv->quadrature_scale[id]) {
		case 0:
			*function = COUNTER_FUNCTION_QUADRATURE_X1_A;
			break;
		case 1:
			*function = COUNTER_FUNCTION_QUADRATURE_X2_A;
			break;
		case 2:
			*function = COUNTER_FUNCTION_QUADRATURE_X4;
			break;
		}
	else
		*function = COUNTER_FUNCTION_PULSE_DIRECTION;
	retval = quad8_function_get(priv, count->id, function);

	spin_unlock_irqrestore(&priv->lock, irqflags);

	return 0;
	return retval;
}

static int quad8_function_write(struct counter_device *counter,
@ -359,6 +370,7 @@ static int quad8_action_read(struct counter_device *counter,
			     enum counter_synapse_action *action)
{
	struct quad8 *const priv = counter_priv(counter);
	unsigned long irqflags;
	int err;
	enum counter_function function;
	const size_t signal_a_id = count->synapses[0].signal->id;
@ -374,9 +386,21 @@ static int quad8_action_read(struct counter_device *counter,
		return 0;
	}

	err = quad8_function_read(counter, count, &function);
	if (err)
	spin_lock_irqsave(&priv->lock, irqflags);

	/* Get Count function and direction atomically */
	err = quad8_function_get(priv, count->id, &function);
	if (err) {
		spin_unlock_irqrestore(&priv->lock, irqflags);
		return err;
	}
	err = quad8_direction_read(counter, count, &direction);
	if (err) {
		spin_unlock_irqrestore(&priv->lock, irqflags);
		return err;
	}

	spin_unlock_irqrestore(&priv->lock, irqflags);

	/* Default action mode */
	*action = COUNTER_SYNAPSE_ACTION_NONE;
@ -389,10 +413,6 @@ static int quad8_action_read(struct counter_device *counter,
		return 0;
	case COUNTER_FUNCTION_QUADRATURE_X1_A:
		if (synapse->signal->id == signal_a_id) {
			err = quad8_direction_read(counter, count, &direction);
			if (err)
				return err;

			if (direction == COUNTER_COUNT_DIRECTION_FORWARD)
				*action = COUNTER_SYNAPSE_ACTION_RISING_EDGE;
			else

@ -28,7 +28,6 @@ struct mchp_tc_data {
	int qdec_mode;
	int num_channels;
	int channel[2];
	bool trig_inverted;
};

static const enum counter_function mchp_tc_count_functions[] = {
@ -153,7 +152,7 @@ static int mchp_tc_count_signal_read(struct counter_device *counter,

	regmap_read(priv->regmap, ATMEL_TC_REG(priv->channel[0], SR), &sr);

	if (priv->trig_inverted)
	if (signal->id == 1)
		sigstatus = (sr & ATMEL_TC_MTIOB);
	else
		sigstatus = (sr & ATMEL_TC_MTIOA);
@ -171,6 +170,17 @@ static int mchp_tc_count_action_read(struct counter_device *counter,
	struct mchp_tc_data *const priv = counter_priv(counter);
	u32 cmr;

	if (priv->qdec_mode) {
		*action = COUNTER_SYNAPSE_ACTION_BOTH_EDGES;
		return 0;
	}

	/* Only TIOA signal is evaluated in non-QDEC mode */
	if (synapse->signal->id != 0) {
		*action = COUNTER_SYNAPSE_ACTION_NONE;
		return 0;
	}

	regmap_read(priv->regmap, ATMEL_TC_REG(priv->channel[0], CMR), &cmr);

	switch (cmr & ATMEL_TC_ETRGEDG) {
@ -199,8 +209,8 @@ static int mchp_tc_count_action_write(struct counter_device *counter,
	struct mchp_tc_data *const priv = counter_priv(counter);
	u32 edge = ATMEL_TC_ETRGEDG_NONE;

	/* QDEC mode is rising edge only */
	if (priv->qdec_mode)
	/* QDEC mode is rising edge only; only TIOA handled in non-QDEC mode */
	if (priv->qdec_mode || synapse->signal->id != 0)
		return -EINVAL;

	switch (action) {

@ -377,7 +377,8 @@ static const enum counter_signal_polarity ecap_cnt_pol_avail[] = {
	COUNTER_SIGNAL_POLARITY_NEGATIVE,
};

static DEFINE_COUNTER_ARRAY_POLARITY(ecap_cnt_pol_array, ecap_cnt_pol_avail, ECAP_NB_CEVT);
static DEFINE_COUNTER_AVAILABLE(ecap_cnt_pol_available, ecap_cnt_pol_avail);
static DEFINE_COUNTER_ARRAY_POLARITY(ecap_cnt_pol_array, ecap_cnt_pol_available, ECAP_NB_CEVT);

static struct counter_comp ecap_cnt_signal_ext[] = {
	COUNTER_COMP_ARRAY_POLARITY(ecap_cnt_pol_read, ecap_cnt_pol_write, ecap_cnt_pol_array),
@ -479,8 +480,8 @@ static int ecap_cnt_probe(struct platform_device *pdev)
	int ret;

	counter_dev = devm_counter_alloc(dev, sizeof(*ecap_dev));
	if (IS_ERR(counter_dev))
		return PTR_ERR(counter_dev);
	if (!counter_dev)
		return -ENOMEM;

	counter_dev->name = ECAP_DRV_NAME;
	counter_dev->parent = dev;

@ -27,6 +27,7 @@
|
||||
#include <linux/pm_qos.h>
|
||||
#include <trace/events/power.h>
|
||||
|
||||
#include <asm/cpu.h>
|
||||
#include <asm/div64.h>
|
||||
#include <asm/msr.h>
|
||||
#include <asm/cpu_device_id.h>
|
||||
@ -280,10 +281,10 @@ static struct cpudata **all_cpu_data;
|
||||
* structure is used to store those callbacks.
|
||||
*/
|
||||
struct pstate_funcs {
|
||||
int (*get_max)(void);
|
||||
int (*get_max_physical)(void);
|
||||
int (*get_min)(void);
|
||||
int (*get_turbo)(void);
|
||||
int (*get_max)(int cpu);
|
||||
int (*get_max_physical)(int cpu);
|
||||
int (*get_min)(int cpu);
|
||||
int (*get_turbo)(int cpu);
|
||||
int (*get_scaling)(void);
|
||||
int (*get_cpu_scaling)(int cpu);
|
||||
int (*get_aperf_mperf_shift)(void);
|
||||
@ -398,16 +399,6 @@ static int intel_pstate_get_cppc_guaranteed(int cpu)
|
||||
|
||||
return cppc_perf.nominal_perf;
|
||||
}
|
||||
|
||||
static u32 intel_pstate_cppc_nominal(int cpu)
|
||||
{
|
||||
u64 nominal_perf;
|
||||
|
||||
if (cppc_get_nominal_perf(cpu, &nominal_perf))
|
||||
return 0;
|
||||
|
||||
return nominal_perf;
|
||||
}
|
||||
#else /* CONFIG_ACPI_CPPC_LIB */
|
||||
static inline void intel_pstate_set_itmt_prio(int cpu)
|
||||
{
|
||||
@ -531,35 +522,18 @@ static void intel_pstate_hybrid_hwp_adjust(struct cpudata *cpu)
{
	int perf_ctl_max_phys = cpu->pstate.max_pstate_physical;
	int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling;
	int perf_ctl_turbo = pstate_funcs.get_turbo();
	int turbo_freq = perf_ctl_turbo * perf_ctl_scaling;
	int perf_ctl_turbo = pstate_funcs.get_turbo(cpu->cpu);
	int scaling = cpu->pstate.scaling;

	pr_debug("CPU%d: perf_ctl_max_phys = %d\n", cpu->cpu, perf_ctl_max_phys);
	pr_debug("CPU%d: perf_ctl_max = %d\n", cpu->cpu, pstate_funcs.get_max());
	pr_debug("CPU%d: perf_ctl_turbo = %d\n", cpu->cpu, perf_ctl_turbo);
	pr_debug("CPU%d: perf_ctl_scaling = %d\n", cpu->cpu, perf_ctl_scaling);
	pr_debug("CPU%d: HWP_CAP guaranteed = %d\n", cpu->cpu, cpu->pstate.max_pstate);
	pr_debug("CPU%d: HWP_CAP highest = %d\n", cpu->cpu, cpu->pstate.turbo_pstate);
	pr_debug("CPU%d: HWP-to-frequency scaling factor: %d\n", cpu->cpu, scaling);

	/*
	 * If the product of the HWP performance scaling factor and the HWP_CAP
	 * highest performance is greater than the maximum turbo frequency
	 * corresponding to the pstate_funcs.get_turbo() return value, the
	 * scaling factor is too high, so recompute it to make the HWP_CAP
	 * highest performance correspond to the maximum turbo frequency.
	 */
	cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * scaling;
	if (turbo_freq < cpu->pstate.turbo_freq) {
		cpu->pstate.turbo_freq = turbo_freq;
		scaling = DIV_ROUND_UP(turbo_freq, cpu->pstate.turbo_pstate);
		cpu->pstate.scaling = scaling;

		pr_debug("CPU%d: refined HWP-to-frequency scaling factor: %d\n",
			 cpu->cpu, scaling);
	}

	cpu->pstate.turbo_freq = rounddown(cpu->pstate.turbo_pstate * scaling,
					   perf_ctl_scaling);
	cpu->pstate.max_freq = rounddown(cpu->pstate.max_pstate * scaling,
					 perf_ctl_scaling);

@ -1740,7 +1714,7 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata)
	intel_pstate_update_epp_defaults(cpudata);
}

static int atom_get_min_pstate(void)
static int atom_get_min_pstate(int not_used)
{
	u64 value;

@ -1748,7 +1722,7 @@ static int atom_get_min_pstate(void)
	return (value >> 8) & 0x7F;
}

static int atom_get_max_pstate(void)
static int atom_get_max_pstate(int not_used)
{
	u64 value;

@ -1756,7 +1730,7 @@ static int atom_get_max_pstate(void)
	return (value >> 16) & 0x7F;
}

static int atom_get_turbo_pstate(void)
static int atom_get_turbo_pstate(int not_used)
{
	u64 value;

@ -1834,23 +1808,23 @@ static void atom_get_vid(struct cpudata *cpudata)
	cpudata->vid.turbo = value & 0x7f;
}

static int core_get_min_pstate(void)
static int core_get_min_pstate(int cpu)
{
	u64 value;

	rdmsrl(MSR_PLATFORM_INFO, value);
	rdmsrl_on_cpu(cpu, MSR_PLATFORM_INFO, &value);
	return (value >> 40) & 0xFF;
}

static int core_get_max_pstate_physical(void)
static int core_get_max_pstate_physical(int cpu)
{
	u64 value;

	rdmsrl(MSR_PLATFORM_INFO, value);
	rdmsrl_on_cpu(cpu, MSR_PLATFORM_INFO, &value);
	return (value >> 8) & 0xFF;
}

static int core_get_tdp_ratio(u64 plat_info)
static int core_get_tdp_ratio(int cpu, u64 plat_info)
{
	/* Check how many TDP levels present */
	if (plat_info & 0x600000000) {
@ -1860,13 +1834,13 @@ static int core_get_tdp_ratio(u64 plat_info)
		int err;

		/* Get the TDP level (0, 1, 2) to get ratios */
		err = rdmsrl_safe(MSR_CONFIG_TDP_CONTROL, &tdp_ctrl);
		err = rdmsrl_safe_on_cpu(cpu, MSR_CONFIG_TDP_CONTROL, &tdp_ctrl);
		if (err)
			return err;

		/* TDP MSR are continuous starting at 0x648 */
		tdp_msr = MSR_CONFIG_TDP_NOMINAL + (tdp_ctrl & 0x03);
		err = rdmsrl_safe(tdp_msr, &tdp_ratio);
		err = rdmsrl_safe_on_cpu(cpu, tdp_msr, &tdp_ratio);
		if (err)
			return err;

@ -1883,7 +1857,7 @@ static int core_get_tdp_ratio(u64 plat_info)
	return -ENXIO;
}

static int core_get_max_pstate(void)
static int core_get_max_pstate(int cpu)
{
	u64 tar;
	u64 plat_info;
@ -1891,10 +1865,10 @@ static int core_get_max_pstate(void)
	int tdp_ratio;
	int err;

	rdmsrl(MSR_PLATFORM_INFO, plat_info);
	rdmsrl_on_cpu(cpu, MSR_PLATFORM_INFO, &plat_info);
	max_pstate = (plat_info >> 8) & 0xFF;

	tdp_ratio = core_get_tdp_ratio(plat_info);
	tdp_ratio = core_get_tdp_ratio(cpu, plat_info);
	if (tdp_ratio <= 0)
		return max_pstate;

@ -1903,7 +1877,7 @@ static int core_get_max_pstate(void)
		return tdp_ratio;
	}

	err = rdmsrl_safe(MSR_TURBO_ACTIVATION_RATIO, &tar);
	err = rdmsrl_safe_on_cpu(cpu, MSR_TURBO_ACTIVATION_RATIO, &tar);
	if (!err) {
		int tar_levels;

@ -1918,13 +1892,13 @@ static int core_get_max_pstate(void)
	return max_pstate;
}

static int core_get_turbo_pstate(void)
static int core_get_turbo_pstate(int cpu)
{
	u64 value;
	int nont, ret;

	rdmsrl(MSR_TURBO_RATIO_LIMIT, value);
	nont = core_get_max_pstate();
	rdmsrl_on_cpu(cpu, MSR_TURBO_RATIO_LIMIT, &value);
	nont = core_get_max_pstate(cpu);
	ret = (value) & 255;
	if (ret <= nont)
		ret = nont;

@ -1952,51 +1926,38 @@ static int knl_get_aperf_mperf_shift(void)
	return 10;
}

static int knl_get_turbo_pstate(void)
static int knl_get_turbo_pstate(int cpu)
{
	u64 value;
	int nont, ret;

	rdmsrl(MSR_TURBO_RATIO_LIMIT, value);
	nont = core_get_max_pstate();
	rdmsrl_on_cpu(cpu, MSR_TURBO_RATIO_LIMIT, &value);
	nont = core_get_max_pstate(cpu);
	ret = (((value) >> 8) & 0xFF);
	if (ret <= nont)
		ret = nont;
	return ret;
}

#ifdef CONFIG_ACPI_CPPC_LIB
static u32 hybrid_ref_perf;
static void hybrid_get_type(void *data)
{
	u8 *cpu_type = data;

	*cpu_type = get_this_hybrid_cpu_type();
}

static int hybrid_get_cpu_scaling(int cpu)
{
	return DIV_ROUND_UP(core_get_scaling() * hybrid_ref_perf,
			    intel_pstate_cppc_nominal(cpu));
	u8 cpu_type = 0;

	smp_call_function_single(cpu, hybrid_get_type, &cpu_type, 1);
	/* P-cores have a smaller perf level-to-frequency scaling factor. */
	if (cpu_type == 0x40)
		return 78741;

	return core_get_scaling();
}

static void intel_pstate_cppc_set_cpu_scaling(void)
{
	u32 min_nominal_perf = U32_MAX;
	int cpu;

	for_each_present_cpu(cpu) {
		u32 nominal_perf = intel_pstate_cppc_nominal(cpu);

		if (nominal_perf && nominal_perf < min_nominal_perf)
			min_nominal_perf = nominal_perf;
	}

	if (min_nominal_perf < U32_MAX) {
		hybrid_ref_perf = min_nominal_perf;
		pstate_funcs.get_cpu_scaling = hybrid_get_cpu_scaling;
	}
}
#else
static inline void intel_pstate_cppc_set_cpu_scaling(void)
{
}
#endif /* CONFIG_ACPI_CPPC_LIB */

static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
{
	trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);

@ -2025,10 +1986,10 @@ static void intel_pstate_max_within_limits(struct cpudata *cpu)

static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
	int perf_ctl_max_phys = pstate_funcs.get_max_physical();
	int perf_ctl_max_phys = pstate_funcs.get_max_physical(cpu->cpu);
	int perf_ctl_scaling = pstate_funcs.get_scaling();

	cpu->pstate.min_pstate = pstate_funcs.get_min();
	cpu->pstate.min_pstate = pstate_funcs.get_min(cpu->cpu);
	cpu->pstate.max_pstate_physical = perf_ctl_max_phys;
	cpu->pstate.perf_ctl_scaling = perf_ctl_scaling;

@ -2044,8 +2005,8 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
		}
	} else {
		cpu->pstate.scaling = perf_ctl_scaling;
		cpu->pstate.max_pstate = pstate_funcs.get_max();
		cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
		cpu->pstate.max_pstate = pstate_funcs.get_max(cpu->cpu);
		cpu->pstate.turbo_pstate = pstate_funcs.get_turbo(cpu->cpu);
	}

	if (cpu->pstate.scaling == perf_ctl_scaling) {

@ -3221,9 +3182,9 @@ static unsigned int force_load __initdata;

static int __init intel_pstate_msrs_not_valid(void)
{
	if (!pstate_funcs.get_max() ||
	    !pstate_funcs.get_min() ||
	    !pstate_funcs.get_turbo())
	if (!pstate_funcs.get_max(0) ||
	    !pstate_funcs.get_min(0) ||
	    !pstate_funcs.get_turbo(0))
		return -ENODEV;

	return 0;
@ -3450,7 +3411,7 @@ static int __init intel_pstate_init(void)
		default_driver = &intel_pstate;

		if (boot_cpu_has(X86_FEATURE_HYBRID_CPU))
			intel_pstate_cppc_set_cpu_scaling();
			pstate_funcs.get_cpu_scaling = hybrid_get_cpu_scaling;

		goto hwp_cpu_matched;
	}

@ -18,6 +18,7 @@
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/irqdomain.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/pinctrl/consumer.h>
@ -94,7 +95,6 @@ struct tegra_gpio_info {
	struct tegra_gpio_bank *bank_info;
	const struct tegra_gpio_soc_config *soc;
	struct gpio_chip gc;
	struct irq_chip ic;
	u32 bank_count;
	unsigned int *irqs;
};
@ -288,6 +288,7 @@ static void tegra_gpio_irq_mask(struct irq_data *d)
	unsigned int gpio = d->hwirq;

	tegra_gpio_mask_write(tgi, GPIO_MSK_INT_ENB(tgi, gpio), gpio, 0);
	gpiochip_disable_irq(chip, gpio);
}

static void tegra_gpio_irq_unmask(struct irq_data *d)
@ -296,6 +297,7 @@ static void tegra_gpio_irq_unmask(struct irq_data *d)
	struct tegra_gpio_info *tgi = gpiochip_get_data(chip);
	unsigned int gpio = d->hwirq;

	gpiochip_enable_irq(chip, gpio);
	tegra_gpio_mask_write(tgi, GPIO_MSK_INT_ENB(tgi, gpio), gpio, 1);
}

@ -598,10 +600,47 @@ static void tegra_gpio_irq_release_resources(struct irq_data *d)
	tegra_gpio_enable(tgi, d->hwirq);
}

static void tegra_gpio_irq_print_chip(struct irq_data *d, struct seq_file *s)
{
	struct gpio_chip *chip = irq_data_get_irq_chip_data(d);

	seq_printf(s, dev_name(chip->parent));
}

static const struct irq_chip tegra_gpio_irq_chip = {
	.irq_shutdown = tegra_gpio_irq_shutdown,
	.irq_ack = tegra_gpio_irq_ack,
	.irq_mask = tegra_gpio_irq_mask,
	.irq_unmask = tegra_gpio_irq_unmask,
	.irq_set_type = tegra_gpio_irq_set_type,
#ifdef CONFIG_PM_SLEEP
	.irq_set_wake = tegra_gpio_irq_set_wake,
#endif
	.irq_print_chip = tegra_gpio_irq_print_chip,
	.irq_request_resources = tegra_gpio_irq_request_resources,
	.irq_release_resources = tegra_gpio_irq_release_resources,
	.flags = IRQCHIP_IMMUTABLE,
};

static const struct irq_chip tegra210_gpio_irq_chip = {
	.irq_shutdown = tegra_gpio_irq_shutdown,
	.irq_ack = tegra_gpio_irq_ack,
	.irq_mask = tegra_gpio_irq_mask,
	.irq_unmask = tegra_gpio_irq_unmask,
	.irq_set_affinity = tegra_gpio_irq_set_affinity,
	.irq_set_type = tegra_gpio_irq_set_type,
#ifdef CONFIG_PM_SLEEP
	.irq_set_wake = tegra_gpio_irq_set_wake,
#endif
	.irq_print_chip = tegra_gpio_irq_print_chip,
	.irq_request_resources = tegra_gpio_irq_request_resources,
	.irq_release_resources = tegra_gpio_irq_release_resources,
	.flags = IRQCHIP_IMMUTABLE,
};

#ifdef CONFIG_DEBUG_FS

#include <linux/debugfs.h>
#include <linux/seq_file.h>

static int tegra_dbg_gpio_show(struct seq_file *s, void *unused)
{

@ -689,18 +728,6 @@ static int tegra_gpio_probe(struct platform_device *pdev)
	tgi->gc.ngpio = tgi->bank_count * 32;
	tgi->gc.parent = &pdev->dev;

	tgi->ic.name = "GPIO";
	tgi->ic.irq_ack = tegra_gpio_irq_ack;
	tgi->ic.irq_mask = tegra_gpio_irq_mask;
	tgi->ic.irq_unmask = tegra_gpio_irq_unmask;
	tgi->ic.irq_set_type = tegra_gpio_irq_set_type;
	tgi->ic.irq_shutdown = tegra_gpio_irq_shutdown;
#ifdef CONFIG_PM_SLEEP
	tgi->ic.irq_set_wake = tegra_gpio_irq_set_wake;
#endif
	tgi->ic.irq_request_resources = tegra_gpio_irq_request_resources;
	tgi->ic.irq_release_resources = tegra_gpio_irq_release_resources;

	platform_set_drvdata(pdev, tgi);

	if (tgi->soc->debounce_supported)
@ -733,7 +760,6 @@ static int tegra_gpio_probe(struct platform_device *pdev)
	}

	irq = &tgi->gc.irq;
	irq->chip = &tgi->ic;
	irq->fwnode = of_node_to_fwnode(pdev->dev.of_node);
	irq->child_to_parent_hwirq = tegra_gpio_child_to_parent_hwirq;
	irq->populate_parent_alloc_arg = tegra_gpio_populate_parent_fwspec;
@ -752,7 +778,9 @@ static int tegra_gpio_probe(struct platform_device *pdev)
		if (!irq->parent_domain)
			return -EPROBE_DEFER;

		tgi->ic.irq_set_affinity = tegra_gpio_irq_set_affinity;
		gpio_irq_chip_set_chip(irq, &tegra210_gpio_irq_chip);
	} else {
		gpio_irq_chip_set_chip(irq, &tegra_gpio_irq_chip);
	}

	tgi->regs = devm_platform_ioremap_resource(pdev, 0);

@ -510,13 +510,13 @@ kfd_mem_dmamap_userptr(struct kgd_mem *mem,
	struct ttm_tt *ttm = bo->tbo.ttm;
	int ret;

	if (WARN_ON(ttm->num_pages != src_ttm->num_pages))
		return -EINVAL;

	ttm->sg = kmalloc(sizeof(*ttm->sg), GFP_KERNEL);
	if (unlikely(!ttm->sg))
		return -ENOMEM;

	if (WARN_ON(ttm->num_pages != src_ttm->num_pages))
		return -EINVAL;

	/* Same sequence as in amdgpu_ttm_tt_pin_userptr */
	ret = sg_alloc_table_from_pages(ttm->sg, src_ttm->pages,
					ttm->num_pages, 0,

@ -326,7 +326,10 @@ static int amdgpu_ctx_init(struct amdgpu_ctx_mgr *mgr, int32_t priority,
	if (r)
		return r;

	ctx->stable_pstate = current_stable_pstate;
	if (mgr->adev->pm.stable_pstate_ctx)
		ctx->stable_pstate = mgr->adev->pm.stable_pstate_ctx->stable_pstate;
	else
		ctx->stable_pstate = current_stable_pstate;

	return 0;
}

@ -3210,6 +3210,15 @@ static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
			return r;
		}
		adev->ip_blocks[i].status.hw = true;

		if (adev->in_s0ix && adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
			/* disable gfxoff for IP resume. The gfxoff will be re-enabled in
			 * amdgpu_device_resume() after IP resume.
			 */
			amdgpu_gfx_off_ctrl(adev, false);
			DRM_DEBUG("will disable gfxoff for re-initializing other blocks\n");
		}

	}

	return 0;
@ -4185,6 +4194,13 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
	/* Make sure IB tests flushed */
	flush_delayed_work(&adev->delayed_init_work);

	if (adev->in_s0ix) {
		/* re-enable gfxoff after IP resume. This re-enables gfxoff after
		 * it was disabled for IP resume in amdgpu_device_ip_resume_phase2().
		 */
		amdgpu_gfx_off_ctrl(adev, true);
		DRM_DEBUG("will enable gfxoff for the mission mode\n");
	}
	if (fbcon)
		drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false);

@ -5381,7 +5397,7 @@ skip_hw_reset:
			drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
		}

		if (adev->enable_mes)
		if (adev->enable_mes && adev->ip_versions[GC_HWIP][0] != IP_VERSION(11, 0, 3))
			amdgpu_mes_self_test(tmp_adev);

		if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled) {

@ -344,6 +344,10 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
		fw_info->ver = adev->mes.ucode_fw_version[1];
		fw_info->feature = 0;
		break;
	case AMDGPU_INFO_FW_IMU:
		fw_info->ver = adev->gfx.imu_fw_version;
		fw_info->feature = 0;
		break;
	default:
		return -EINVAL;
	}
@ -1520,6 +1524,15 @@ static int amdgpu_debugfs_firmware_info_show(struct seq_file *m, void *unused)
		   fw_info.feature, fw_info.ver);
	}

	/* IMU */
	query_fw.fw_type = AMDGPU_INFO_FW_IMU;
	query_fw.index = 0;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
	if (ret)
		return ret;
	seq_printf(m, "IMU feature version: %u, firmware version: 0x%08x\n",
		   fw_info.feature, fw_info.ver);

	/* PSP SOS */
	query_fw.fw_type = AMDGPU_INFO_FW_SOS;
	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);

@ -698,6 +698,7 @@ FW_VERSION_ATTR(rlc_srlg_fw_version, 0444, gfx.rlc_srlg_fw_version);
FW_VERSION_ATTR(rlc_srls_fw_version, 0444, gfx.rlc_srls_fw_version);
FW_VERSION_ATTR(mec_fw_version, 0444, gfx.mec_fw_version);
FW_VERSION_ATTR(mec2_fw_version, 0444, gfx.mec2_fw_version);
FW_VERSION_ATTR(imu_fw_version, 0444, gfx.imu_fw_version);
FW_VERSION_ATTR(sos_fw_version, 0444, psp.sos.fw_version);
FW_VERSION_ATTR(asd_fw_version, 0444, psp.asd_context.bin_desc.fw_version);
FW_VERSION_ATTR(ta_ras_fw_version, 0444, psp.ras_context.context.bin_desc.fw_version);
@ -719,7 +720,8 @@ static struct attribute *fw_attrs[] = {
	&dev_attr_ta_ras_fw_version.attr, &dev_attr_ta_xgmi_fw_version.attr,
	&dev_attr_smc_fw_version.attr, &dev_attr_sdma_fw_version.attr,
	&dev_attr_sdma2_fw_version.attr, &dev_attr_vcn_fw_version.attr,
	&dev_attr_dmcu_fw_version.attr, NULL
	&dev_attr_dmcu_fw_version.attr, &dev_attr_imu_fw_version.attr,
	NULL
};

static const struct attribute_group fw_attr_group = {

@ -547,6 +547,7 @@ static void amdgpu_virt_populate_vf2pf_ucode_info(struct amdgpu_device *adev)
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLS, adev->gfx.rlc_srls_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC, adev->gfx.mec_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC2, adev->gfx.mec2_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_IMU, adev->gfx.imu_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SOS, adev->psp.sos.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_ASD,
			    adev->psp.asd_context.bin_desc.fw_version);

@ -70,6 +70,7 @@ enum amd_sriov_ucode_engine_id {
	AMD_SRIOV_UCODE_ID_RLC_SRLS,
	AMD_SRIOV_UCODE_ID_MEC,
	AMD_SRIOV_UCODE_ID_MEC2,
	AMD_SRIOV_UCODE_ID_IMU,
	AMD_SRIOV_UCODE_ID_SOS,
	AMD_SRIOV_UCODE_ID_ASD,
	AMD_SRIOV_UCODE_ID_TA_RAS,

@@ -5051,6 +5051,7 @@ static int gfx_v11_0_set_powergating_state(void *handle,
 	switch (adev->ip_versions[GC_HWIP][0]) {
 	case IP_VERSION(11, 0, 0):
 	case IP_VERSION(11, 0, 2):
+	case IP_VERSION(11, 0, 3):
 		amdgpu_gfx_off_ctrl(adev, enable);
 		break;
 	case IP_VERSION(11, 0, 1):

@@ -98,7 +98,14 @@ static int mes_v11_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
 	struct amdgpu_device *adev = mes->adev;
 	struct amdgpu_ring *ring = &mes->ring;
 	unsigned long flags;
+	signed long timeout = adev->usec_timeout;

+	if (amdgpu_emu_mode) {
+		timeout *= 100;
+	} else if (amdgpu_sriov_vf(adev)) {
+		/* Worst case in sriov where all other 15 VF timeout, each VF needs about 600ms */
+		timeout = 15 * 600 * 1000;
+	}
 	BUG_ON(size % 4 != 0);

 	spin_lock_irqsave(&mes->ring_lock, flags);
@@ -118,7 +125,7 @@ static int mes_v11_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes,
 	DRM_DEBUG("MES msg=%d was emitted\n", x_pkt->header.opcode);

 	r = amdgpu_fence_wait_polling(ring, ring->fence_drv.sync_seq,
-		      adev->usec_timeout * (amdgpu_emu_mode ? 100 : 1));
+		      timeout);
 	if (r < 1) {
 		DRM_ERROR("MES failed to response msg=%d\n",
 			  x_pkt->header.opcode);

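The hunk above computes the fence-poll budget once, up front, instead of scaling it inline at the call site: emulation gets a 100x budget, while an SR-IOV guest budgets for the worst case of the other 15 virtual functions each taking about 600 ms. A compilable sketch of that selection logic (the constants mirror the diff; the baseline value and function name are illustrative):

#include <stdbool.h>
#include <stdio.h>

#define USEC_TIMEOUT 100000	/* illustrative baseline budget, in usec */

/* Pick the polling budget once, based on how the device runs. */
static long pick_timeout_us(bool emu_mode, bool sriov_vf)
{
	long timeout = USEC_TIMEOUT;

	if (emu_mode) {
		timeout *= 100;		/* emulators run ~100x slower */
	} else if (sriov_vf) {
		/* worst case: all other 15 VFs time out first,
		 * each needing about 600 ms */
		timeout = 15 * 600 * 1000;
	}
	return timeout;
}

int main(void)
{
	printf("bare metal: %ld us\n", pick_timeout_us(false, false));
	printf("emulation:  %ld us\n", pick_timeout_us(true, false));
	printf("sriov vf:   %ld us\n", pick_timeout_us(false, true));
	return 0;
}
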
@@ -32,8 +32,6 @@
 #include "gc/gc_10_1_0_offset.h"
 #include "soc15_common.h"

-#define mmMM_ATC_L2_MISC_CG_Sienna_Cichlid		0x064d
-#define mmMM_ATC_L2_MISC_CG_Sienna_Cichlid_BASE_IDX	0
 #define mmDAGB0_CNTL_MISC2_Sienna_Cichlid		0x0070
 #define mmDAGB0_CNTL_MISC2_Sienna_Cichlid_BASE_IDX	0

@@ -574,7 +572,6 @@ static void mmhub_v2_0_update_medium_grain_clock_gating(struct amdgpu_device *ad
 	case IP_VERSION(2, 1, 0):
 	case IP_VERSION(2, 1, 1):
 	case IP_VERSION(2, 1, 2):
-		def  = data  = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG_Sienna_Cichlid);
 		def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2_Sienna_Cichlid);
 		break;
 	default:
@@ -608,8 +605,6 @@ static void mmhub_v2_0_update_medium_grain_clock_gating(struct amdgpu_device *ad
 	case IP_VERSION(2, 1, 0):
 	case IP_VERSION(2, 1, 1):
 	case IP_VERSION(2, 1, 2):
-		if (def != data)
-			WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG_Sienna_Cichlid, data);
 		if (def1 != data1)
 			WREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2_Sienna_Cichlid, data1);
 		break;
@@ -634,8 +629,8 @@ static void mmhub_v2_0_update_medium_grain_light_sleep(struct amdgpu_device *ade
 	case IP_VERSION(2, 1, 0):
 	case IP_VERSION(2, 1, 1):
 	case IP_VERSION(2, 1, 2):
-		def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG_Sienna_Cichlid);
-		break;
+		/* There is no ATCL2 in MMHUB for 2.1.x */
+		return;
 	default:
 		def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG);
 		break;
@@ -646,18 +641,8 @@ static void mmhub_v2_0_update_medium_grain_light_sleep(struct amdgpu_device *ade
 	else
 		data &= ~MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;

-	if (def != data) {
-		switch (adev->ip_versions[MMHUB_HWIP][0]) {
-		case IP_VERSION(2, 1, 0):
-		case IP_VERSION(2, 1, 1):
-		case IP_VERSION(2, 1, 2):
-			WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG_Sienna_Cichlid, data);
-			break;
-		default:
-			WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG, data);
-			break;
-		}
-	}
+	if (def != data)
+		WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG, data);
 }

 static int mmhub_v2_0_set_clockgating(struct amdgpu_device *adev,
@@ -695,7 +680,10 @@ static void mmhub_v2_0_get_clockgating(struct amdgpu_device *adev, u64 *flags)
 	case IP_VERSION(2, 1, 0):
 	case IP_VERSION(2, 1, 1):
 	case IP_VERSION(2, 1, 2):
-		data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG_Sienna_Cichlid);
+		/* There is no ATCL2 in MMHUB for 2.1.x. Keep the status
+		 * based on DAGB
+		 */
+		data = MM_ATC_L2_MISC_CG__ENABLE_MASK;
 		data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2_Sienna_Cichlid);
 		break;
 	default:

@@ -795,6 +795,102 @@ static struct kfd_gpu_cache_info yellow_carp_cache_info[] = {
 	},
 };

+static struct kfd_gpu_cache_info gfx1037_cache_info[] = {
+	{
+		/* TCP L1 Cache per CU */
+		.cache_size = 16,
+		.cache_level = 1,
+		.flags = (CRAT_CACHE_FLAGS_ENABLED |
+				CRAT_CACHE_FLAGS_DATA_CACHE |
+				CRAT_CACHE_FLAGS_SIMD_CACHE),
+		.num_cu_shared = 1,
+	},
+	{
+		/* Scalar L1 Instruction Cache per SQC */
+		.cache_size = 32,
+		.cache_level = 1,
+		.flags = (CRAT_CACHE_FLAGS_ENABLED |
+				CRAT_CACHE_FLAGS_INST_CACHE |
+				CRAT_CACHE_FLAGS_SIMD_CACHE),
+		.num_cu_shared = 2,
+	},
+	{
+		/* Scalar L1 Data Cache per SQC */
+		.cache_size = 16,
+		.cache_level = 1,
+		.flags = (CRAT_CACHE_FLAGS_ENABLED |
+				CRAT_CACHE_FLAGS_DATA_CACHE |
+				CRAT_CACHE_FLAGS_SIMD_CACHE),
+		.num_cu_shared = 2,
+	},
+	{
+		/* GL1 Data Cache per SA */
+		.cache_size = 128,
+		.cache_level = 1,
+		.flags = (CRAT_CACHE_FLAGS_ENABLED |
+				CRAT_CACHE_FLAGS_DATA_CACHE |
+				CRAT_CACHE_FLAGS_SIMD_CACHE),
+		.num_cu_shared = 2,
+	},
+	{
+		/* L2 Data Cache per GPU (Total Tex Cache) */
+		.cache_size = 256,
+		.cache_level = 2,
+		.flags = (CRAT_CACHE_FLAGS_ENABLED |
+				CRAT_CACHE_FLAGS_DATA_CACHE |
+				CRAT_CACHE_FLAGS_SIMD_CACHE),
+		.num_cu_shared = 2,
+	},
+};
+
+static struct kfd_gpu_cache_info gc_10_3_6_cache_info[] = {
+	{
+		/* TCP L1 Cache per CU */
+		.cache_size = 16,
+		.cache_level = 1,
+		.flags = (CRAT_CACHE_FLAGS_ENABLED |
+				CRAT_CACHE_FLAGS_DATA_CACHE |
+				CRAT_CACHE_FLAGS_SIMD_CACHE),
+		.num_cu_shared = 1,
+	},
+	{
+		/* Scalar L1 Instruction Cache per SQC */
+		.cache_size = 32,
+		.cache_level = 1,
+		.flags = (CRAT_CACHE_FLAGS_ENABLED |
+				CRAT_CACHE_FLAGS_INST_CACHE |
+				CRAT_CACHE_FLAGS_SIMD_CACHE),
+		.num_cu_shared = 2,
+	},
+	{
+		/* Scalar L1 Data Cache per SQC */
+		.cache_size = 16,
+		.cache_level = 1,
+		.flags = (CRAT_CACHE_FLAGS_ENABLED |
+				CRAT_CACHE_FLAGS_DATA_CACHE |
+				CRAT_CACHE_FLAGS_SIMD_CACHE),
+		.num_cu_shared = 2,
+	},
+	{
+		/* GL1 Data Cache per SA */
+		.cache_size = 128,
+		.cache_level = 1,
+		.flags = (CRAT_CACHE_FLAGS_ENABLED |
+				CRAT_CACHE_FLAGS_DATA_CACHE |
+				CRAT_CACHE_FLAGS_SIMD_CACHE),
+		.num_cu_shared = 2,
+	},
+	{
+		/* L2 Data Cache per GPU (Total Tex Cache) */
+		.cache_size = 256,
+		.cache_level = 2,
+		.flags = (CRAT_CACHE_FLAGS_ENABLED |
+				CRAT_CACHE_FLAGS_DATA_CACHE |
+				CRAT_CACHE_FLAGS_SIMD_CACHE),
+		.num_cu_shared = 2,
+	},
+};
+
 static void kfd_populated_cu_info_cpu(struct kfd_topology_device *dev,
 		struct crat_subtype_computeunit *cu)
 {
@@ -1514,11 +1610,17 @@ static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev,
 		num_of_cache_types = ARRAY_SIZE(beige_goby_cache_info);
 		break;
 	case IP_VERSION(10, 3, 3):
-	case IP_VERSION(10, 3, 6): /* TODO: Double check these on production silicon */
-	case IP_VERSION(10, 3, 7): /* TODO: Double check these on production silicon */
 		pcache_info = yellow_carp_cache_info;
 		num_of_cache_types = ARRAY_SIZE(yellow_carp_cache_info);
 		break;
+	case IP_VERSION(10, 3, 6):
+		pcache_info = gc_10_3_6_cache_info;
+		num_of_cache_types = ARRAY_SIZE(gc_10_3_6_cache_info);
+		break;
+	case IP_VERSION(10, 3, 7):
+		pcache_info = gfx1037_cache_info;
+		num_of_cache_types = ARRAY_SIZE(gfx1037_cache_info);
+		break;
 	case IP_VERSION(11, 0, 0):
 	case IP_VERSION(11, 0, 1):
 	case IP_VERSION(11, 0, 2):

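Both new arrays feed the same table-driven dispatch: kfd_fill_gpu_cache_info() points pcache_info at the per-ASIC table and records its ARRAY_SIZE, so supporting a new GPU revision means adding one table plus one switch case. A small standalone sketch of that pattern (struct fields and version codes are illustrative):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct cache_info { int size_kb; int level; };

static const struct cache_info gfx1036_caches[] = {
	{ 16, 1 }, { 32, 1 }, { 256, 2 },
};
static const struct cache_info gfx1037_caches[] = {
	{ 16, 1 }, { 32, 1 }, { 16, 1 }, { 128, 1 }, { 256, 2 },
};

/* Table-driven selection: one switch case per ASIC revision,
 * returning the table and its length together. */
static int fill_cache_info(int ip_version, const struct cache_info **out)
{
	switch (ip_version) {
	case 1036:
		*out = gfx1036_caches;
		return ARRAY_SIZE(gfx1036_caches);
	case 1037:
		*out = gfx1037_caches;
		return ARRAY_SIZE(gfx1037_caches);
	default:
		return -1;	/* unknown ASIC */
	}
}

int main(void)
{
	const struct cache_info *info;
	int n = fill_cache_info(1037, &info);

	for (int i = 0; i < n; i++)
		printf("L%d cache: %d KiB\n", info[i].level, info[i].size_kb);
	return 0;
}
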
@@ -1369,7 +1369,7 @@ static bool dm_plane_format_mod_supported(struct drm_plane *plane,
 {
 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
 	const struct drm_format_info *info = drm_format_info(format);
-	struct hw_asic_id asic_id = adev->dm.dc->ctx->asic_id;
+	int i;

 	enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;

@@ -1386,49 +1386,13 @@ static bool dm_plane_format_mod_supported(struct drm_plane *plane,
 		return true;
 	}

-	/* check if swizzle mode is supported by this version of DCN */
-	switch (asic_id.chip_family) {
-	case FAMILY_SI:
-	case FAMILY_CI:
-	case FAMILY_KV:
-	case FAMILY_CZ:
-	case FAMILY_VI:
-		/* asics before AI does not have modifier support */
-		return false;
-	case FAMILY_AI:
-	case FAMILY_RV:
-	case FAMILY_NV:
-	case FAMILY_VGH:
-	case FAMILY_YELLOW_CARP:
-	case AMDGPU_FAMILY_GC_10_3_6:
-	case AMDGPU_FAMILY_GC_10_3_7:
-		switch (AMD_FMT_MOD_GET(TILE, modifier)) {
-		case AMD_FMT_MOD_TILE_GFX9_64K_R_X:
-		case AMD_FMT_MOD_TILE_GFX9_64K_D_X:
-		case AMD_FMT_MOD_TILE_GFX9_64K_S_X:
-		case AMD_FMT_MOD_TILE_GFX9_64K_D:
-			return true;
-		default:
-			return false;
-		}
-		break;
-	case AMDGPU_FAMILY_GC_11_0_0:
-	case AMDGPU_FAMILY_GC_11_0_1:
-		switch (AMD_FMT_MOD_GET(TILE, modifier)) {
-		case AMD_FMT_MOD_TILE_GFX11_256K_R_X:
-		case AMD_FMT_MOD_TILE_GFX9_64K_R_X:
-		case AMD_FMT_MOD_TILE_GFX9_64K_D_X:
-		case AMD_FMT_MOD_TILE_GFX9_64K_S_X:
-		case AMD_FMT_MOD_TILE_GFX9_64K_D:
-			return true;
-		default:
-			return false;
-		}
-		break;
-	default:
-		ASSERT(0); /* Unknown asic */
-		break;
+	/* Check that the modifier is on the list of the plane's supported modifiers. */
+	for (i = 0; i < plane->modifier_count; i++) {
+		if (modifier == plane->modifiers[i])
+			break;
 	}
+	if (i == plane->modifier_count)
+		return false;

 	/*
 	 * For D swizzle the canonical modifier depends on the bpp, so check

@@ -1270,16 +1270,6 @@ void dcn20_pipe_control_lock(
 				lock,
 				&hw_locks,
 				&inst_flags);
-	} else if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_MAIN) {
-		union dmub_inbox0_cmd_lock_hw hw_lock_cmd = { 0 };
-		hw_lock_cmd.bits.command_code = DMUB_INBOX0_CMD__HW_LOCK;
-		hw_lock_cmd.bits.hw_lock_client = HW_LOCK_CLIENT_DRIVER;
-		hw_lock_cmd.bits.lock_pipe = 1;
-		hw_lock_cmd.bits.otg_inst = pipe->stream_res.tg->inst;
-		hw_lock_cmd.bits.lock = lock;
-		if (!lock)
-			hw_lock_cmd.bits.should_release = 1;
-		dmub_hw_lock_mgr_inbox0_cmd(dc->ctx->dmub_srv, hw_lock_cmd);
 	} else if (pipe->plane_state != NULL && pipe->plane_state->triplebuffer_flips) {
 		if (lock)
 			pipe->stream_res.tg->funcs->triplebuffer_lock(pipe->stream_res.tg);
@@ -1856,7 +1846,7 @@ void dcn20_post_unlock_program_front_end(

 		for (j = 0; j < TIMEOUT_FOR_PIPE_ENABLE_MS*1000
 				&& hubp->funcs->hubp_is_flip_pending(hubp); j++)
-			mdelay(1);
+			udelay(1);
 	}
 }

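With the mdelay(1) to udelay(1) switch above, the unchanged loop bound (TIMEOUT_FOR_PIPE_ENABLE_MS*1000 iterations) now pairs with a one-microsecond delay, so the worst-case wait matches the named millisecond timeout instead of overshooting it a thousandfold, and a flip that clears quickly is noticed within microseconds. The shape of the loop as a userspace sketch (the pending-check and delay are stand-ins):

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define TIMEOUT_MS 100

static int pending = 3;	/* pretend the flip clears after 3 polls */

static bool flip_pending(void)
{
	return pending-- > 0;
}

int main(void)
{
	int j;

	/* Poll every microsecond; the iteration bound alone caps the
	 * total wait at TIMEOUT_MS milliseconds. */
	for (j = 0; j < TIMEOUT_MS * 1000 && flip_pending(); j++)
		usleep(1);

	printf("flip cleared after %d polls\n", j);
	return 0;
}
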
@@ -200,7 +200,7 @@ bool dcn32_all_pipes_have_stream_and_plane(struct dc *dc,
 		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

 		if (!pipe->stream)
-			return false;
+			continue;

 		if (!pipe->plane_state)
 			return false;

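The return-to-continue change above alters the predicate: a pipe with no stream is now skipped as unused rather than failing the whole check, so the function only rejects pipes that have a stream but no plane. A minimal sketch of the corrected loop (types are illustrative):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct pipe { const char *stream; const char *plane; };

/* True only if every pipe that has a stream also has a plane;
 * idle pipes (no stream) are skipped, not treated as failures. */
static bool all_pipes_have_stream_and_plane(const struct pipe *p, int n)
{
	for (int i = 0; i < n; i++) {
		if (!p[i].stream)
			continue;	/* unused pipe: ignore */
		if (!p[i].plane)
			return false;	/* stream without plane: reject */
	}
	return true;
}

int main(void)
{
	struct pipe pipes[] = {
		{ "stream0", "plane0" },
		{ NULL, NULL },		/* idle pipe, must not fail the check */
	};
	printf("%s\n", all_pipes_have_stream_and_plane(pipes, 2) ? "ok" : "reject");
	return 0;
}
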
@@ -25,7 +25,7 @@
 #define SMU13_DRIVER_IF_V13_0_0_H

 //Increment this version if SkuTable_t or BoardTable_t change
-#define PPTABLE_VERSION 0x24
+#define PPTABLE_VERSION 0x26

 #define NUM_GFXCLK_DPM_LEVELS 16
 #define NUM_SOCCLK_DPM_LEVELS 8
@@ -109,6 +109,22 @@
 #define FEATURE_SPARE_63_BIT 63
 #define NUM_FEATURES 64

+#define ALLOWED_FEATURE_CTRL_DEFAULT 0xFFFFFFFFFFFFFFFFULL
+#define ALLOWED_FEATURE_CTRL_SCPM	((1 << FEATURE_DPM_GFXCLK_BIT) | \
+					(1 << FEATURE_DPM_GFX_POWER_OPTIMIZER_BIT) | \
+					(1 << FEATURE_DPM_UCLK_BIT) | \
+					(1 << FEATURE_DPM_FCLK_BIT) | \
+					(1 << FEATURE_DPM_SOCCLK_BIT) | \
+					(1 << FEATURE_DPM_MP0CLK_BIT) | \
+					(1 << FEATURE_DPM_LINK_BIT) | \
+					(1 << FEATURE_DPM_DCN_BIT) | \
+					(1 << FEATURE_DS_GFXCLK_BIT) | \
+					(1 << FEATURE_DS_SOCCLK_BIT) | \
+					(1 << FEATURE_DS_FCLK_BIT) | \
+					(1 << FEATURE_DS_LCLK_BIT) | \
+					(1 << FEATURE_DS_DCFCLK_BIT) | \
+					(1 << FEATURE_DS_UCLK_BIT))
+
 //For use with feature control messages
 typedef enum {
   FEATURE_PWR_ALL,
@@ -133,6 +149,7 @@ typedef enum {
 #define DEBUG_OVERRIDE_DISABLE_DFLL 0x00000200
 #define DEBUG_OVERRIDE_ENABLE_RLC_VF_BRINGUP_MODE 0x00000400
 #define DEBUG_OVERRIDE_DFLL_MASTER_MODE 0x00000800
+#define DEBUG_OVERRIDE_ENABLE_PROFILING_MODE 0x00001000

 // VR Mapping Bit Defines
 #define VR_MAPPING_VR_SELECT_MASK 0x01
@@ -262,15 +279,15 @@ typedef enum {
 } I2cControllerPort_e;

 typedef enum {
   I2C_CONTROLLER_NAME_VR_GFX = 0,
   I2C_CONTROLLER_NAME_VR_SOC,
   I2C_CONTROLLER_NAME_VR_VMEMP,
   I2C_CONTROLLER_NAME_VR_VDDIO,
   I2C_CONTROLLER_NAME_LIQUID0,
   I2C_CONTROLLER_NAME_LIQUID1,
   I2C_CONTROLLER_NAME_PLX,
-  I2C_CONTROLLER_NAME_OTHER,
+  I2C_CONTROLLER_NAME_FAN_INTAKE,
   I2C_CONTROLLER_NAME_COUNT,
 } I2cControllerName_e;
@@ -282,16 +299,17 @@ typedef enum {
   I2C_CONTROLLER_THROTTLER_LIQUID0,
   I2C_CONTROLLER_THROTTLER_LIQUID1,
   I2C_CONTROLLER_THROTTLER_PLX,
+  I2C_CONTROLLER_THROTTLER_FAN_INTAKE,
   I2C_CONTROLLER_THROTTLER_INA3221,
   I2C_CONTROLLER_THROTTLER_COUNT,
 } I2cControllerThrottler_e;

 typedef enum {
   I2C_CONTROLLER_PROTOCOL_VR_XPDE132G5,
   I2C_CONTROLLER_PROTOCOL_VR_IR35217,
-  I2C_CONTROLLER_PROTOCOL_TMP_TMP102A,
+  I2C_CONTROLLER_PROTOCOL_TMP_MAX31875,
   I2C_CONTROLLER_PROTOCOL_INA3221,
   I2C_CONTROLLER_PROTOCOL_COUNT,
 } I2cControllerProtocol_e;
@@ -658,13 +676,20 @@ typedef struct {

 #define PP_NUM_OD_VF_CURVE_POINTS PP_NUM_RTAVFS_PWL_ZONES + 1

+typedef enum {
+  FAN_MODE_AUTO = 0,
+  FAN_MODE_MANUAL_LINEAR,
+} FanMode_e;
+
 typedef struct {
   uint32_t FeatureCtrlMask;

   //Voltage control
   int16_t VoltageOffsetPerZoneBoundary[PP_NUM_OD_VF_CURVE_POINTS];
-  uint16_t reserved[2];
+  uint16_t VddGfxVmax; // in mV
+
+  uint8_t IdlePwrSavingFeaturesCtrl;
+  uint8_t RuntimePwrSavingFeaturesCtrl;

   //Frequency changes
   int16_t GfxclkFmin; // MHz
@@ -674,7 +699,7 @@ typedef struct {

   //PPT
   int16_t Ppt; // %
-  int16_t reserved1;
+  int16_t Tdc;

   //Fan control
   uint8_t FanLinearPwmPoints[NUM_OD_FAN_MAX_POINTS];
@@ -701,16 +726,19 @@ typedef struct {
   uint32_t FeatureCtrlMask;

   int16_t VoltageOffsetPerZoneBoundary;
-  uint16_t reserved[2];
+  uint16_t VddGfxVmax; // in mV

-  uint16_t GfxclkFmin; // MHz
-  uint16_t GfxclkFmax; // MHz
+  uint8_t IdlePwrSavingFeaturesCtrl;
+  uint8_t RuntimePwrSavingFeaturesCtrl;
+
+  int16_t GfxclkFmin; // MHz
+  int16_t GfxclkFmax; // MHz
   uint16_t UclkFmin; // MHz
   uint16_t UclkFmax; // MHz

   //PPT
   int16_t Ppt; // %
-  int16_t reserved1;
+  int16_t Tdc;

   uint8_t FanLinearPwmPoints;
   uint8_t FanLinearTempPoints;
@@ -857,7 +885,8 @@ typedef struct {
   uint16_t FanStartTempMin;
   uint16_t FanStartTempMax;

-  uint32_t Spare[12];
+  uint16_t PowerMinPpt0[POWER_SOURCE_COUNT];
+  uint32_t Spare[11];

 } MsgLimits_t;
@@ -1041,7 +1070,17 @@ typedef struct {
   uint32_t GfxoffSpare[15];

   // GFX GPO
-  uint32_t GfxGpoSpare[16];
+  uint32_t DfllBtcMasterScalerM;
+  int32_t DfllBtcMasterScalerB;
+  uint32_t DfllBtcSlaveScalerM;
+  int32_t DfllBtcSlaveScalerB;
+
+  uint32_t DfllPccAsWaitCtrl; //GDFLL_AS_WAIT_CTRL_PCC register value to be passed to RLC msg
+  uint32_t DfllPccAsStepCtrl; //GDFLL_AS_STEP_CTRL_PCC register value to be passed to RLC msg
+
+  uint32_t DfllL2FrequencyBoostM; //Unitless (float)
+  uint32_t DfllL2FrequencyBoostB; //In MHz (integer)
+  uint32_t GfxGpoSpare[8];

   // GFX DCS
@@ -1114,12 +1153,14 @@ typedef struct {
   uint16_t IntakeTempHighIntakeAcousticLimit;
   uint16_t IntakeTempAcouticLimitReleaseRate;

   uint16_t FanStalledTempLimitOffset;
   int16_t FanAbnormalTempLimitOffset;
   uint16_t FanStalledTriggerRpm;
-  uint16_t FanAbnormalTriggerRpm;
-  uint16_t FanPadding;
+  uint16_t FanAbnormalTriggerRpmCoeff;
+  uint16_t FanAbnormalDetectionEnable;

-  uint32_t FanSpare[14];
+  uint8_t FanIntakeSensorSupport;
+  uint8_t FanIntakePadding[3];
+  uint32_t FanSpare[13];

 // SECTION: VDD_GFX AVFS
@@ -1198,8 +1239,13 @@ typedef struct {
   int16_t TotalBoardPowerM;
   int16_t TotalBoardPowerB;

+  //PMFW-11158
+  QuadraticInt_t qFeffCoeffGameClock[POWER_SOURCE_COUNT];
+  QuadraticInt_t qFeffCoeffBaseClock[POWER_SOURCE_COUNT];
+  QuadraticInt_t qFeffCoeffBoostClock[POWER_SOURCE_COUNT];
+
   // SECTION: Sku Reserved
-  uint32_t Spare[61];
+  uint32_t Spare[43];

   // Padding for MMHUB - do not modify this
   uint32_t MmHubPadding[8];
@@ -1288,8 +1334,11 @@ typedef struct {
   uint32_t PostVoltageSetBacoDelay; // in microseconds. Amount of time FW will wait after power good is established or PSI0 command is issued
   uint32_t BacoEntryDelay; // in milliseconds. Amount of time FW will wait to trigger BACO entry after receiving entry notification from OS

+  uint8_t FuseWritePowerMuxPresent;
+  uint8_t FuseWritePadding[3];
+
   // SECTION: Board Reserved
-  uint32_t BoardSpare[64];
+  uint32_t BoardSpare[63];

   // SECTION: Structure Padding
@@ -1381,7 +1430,7 @@ typedef struct {
   uint16_t AverageTotalBoardPower;

   uint16_t AvgTemperature[TEMP_COUNT];
-  uint16_t TempPadding;
+  uint16_t AvgTemperatureFanIntake;

   uint8_t PcieRate ;
   uint8_t PcieWidth ;
@@ -1550,5 +1599,7 @@ typedef struct {
 #define IH_INTERRUPT_CONTEXT_ID_AUDIO_D0 0x5
 #define IH_INTERRUPT_CONTEXT_ID_AUDIO_D3 0x6
 #define IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING 0x7
+#define IH_INTERRUPT_CONTEXT_ID_FAN_ABNORMAL 0x8
+#define IH_INTERRUPT_CONTEXT_ID_FAN_RECOVERY 0x9

 #endif

@@ -30,7 +30,7 @@
 #define SMU13_DRIVER_IF_VERSION_ALDE 0x08
 #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x07
 #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_5 0x04
-#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0 0x30
+#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_10 0x32
 #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_7 0x2C
 #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_10 0x1D

@@ -289,7 +289,8 @@ int smu_v13_0_check_fw_version(struct smu_context *smu)
 		smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_ALDE;
 		break;
 	case IP_VERSION(13, 0, 0):
-		smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_SMU_V13_0_0;
+	case IP_VERSION(13, 0, 10):
+		smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_10;
 		break;
 	case IP_VERSION(13, 0, 7):
 		smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_SMU_V13_0_7;
@@ -305,9 +306,6 @@ int smu_v13_0_check_fw_version(struct smu_context *smu)
 	case IP_VERSION(13, 0, 5):
 		smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_SMU_V13_0_5;
 		break;
-	case IP_VERSION(13, 0, 10):
-		smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_SMU_V13_0_10;
-		break;
 	default:
 		dev_err(adev->dev, "smu unsupported IP version: 0x%x.\n",
 			adev->ip_versions[MP1_HWIP][0]);
@@ -842,6 +840,7 @@ int smu_v13_0_gfx_off_control(struct smu_context *smu, bool enable)
 	case IP_VERSION(13, 0, 5):
 	case IP_VERSION(13, 0, 7):
 	case IP_VERSION(13, 0, 8):
+	case IP_VERSION(13, 0, 10):
 		if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
 			return 0;
 		if (enable)

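The smu_v13_0_check_fw_version() hunks fold SMU 13.0.10 into the 13.0.0 case so both report the shared 0x32 interface version; the old standalone 13.0.10 case has to be deleted at the same time, since duplicate case labels would not compile. The mapping is a plain switch from IP revision to expected driver-interface version, sketched here with illustrative codes (the IP_VERSION encoding below is an assumption of this sketch):

#include <stdio.h>

#define IF_VERSION_V13_0_0_10 0x32	/* shared by 13.0.0 and 13.0.10 */
#define IF_VERSION_V13_0_7    0x2C

/* Pack an IP version triple into one value a switch can match on. */
#define IP_VERSION(mj, mn, rv) (((mj) << 16) | ((mn) << 8) | (rv))

static int driver_if_version(unsigned int ip)
{
	switch (ip) {
	case IP_VERSION(13, 0, 0):
	case IP_VERSION(13, 0, 10):	/* shares the 13.0.0 interface */
		return IF_VERSION_V13_0_0_10;
	case IP_VERSION(13, 0, 7):
		return IF_VERSION_V13_0_7;
	default:
		return -1;	/* unsupported IP version */
	}
}

int main(void)
{
	printf("13.0.10 -> 0x%x\n", driver_if_version(IP_VERSION(13, 0, 10)));
	return 0;
}
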
@@ -105,6 +105,7 @@ struct ps8640 {
 	struct gpio_desc *gpio_powerdown;
 	struct device_link *link;
 	bool pre_enabled;
+	bool need_post_hpd_delay;
 };

 static const struct regmap_config ps8640_regmap_config[] = {
@@ -173,14 +174,31 @@ static int _ps8640_wait_hpd_asserted(struct ps8640 *ps_bridge, unsigned long wai
 {
 	struct regmap *map = ps_bridge->regmap[PAGE2_TOP_CNTL];
 	int status;
+	int ret;

 	/*
 	 * Apparently something about the firmware in the chip signals that
 	 * HPD goes high by reporting GPIO9 as high (even though HPD isn't
 	 * actually connected to GPIO9).
 	 */
-	return regmap_read_poll_timeout(map, PAGE2_GPIO_H, status,
-					status & PS_GPIO9, wait_us / 10, wait_us);
+	ret = regmap_read_poll_timeout(map, PAGE2_GPIO_H, status,
+				       status & PS_GPIO9, wait_us / 10, wait_us);
+
+	/*
+	 * The first time we see HPD go high after a reset we delay an extra
+	 * 50 ms. The best guess is that the MCU is doing "stuff" during this
+	 * time (maybe talking to the panel) and we don't want to interrupt it.
+	 *
+	 * No locking is done around "need_post_hpd_delay". If we're here we
+	 * know we're holding a PM Runtime reference and the only other place
+	 * that touches this is PM Runtime resume.
+	 */
+	if (!ret && ps_bridge->need_post_hpd_delay) {
+		ps_bridge->need_post_hpd_delay = false;
+		msleep(50);
+	}
+
+	return ret;
 }

 static int ps8640_wait_hpd_asserted(struct drm_dp_aux *aux, unsigned long wait_us)
@@ -381,6 +399,9 @@ static int __maybe_unused ps8640_resume(struct device *dev)
 	msleep(50);
 	gpiod_set_value(ps_bridge->gpio_reset, 0);

+	/* We just reset things, so we need a delay after the first HPD */
+	ps_bridge->need_post_hpd_delay = true;
+
 	/*
 	 * Mystery 200 ms delay for the "MCU to be ready". It's unclear if
 	 * this is truly necessary since the MCU will already signal that

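The ps8640 change is a one-shot flag: resume sets need_post_hpd_delay after pulling the chip out of reset, and the first successful HPD poll consumes it with a single extra 50 ms sleep. A self-contained model of that handshake (the poll result is faked here; in the driver it comes from regmap_read_poll_timeout()):

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

struct bridge {
	bool need_post_hpd_delay;	/* set on reset, consumed on first HPD */
};

static void resume_after_reset(struct bridge *b)
{
	/* we just reset things, so delay after the first HPD */
	b->need_post_hpd_delay = true;
}

static int wait_hpd_asserted(struct bridge *b)
{
	int ret = 0;	/* stand-in for a successful register poll */

	/* One-shot: only the first HPD after a reset pays the cost. */
	if (!ret && b->need_post_hpd_delay) {
		b->need_post_hpd_delay = false;
		usleep(50 * 1000);	/* let the MCU finish post-reset work */
	}
	return ret;
}

int main(void)
{
	struct bridge b = { 0 };

	resume_after_reset(&b);
	wait_hpd_asserted(&b);	/* slow path: extra 50 ms */
	wait_hpd_asserted(&b);	/* fast path: flag already consumed */
	printf("done\n");
	return 0;
}
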
@@ -3957,6 +3957,8 @@ intel_dp_handle_hdmi_link_status_change(struct intel_dp *intel_dp)

 	drm_dp_pcon_hdmi_frl_link_error_count(&intel_dp->aux, &intel_dp->attached_connector->base);

+	intel_dp->frl.is_trained = false;
+
 	/* Restart FRL training or fall back to TMDS mode */
 	intel_dp_check_frl_training(intel_dp);
 }

@@ -2293,11 +2293,11 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
 	}

 	if (IS_DG1_GRAPHICS_STEP(i915, STEP_A0, STEP_B0) ||
-	    IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
+	    IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915) || IS_ALDERLAKE_P(i915)) {
 		/*
 		 * Wa_1607030317:tgl
 		 * Wa_1607186500:tgl
-		 * Wa_1607297627:tgl,rkl,dg1[a0]
+		 * Wa_1607297627:tgl,rkl,dg1[a0],adlp
 		 *
 		 * On TGL and RKL there are multiple entries for this WA in the
 		 * BSpec; some indicate this is an A0-only WA, others indicate

@@ -591,8 +591,15 @@ void intel_runtime_pm_enable(struct intel_runtime_pm *rpm)
 		pm_runtime_use_autosuspend(kdev);
 	}

-	/* Enable by default */
-	pm_runtime_allow(kdev);
+	/*
+	 * FIXME: Temp hammer to keep autosupend disable on lmem supported platforms.
+	 * As per PCIe specs 5.3.1.4.1, all iomem read write request over a PCIe
+	 * function will be unsupported in case PCIe endpoint function is in D3.
+	 * Let's keep i915 autosuspend control 'on' till we fix all known issue
+	 * with lmem access in D3.
+	 */
+	if (!IS_DGFX(i915))
+		pm_runtime_allow(kdev);

 	/*
 	 * The core calls the driver load handler with an RPM reference held.

@@ -155,7 +155,7 @@ config DRM_MSM_HDMI
 	  Compile in support for the HDMI output MSM DRM driver. It can
 	  be a primary or a secondary display on device. Note that this is used
 	  only for the direct HDMI output. If the device outputs HDMI data
-	  throught some kind of DSI-to-HDMI bridge, this option can be disabled.
+	  through some kind of DSI-to-HDMI bridge, this option can be disabled.

 config DRM_MSM_HDMI_HDCP
 	bool "Enable HDMI HDCP support in MSM DRM driver"

@@ -91,7 +91,7 @@ struct a6xx_state_memobj {
 static void *state_kcalloc(struct a6xx_gpu_state *a6xx_state, int nr, size_t objsize)
 {
 	struct a6xx_state_memobj *obj =
-		kzalloc((nr * objsize) + sizeof(*obj), GFP_KERNEL);
+		kvzalloc((nr * objsize) + sizeof(*obj), GFP_KERNEL);

 	if (!obj)
 		return NULL;
@@ -813,6 +813,9 @@ static struct msm_gpu_state_bo *a6xx_snapshot_gmu_bo(
 {
 	struct msm_gpu_state_bo *snapshot;

+	if (!bo->size)
+		return NULL;
+
 	snapshot = state_kcalloc(a6xx_state, 1, sizeof(*snapshot));
 	if (!snapshot)
 		return NULL;
@@ -1040,8 +1043,13 @@ static void a6xx_gpu_state_destroy(struct kref *kref)
 	if (a6xx_state->gmu_hfi)
 		kvfree(a6xx_state->gmu_hfi->data);

-	list_for_each_entry_safe(obj, tmp, &a6xx_state->objs, node)
-		kfree(obj);
+	if (a6xx_state->gmu_debug)
+		kvfree(a6xx_state->gmu_debug->data);
+
+	list_for_each_entry_safe(obj, tmp, &a6xx_state->objs, node) {
+		list_del(&obj->node);
+		kvfree(obj);
+	}

 	adreno_gpu_state_destroy(state);
 	kfree(a6xx_state);

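The destroy-path fix above pairs each node's free with a list_del() and switches to kvfree() to match the kvzalloc() now used in state_kcalloc(); list_for_each_entry_safe exists precisely so the current node can be unlinked and freed while iterating. A userspace rendition of that teardown idiom (hand-rolled singly linked list, since this sketch does not pull in kernel headers):

#include <stdio.h>
#include <stdlib.h>

struct memobj {
	int id;
	struct memobj *next;
};

/* Safe teardown: stash the successor before freeing the current
 * node, the same guarantee list_for_each_entry_safe() provides. */
static void destroy_all(struct memobj **head)
{
	struct memobj *obj = *head, *tmp;

	while (obj) {
		tmp = obj->next;	/* grab next before obj is freed */
		free(obj);
		obj = tmp;
	}
	*head = NULL;	/* list is now empty, not dangling */
}

int main(void)
{
	struct memobj *head = NULL;

	for (int i = 0; i < 3; i++) {
		struct memobj *o = calloc(1, sizeof(*o));
		o->id = i;
		o->next = head;
		head = o;
	}
	destroy_all(&head);
	printf("list torn down cleanly\n");
	return 0;
}
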
@@ -679,6 +679,9 @@ static int adreno_system_suspend(struct device *dev)
 	struct msm_gpu *gpu = dev_to_gpu(dev);
 	int remaining, ret;

+	if (!gpu)
+		return 0;
+
 	suspend_scheduler(gpu);

 	remaining = wait_event_timeout(gpu->retire_event,
@@ -700,7 +703,12 @@ out:

 static int adreno_system_resume(struct device *dev)
 {
-	resume_scheduler(dev_to_gpu(dev));
+	struct msm_gpu *gpu = dev_to_gpu(dev);
+
+	if (!gpu)
+		return 0;
+
+	resume_scheduler(gpu);
 	return pm_runtime_force_resume(dev);
 }

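Both adreno system PM callbacks above now bail out when dev_to_gpu() returns NULL, guarding against suspend or resume running while no GPU is bound (e.g. probe deferred or failed). The guard-first shape, modelled as a standalone sketch (the gpu lookup and scheduler calls are stand-ins):

#include <stddef.h>
#include <stdio.h>

struct gpu { const char *name; };

static struct gpu *bound_gpu;	/* NULL until probe succeeds */

static int system_suspend(void)
{
	struct gpu *gpu = bound_gpu;

	if (!gpu)
		return 0;	/* nothing bound yet: suspend is a no-op */

	printf("suspending %s\n", gpu->name);
	return 0;
}

int main(void)
{
	system_suspend();	/* early suspend, before probe: must not crash */

	struct gpu g = { "a6xx" };
	bound_gpu = &g;
	system_suspend();	/* normal path */
	return 0;
}
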
Some files were not shown because too many files have changed in this diff.