Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

No conflicts.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Committed by Jakub Kicinski, 2021-12-09 12:04:19 -08:00
commit 3150a73366
294 changed files with 2604 additions and 1244 deletions

View File

@ -25,6 +25,6 @@ Sub graphs of DRBD's state transitions
:alt: disk-states-8.dot
:align: center
.. kernel-figure:: node-states-8.dot
:alt: node-states-8.dot
.. kernel-figure:: peer-states-8.dot
:alt: peer-states-8.dot
:align: center

View File

@ -1,8 +1,3 @@
digraph node_states {
Secondary -> Primary [ label = "ioctl_set_state()" ]
Primary -> Secondary [ label = "ioctl_set_state()" ]
}
digraph peer_states {
Secondary -> Primary [ label = "recv state packet" ]
Primary -> Secondary [ label = "recv state packet" ]

View File

@ -53,11 +53,10 @@ The number of bits that the PAC occupies in a pointer is 55 minus the
virtual address size configured by the kernel. For example, with a
virtual address size of 48, the PAC is 7 bits wide.
Recent versions of GCC can compile code with APIAKey-based return
address protection when passed the -msign-return-address option. This
uses instructions in the HINT space (unless -march=armv8.3-a or higher
is also passed), and such code can run on systems without the pointer
authentication extension.
When ARM64_PTR_AUTH_KERNEL is selected, the kernel will be compiled
with HINT space pointer authentication instructions protecting
function returns. Kernels built with this option will work on hardware
with or without pointer authentication support.
In addition to exec(), keys can also be reinitialized to random values
using the PR_PAC_RESET_KEYS prctl. A bitmask of PR_PAC_APIAKEY,
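As a hedged illustration of the prctl interface described here (a minimal userspace sketch assuming the standard <sys/prctl.h>/<linux/prctl.h> constants; not text from the patch)::

  #include <stdio.h>
  #include <sys/prctl.h>

  int main(void)
  {
          /* Reinitialize only the instruction A-key to a fresh random value. */
          if (prctl(PR_PAC_RESET_KEYS, PR_PAC_APIAKEY, 0, 0, 0))
                  perror("PR_PAC_RESET_KEYS");
          return 0;
  }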

View File

@ -249,11 +249,16 @@ except ImportError:
html_static_path = ['sphinx-static']
html_context = {
'css_files': [
'_static/theme_overrides.css',
],
}
html_css_files = [
'theme_overrides.css',
]
if major <= 1 and minor < 8:
html_context = {
'css_files': [
'_static/theme_overrides.css',
],
}
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied

View File

@ -73,12 +73,12 @@ CPUFREQ_POSTCHANGE.
The third argument is a struct cpufreq_freqs with the following
values:
===== ===========================
cpu number of the affected CPU
====== ======================================
policy a pointer to the struct cpufreq_policy
old old frequency
new new frequency
flags flags of the cpufreq driver
===== ===========================
====== ======================================
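A minimal sketch of how a transition notifier might consume these fields (illustrative only; it assumes the long-standing cpufreq notifier API, not anything added by this merge)::

  #include <linux/cpufreq.h>
  #include <linux/notifier.h>

  /* Log each completed frequency transition using the fields above. */
  static int freq_transition_notify(struct notifier_block *nb,
                                    unsigned long action, void *data)
  {
          struct cpufreq_freqs *freqs = data;

          if (action == CPUFREQ_POSTCHANGE)
                  pr_info("cpu%u: %u kHz -> %u kHz\n",
                          freqs->policy->cpu, freqs->old, freqs->new);
          return NOTIFY_OK;
  }

  static struct notifier_block freq_transition_nb = {
          .notifier_call = freq_transition_notify,
  };

  /* Registered from driver init code, e.g.:
   * cpufreq_register_notifier(&freq_transition_nb, CPUFREQ_TRANSITION_NOTIFIER);
   */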
3. CPUFreq Table Generation with Operating Performance Point (OPP)
==================================================================

View File

@ -33,6 +33,7 @@ properties:
- rockchip,rk3328-spi
- rockchip,rk3368-spi
- rockchip,rk3399-spi
- rockchip,rk3568-spi
- rockchip,rv1126-spi
- const: rockchip,rk3066-spi

View File

@ -439,11 +439,9 @@ preemption. The following substitution works on both kernels::
spin_lock(&p->lock);
p->count += this_cpu_read(var2);
On a non-PREEMPT_RT kernel migrate_disable() maps to preempt_disable()
which makes the above code fully equivalent. On a PREEMPT_RT kernel
migrate_disable() ensures that the task is pinned on the current CPU which
in turn guarantees that the per-CPU access to var1 and var2 are staying on
the same CPU.
the same CPU while the task remains preemptible.
The migrate_disable() substitution is not valid for the following
scenario::
@ -456,9 +454,8 @@ scenario::
p = this_cpu_ptr(&var1);
p->val = func2();
While correct on a non-PREEMPT_RT kernel, this breaks on PREEMPT_RT because
here migrate_disable() does not protect against reentrancy from a
preempting task. A correct substitution for this case is::
This breaks because migrate_disable() does not protect against reentrancy from
a preempting task. A correct substitution for this case is::
func()
{
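For context, a self-contained version of the first substitution shown above (a hedged reconstruction: the per-CPU structure's own spinlock handles reentrancy from a preempting task, while migrate_disable() merely keeps the this_cpu_* accesses on one CPU)::

  #include <linux/percpu.h>
  #include <linux/preempt.h>
  #include <linux/spinlock.h>

  struct foo {
          spinlock_t lock;
          int count;
  };
  static DEFINE_PER_CPU(struct foo, var1);
  static DEFINE_PER_CPU(int, var2);

  static void func(void)
  {
          struct foo *p;

          migrate_disable();
          p = this_cpu_ptr(&var1);
          spin_lock(&p->lock);             /* serializes against preempting tasks */
          p->count += this_cpu_read(var2);
          spin_unlock(&p->lock);
          migrate_enable();
  }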

View File

@ -35,6 +35,7 @@ GNU make 3.81 make --version
binutils 2.23 ld -v
flex 2.5.35 flex --version
bison 2.0 bison --version
pahole 1.16 pahole --version
util-linux 2.10o fdformat --version
kmod 13 depmod -V
e2fsprogs 1.41.4 e2fsck -V
@ -108,6 +109,16 @@ Bison
Since Linux 4.16, the build system generates parsers
during build. This requires bison 2.0 or later.
pahole:
-------
Since Linux 5.2, if CONFIG_DEBUG_INFO_BTF is selected, the build system
generates BTF (BPF Type Format) from DWARF in vmlinux, and a bit later from kernel
modules as well. This requires pahole v1.16 or later.
It is found in the 'dwarves' or 'pahole' distro packages or from
https://fedorapeople.org/~acme/dwarves/.
Perl
----

View File

@ -14,7 +14,8 @@ works, see Documentation/process/development-process.rst. Also, read
Documentation/process/submit-checklist.rst
for a list of items to check before submitting code. If you are submitting
a driver, also read Documentation/process/submitting-drivers.rst; for device
tree binding patches, read Documentation/process/submitting-patches.rst.
tree binding patches, read
Documentation/devicetree/bindings/submitting-patches.rst.
This documentation assumes that you're using ``git`` to prepare your patches.
If you're unfamiliar with ``git``, you would be well-advised to learn how to

View File

@ -12180,8 +12180,8 @@ F: drivers/net/ethernet/mellanox/mlx5/core/fpga/*
F: include/linux/mlx5/mlx5_ifc_fpga.h
MELLANOX ETHERNET SWITCH DRIVERS
M: Jiri Pirko <jiri@nvidia.com>
M: Ido Schimmel <idosch@nvidia.com>
M: Petr Machata <petrm@nvidia.com>
L: netdev@vger.kernel.org
S: Supported
W: http://www.mellanox.com
@ -16517,6 +16517,12 @@ T: git git://linuxtv.org/media_tree.git
F: Documentation/devicetree/bindings/media/allwinner,sun8i-a83t-de2-rotate.yaml
F: drivers/media/platform/sunxi/sun8i-rotate/
RPMSG TTY DRIVER
M: Arnaud Pouliquen <arnaud.pouliquen@foss.st.com>
L: linux-remoteproc@vger.kernel.org
S: Maintained
F: drivers/tty/rpmsg_tty.c
RTL2830 MEDIA DRIVER
M: Antti Palosaari <crope@iki.fi>
L: linux-media@vger.kernel.org
@ -16638,7 +16644,6 @@ W: http://www.ibm.com/developerworks/linux/linux390/
F: drivers/iommu/s390-iommu.c
S390 IUCV NETWORK LAYER
M: Julian Wiedmann <jwi@linux.ibm.com>
M: Alexandra Winter <wintera@linux.ibm.com>
M: Wenjia Zhang <wenjia@linux.ibm.com>
L: linux-s390@vger.kernel.org
@ -16650,7 +16655,6 @@ F: include/net/iucv/
F: net/iucv/
S390 NETWORK DRIVERS
M: Julian Wiedmann <jwi@linux.ibm.com>
M: Alexandra Winter <wintera@linux.ibm.com>
M: Wenjia Zhang <wenjia@linux.ibm.com>
L: linux-s390@vger.kernel.org

View File

@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 16
SUBLEVEL = 0
EXTRAVERSION = -rc3
EXTRAVERSION = -rc4
NAME = Gobble Gobble
# *DOCUMENTATION*
@ -789,7 +789,7 @@ stackp-flags-$(CONFIG_STACKPROTECTOR_STRONG) := -fstack-protector-strong
KBUILD_CFLAGS += $(stackp-flags-y)
KBUILD_CFLAGS-$(CONFIG_WERROR) += -Werror
KBUILD_CFLAGS += $(KBUILD_CFLAGS-y) $(CONFIG_CC_IMPLICIT_FALLTHROUGH)
KBUILD_CFLAGS += $(KBUILD_CFLAGS-y) $(CONFIG_CC_IMPLICIT_FALLTHROUGH:"%"=%)
ifdef CONFIG_CC_IS_CLANG
KBUILD_CPPFLAGS += -Qunused-arguments

View File

@ -77,11 +77,17 @@
.endm
SYM_CODE_START(ftrace_regs_caller)
#ifdef BTI_C
BTI_C
#endif
ftrace_regs_entry 1
b ftrace_common
SYM_CODE_END(ftrace_regs_caller)
SYM_CODE_START(ftrace_caller)
#ifdef BTI_C
BTI_C
#endif
ftrace_regs_entry 0
b ftrace_common
SYM_CODE_END(ftrace_caller)

View File

@ -147,7 +147,7 @@ int machine_kexec_post_load(struct kimage *kimage)
if (rc)
return rc;
kimage->arch.ttbr1 = __pa(trans_pgd);
kimage->arch.zero_page = __pa(empty_zero_page);
kimage->arch.zero_page = __pa_symbol(empty_zero_page);
reloc_size = __relocate_new_kernel_end - __relocate_new_kernel_start;
memcpy(reloc_code, __relocate_new_kernel_start, reloc_size);

View File

@ -98,7 +98,7 @@ do { \
#define emit(...) __emit(__VA_ARGS__)
/* Workaround for R10000 ll/sc errata */
#ifdef CONFIG_WAR_R10000
#ifdef CONFIG_WAR_R10000_LLSC
#define LLSC_beqz beqzl
#else
#define LLSC_beqz beqz

View File

@ -15,7 +15,12 @@
# Mike Shaver, Helge Deller and Martin K. Petersen
#
ifdef CONFIG_PARISC_SELF_EXTRACT
boot := arch/parisc/boot
KBUILD_IMAGE := $(boot)/bzImage
else
KBUILD_IMAGE := vmlinuz
endif
NM = sh $(srctree)/arch/parisc/nm
CHECKFLAGS += -D__hppa__=1

View File

@ -1,7 +1,9 @@
CONFIG_LOCALVERSION="-64bit"
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_KERNEL_LZ4=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_AUDIT=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_TASKSTATS=y
@ -35,6 +37,7 @@ CONFIG_MODVERSIONS=y
CONFIG_BLK_DEV_INTEGRITY=y
CONFIG_BINFMT_MISC=m
# CONFIG_COMPACTION is not set
CONFIG_MEMORY_FAILURE=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@ -65,12 +68,15 @@ CONFIG_SCSI_ISCSI_ATTRS=y
CONFIG_SCSI_SRP_ATTRS=y
CONFIG_ISCSI_BOOT_SYSFS=y
CONFIG_SCSI_MPT2SAS=y
CONFIG_SCSI_LASI700=m
CONFIG_SCSI_LASI700=y
CONFIG_SCSI_SYM53C8XX_2=y
CONFIG_SCSI_ZALON=y
CONFIG_SCSI_QLA_ISCSI=m
CONFIG_SCSI_DH=y
CONFIG_ATA=y
CONFIG_SATA_SIL=y
CONFIG_SATA_SIS=y
CONFIG_SATA_VIA=y
CONFIG_PATA_NS87415=y
CONFIG_PATA_SIL680=y
CONFIG_ATA_GENERIC=y
@ -79,6 +85,7 @@ CONFIG_MD_LINEAR=m
CONFIG_BLK_DEV_DM=m
CONFIG_DM_RAID=m
CONFIG_DM_UEVENT=y
CONFIG_DM_AUDIT=y
CONFIG_FUSION=y
CONFIG_FUSION_SPI=y
CONFIG_FUSION_SAS=y
@ -196,10 +203,15 @@ CONFIG_FB_MATROX_G=y
CONFIG_FB_MATROX_I2C=y
CONFIG_FB_MATROX_MAVEN=y
CONFIG_FB_RADEON=y
CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_CLUT224 is not set
CONFIG_HIDRAW=y
CONFIG_HID_PID=y
CONFIG_USB_HIDDEV=y
CONFIG_USB=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_OHCI_HCD=y
CONFIG_USB_OHCI_HCD_PLATFORM=y
CONFIG_UIO=y
CONFIG_UIO_PDRV_GENIRQ=m
CONFIG_UIO_AEC=m

View File

@ -39,6 +39,7 @@ verify "$3"
if [ -n "${INSTALLKERNEL}" ]; then
if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi
if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi
if [ -x /usr/sbin/${INSTALLKERNEL} ]; then exec /usr/sbin/${INSTALLKERNEL} "$@"; fi
fi
# Default install

View File

@ -249,30 +249,16 @@ void __init time_init(void)
static int __init init_cr16_clocksource(void)
{
/*
* The cr16 interval timers are not synchronized across CPUs on
* different sockets, so mark them unstable and lower rating on
* multi-socket SMP systems.
* The cr16 interval timers are not synchronized across CPUs, even if
* they share the same socket.
*/
if (num_online_cpus() > 1 && !running_on_qemu) {
int cpu;
unsigned long cpu0_loc;
cpu0_loc = per_cpu(cpu_data, 0).cpu_loc;
/* mark sched_clock unstable */
clear_sched_clock_stable();
for_each_online_cpu(cpu) {
if (cpu == 0)
continue;
if ((cpu0_loc != 0) &&
(cpu0_loc == per_cpu(cpu_data, cpu).cpu_loc))
continue;
/* mark sched_clock unstable */
clear_sched_clock_stable();
clocksource_cr16.name = "cr16_unstable";
clocksource_cr16.flags = CLOCK_SOURCE_UNSTABLE;
clocksource_cr16.rating = 0;
break;
}
clocksource_cr16.name = "cr16_unstable";
clocksource_cr16.flags = CLOCK_SOURCE_UNSTABLE;
clocksource_cr16.rating = 0;
}
/* register at clocksource framework */

View File

@ -403,7 +403,6 @@ CONFIG_DEVTMPFS=y
CONFIG_CONNECTOR=y
CONFIG_ZRAM=y
CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_DRBD=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
@ -476,6 +475,7 @@ CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
CONFIG_VXLAN=m
CONFIG_BAREUDP=m
CONFIG_AMT=m
CONFIG_TUN=m
CONFIG_VETH=m
CONFIG_VIRTIO_NET=m
@ -489,6 +489,7 @@ CONFIG_NLMON=m
# CONFIG_NET_VENDOR_AMD is not set
# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_VENDOR_ASIX is not set
# CONFIG_NET_VENDOR_ATHEROS is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
# CONFIG_NET_VENDOR_BROCADE is not set
@ -571,6 +572,7 @@ CONFIG_WATCHDOG=y
CONFIG_WATCHDOG_NOWAYOUT=y
CONFIG_SOFT_WATCHDOG=m
CONFIG_DIAG288_WATCHDOG=m
# CONFIG_DRM_DEBUG_MODESET_LOCK is not set
CONFIG_FB=y
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
@ -775,12 +777,14 @@ CONFIG_CRC4=m
CONFIG_CRC7=m
CONFIG_CRC8=m
CONFIG_RANDOM32_SELFTEST=y
CONFIG_XZ_DEC_MICROLZMA=y
CONFIG_DMA_CMA=y
CONFIG_CMA_SIZE_MBYTES=0
CONFIG_PRINTK_TIME=y
CONFIG_DYNAMIC_DEBUG=y
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_INFO_DWARF4=y
CONFIG_DEBUG_INFO_BTF=y
CONFIG_GDB_SCRIPTS=y
CONFIG_HEADERS_INSTALL=y
CONFIG_DEBUG_SECTION_MISMATCH=y
@ -807,6 +811,7 @@ CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m
CONFIG_DEBUG_PER_CPU_MAPS=y
CONFIG_KFENCE=y
CONFIG_KFENCE_STATIC_KEYS=y
CONFIG_DEBUG_SHIRQ=y
CONFIG_PANIC_ON_OOPS=y
CONFIG_DETECT_HUNG_TASK=y
@ -842,6 +847,7 @@ CONFIG_FTRACE_STARTUP_TEST=y
CONFIG_SAMPLES=y
CONFIG_SAMPLE_TRACE_PRINTK=m
CONFIG_SAMPLE_FTRACE_DIRECT=m
CONFIG_SAMPLE_FTRACE_DIRECT_MULTI=m
CONFIG_DEBUG_ENTRY=y
CONFIG_CIO_INJECT=y
CONFIG_KUNIT=m
@ -860,7 +866,7 @@ CONFIG_FAIL_FUNCTION=y
CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
CONFIG_LKDTM=m
CONFIG_TEST_MIN_HEAP=y
CONFIG_KPROBES_SANITY_TEST=y
CONFIG_KPROBES_SANITY_TEST=m
CONFIG_RBTREE_TEST=y
CONFIG_INTERVAL_TREE_TEST=m
CONFIG_PERCPU_TEST=m

View File

@ -394,7 +394,6 @@ CONFIG_DEVTMPFS=y
CONFIG_CONNECTOR=y
CONFIG_ZRAM=y
CONFIG_BLK_DEV_LOOP=m
CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_BLK_DEV_DRBD=m
CONFIG_BLK_DEV_NBD=m
CONFIG_BLK_DEV_RAM=y
@ -467,6 +466,7 @@ CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
CONFIG_VXLAN=m
CONFIG_BAREUDP=m
CONFIG_AMT=m
CONFIG_TUN=m
CONFIG_VETH=m
CONFIG_VIRTIO_NET=m
@ -480,6 +480,7 @@ CONFIG_NLMON=m
# CONFIG_NET_VENDOR_AMD is not set
# CONFIG_NET_VENDOR_AQUANTIA is not set
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_VENDOR_ASIX is not set
# CONFIG_NET_VENDOR_ATHEROS is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
# CONFIG_NET_VENDOR_BROCADE is not set
@ -762,12 +763,14 @@ CONFIG_PRIME_NUMBERS=m
CONFIG_CRC4=m
CONFIG_CRC7=m
CONFIG_CRC8=m
CONFIG_XZ_DEC_MICROLZMA=y
CONFIG_DMA_CMA=y
CONFIG_CMA_SIZE_MBYTES=0
CONFIG_PRINTK_TIME=y
CONFIG_DYNAMIC_DEBUG=y
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_INFO_DWARF4=y
CONFIG_DEBUG_INFO_BTF=y
CONFIG_GDB_SCRIPTS=y
CONFIG_DEBUG_SECTION_MISMATCH=y
CONFIG_MAGIC_SYSRQ=y
@ -792,9 +795,11 @@ CONFIG_HIST_TRIGGERS=y
CONFIG_SAMPLES=y
CONFIG_SAMPLE_TRACE_PRINTK=m
CONFIG_SAMPLE_FTRACE_DIRECT=m
CONFIG_SAMPLE_FTRACE_DIRECT_MULTI=m
CONFIG_KUNIT=m
CONFIG_KUNIT_DEBUGFS=y
CONFIG_LKDTM=m
CONFIG_KPROBES_SANITY_TEST=m
CONFIG_PERCPU_TEST=m
CONFIG_ATOMIC64_SELFTEST=y
CONFIG_TEST_BPF=m

View File

@ -65,9 +65,11 @@ CONFIG_ZFCP=y
# CONFIG_NETWORK_FILESYSTEMS is not set
CONFIG_LSM="yama,loadpin,safesetid,integrity"
# CONFIG_ZLIB_DFLTCC is not set
CONFIG_XZ_DEC_MICROLZMA=y
CONFIG_PRINTK_TIME=y
# CONFIG_SYMBOLIC_ERRNAME is not set
CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_INFO_BTF=y
CONFIG_DEBUG_FS=y
CONFIG_DEBUG_KERNEL=y
CONFIG_PANIC_ON_OOPS=y

View File

@ -14,12 +14,13 @@
/* I/O Map */
#define ZPCI_IOMAP_SHIFT 48
#define ZPCI_IOMAP_ADDR_BASE 0x8000000000000000UL
#define ZPCI_IOMAP_ADDR_SHIFT 62
#define ZPCI_IOMAP_ADDR_BASE (1UL << ZPCI_IOMAP_ADDR_SHIFT)
#define ZPCI_IOMAP_ADDR_OFF_MASK ((1UL << ZPCI_IOMAP_SHIFT) - 1)
#define ZPCI_IOMAP_MAX_ENTRIES \
((ULONG_MAX - ZPCI_IOMAP_ADDR_BASE + 1) / (1UL << ZPCI_IOMAP_SHIFT))
(1UL << (ZPCI_IOMAP_ADDR_SHIFT - ZPCI_IOMAP_SHIFT))
#define ZPCI_IOMAP_ADDR_IDX_MASK \
(~ZPCI_IOMAP_ADDR_OFF_MASK - ZPCI_IOMAP_ADDR_BASE)
((ZPCI_IOMAP_ADDR_BASE - 1) & ~ZPCI_IOMAP_ADDR_OFF_MASK)
struct zpci_iomap_entry {
u32 fh;
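A small compile-time sketch (hypothetical, not part of the patch) restating the reworked arithmetic: with ZPCI_IOMAP_ADDR_SHIFT of 62 and ZPCI_IOMAP_SHIFT of 48, the base becomes 0x4000000000000000UL and the table holds 2^14 entries:

  #include <linux/build_bug.h>

  /* Hypothetical checks mirroring the macro relationships above. */
  static_assert((1UL << 62) == 0x4000000000000000UL);        /* ZPCI_IOMAP_ADDR_BASE */
  static_assert((1UL << (62 - 48)) == 16384);                /* ZPCI_IOMAP_MAX_ENTRIES */
  static_assert(((1UL << 62) - 1) >> 48 == 16383);           /* index mask spans all entries */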

View File

@ -173,10 +173,11 @@ static noinline int unwindme_func4(struct unwindme *u)
}
/*
* trigger specification exception
* Trigger operation exception; use insn notation to bypass
* llvm's integrated assembler sanity checks.
*/
asm volatile(
" mvcl %%r1,%%r1\n"
" .insn e,0x0000\n" /* illegal opcode */
"0: nopr %%r7\n"
EX_TABLE(0b, 0b)
:);

View File

@ -1932,6 +1932,7 @@ config EFI
depends on ACPI
select UCS2_STRING
select EFI_RUNTIME_WRAPPERS
select ARCH_USE_MEMREMAP_PROT
help
This enables the kernel to use EFI runtime services that are
available (such as the EFI variable services).

View File

@ -574,6 +574,10 @@ SYM_INNER_LABEL(swapgs_restore_regs_and_return_to_usermode, SYM_L_GLOBAL)
ud2
1:
#endif
#ifdef CONFIG_XEN_PV
ALTERNATIVE "", "jmp xenpv_restore_regs_and_return_to_usermode", X86_FEATURE_XENPV
#endif
POP_REGS pop_rdi=0
/*
@ -890,6 +894,7 @@ SYM_CODE_START_LOCAL(paranoid_entry)
.Lparanoid_entry_checkgs:
/* EBX = 1 -> kernel GSBASE active, no restore required */
movl $1, %ebx
/*
* The kernel-enforced convention is a negative GSBASE indicates
* a kernel value. No SWAPGS needed on entry and exit.
@ -897,21 +902,14 @@ SYM_CODE_START_LOCAL(paranoid_entry)
movl $MSR_GS_BASE, %ecx
rdmsr
testl %edx, %edx
jns .Lparanoid_entry_swapgs
ret
.Lparanoid_entry_swapgs:
swapgs
/*
* The above SAVE_AND_SWITCH_TO_KERNEL_CR3 macro doesn't do an
* unconditional CR3 write, even in the PTI case. So do an lfence
* to prevent GS speculation, regardless of whether PTI is enabled.
*/
FENCE_SWAPGS_KERNEL_ENTRY
js .Lparanoid_kernel_gsbase
/* EBX = 0 -> SWAPGS required on exit */
xorl %ebx, %ebx
swapgs
.Lparanoid_kernel_gsbase:
FENCE_SWAPGS_KERNEL_ENTRY
ret
SYM_CODE_END(paranoid_entry)
@ -993,11 +991,6 @@ SYM_CODE_START_LOCAL(error_entry)
pushq %r12
ret
.Lerror_entry_done_lfence:
FENCE_SWAPGS_KERNEL_ENTRY
.Lerror_entry_done:
ret
/*
* There are two places in the kernel that can potentially fault with
* usergs. Handle them here. B stepping K8s sometimes report a
@ -1020,8 +1013,14 @@ SYM_CODE_START_LOCAL(error_entry)
* .Lgs_change's error handler with kernel gsbase.
*/
SWAPGS
FENCE_SWAPGS_USER_ENTRY
jmp .Lerror_entry_done
/*
* Issue an LFENCE to prevent GS speculation, regardless of whether it is a
* kernel or user gsbase.
*/
.Lerror_entry_done_lfence:
FENCE_SWAPGS_KERNEL_ENTRY
ret
.Lbstep_iret:
/* Fix truncated RIP */

View File

@ -108,7 +108,7 @@
#define INTEL_FAM6_ALDERLAKE 0x97 /* Golden Cove / Gracemont */
#define INTEL_FAM6_ALDERLAKE_L 0x9A /* Golden Cove / Gracemont */
#define INTEL_FAM6_RAPTOR_LAKE 0xB7
#define INTEL_FAM6_RAPTORLAKE 0xB7
/* "Small Core" Processors (Atom) */

View File

@ -1036,6 +1036,7 @@ struct kvm_x86_msr_filter {
#define APICV_INHIBIT_REASON_PIT_REINJ 4
#define APICV_INHIBIT_REASON_X2APIC 5
#define APICV_INHIBIT_REASON_BLOCKIRQ 6
#define APICV_INHIBIT_REASON_ABSENT 7
struct kvm_arch {
unsigned long n_used_mmu_pages;

View File

@ -73,4 +73,15 @@
#define GHCB_RESP_CODE(v) ((v) & GHCB_MSR_INFO_MASK)
/*
* Error codes related to GHCB input that can be communicated back to the guest
* by setting the lower 32 bits of the GHCB SW_EXITINFO1 field to 2.
*/
#define GHCB_ERR_NOT_REGISTERED 1
#define GHCB_ERR_INVALID_USAGE 2
#define GHCB_ERR_INVALID_SCRATCH_AREA 3
#define GHCB_ERR_MISSING_INPUT 4
#define GHCB_ERR_INVALID_INPUT 5
#define GHCB_ERR_INVALID_EVENT 6
#endif
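A hedged guest-side sketch of the convention this comment documents — the helper below is an illustrative stand-in, not one of the kernel's GHCB accessors:

  #include <linux/types.h>

  /* Per the comment above: low 32 bits of SW_EXITINFO1 == 2 flags an error,
   * with the GHCB_ERR_* reason code placed in SW_EXITINFO2.
   */
  static int ghcb_response_error(u64 sw_exitinfo1, u64 sw_exitinfo2)
  {
          if ((u32)sw_exitinfo1 != 2)
                  return 0;                /* no error reported */
          return (int)sw_exitinfo2;        /* e.g. GHCB_ERR_INVALID_INPUT */
  }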

View File

@ -118,7 +118,7 @@ static inline bool save_xstate_epilog(void __user *buf, int ia32_frame,
struct fpstate *fpstate)
{
struct xregs_state __user *x = buf;
struct _fpx_sw_bytes sw_bytes;
struct _fpx_sw_bytes sw_bytes = {};
u32 xfeatures;
int err;

View File

@ -294,11 +294,6 @@ static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
char *dst, char *buf, size_t size)
{
unsigned long error_code = X86_PF_PROT | X86_PF_WRITE;
char __user *target = (char __user *)dst;
u64 d8;
u32 d4;
u16 d2;
u8 d1;
/*
* This function uses __put_user() independent of whether kernel or user
@ -320,26 +315,42 @@ static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
* instructions here would cause infinite nesting.
*/
switch (size) {
case 1:
case 1: {
u8 d1;
u8 __user *target = (u8 __user *)dst;
memcpy(&d1, buf, 1);
if (__put_user(d1, target))
goto fault;
break;
case 2:
}
case 2: {
u16 d2;
u16 __user *target = (u16 __user *)dst;
memcpy(&d2, buf, 2);
if (__put_user(d2, target))
goto fault;
break;
case 4:
}
case 4: {
u32 d4;
u32 __user *target = (u32 __user *)dst;
memcpy(&d4, buf, 4);
if (__put_user(d4, target))
goto fault;
break;
case 8:
}
case 8: {
u64 d8;
u64 __user *target = (u64 __user *)dst;
memcpy(&d8, buf, 8);
if (__put_user(d8, target))
goto fault;
break;
}
default:
WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size);
return ES_UNSUPPORTED;
@ -362,11 +373,6 @@ static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
char *src, char *buf, size_t size)
{
unsigned long error_code = X86_PF_PROT;
char __user *s = (char __user *)src;
u64 d8;
u32 d4;
u16 d2;
u8 d1;
/*
* This function uses __get_user() independent of whether kernel or user
@ -388,26 +394,41 @@ static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
* instructions here would cause infinite nesting.
*/
switch (size) {
case 1:
case 1: {
u8 d1;
u8 __user *s = (u8 __user *)src;
if (__get_user(d1, s))
goto fault;
memcpy(buf, &d1, 1);
break;
case 2:
}
case 2: {
u16 d2;
u16 __user *s = (u16 __user *)src;
if (__get_user(d2, s))
goto fault;
memcpy(buf, &d2, 2);
break;
case 4:
}
case 4: {
u32 d4;
u32 __user *s = (u32 __user *)src;
if (__get_user(d4, s))
goto fault;
memcpy(buf, &d4, 4);
break;
case 8:
}
case 8: {
u64 d8;
u64 __user *s = (u64 __user *)src;
if (__get_user(d8, s))
goto fault;
memcpy(buf, &d8, 8);
break;
}
default:
WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size);
return ES_UNSUPPORTED;

View File

@ -1180,6 +1180,12 @@ void mark_tsc_unstable(char *reason)
EXPORT_SYMBOL_GPL(mark_tsc_unstable);
static void __init tsc_disable_clocksource_watchdog(void)
{
clocksource_tsc_early.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
}
static void __init check_system_tsc_reliable(void)
{
#if defined(CONFIG_MGEODEGX1) || defined(CONFIG_MGEODE_LX) || defined(CONFIG_X86_GENERIC)
@ -1196,6 +1202,23 @@ static void __init check_system_tsc_reliable(void)
#endif
if (boot_cpu_has(X86_FEATURE_TSC_RELIABLE))
tsc_clocksource_reliable = 1;
/*
* Disable the clocksource watchdog when the system has:
* - TSC running at constant frequency
* - TSC which does not stop in C-States
* - the TSC_ADJUST register which allows detecting even minimal
* modifications
* - not more than two sockets. As the number of sockets cannot be
* evaluated at the early boot stage where this has to be
* invoked, check the number of online memory nodes as a
fallback solution, which is a reasonable estimate.
*/
if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC) &&
boot_cpu_has(X86_FEATURE_NONSTOP_TSC) &&
boot_cpu_has(X86_FEATURE_TSC_ADJUST) &&
nr_online_nodes <= 2)
tsc_disable_clocksource_watchdog();
}
/*
@ -1387,9 +1410,6 @@ static int __init init_tsc_clocksource(void)
if (tsc_unstable)
goto unreg;
if (tsc_clocksource_reliable || no_tsc_watchdog)
clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC_S3))
clocksource_tsc.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
@ -1527,7 +1547,7 @@ void __init tsc_init(void)
}
if (tsc_clocksource_reliable || no_tsc_watchdog)
clocksource_tsc_early.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
tsc_disable_clocksource_watchdog();
clocksource_register_khz(&clocksource_tsc_early, tsc_khz);
detect_art();

View File

@ -30,6 +30,7 @@ struct tsc_adjust {
};
static DEFINE_PER_CPU(struct tsc_adjust, tsc_adjust);
static struct timer_list tsc_sync_check_timer;
/*
* TSC's on different sockets may be reset asynchronously.
@ -77,6 +78,46 @@ void tsc_verify_tsc_adjust(bool resume)
}
}
/*
* Normally tsc_sync is checked every time the system enters idle
* state, but there is still the caveat that a system may never enter
* idle, either because it is too busy or purposely configured not to
* enter idle.
*
* So set up a periodic timer (every 10 minutes) to make sure the check
* is always on.
*/
#define SYNC_CHECK_INTERVAL (HZ * 600)
static void tsc_sync_check_timer_fn(struct timer_list *unused)
{
int next_cpu;
tsc_verify_tsc_adjust(false);
/* Run the check for all onlined CPUs in turn */
next_cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask);
if (next_cpu >= nr_cpu_ids)
next_cpu = cpumask_first(cpu_online_mask);
tsc_sync_check_timer.expires += SYNC_CHECK_INTERVAL;
add_timer_on(&tsc_sync_check_timer, next_cpu);
}
static int __init start_sync_check_timer(void)
{
if (!cpu_feature_enabled(X86_FEATURE_TSC_ADJUST) || tsc_clocksource_reliable)
return 0;
timer_setup(&tsc_sync_check_timer, tsc_sync_check_timer_fn, 0);
tsc_sync_check_timer.expires = jiffies + SYNC_CHECK_INTERVAL;
add_timer(&tsc_sync_check_timer);
return 0;
}
late_initcall(start_sync_check_timer);
static void tsc_sanitize_first_cpu(struct tsc_adjust *cur, s64 bootval,
unsigned int cpu, bool bootcpu)
{

View File

@ -1936,7 +1936,11 @@ static void mmu_audit_disable(void) { }
static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
{
return sp->role.invalid ||
if (sp->role.invalid)
return true;
/* TDP MMU pages do not use the MMU generation. */
return !sp->tdp_mmu_page &&
unlikely(sp->mmu_valid_gen != kvm->arch.mmu_valid_gen);
}
@ -3976,6 +3980,20 @@ out_retry:
return true;
}
/*
* Returns true if the page fault is stale and needs to be retried, i.e. if the
* root was invalidated by a memslot update or a relevant mmu_notifier fired.
*/
static bool is_page_fault_stale(struct kvm_vcpu *vcpu,
struct kvm_page_fault *fault, int mmu_seq)
{
if (is_obsolete_sp(vcpu->kvm, to_shadow_page(vcpu->arch.mmu->root_hpa)))
return true;
return fault->slot &&
mmu_notifier_retry_hva(vcpu->kvm, mmu_seq, fault->hva);
}
static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
{
bool is_tdp_mmu_fault = is_tdp_mmu(vcpu->arch.mmu);
@ -4013,8 +4031,9 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
else
write_lock(&vcpu->kvm->mmu_lock);
if (fault->slot && mmu_notifier_retry_hva(vcpu->kvm, mmu_seq, fault->hva))
if (is_page_fault_stale(vcpu, fault, mmu_seq))
goto out_unlock;
r = make_mmu_pages_available(vcpu);
if (r)
goto out_unlock;

View File

@ -911,7 +911,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
r = RET_PF_RETRY;
write_lock(&vcpu->kvm->mmu_lock);
if (fault->slot && mmu_notifier_retry_hva(vcpu->kvm, mmu_seq, fault->hva))
if (is_page_fault_stale(vcpu, fault, mmu_seq))
goto out_unlock;
kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT);

View File

@ -900,6 +900,7 @@ out:
bool svm_check_apicv_inhibit_reasons(ulong bit)
{
ulong supported = BIT(APICV_INHIBIT_REASON_DISABLE) |
BIT(APICV_INHIBIT_REASON_ABSENT) |
BIT(APICV_INHIBIT_REASON_HYPERV) |
BIT(APICV_INHIBIT_REASON_NESTED) |
BIT(APICV_INHIBIT_REASON_IRQWIN) |

View File

@ -281,7 +281,7 @@ static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS;
pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << 48) - 1;
pmu->reserved_bits = 0xffffffff00200000ull;
pmu->reserved_bits = 0xfffffff000280000ull;
pmu->version = 1;
/* not applicable to AMD; but clean them to prevent any fall out */
pmu->counter_bitmask[KVM_PMC_FIXED] = 0;

View File

@ -2260,7 +2260,7 @@ void sev_free_vcpu(struct kvm_vcpu *vcpu)
__free_page(virt_to_page(svm->sev_es.vmsa));
if (svm->sev_es.ghcb_sa_free)
kfree(svm->sev_es.ghcb_sa);
kvfree(svm->sev_es.ghcb_sa);
}
static void dump_ghcb(struct vcpu_svm *svm)
@ -2352,24 +2352,29 @@ static void sev_es_sync_from_ghcb(struct vcpu_svm *svm)
memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
}
static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
static bool sev_es_validate_vmgexit(struct vcpu_svm *svm)
{
struct kvm_vcpu *vcpu;
struct ghcb *ghcb;
u64 exit_code = 0;
u64 exit_code;
u64 reason;
ghcb = svm->sev_es.ghcb;
/* Only GHCB Usage code 0 is supported */
if (ghcb->ghcb_usage)
goto vmgexit_err;
/*
* Retrieve the exit code now even though is may not be marked valid
* Retrieve the exit code now even though it may not be marked valid
* as it could help with debugging.
*/
exit_code = ghcb_get_sw_exit_code(ghcb);
/* Only GHCB Usage code 0 is supported */
if (ghcb->ghcb_usage) {
reason = GHCB_ERR_INVALID_USAGE;
goto vmgexit_err;
}
reason = GHCB_ERR_MISSING_INPUT;
if (!ghcb_sw_exit_code_is_valid(ghcb) ||
!ghcb_sw_exit_info_1_is_valid(ghcb) ||
!ghcb_sw_exit_info_2_is_valid(ghcb))
@ -2448,30 +2453,34 @@ static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
case SVM_VMGEXIT_UNSUPPORTED_EVENT:
break;
default:
reason = GHCB_ERR_INVALID_EVENT;
goto vmgexit_err;
}
return 0;
return true;
vmgexit_err:
vcpu = &svm->vcpu;
if (ghcb->ghcb_usage) {
if (reason == GHCB_ERR_INVALID_USAGE) {
vcpu_unimpl(vcpu, "vmgexit: ghcb usage %#x is not valid\n",
ghcb->ghcb_usage);
} else if (reason == GHCB_ERR_INVALID_EVENT) {
vcpu_unimpl(vcpu, "vmgexit: exit code %#llx is not valid\n",
exit_code);
} else {
vcpu_unimpl(vcpu, "vmgexit: exit reason %#llx is not valid\n",
vcpu_unimpl(vcpu, "vmgexit: exit code %#llx input is not valid\n",
exit_code);
dump_ghcb(svm);
}
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_UNEXPECTED_EXIT_REASON;
vcpu->run->internal.ndata = 2;
vcpu->run->internal.data[0] = exit_code;
vcpu->run->internal.data[1] = vcpu->arch.last_vmentry_cpu;
/* Clear the valid entries fields */
memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
return -EINVAL;
ghcb_set_sw_exit_info_1(ghcb, 2);
ghcb_set_sw_exit_info_2(ghcb, reason);
return false;
}
void sev_es_unmap_ghcb(struct vcpu_svm *svm)
@ -2493,7 +2502,7 @@ void sev_es_unmap_ghcb(struct vcpu_svm *svm)
svm->sev_es.ghcb_sa_sync = false;
}
kfree(svm->sev_es.ghcb_sa);
kvfree(svm->sev_es.ghcb_sa);
svm->sev_es.ghcb_sa = NULL;
svm->sev_es.ghcb_sa_free = false;
}
@ -2541,14 +2550,14 @@ static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
scratch_gpa_beg = ghcb_get_sw_scratch(ghcb);
if (!scratch_gpa_beg) {
pr_err("vmgexit: scratch gpa not provided\n");
return false;
goto e_scratch;
}
scratch_gpa_end = scratch_gpa_beg + len;
if (scratch_gpa_end < scratch_gpa_beg) {
pr_err("vmgexit: scratch length (%#llx) not valid for scratch address (%#llx)\n",
len, scratch_gpa_beg);
return false;
goto e_scratch;
}
if ((scratch_gpa_beg & PAGE_MASK) == control->ghcb_gpa) {
@ -2566,7 +2575,7 @@ static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
scratch_gpa_end > ghcb_scratch_end) {
pr_err("vmgexit: scratch area is outside of GHCB shared buffer area (%#llx - %#llx)\n",
scratch_gpa_beg, scratch_gpa_end);
return false;
goto e_scratch;
}
scratch_va = (void *)svm->sev_es.ghcb;
@ -2579,18 +2588,18 @@ static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
if (len > GHCB_SCRATCH_AREA_LIMIT) {
pr_err("vmgexit: scratch area exceeds KVM limits (%#llx requested, %#llx limit)\n",
len, GHCB_SCRATCH_AREA_LIMIT);
return false;
goto e_scratch;
}
scratch_va = kzalloc(len, GFP_KERNEL_ACCOUNT);
scratch_va = kvzalloc(len, GFP_KERNEL_ACCOUNT);
if (!scratch_va)
return false;
goto e_scratch;
if (kvm_read_guest(svm->vcpu.kvm, scratch_gpa_beg, scratch_va, len)) {
/* Unable to copy scratch area from guest */
pr_err("vmgexit: kvm_read_guest for scratch area failed\n");
kfree(scratch_va);
return false;
kvfree(scratch_va);
goto e_scratch;
}
/*
@ -2607,6 +2616,12 @@ static bool setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
svm->sev_es.ghcb_sa_len = len;
return true;
e_scratch:
ghcb_set_sw_exit_info_1(ghcb, 2);
ghcb_set_sw_exit_info_2(ghcb, GHCB_ERR_INVALID_SCRATCH_AREA);
return false;
}
static void set_ghcb_msr_bits(struct vcpu_svm *svm, u64 value, u64 mask,
@ -2657,7 +2672,7 @@ static int sev_handle_vmgexit_msr_protocol(struct vcpu_svm *svm)
ret = svm_invoke_exit_handler(vcpu, SVM_EXIT_CPUID);
if (!ret) {
ret = -EINVAL;
/* Error, keep GHCB MSR value as-is */
break;
}
@ -2693,10 +2708,13 @@ static int sev_handle_vmgexit_msr_protocol(struct vcpu_svm *svm)
GHCB_MSR_TERM_REASON_POS);
pr_info("SEV-ES guest requested termination: %#llx:%#llx\n",
reason_set, reason_code);
fallthrough;
ret = -EINVAL;
break;
}
default:
ret = -EINVAL;
/* Error, keep GHCB MSR value as-is */
break;
}
trace_kvm_vmgexit_msr_protocol_exit(svm->vcpu.vcpu_id,
@ -2720,14 +2738,18 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
if (!ghcb_gpa) {
vcpu_unimpl(vcpu, "vmgexit: GHCB gpa is not set\n");
return -EINVAL;
/* Without a GHCB, just return right back to the guest */
return 1;
}
if (kvm_vcpu_map(vcpu, ghcb_gpa >> PAGE_SHIFT, &svm->sev_es.ghcb_map)) {
/* Unable to map GHCB from guest */
vcpu_unimpl(vcpu, "vmgexit: error mapping GHCB [%#llx] from guest\n",
ghcb_gpa);
return -EINVAL;
/* Without a GHCB, just return right back to the guest */
return 1;
}
svm->sev_es.ghcb = svm->sev_es.ghcb_map.hva;
@ -2737,15 +2759,14 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
exit_code = ghcb_get_sw_exit_code(ghcb);
ret = sev_es_validate_vmgexit(svm);
if (ret)
return ret;
if (!sev_es_validate_vmgexit(svm))
return 1;
sev_es_sync_from_ghcb(svm);
ghcb_set_sw_exit_info_1(ghcb, 0);
ghcb_set_sw_exit_info_2(ghcb, 0);
ret = -EINVAL;
ret = 1;
switch (exit_code) {
case SVM_VMGEXIT_MMIO_READ:
if (!setup_vmgexit_scratch(svm, true, control->exit_info_2))
@ -2786,20 +2807,17 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
default:
pr_err("svm: vmgexit: unsupported AP jump table request - exit_info_1=%#llx\n",
control->exit_info_1);
ghcb_set_sw_exit_info_1(ghcb, 1);
ghcb_set_sw_exit_info_2(ghcb,
X86_TRAP_UD |
SVM_EVTINJ_TYPE_EXEPT |
SVM_EVTINJ_VALID);
ghcb_set_sw_exit_info_1(ghcb, 2);
ghcb_set_sw_exit_info_2(ghcb, GHCB_ERR_INVALID_INPUT);
}
ret = 1;
break;
}
case SVM_VMGEXIT_UNSUPPORTED_EVENT:
vcpu_unimpl(vcpu,
"vmgexit: unsupported event - exit_info_1=%#llx, exit_info_2=%#llx\n",
control->exit_info_1, control->exit_info_2);
ret = -EINVAL;
break;
default:
ret = svm_invoke_exit_handler(vcpu, exit_code);
@ -2821,7 +2839,7 @@ int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in)
return -EINVAL;
if (!setup_vmgexit_scratch(svm, in, bytes))
return -EINVAL;
return 1;
return kvm_sev_es_string_io(&svm->vcpu, size, port, svm->sev_es.ghcb_sa,
count, in);

View File

@ -2591,8 +2591,10 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) &&
WARN_ON_ONCE(kvm_set_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL,
vmcs12->guest_ia32_perf_global_ctrl)))
vmcs12->guest_ia32_perf_global_ctrl))) {
*entry_failure_code = ENTRY_FAIL_DEFAULT;
return -EINVAL;
}
kvm_rsp_write(vcpu, vmcs12->guest_rsp);
kvm_rip_write(vcpu, vmcs12->guest_rip);

View File

@ -7525,6 +7525,7 @@ static void hardware_unsetup(void)
static bool vmx_check_apicv_inhibit_reasons(ulong bit)
{
ulong supported = BIT(APICV_INHIBIT_REASON_DISABLE) |
BIT(APICV_INHIBIT_REASON_ABSENT) |
BIT(APICV_INHIBIT_REASON_HYPERV) |
BIT(APICV_INHIBIT_REASON_BLOCKIRQ);

View File

@ -5740,6 +5740,7 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
smp_wmb();
kvm->arch.irqchip_mode = KVM_IRQCHIP_SPLIT;
kvm->arch.nr_reserved_ioapic_pins = cap->args[0];
kvm_request_apicv_update(kvm, true, APICV_INHIBIT_REASON_ABSENT);
r = 0;
split_irqchip_unlock:
mutex_unlock(&kvm->lock);
@ -6120,6 +6121,7 @@ set_identity_unlock:
/* Write kvm->irq_routing before enabling irqchip_in_kernel. */
smp_wmb();
kvm->arch.irqchip_mode = KVM_IRQCHIP_KERNEL;
kvm_request_apicv_update(kvm, true, APICV_INHIBIT_REASON_ABSENT);
create_irqchip_unlock:
mutex_unlock(&kvm->lock);
break;
@ -8818,10 +8820,9 @@ static void kvm_apicv_init(struct kvm *kvm)
{
init_rwsem(&kvm->arch.apicv_update_lock);
if (enable_apicv)
clear_bit(APICV_INHIBIT_REASON_DISABLE,
&kvm->arch.apicv_inhibit_reasons);
else
set_bit(APICV_INHIBIT_REASON_ABSENT,
&kvm->arch.apicv_inhibit_reasons);
if (!enable_apicv)
set_bit(APICV_INHIBIT_REASON_DISABLE,
&kvm->arch.apicv_inhibit_reasons);
}

View File

@ -277,7 +277,8 @@ void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size)
return;
}
new = early_memremap(data.phys_map, data.size);
new = early_memremap_prot(data.phys_map, data.size,
pgprot_val(pgprot_encrypted(FIXMAP_PAGE_NORMAL)));
if (!new) {
pr_err("Failed to map new boot services memmap\n");
return;

View File

@ -72,6 +72,7 @@ static void __init setup_real_mode(void)
#ifdef CONFIG_X86_64
u64 *trampoline_pgd;
u64 efer;
int i;
#endif
base = (unsigned char *)real_mode_header;
@ -128,8 +129,17 @@ static void __init setup_real_mode(void)
trampoline_header->flags = 0;
trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
/* Map the real mode stub as virtual == physical */
trampoline_pgd[0] = trampoline_pgd_entry.pgd;
trampoline_pgd[511] = init_top_pgt[511].pgd;
/*
* Include the entirety of the kernel mapping into the trampoline
* PGD. This way, all mappings present in the normal kernel page
* tables are usable while running on trampoline_pgd.
*/
for (i = pgd_index(__PAGE_OFFSET); i < PTRS_PER_PGD; i++)
trampoline_pgd[i] = init_top_pgt[i].pgd;
#endif
sme_sev_setup_real_mode(trampoline_header);

View File

@ -20,6 +20,7 @@
#include <linux/init.h>
#include <linux/linkage.h>
#include <../entry/calling.h>
.pushsection .noinstr.text, "ax"
/*
@ -192,6 +193,25 @@ SYM_CODE_START(xen_iret)
jmp hypercall_iret
SYM_CODE_END(xen_iret)
/*
* Xen PV doesn't use a trampoline stack: PER_CPU_VAR(cpu_tss_rw + TSS_sp0) is
* also the kernel stack. Reusing swapgs_restore_regs_and_return_to_usermode()
* in Xen PV would move %rsp up to the top of the kernel stack and leave the
* IRET frame below %rsp, where it could be corrupted if an NMI interrupts.
* Besides, having swapgs_restore_regs_and_return_to_usermode() push the IRET
* frame at the same address would be pointless.
*/
SYM_CODE_START(xenpv_restore_regs_and_return_to_usermode)
UNWIND_HINT_REGS
POP_REGS
/* stackleak_erase() can work safely on the kernel stack. */
STACKLEAK_ERASE_NOCLOBBER
addq $8, %rsp /* skip regs->orig_ax */
jmp xen_iret
SYM_CODE_END(xenpv_restore_regs_and_return_to_usermode)
/*
* Xen handles syscall callbacks much like ordinary exceptions, which
* means we have:

View File

@ -15,6 +15,7 @@
#include <linux/falloc.h>
#include <linux/suspend.h>
#include <linux/fs.h>
#include <linux/module.h>
#include "blk.h"
static inline struct inode *bdev_file_inode(struct file *file)

View File

@ -827,7 +827,7 @@ static ssize_t ata_scsi_lpm_show(struct device *dev,
if (ap->target_lpm_policy >= ARRAY_SIZE(ata_lpm_policy_names))
return -EINVAL;
return snprintf(buf, PAGE_SIZE, "%s\n",
return sysfs_emit(buf, "%s\n",
ata_lpm_policy_names[ap->target_lpm_policy]);
}
DEVICE_ATTR(link_power_management_policy, S_IRUGO | S_IWUSR,
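For reference, a minimal sketch of the sysfs_emit() pattern this hunk adopts (the attribute below is hypothetical; sysfs_emit() bounds the write to PAGE_SIZE internally, which is why it replaces the open-coded snprintf()):

  #include <linux/device.h>
  #include <linux/sysfs.h>

  /* Hypothetical read-only attribute using the PAGE_SIZE-aware helper. */
  static ssize_t example_show(struct device *dev,
                              struct device_attribute *attr, char *buf)
  {
          return sysfs_emit(buf, "%s\n", "example");
  }
  static DEVICE_ATTR_RO(example);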

View File

@ -55,14 +55,14 @@ static unsigned int pata_falcon_data_xfer(struct ata_queued_cmd *qc,
/* Transfer multiple of 2 bytes */
if (rw == READ) {
if (swap)
raw_insw_swapw((u16 *)data_addr, (u16 *)buf, words);
raw_insw_swapw(data_addr, (u16 *)buf, words);
else
raw_insw((u16 *)data_addr, (u16 *)buf, words);
raw_insw(data_addr, (u16 *)buf, words);
} else {
if (swap)
raw_outsw_swapw((u16 *)data_addr, (u16 *)buf, words);
raw_outsw_swapw(data_addr, (u16 *)buf, words);
else
raw_outsw((u16 *)data_addr, (u16 *)buf, words);
raw_outsw(data_addr, (u16 *)buf, words);
}
/* Transfer trailing byte, if any. */
@ -74,16 +74,16 @@ static unsigned int pata_falcon_data_xfer(struct ata_queued_cmd *qc,
if (rw == READ) {
if (swap)
raw_insw_swapw((u16 *)data_addr, (u16 *)pad, 1);
raw_insw_swapw(data_addr, (u16 *)pad, 1);
else
raw_insw((u16 *)data_addr, (u16 *)pad, 1);
raw_insw(data_addr, (u16 *)pad, 1);
*buf = pad[0];
} else {
pad[0] = *buf;
if (swap)
raw_outsw_swapw((u16 *)data_addr, (u16 *)pad, 1);
raw_outsw_swapw(data_addr, (u16 *)pad, 1);
else
raw_outsw((u16 *)data_addr, (u16 *)pad, 1);
raw_outsw(data_addr, (u16 *)pad, 1);
}
words++;
}

View File

@ -1394,6 +1394,14 @@ static int sata_fsl_init_controller(struct ata_host *host)
return 0;
}
static void sata_fsl_host_stop(struct ata_host *host)
{
struct sata_fsl_host_priv *host_priv = host->private_data;
iounmap(host_priv->hcr_base);
kfree(host_priv);
}
/*
* scsi mid-layer and libata interface structures
*/
@ -1426,6 +1434,8 @@ static struct ata_port_operations sata_fsl_ops = {
.port_start = sata_fsl_port_start,
.port_stop = sata_fsl_port_stop,
.host_stop = sata_fsl_host_stop,
.pmp_attach = sata_fsl_pmp_attach,
.pmp_detach = sata_fsl_pmp_detach,
};
@ -1480,9 +1490,9 @@ static int sata_fsl_probe(struct platform_device *ofdev)
host_priv->ssr_base = ssr_base;
host_priv->csr_base = csr_base;
irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
if (!irq) {
dev_err(&ofdev->dev, "invalid irq from platform\n");
irq = platform_get_irq(ofdev, 0);
if (irq < 0) {
retval = irq;
goto error_exit_with_cleanup;
}
host_priv->irq = irq;
@ -1557,10 +1567,6 @@ static int sata_fsl_remove(struct platform_device *ofdev)
ata_host_detach(host);
irq_dispose_mapping(host_priv->irq);
iounmap(host_priv->hcr_base);
kfree(host_priv);
return 0;
}

View File

@ -2103,7 +2103,7 @@ static int loop_control_remove(int idx)
int ret;
if (idx < 0) {
pr_warn("deleting an unspecified loop device is not supported.\n");
pr_warn_once("deleting an unspecified loop device is not supported.\n");
return -EINVAL;
}

View File

@ -281,7 +281,7 @@ agp_ioc_init(void __iomem *ioc_regs)
return 0;
}
static int
static int __init
lba_find_capability(int cap)
{
struct _parisc_agp_info *info = &parisc_agp_info;
@ -366,7 +366,7 @@ fail:
return error;
}
static int
static int __init
find_quicksilver(struct device *dev, void *data)
{
struct parisc_device **lba = data;
@ -378,7 +378,7 @@ find_quicksilver(struct device *dev, void *data)
return 0;
}
static int
static int __init
parisc_agp_init(void)
{
extern struct sba_device *sba_list;

View File

@ -1004,10 +1004,9 @@ static struct kobj_type ktype_cpufreq = {
.release = cpufreq_sysfs_release,
};
static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu)
static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu,
struct device *dev)
{
struct device *dev = get_cpu_device(cpu);
if (unlikely(!dev))
return;
@ -1296,8 +1295,9 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy)
if (policy->max_freq_req) {
/*
* CPUFREQ_CREATE_POLICY notification is sent only after
* successfully adding max_freq_req request.
* Remove max_freq_req after sending CPUFREQ_REMOVE_POLICY
* notification, since CPUFREQ_CREATE_POLICY notification was
* sent after adding max_freq_req earlier.
*/
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
CPUFREQ_REMOVE_POLICY, policy);
@ -1391,7 +1391,7 @@ static int cpufreq_online(unsigned int cpu)
if (new_policy) {
for_each_cpu(j, policy->related_cpus) {
per_cpu(cpufreq_cpu_data, j) = policy;
add_cpu_dev_symlink(policy, j);
add_cpu_dev_symlink(policy, j, get_cpu_device(j));
}
policy->min_freq_req = kzalloc(2 * sizeof(*policy->min_freq_req),
@ -1565,7 +1565,7 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
/* Create sysfs link on CPU registration */
policy = per_cpu(cpufreq_cpu_data, cpu);
if (policy)
add_cpu_dev_symlink(policy, cpu);
add_cpu_dev_symlink(policy, cpu, dev);
return 0;
}

View File

@ -290,7 +290,7 @@ static void system_heap_dma_buf_release(struct dma_buf *dmabuf)
int i;
table = &buffer->sg_table;
for_each_sg(table->sgl, sg, table->nents, i) {
for_each_sgtable_sg(table, sg, i) {
struct page *page = sg_page(sg);
__free_pages(page, compound_order(page));

View File

@ -1396,7 +1396,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
struct sg_table *sg = NULL;
uint64_t user_addr = 0;
struct amdgpu_bo *bo;
struct drm_gem_object *gobj;
struct drm_gem_object *gobj = NULL;
u32 domain, alloc_domain;
u64 alloc_flags;
int ret;
@ -1506,14 +1506,16 @@ allocate_init_user_pages_failed:
remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
drm_vma_node_revoke(&gobj->vma_node, drm_priv);
err_node_allow:
drm_gem_object_put(gobj);
/* Don't unreserve system mem limit twice */
goto err_reserve_limit;
err_bo_create:
unreserve_mem_limit(adev, size, alloc_domain, !!sg);
err_reserve_limit:
mutex_destroy(&(*mem)->lock);
kfree(*mem);
if (gobj)
drm_gem_object_put(gobj);
else
kfree(*mem);
err:
if (sg) {
sg_free_table(sg);

View File

@ -3833,7 +3833,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
/* disable all interrupts */
amdgpu_irq_disable_all(adev);
if (adev->mode_info.mode_config_initialized){
if (!amdgpu_device_has_dc_support(adev))
if (!drm_drv_uses_atomic_modeset(adev_to_drm(adev)))
drm_helper_force_disable_all(adev_to_drm(adev));
else
drm_atomic_helper_shutdown(adev_to_drm(adev));
@ -4289,6 +4289,8 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
{
int r;
amdgpu_amdkfd_pre_reset(adev);
if (from_hypervisor)
r = amdgpu_virt_request_full_gpu(adev, true);
else
@ -4316,6 +4318,7 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
amdgpu_irq_gpu_reset_resume_helper(adev);
r = amdgpu_ib_ring_tests(adev);
amdgpu_amdkfd_post_reset(adev);
error:
if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
@ -5030,7 +5033,8 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
cancel_delayed_work_sync(&tmp_adev->delayed_init_work);
amdgpu_amdkfd_pre_reset(tmp_adev);
if (!amdgpu_sriov_vf(tmp_adev))
amdgpu_amdkfd_pre_reset(tmp_adev);
/*
* Mark these ASICs to be reset as untracked first
@ -5129,7 +5133,7 @@ skip_hw_reset:
drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
}
if (!amdgpu_device_has_dc_support(tmp_adev) && !job_signaled) {
if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled) {
drm_helper_resume_force_mode(adev_to_drm(tmp_adev));
}
@ -5148,9 +5152,9 @@ skip_hw_reset:
skip_sched_resume:
list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
/* unlock kfd */
if (!need_emergency_restart)
amdgpu_amdkfd_post_reset(tmp_adev);
/* unlock kfd: SRIOV would do it separately */
if (!need_emergency_restart && !amdgpu_sriov_vf(tmp_adev))
amdgpu_amdkfd_post_reset(tmp_adev);
/* kfd_post_reset will do nothing if kfd device is not initialized,
* need to bring up kfd here if it's not be initialized before

View File

@ -157,6 +157,8 @@ static int hw_id_map[MAX_HWIP] = {
[HDP_HWIP] = HDP_HWID,
[SDMA0_HWIP] = SDMA0_HWID,
[SDMA1_HWIP] = SDMA1_HWID,
[SDMA2_HWIP] = SDMA2_HWID,
[SDMA3_HWIP] = SDMA3_HWID,
[MMHUB_HWIP] = MMHUB_HWID,
[ATHUB_HWIP] = ATHUB_HWID,
[NBIO_HWIP] = NBIF_HWID,
@ -918,6 +920,7 @@ static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(3, 0, 64):
case IP_VERSION(3, 1, 1):
case IP_VERSION(3, 0, 2):
case IP_VERSION(3, 0, 192):
amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
if (!amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);

View File

@ -135,6 +135,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
break;
case IP_VERSION(3, 0, 0):
case IP_VERSION(3, 0, 64):
case IP_VERSION(3, 0, 192):
if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0))
fw_name = FIRMWARE_SIENNA_CICHLID;
else

View File

@ -504,8 +504,8 @@ static int amdgpu_vkms_sw_fini(void *handle)
int i = 0;
for (i = 0; i < adev->mode_info.num_crtc; i++)
if (adev->mode_info.crtcs[i])
hrtimer_cancel(&adev->mode_info.crtcs[i]->vblank_timer);
if (adev->amdgpu_vkms_output[i].vblank_hrtimer.function)
hrtimer_cancel(&adev->amdgpu_vkms_output[i].vblank_hrtimer);
kfree(adev->mode_info.bios_hardcoded_edid);
kfree(adev->amdgpu_vkms_output);

View File

@ -4060,9 +4060,10 @@ static int gfx_v9_0_hw_fini(void *handle)
gfx_v9_0_cp_enable(adev, false);
/* Skip suspend with A+A reset */
if (adev->gmc.xgmi.connected_to_cpu && amdgpu_in_reset(adev)) {
dev_dbg(adev->dev, "Device in reset. Skipping RLC halt\n");
/* Skip stopping RLC with A+A reset or when RLC controls GFX clock */
if ((adev->gmc.xgmi.connected_to_cpu && amdgpu_in_reset(adev)) ||
(adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 4, 2))) {
dev_dbg(adev->dev, "Skipping RLC halt\n");
return 0;
}

View File

@ -183,6 +183,7 @@ static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode,
switch (adev->ip_versions[UVD_HWIP][0]) {
case IP_VERSION(3, 0, 0):
case IP_VERSION(3, 0, 64):
case IP_VERSION(3, 0, 192):
if (amdgpu_sriov_vf(adev)) {
if (encode)
*codecs = &sriov_sc_video_codecs_encode;

View File

@ -1574,7 +1574,6 @@ retry_flush_work:
static void svm_range_restore_work(struct work_struct *work)
{
struct delayed_work *dwork = to_delayed_work(work);
struct amdkfd_process_info *process_info;
struct svm_range_list *svms;
struct svm_range *prange;
struct kfd_process *p;
@ -1594,12 +1593,10 @@ static void svm_range_restore_work(struct work_struct *work)
* the lifetime of this thread, kfd_process and mm will be valid.
*/
p = container_of(svms, struct kfd_process, svms);
process_info = p->kgd_process_info;
mm = p->mm;
if (!mm)
return;
mutex_lock(&process_info->lock);
svm_range_list_lock_and_flush_work(svms, mm);
mutex_lock(&svms->lock);
@ -1652,7 +1649,6 @@ static void svm_range_restore_work(struct work_struct *work)
out_reschedule:
mutex_unlock(&svms->lock);
mmap_write_unlock(mm);
mutex_unlock(&process_info->lock);
/* If validation failed, reschedule another attempt */
if (evicted_ranges) {
@ -2614,6 +2610,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
if (atomic_read(&svms->drain_pagefaults)) {
pr_debug("draining retry fault, drop fault 0x%llx\n", addr);
r = 0;
goto out;
}
@ -2623,6 +2620,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
mm = get_task_mm(p->lead_thread);
if (!mm) {
pr_debug("svms 0x%p failed to get mm\n", svms);
r = 0;
goto out;
}
@ -2660,6 +2658,7 @@ retry_write_locked:
if (svm_range_skip_recover(prange)) {
amdgpu_gmc_filter_faults_remove(adev, addr, pasid);
r = 0;
goto out_unlock_range;
}
@ -2668,6 +2667,7 @@ retry_write_locked:
if (timestamp < AMDGPU_SVM_RANGE_RETRY_FAULT_PENDING) {
pr_debug("svms 0x%p [0x%lx %lx] already restored\n",
svms, prange->start, prange->last);
r = 0;
goto out_unlock_range;
}
@ -3177,7 +3177,6 @@ static int
svm_range_set_attr(struct kfd_process *p, uint64_t start, uint64_t size,
uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
{
struct amdkfd_process_info *process_info = p->kgd_process_info;
struct mm_struct *mm = current->mm;
struct list_head update_list;
struct list_head insert_list;
@ -3196,8 +3195,6 @@ svm_range_set_attr(struct kfd_process *p, uint64_t start, uint64_t size,
svms = &p->svms;
mutex_lock(&process_info->lock);
svm_range_list_lock_and_flush_work(svms, mm);
r = svm_range_is_valid(p, start, size);
@ -3273,8 +3270,6 @@ out_unlock_range:
mutex_unlock(&svms->lock);
mmap_read_unlock(mm);
out:
mutex_unlock(&process_info->lock);
pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] done, r=%d\n", p->pasid,
&p->svms, start, start + size - 1, r);

View File

@ -314,6 +314,14 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
ret = -EINVAL;
goto cleanup;
}
if ((aconn->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort) &&
(aconn->base.connector_type != DRM_MODE_CONNECTOR_eDP)) {
DRM_DEBUG_DRIVER("No DP connector available for CRC source\n");
ret = -EINVAL;
goto cleanup;
}
}
#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)

View File

@ -36,6 +36,8 @@
#include "dm_helpers.h"
#include "dc_link_ddc.h"
#include "ddc_service_types.h"
#include "dpcd_defs.h"
#include "i2caux_interface.h"
#include "dmub_cmd.h"
@ -157,6 +159,16 @@ static const struct drm_connector_funcs dm_dp_mst_connector_funcs = {
};
#if defined(CONFIG_DRM_AMD_DC_DCN)
static bool needs_dsc_aux_workaround(struct dc_link *link)
{
if (link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 &&
(link->dpcd_caps.dpcd_rev.raw == DPCD_REV_14 || link->dpcd_caps.dpcd_rev.raw == DPCD_REV_12) &&
link->dpcd_caps.sink_count.bits.SINK_COUNT >= 2)
return true;
return false;
}
static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnector)
{
struct dc_sink *dc_sink = aconnector->dc_sink;
@ -166,7 +178,7 @@ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnecto
u8 *dsc_branch_dec_caps = NULL;
aconnector->dsc_aux = drm_dp_mst_dsc_aux_for_port(port);
#if defined(CONFIG_HP_HOOK_WORKAROUND)
/*
* drm_dp_mst_dsc_aux_for_port() will return NULL for certain configs
* because it only checks the dsc/fec caps of the "port variable" and not the dock
@ -176,10 +188,10 @@ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnecto
* Workaround: explicitly check the use case above and use the mst dock's aux as dsc_aux
*
*/
if (!aconnector->dsc_aux && !port->parent->port_parent)
if (!aconnector->dsc_aux && !port->parent->port_parent &&
needs_dsc_aux_workaround(aconnector->dc_link))
aconnector->dsc_aux = &aconnector->mst_port->dm_dp_aux.aux;
#endif
if (!aconnector->dsc_aux)
return false;

View File

@ -758,6 +758,18 @@ static bool detect_dp(struct dc_link *link,
dal_ddc_service_set_transaction_type(link->ddc,
sink_caps->transaction_type);
#if defined(CONFIG_DRM_AMD_DC_DCN)
/* Apply workaround for tunneled MST on certain USB4 docks. Always use DSC if the dock
* reports DSC support.
*/
if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
link->type == dc_connection_mst_branch &&
link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 &&
link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT &&
!link->dc->debug.dpia_debug.bits.disable_mst_dsc_work_around)
link->wa_flags.dpia_mst_dsc_always_on = true;
#endif
#if defined(CONFIG_DRM_AMD_DC_HDCP)
/* In case of fallback to SST when topology discovery below fails
* HDCP caps will be querried again later by the upper layer (caller
@ -1203,6 +1215,10 @@ static bool dc_link_detect_helper(struct dc_link *link,
LINK_INFO("link=%d, mst branch is now Disconnected\n",
link->link_index);
/* Disable the workaround that keeps DSC on for tunneled MST on certain USB4 docks. */
if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
link->wa_flags.dpia_mst_dsc_always_on = false;
dm_helpers_dp_mst_stop_top_mgr(link->ctx, link);
link->mst_stream_alloc_table.stream_count = 0;

View File

@ -2138,7 +2138,7 @@ static enum link_training_result dp_perform_8b_10b_link_training(
}
for (lane = 0; lane < (uint8_t)lt_settings->link_settings.lane_count; lane++)
lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET = VOLTAGE_SWING_LEVEL0;
lt_settings->dpcd_lane_settings[lane].raw = 0;
}
if (status == LINK_TRAINING_SUCCESS) {

View File

@ -1664,6 +1664,10 @@ bool dc_is_stream_unchanged(
if (old_stream->ignore_msa_timing_param != stream->ignore_msa_timing_param)
return false;
// Only audio is left to check whether it is the same or not. This is a corner case for tiled sinks
if (old_stream->audio_info.mode_count != stream->audio_info.mode_count)
return false;
return true;
}
@ -2252,16 +2256,6 @@ enum dc_status dc_validate_global_state(
if (!new_ctx)
return DC_ERROR_UNEXPECTED;
#if defined(CONFIG_DRM_AMD_DC_DCN)
/*
* Update link encoder to stream assignment.
* TODO: Split out reason allocation from validation.
*/
if (dc->res_pool->funcs->link_encs_assign && fast_validate == false)
dc->res_pool->funcs->link_encs_assign(
dc, new_ctx, new_ctx->streams, new_ctx->stream_count);
#endif
if (dc->res_pool->funcs->validate_global) {
result = dc->res_pool->funcs->validate_global(dc, new_ctx);
@ -2313,6 +2307,16 @@ enum dc_status dc_validate_global_state(
if (!dc->res_pool->funcs->validate_bandwidth(dc, new_ctx, fast_validate))
result = DC_FAIL_BANDWIDTH_VALIDATE;
#if defined(CONFIG_DRM_AMD_DC_DCN)
/*
* Only update link encoder to stream assignment after bandwidth validation passed.
* TODO: Split out assignment and validation.
*/
if (result == DC_OK && dc->res_pool->funcs->link_encs_assign && fast_validate == false)
dc->res_pool->funcs->link_encs_assign(
dc, new_ctx, new_ctx->streams, new_ctx->stream_count);
#endif
return result;
}

View File

@ -508,7 +508,8 @@ union dpia_debug_options {
uint32_t disable_dpia:1;
uint32_t force_non_lttpr:1;
uint32_t extend_aux_rd_interval:1;
uint32_t reserved:29;
uint32_t disable_mst_dsc_work_around:1;
uint32_t reserved:28;
} bits;
uint32_t raw;
};

View File

@ -191,6 +191,8 @@ struct dc_link {
bool dp_skip_DID2;
bool dp_skip_reset_segment;
bool dp_mot_reset_segment;
/* Some USB4 docks do not handle turning off MST DSC once it has been enabled. */
bool dpia_mst_dsc_always_on;
} wa_flags;
struct link_mst_stream_allocation_table mst_stream_alloc_table;

View File

@ -1468,7 +1468,7 @@ static int smu_disable_dpms(struct smu_context *smu)
dev_err(adev->dev, "Failed to disable smu features.\n");
}
if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 0, 0) &&
if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(9, 4, 2) &&
adev->gfx.rlc.funcs->stop)
adev->gfx.rlc.funcs->stop(adev);

View File

@ -9,6 +9,7 @@
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>

View File

@ -1640,6 +1640,9 @@ struct intel_dp {
struct intel_dp_pcon_frl frl;
struct intel_psr psr;
/* When we last wrote the OUI for eDP */
unsigned long last_oui_write;
};
enum lspcon_vendor {

View File

@ -29,6 +29,7 @@
#include <linux/i2c.h>
#include <linux/notifier.h>
#include <linux/slab.h>
#include <linux/timekeeping.h>
#include <linux/types.h>
#include <asm/byteorder.h>
@ -1955,6 +1956,16 @@ intel_edp_init_source_oui(struct intel_dp *intel_dp, bool careful)
if (drm_dp_dpcd_write(&intel_dp->aux, DP_SOURCE_OUI, oui, sizeof(oui)) < 0)
drm_err(&i915->drm, "Failed to write source OUI\n");
intel_dp->last_oui_write = jiffies;
}
void intel_dp_wait_source_oui(struct intel_dp *intel_dp)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
drm_dbg_kms(&i915->drm, "Performing OUI wait\n");
wait_remaining_ms_from_jiffies(intel_dp->last_oui_write, 30);
}
/* If the device supports it, try to set the power state appropriately */

View File

@ -119,4 +119,6 @@ void intel_dp_pcon_dsc_configure(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state);
void intel_dp_phy_test(struct intel_encoder *encoder);
void intel_dp_wait_source_oui(struct intel_dp *intel_dp);
#endif /* __INTEL_DP_H__ */

View File

@ -36,6 +36,7 @@
#include "intel_backlight.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_aux_backlight.h"
/* TODO:
@ -106,6 +107,8 @@ intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector)
int ret;
u8 tcon_cap[4];
intel_dp_wait_source_oui(intel_dp);
ret = drm_dp_dpcd_read(aux, INTEL_EDP_HDR_TCON_CAP0, tcon_cap, sizeof(tcon_cap));
if (ret != sizeof(tcon_cap))
return false;
@ -204,6 +207,8 @@ intel_dp_aux_hdr_enable_backlight(const struct intel_crtc_state *crtc_state,
int ret;
u8 old_ctrl, ctrl;
intel_dp_wait_source_oui(intel_dp);
ret = drm_dp_dpcd_readb(&intel_dp->aux, INTEL_EDP_HDR_GETSET_CTRL_PARAMS, &old_ctrl);
if (ret != 1) {
drm_err(&i915->drm, "Failed to read current backlight control mode: %d\n", ret);
@ -293,6 +298,13 @@ intel_dp_aux_vesa_enable_backlight(const struct intel_crtc_state *crtc_state,
struct intel_panel *panel = &connector->panel;
struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
if (!panel->backlight.edp.vesa.info.aux_enable) {
u32 pwm_level = intel_backlight_invert_pwm_level(connector,
panel->backlight.pwm_level_max);
panel->backlight.pwm_funcs->enable(crtc_state, conn_state, pwm_level);
}
drm_edp_backlight_enable(&intel_dp->aux, &panel->backlight.edp.vesa.info, level);
}
@ -304,6 +316,10 @@ static void intel_dp_aux_vesa_disable_backlight(const struct drm_connector_state
struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
drm_edp_backlight_disable(&intel_dp->aux, &panel->backlight.edp.vesa.info);
if (!panel->backlight.edp.vesa.info.aux_enable)
panel->backlight.pwm_funcs->disable(old_conn_state,
intel_backlight_invert_pwm_level(connector, 0));
}
static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector, enum pipe pipe)
@ -321,6 +337,15 @@ static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector,
if (ret < 0)
return ret;
if (!panel->backlight.edp.vesa.info.aux_enable) {
ret = panel->backlight.pwm_funcs->setup(connector, pipe);
if (ret < 0) {
drm_err(&i915->drm,
"Failed to setup PWM backlight controls for eDP backlight: %d\n",
ret);
return ret;
}
}
panel->backlight.max = panel->backlight.edp.vesa.info.max;
panel->backlight.min = 0;
if (current_mode == DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD) {
@ -340,12 +365,7 @@ intel_dp_aux_supports_vesa_backlight(struct intel_connector *connector)
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
/* TODO: We currently only support AUX-only backlight configurations, not backlights which
* require a mix of PWM and AUX controls to work. In the meantime, these machines typically
* work just fine using normal PWM controls anyway.
*/
if ((intel_dp->edp_dpcd[1] & DP_EDP_BACKLIGHT_AUX_ENABLE_CAP) &&
drm_edp_backlight_supported(intel_dp->edp_dpcd)) {
if (drm_edp_backlight_supported(intel_dp->edp_dpcd)) {
drm_dbg_kms(&i915->drm, "AUX Backlight Control Supported!\n");
return true;
}

View File

@ -6,6 +6,7 @@
#include <linux/slab.h> /* fault-inject.h is not standalone! */
#include <linux/fault-inject.h>
#include <linux/sched/mm.h>
#include "gem/i915_gem_lmem.h"
#include "i915_trace.h"

View File

@ -621,13 +621,6 @@ static void gen12_ctx_workarounds_init(struct intel_engine_cs *engine,
FF_MODE2_GS_TIMER_MASK,
FF_MODE2_GS_TIMER_224,
0, false);
/*
* Wa_14012131227:dg1
* Wa_1508744258:tgl,rkl,dg1,adl-s,adl-p
*/
wa_masked_en(wal, GEN7_COMMON_SLICE_CHICKEN1,
GEN9_RHWO_OPTIMIZATION_DISABLE);
}
static void dg1_ctx_workarounds_init(struct intel_engine_cs *engine,

View File

@ -29,6 +29,7 @@
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include "gem/i915_gem_context.h"
#include "gt/intel_breadcrumbs.h"

View File

@ -4,6 +4,7 @@
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/clk.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>

View File

@ -4,8 +4,8 @@ config DRM_MSM
tristate "MSM DRM"
depends on DRM
depends on ARCH_QCOM || SOC_IMX5 || COMPILE_TEST
depends on COMMON_CLK
depends on IOMMU_SUPPORT
depends on (OF && COMMON_CLK) || COMPILE_TEST
depends on QCOM_OCMEM || QCOM_OCMEM=n
depends on QCOM_LLCC || QCOM_LLCC=n
depends on QCOM_COMMAND_DB || QCOM_COMMAND_DB=n

View File

@ -23,8 +23,10 @@ msm-y := \
hdmi/hdmi_i2c.o \
hdmi/hdmi_phy.o \
hdmi/hdmi_phy_8960.o \
hdmi/hdmi_phy_8996.o \
hdmi/hdmi_phy_8x60.o \
hdmi/hdmi_phy_8x74.o \
hdmi/hdmi_pll_8960.o \
edp/edp.o \
edp/edp_aux.o \
edp/edp_bridge.o \
@ -37,6 +39,7 @@ msm-y := \
disp/mdp4/mdp4_dtv_encoder.o \
disp/mdp4/mdp4_lcdc_encoder.o \
disp/mdp4/mdp4_lvds_connector.o \
disp/mdp4/mdp4_lvds_pll.o \
disp/mdp4/mdp4_irq.o \
disp/mdp4/mdp4_kms.o \
disp/mdp4/mdp4_plane.o \
@ -116,9 +119,6 @@ msm-$(CONFIG_DRM_MSM_DP)+= dp/dp_aux.o \
dp/dp_audio.o
msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o
msm-$(CONFIG_COMMON_CLK) += disp/mdp4/mdp4_lvds_pll.o
msm-$(CONFIG_COMMON_CLK) += hdmi/hdmi_pll_8960.o
msm-$(CONFIG_COMMON_CLK) += hdmi/hdmi_phy_8996.o
msm-$(CONFIG_DRM_MSM_HDMI_HDCP) += hdmi/hdmi_hdcp.o

View File

@ -1424,17 +1424,24 @@ static void a6xx_llc_activate(struct a6xx_gpu *a6xx_gpu)
{
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
struct msm_gpu *gpu = &adreno_gpu->base;
u32 gpu_scid, cntl1_regval = 0;
u32 cntl1_regval = 0;
if (IS_ERR(a6xx_gpu->llc_mmio))
return;
if (!llcc_slice_activate(a6xx_gpu->llc_slice)) {
gpu_scid = llcc_get_slice_id(a6xx_gpu->llc_slice);
u32 gpu_scid = llcc_get_slice_id(a6xx_gpu->llc_slice);
gpu_scid &= 0x1f;
cntl1_regval = (gpu_scid << 0) | (gpu_scid << 5) | (gpu_scid << 10) |
(gpu_scid << 15) | (gpu_scid << 20);
/* On A660, the SCID programming for UCHE traffic is done in
* A6XX_GBIF_SCACHE_CNTL0[14:10]
*/
if (adreno_is_a660_family(adreno_gpu))
gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL0, (0x1f << 10) |
(1 << 8), (gpu_scid << 10) | (1 << 8));
}
/*
@ -1471,13 +1478,6 @@ static void a6xx_llc_activate(struct a6xx_gpu *a6xx_gpu)
}
gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL1, GENMASK(24, 0), cntl1_regval);
/* On A660, the SCID programming for UCHE traffic is done in
* A6XX_GBIF_SCACHE_CNTL0[14:10]
*/
if (adreno_is_a660_family(adreno_gpu))
gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL0, (0x1f << 10) |
(1 << 8), (gpu_scid << 10) | (1 << 8));
}
static void a6xx_llc_slices_destroy(struct a6xx_gpu *a6xx_gpu)
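
In the rewritten a6xx_llc_activate() above, the 5-bit slice id returned by llcc_get_slice_id() is masked and replicated into five consecutive 5-bit fields of the CNTL1 value. A standalone arithmetic sketch of that packing (the example id is chosen arbitrarily):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t gpu_scid = 0x12 & 0x1f;   /* example 5-bit slice id */
	uint32_t cntl1 = (gpu_scid << 0) | (gpu_scid << 5) | (gpu_scid << 10) |
			 (gpu_scid << 15) | (gpu_scid << 20);

	/* five copies of the id occupy bits [24:0] */
	printf("CNTL1 = 0x%08x\n", (unsigned int)cntl1);
	return 0;
}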
@ -1640,7 +1640,7 @@ static unsigned long a6xx_gpu_busy(struct msm_gpu *gpu)
return (unsigned long)busy_time;
}
void a6xx_gpu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp)
static void a6xx_gpu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);

View File

@ -777,12 +777,12 @@ static void a6xx_get_gmu_registers(struct msm_gpu *gpu,
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
a6xx_state->gmu_registers = state_kcalloc(a6xx_state,
2, sizeof(*a6xx_state->gmu_registers));
3, sizeof(*a6xx_state->gmu_registers));
if (!a6xx_state->gmu_registers)
return;
a6xx_state->nr_gmu_registers = 2;
a6xx_state->nr_gmu_registers = 3;
/* Get the CX GMU registers from AHB */
_a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gmu_reglist[0],

View File

@ -33,6 +33,7 @@ struct dp_aux_private {
bool read;
bool no_send_addr;
bool no_send_stop;
bool initted;
u32 offset;
u32 segment;
@ -331,6 +332,10 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
}
mutex_lock(&aux->mutex);
if (!aux->initted) {
ret = -EIO;
goto exit;
}
dp_aux_update_offset_and_segment(aux, msg);
dp_aux_transfer_helper(aux, msg, true);
@ -380,6 +385,8 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
}
aux->cmd_busy = false;
exit:
mutex_unlock(&aux->mutex);
return ret;
@ -431,8 +438,13 @@ void dp_aux_init(struct drm_dp_aux *dp_aux)
aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
mutex_lock(&aux->mutex);
dp_catalog_aux_enable(aux->catalog, true);
aux->retry_cnt = 0;
aux->initted = true;
mutex_unlock(&aux->mutex);
}
void dp_aux_deinit(struct drm_dp_aux *dp_aux)
@ -441,7 +453,12 @@ void dp_aux_deinit(struct drm_dp_aux *dp_aux)
aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
mutex_lock(&aux->mutex);
aux->initted = false;
dp_catalog_aux_enable(aux->catalog, false);
mutex_unlock(&aux->mutex);
}
int dp_aux_register(struct drm_dp_aux *dp_aux)
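
The initted flag added above rejects AUX transfers that race with dp_aux_init()/dp_aux_deinit(): the flag is only read and written under aux->mutex, so a transfer either sees a fully initialised block or fails with -EIO. A userspace-flavoured sketch of the same guard (a pthread mutex stands in for the kernel mutex; names are illustrative, not the msm DP API):

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>

struct aux_sketch {
	pthread_mutex_t lock;
	bool initted;
};

int aux_transfer(struct aux_sketch *aux)
{
	int ret = 0;

	pthread_mutex_lock(&aux->lock);
	if (!aux->initted) {
		ret = -EIO;              /* reject transfers before init or after deinit */
		goto exit;
	}
	/* ... perform the transfer under the lock ... */
exit:
	pthread_mutex_unlock(&aux->lock);
	return ret;
}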

View File

@ -1658,6 +1658,8 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
if (!prop) {
DRM_DEV_DEBUG(dev,
"failed to find data lane mapping, using default\n");
/* Set the number of date lanes to 4 by default. */
msm_host->num_data_lanes = 4;
return 0;
}

View File

@ -77,6 +77,7 @@ static int msm_gpu_open(struct inode *inode, struct file *file)
goto free_priv;
pm_runtime_get_sync(&gpu->pdev->dev);
msm_gpu_hw_init(gpu);
show_priv->state = gpu->funcs->gpu_state_get(gpu);
pm_runtime_put_sync(&gpu->pdev->dev);

View File

@ -967,29 +967,18 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
return ret;
}
static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
struct drm_file *file)
static int wait_fence(struct msm_gpu_submitqueue *queue, uint32_t fence_id,
ktime_t timeout)
{
struct msm_drm_private *priv = dev->dev_private;
struct drm_msm_wait_fence *args = data;
ktime_t timeout = to_ktime(args->timeout);
struct msm_gpu_submitqueue *queue;
struct msm_gpu *gpu = priv->gpu;
struct dma_fence *fence;
int ret;
if (args->pad) {
DRM_ERROR("invalid pad: %08x\n", args->pad);
if (fence_id > queue->last_fence) {
DRM_ERROR_RATELIMITED("waiting on invalid fence: %u (of %u)\n",
fence_id, queue->last_fence);
return -EINVAL;
}
if (!gpu)
return 0;
queue = msm_submitqueue_get(file->driver_priv, args->queueid);
if (!queue)
return -ENOENT;
/*
* Map submitqueue scoped "seqno" (which is actually an idr key)
* back to underlying dma-fence
@ -1001,7 +990,7 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
ret = mutex_lock_interruptible(&queue->lock);
if (ret)
return ret;
fence = idr_find(&queue->fence_idr, args->fence);
fence = idr_find(&queue->fence_idr, fence_id);
if (fence)
fence = dma_fence_get_rcu(fence);
mutex_unlock(&queue->lock);
@ -1017,6 +1006,32 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
}
dma_fence_put(fence);
return ret;
}
static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct msm_drm_private *priv = dev->dev_private;
struct drm_msm_wait_fence *args = data;
struct msm_gpu_submitqueue *queue;
int ret;
if (args->pad) {
DRM_ERROR("invalid pad: %08x\n", args->pad);
return -EINVAL;
}
if (!priv->gpu)
return 0;
queue = msm_submitqueue_get(file->driver_priv, args->queueid);
if (!queue)
return -ENOENT;
ret = wait_fence(queue, args->fence, to_ktime(args->timeout));
msm_submitqueue_put(queue);
return ret;

View File

@ -1056,8 +1056,7 @@ static int msm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
vma->vm_flags &= ~VM_PFNMAP;
vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));
return 0;
@ -1121,7 +1120,7 @@ static int msm_gem_new_impl(struct drm_device *dev,
break;
fallthrough;
default:
DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n",
DRM_DEV_DEBUG(dev->dev, "invalid cache flag: %x\n",
(flags & MSM_BO_CACHE_MASK));
return -EINVAL;
}

View File

@ -5,6 +5,7 @@
*/
#include <linux/vmalloc.h>
#include <linux/sched/mm.h>
#include "msm_drv.h"
#include "msm_gem.h"

View File

@ -772,6 +772,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
args->nr_cmds);
if (IS_ERR(submit)) {
ret = PTR_ERR(submit);
submit = NULL;
goto out_unlock;
}
@ -904,6 +905,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
drm_sched_entity_push_job(&submit->base);
args->fence = submit->fence_id;
queue->last_fence = submit->fence_id;
msm_reset_syncobjs(syncobjs_to_reset, args->nr_in_syncobjs);
msm_process_post_deps(post_deps, args->nr_out_syncobjs,

View File

@ -359,6 +359,8 @@ static inline int msm_gpu_convert_priority(struct msm_gpu *gpu, int prio,
* @ring_nr: the ringbuffer used by this submitqueue, which is determined
* by the submitqueue's priority
* @faults: the number of GPU hangs associated with this submitqueue
* @last_fence: the sequence number of the last allocated fence (for error
* checking)
* @ctx: the per-drm_file context associated with the submitqueue (i.e.
* which set of pgtables the jobs associated with the
* submitqueue use)
@ -374,6 +376,7 @@ struct msm_gpu_submitqueue {
u32 flags;
u32 ring_nr;
int faults;
uint32_t last_fence;
struct msm_file_private *ctx;
struct list_head node;
struct idr fence_idr;

View File

@ -20,6 +20,10 @@ static int msm_devfreq_target(struct device *dev, unsigned long *freq,
struct msm_gpu *gpu = dev_to_gpu(dev);
struct dev_pm_opp *opp;
/*
* Note that devfreq_recommended_opp() can modify the freq
* to something that actually is in the opp table:
*/
opp = devfreq_recommended_opp(dev, freq, flags);
/*
@ -28,6 +32,7 @@ static int msm_devfreq_target(struct device *dev, unsigned long *freq,
*/
if (gpu->devfreq.idle_freq) {
gpu->devfreq.idle_freq = *freq;
dev_pm_opp_put(opp);
return 0;
}
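
The dev_pm_opp_put() added above fixes a reference leak: devfreq_recommended_opp() returns an OPP with a reference held, and the idle_freq early return previously skipped dropping it. A minimal sketch of the pairing (hypothetical refcount, mirroring the get/put discipline rather than the OPP API):

#include <stdbool.h>

struct opp_ref { int refcount; };

int set_target(struct opp_ref *opp, bool early_exit)
{
	opp->refcount++;                 /* devfreq_recommended_opp() takes a ref */

	if (early_exit) {
		opp->refcount--;         /* dev_pm_opp_put() on the early return */
		return 0;
	}

	/* ... program the new frequency ... */
	opp->refcount--;                 /* dev_pm_opp_put() on the normal path */
	return 0;
}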
@ -203,9 +208,6 @@ static void msm_devfreq_idle_work(struct kthread_work *work)
struct msm_gpu *gpu = container_of(df, struct msm_gpu, devfreq);
unsigned long idle_freq, target_freq = 0;
if (!df->devfreq)
return;
/*
* Hold devfreq lock to synchronize with get_dev_status()/
* target() callbacks
@ -227,6 +229,9 @@ void msm_devfreq_idle(struct msm_gpu *gpu)
{
struct msm_gpu_devfreq *df = &gpu->devfreq;
if (!df->devfreq)
return;
msm_hrtimer_queue_work(&df->idle_work, ms_to_ktime(1),
HRTIMER_MODE_ABS);
HRTIMER_MODE_REL);
}

View File

@ -34,6 +34,7 @@
#include <linux/sched.h>
#include <linux/shmem_fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <drm/drm_cache.h>
#include <drm/ttm/ttm_bo_driver.h>

View File

@ -337,10 +337,10 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
struct drm_device *dev = state->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct vc4_hvs *hvs = vc4->hvs;
struct drm_crtc_state *old_crtc_state;
struct drm_crtc_state *new_crtc_state;
struct drm_crtc *crtc;
struct vc4_hvs_state *old_hvs_state;
unsigned int channel;
int i;
for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
@ -353,30 +353,32 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state)
vc4_hvs_mask_underrun(dev, vc4_crtc_state->assigned_channel);
}
if (vc4->hvs->hvs5)
clk_set_min_rate(hvs->core_clk, 500000000);
old_hvs_state = vc4_hvs_get_old_global_state(state);
if (!old_hvs_state)
if (IS_ERR(old_hvs_state))
return;
for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
struct vc4_crtc_state *vc4_crtc_state =
to_vc4_crtc_state(old_crtc_state);
unsigned int channel = vc4_crtc_state->assigned_channel;
for (channel = 0; channel < HVS_NUM_CHANNELS; channel++) {
struct drm_crtc_commit *commit;
int ret;
if (channel == VC4_HVS_CHANNEL_DISABLED)
continue;
if (!old_hvs_state->fifo_state[channel].in_use)
continue;
ret = drm_crtc_commit_wait(old_hvs_state->fifo_state[channel].pending_commit);
commit = old_hvs_state->fifo_state[channel].pending_commit;
if (!commit)
continue;
ret = drm_crtc_commit_wait(commit);
if (ret)
drm_err(dev, "Timed out waiting for commit\n");
drm_crtc_commit_put(commit);
old_hvs_state->fifo_state[channel].pending_commit = NULL;
}
if (vc4->hvs->hvs5)
clk_set_min_rate(hvs->core_clk, 500000000);
drm_atomic_helper_commit_modeset_disables(dev, state);
vc4_ctm_commit(vc4, state);
@ -410,8 +412,8 @@ static int vc4_atomic_commit_setup(struct drm_atomic_state *state)
unsigned int i;
hvs_state = vc4_hvs_get_new_global_state(state);
if (!hvs_state)
return -EINVAL;
if (WARN_ON(IS_ERR(hvs_state)))
return PTR_ERR(hvs_state);
for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
struct vc4_crtc_state *vc4_crtc_state =
@ -668,12 +670,6 @@ vc4_hvs_channels_duplicate_state(struct drm_private_obj *obj)
for (i = 0; i < HVS_NUM_CHANNELS; i++) {
state->fifo_state[i].in_use = old_state->fifo_state[i].in_use;
if (!old_state->fifo_state[i].pending_commit)
continue;
state->fifo_state[i].pending_commit =
drm_crtc_commit_get(old_state->fifo_state[i].pending_commit);
}
return &state->base;
@ -762,8 +758,8 @@ static int vc4_pv_muxing_atomic_check(struct drm_device *dev,
unsigned int i;
hvs_new_state = vc4_hvs_get_global_state(state);
if (!hvs_new_state)
return -EINVAL;
if (IS_ERR(hvs_new_state))
return PTR_ERR(hvs_new_state);
for (i = 0; i < ARRAY_SIZE(hvs_new_state->fifo_state); i++)
if (!hvs_new_state->fifo_state[i].in_use)

View File

@ -157,36 +157,6 @@ static void virtio_gpu_config_changed(struct virtio_device *vdev)
schedule_work(&vgdev->config_changed_work);
}
static __poll_t virtio_gpu_poll(struct file *filp,
struct poll_table_struct *wait)
{
struct drm_file *drm_file = filp->private_data;
struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv;
struct drm_device *dev = drm_file->minor->dev;
struct virtio_gpu_device *vgdev = dev->dev_private;
struct drm_pending_event *e = NULL;
__poll_t mask = 0;
if (!vgdev->has_virgl_3d || !vfpriv || !vfpriv->ring_idx_mask)
return drm_poll(filp, wait);
poll_wait(filp, &drm_file->event_wait, wait);
if (!list_empty(&drm_file->event_list)) {
spin_lock_irq(&dev->event_lock);
e = list_first_entry(&drm_file->event_list,
struct drm_pending_event, link);
drm_file->event_space += e->event->length;
list_del(&e->link);
spin_unlock_irq(&dev->event_lock);
kfree(e);
mask |= EPOLLIN | EPOLLRDNORM;
}
return mask;
}
static struct virtio_device_id id_table[] = {
{ VIRTIO_ID_GPU, VIRTIO_DEV_ANY_ID },
{ 0 },
@ -226,17 +196,7 @@ MODULE_AUTHOR("Dave Airlie <airlied@redhat.com>");
MODULE_AUTHOR("Gerd Hoffmann <kraxel@redhat.com>");
MODULE_AUTHOR("Alon Levy");
static const struct file_operations virtio_gpu_driver_fops = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
.compat_ioctl = drm_compat_ioctl,
.poll = virtio_gpu_poll,
.read = drm_read,
.llseek = noop_llseek,
.mmap = drm_gem_mmap
};
DEFINE_DRM_GEM_FOPS(virtio_gpu_driver_fops);
static const struct drm_driver driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_RENDER | DRIVER_ATOMIC,

View File

@ -138,7 +138,6 @@ struct virtio_gpu_fence_driver {
spinlock_t lock;
};
#define VIRTGPU_EVENT_FENCE_SIGNALED_INTERNAL 0x10000000
struct virtio_gpu_fence_event {
struct drm_pending_event base;
struct drm_event event;

View File

@ -54,7 +54,7 @@ static int virtio_gpu_fence_event_create(struct drm_device *dev,
if (!e)
return -ENOMEM;
e->event.type = VIRTGPU_EVENT_FENCE_SIGNALED_INTERNAL;
e->event.type = VIRTGPU_EVENT_FENCE_SIGNALED;
e->event.length = sizeof(e->event);
ret = drm_event_reserve_init(dev, file, &e->base, &e->event);

View File

@ -207,14 +207,14 @@ config HID_CHERRY
config HID_CHICONY
tristate "Chicony devices"
depends on HID
depends on USB_HID
default !EXPERT
help
Support for Chicony Tactical pad and special keys on Chicony keyboards.
config HID_CORSAIR
tristate "Corsair devices"
depends on HID && USB && LEDS_CLASS
depends on USB_HID && LEDS_CLASS
help
Support for Corsair devices that are not fully compliant with the
HID standard.
@ -245,7 +245,7 @@ config HID_MACALLY
config HID_PRODIKEYS
tristate "Prodikeys PC-MIDI Keyboard support"
depends on HID && SND
depends on USB_HID && SND
select SND_RAWMIDI
help
Support for the Prodikeys PC-MIDI Keyboard device.
@ -560,7 +560,7 @@ config HID_LENOVO
config HID_LOGITECH
tristate "Logitech devices"
depends on HID
depends on USB_HID
depends on LEDS_CLASS
default !EXPERT
help
@ -951,7 +951,7 @@ config HID_SAITEK
config HID_SAMSUNG
tristate "Samsung InfraRed remote control or keyboards"
depends on HID
depends on USB_HID
help
Support for Samsung InfraRed remote control or keyboards.

View File

@ -1028,8 +1028,7 @@ static int asus_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (drvdata->quirks & QUIRK_IS_MULTITOUCH)
drvdata->tp = &asus_i2c_tp;
if ((drvdata->quirks & QUIRK_T100_KEYBOARD) &&
hid_is_using_ll_driver(hdev, &usb_hid_driver)) {
if ((drvdata->quirks & QUIRK_T100_KEYBOARD) && hid_is_usb(hdev)) {
struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
if (intf->altsetting->desc.bInterfaceNumber == T100_TPAD_INTF) {
@ -1057,8 +1056,7 @@ static int asus_probe(struct hid_device *hdev, const struct hid_device_id *id)
drvdata->tp = &asus_t100chi_tp;
}
if ((drvdata->quirks & QUIRK_MEDION_E1239T) &&
hid_is_using_ll_driver(hdev, &usb_hid_driver)) {
if ((drvdata->quirks & QUIRK_MEDION_E1239T) && hid_is_usb(hdev)) {
struct usb_host_interface *alt =
to_usb_interface(hdev->dev.parent)->altsetting;

View File

@ -191,7 +191,7 @@ static void bigben_worker(struct work_struct *work)
struct bigben_device, worker);
struct hid_field *report_field = bigben->report->field[0];
if (bigben->removed)
if (bigben->removed || !report_field)
return;
if (bigben->work_led) {

View File

@ -114,6 +114,9 @@ static int ch_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
int ret;
if (!hid_is_usb(hdev))
return -EINVAL;
hdev->quirks |= HID_QUIRK_INPUT_PER_APP;
ret = hid_parse(hdev);
if (ret) {

View File

@ -553,7 +553,12 @@ static int corsair_probe(struct hid_device *dev, const struct hid_device_id *id)
int ret;
unsigned long quirks = id->driver_data;
struct corsair_drvdata *drvdata;
struct usb_interface *usbif = to_usb_interface(dev->dev.parent);
struct usb_interface *usbif;
if (!hid_is_usb(dev))
return -EINVAL;
usbif = to_usb_interface(dev->dev.parent);
drvdata = devm_kzalloc(&dev->dev, sizeof(struct corsair_drvdata),
GFP_KERNEL);
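
asus_probe(), ch_probe() and corsair_probe() above all gain a hid_is_usb() check before casting hdev->dev.parent to a USB interface, since a non-USB device (for example a uhid one) reaching these probes would otherwise be mis-cast. A stubbed sketch of the guard (illustrative types, not the HID API):

#include <errno.h>
#include <stdbool.h>

struct hid_dev_sketch { bool is_usb; void *usb_intf; };

int probe_sketch(struct hid_dev_sketch *hdev)
{
	if (!hdev->is_usb)
		return -EINVAL;          /* bail out before any USB-specific cast */

	void *usbif = hdev->usb_intf;    /* safe only after the check above */
	(void)usbif;
	return 0;
}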

View File

@ -50,7 +50,7 @@ struct elan_drvdata {
static int is_not_elan_touchpad(struct hid_device *hdev)
{
if (hdev->bus == BUS_USB) {
if (hid_is_usb(hdev)) {
struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
return (intf->altsetting->desc.bInterfaceNumber !=

Some files were not shown because too many files have changed in this diff.