commit 880b0dd94f

Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
  21234e3a84 ("net/mlx5e: Fix use after free in mlx5e_fs_init()")
  c7eafc5ed0 ("net/mlx5e: Convert ethtool_steering member of flow_steering struct to pointer")

https://lore.kernel.org/all/20220825104410.67d4709c@canb.auug.org.au/
https://lore.kernel.org/all/20220823055533.334471-1-saeed@kernel.org/

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
@@ -1,2 +1,4 @@
Alan Cox <alan@lxorguk.ukuu.org.uk>
Alan Cox <root@hraefn.swansea.linux.org.uk>
Christoph Hellwig <hch@lst.de>
Marc Gonzalez <marc.w.gonzalez@free.fr>
@@ -272,7 +272,7 @@ poll cycle or the number of packets processed reaches netdev_budget.
netdev_max_backlog
------------------

Maximum number of packets, queued on the INPUT side, when the interface
receives packets faster than kernel can process them.

netdev_rss_key
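The tunable documented above is exposed through procfs like any net.core sysctl; a minimal userspace sketch (illustrative only, not part of this commit) that reads it the way `sysctl net.core.netdev_max_backlog` does:

/* Illustrative only: read net.core.netdev_max_backlog via procfs. */
#include <stdio.h>

int main(void)
{
    FILE *f = fopen("/proc/sys/net/core/netdev_max_backlog", "r");
    int backlog;

    if (!f)
        return 1;
    if (fscanf(f, "%d", &backlog) == 1)
        printf("netdev_max_backlog = %d\n", backlog);
    fclose(f);
    return 0;
}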
@@ -233,6 +233,7 @@ allOf:
- allwinner,sun8i-a83t-tcon-lcd
- allwinner,sun8i-v3s-tcon
- allwinner,sun9i-a80-tcon-lcd
- allwinner,sun20i-d1-tcon-lcd

then:
properties:
@@ -252,6 +253,7 @@ allOf:
- allwinner,sun8i-a83t-tcon-tv
- allwinner,sun8i-r40-tcon-tv
- allwinner,sun9i-a80-tcon-tv
- allwinner,sun20i-d1-tcon-tv

then:
properties:
@@ -278,6 +280,7 @@ allOf:
- allwinner,sun9i-a80-tcon-lcd
- allwinner,sun4i-a10-tcon
- allwinner,sun8i-a83t-tcon-lcd
- allwinner,sun20i-d1-tcon-lcd

then:
required:
@@ -294,6 +297,7 @@ allOf:
- allwinner,sun8i-a23-tcon
- allwinner,sun8i-a33-tcon
- allwinner,sun8i-a83t-tcon-lcd
- allwinner,sun20i-d1-tcon-lcd

then:
properties:
@@ -525,8 +525,8 @@ followed by a test macro::
If you need to expose a compiler capability to makefiles and/or C source files,
`CC_HAS_` is the recommended prefix for the config option::

config CC_HAS_ASM_GOTO
def_bool $(success,$(srctree)/scripts/gcc-goto.sh $(CC))
config CC_HAS_FOO
def_bool $(success,$(srctree)/scripts/cc-check-foo.sh $(CC))

Build as module only
~~~~~~~~~~~~~~~~~~~~
@@ -3679,6 +3679,7 @@ F: Documentation/networking/bonding.rst
F: drivers/net/bonding/
F: include/net/bond*
F: include/uapi/linux/if_bonding.h
F: tools/testing/selftests/drivers/net/bonding/

BOSCH SENSORTEC BMA400 ACCELEROMETER IIO DRIVER
M: Dan Robertson <dan@dlrobertson.com>
@@ -5145,6 +5146,7 @@ T: git git://git.samba.org/sfrench/cifs-2.6.git
F: Documentation/admin-guide/cifs/
F: fs/cifs/
F: fs/smbfs_common/
F: include/uapi/linux/cifs

COMPACTPCI HOTPLUG CORE
M: Scott Murray <scott@spiteful.org>
@@ -9773,7 +9775,7 @@ M: Christian Brauner <brauner@kernel.org>
M: Seth Forshee <sforshee@kernel.org>
L: linux-fsdevel@vger.kernel.org
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/brauner/linux.git
T: git://git.kernel.org/pub/scm/linux/kernel/git/vfs/idmapping.git
F: Documentation/filesystems/idmappings.rst
F: tools/testing/selftests/mount_setattr/
F: include/linux/mnt_idmapping.h
Makefile
@@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 0
SUBLEVEL = 0
EXTRAVERSION = -rc1
EXTRAVERSION = -rc2
NAME = Hurr durr I'ma ninja sloth

# *DOCUMENTATION*
@@ -1113,13 +1113,11 @@ vmlinux-alldirs := $(sort $(vmlinux-dirs) Documentation \
$(patsubst %/,%,$(filter %/, $(core-) \
$(drivers-) $(libs-))))

subdir-modorder := $(addsuffix modules.order,$(filter %/, \
$(core-y) $(core-m) $(libs-y) $(libs-m) \
$(drivers-y) $(drivers-m)))

build-dirs := $(vmlinux-dirs)
clean-dirs := $(vmlinux-alldirs)

subdir-modorder := $(addsuffix /modules.order, $(build-dirs))

# Externally visible symbols (used by link-vmlinux.sh)
KBUILD_VMLINUX_OBJS := $(head-y) $(patsubst %/,%/built-in.a, $(core-y))
KBUILD_VMLINUX_OBJS += $(addsuffix built-in.a, $(filter %/, $(libs-y)))
@@ -53,7 +53,6 @@ config KPROBES
config JUMP_LABEL
bool "Optimize very unlikely/likely branches"
depends on HAVE_ARCH_JUMP_LABEL
depends on CC_HAS_ASM_GOTO
select OBJTOOL if HAVE_JUMP_LABEL_HACK
help
This option enables a transparent branch optimization that
@@ -1361,7 +1360,7 @@ config HAVE_PREEMPT_DYNAMIC_CALL

config HAVE_PREEMPT_DYNAMIC_KEY
bool
depends on HAVE_ARCH_JUMP_LABEL && CC_HAS_ASM_GOTO
depends on HAVE_ARCH_JUMP_LABEL
select HAVE_PREEMPT_DYNAMIC
help
An architecture should select this if it can handle the preemption
@@ -929,6 +929,10 @@ bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);
(system_supports_mte() && \
test_bit(KVM_ARCH_FLAG_MTE_ENABLED, &(kvm)->arch.flags))

#define kvm_supports_32bit_el0() \
(system_supports_32bit_el0() && \
!static_branch_unlikely(&arm64_mismatched_32bit_el0))

int kvm_trng_call(struct kvm_vcpu *vcpu);
#ifdef CONFIG_KVM
extern phys_addr_t hyp_mem_base;
@@ -75,9 +75,11 @@ struct kvm_regs {

/* KVM_ARM_SET_DEVICE_ADDR ioctl id encoding */
#define KVM_ARM_DEVICE_TYPE_SHIFT 0
#define KVM_ARM_DEVICE_TYPE_MASK (0xffff << KVM_ARM_DEVICE_TYPE_SHIFT)
#define KVM_ARM_DEVICE_TYPE_MASK GENMASK(KVM_ARM_DEVICE_TYPE_SHIFT + 15, \
KVM_ARM_DEVICE_TYPE_SHIFT)
#define KVM_ARM_DEVICE_ID_SHIFT 16
#define KVM_ARM_DEVICE_ID_MASK (0xffff << KVM_ARM_DEVICE_ID_SHIFT)
#define KVM_ARM_DEVICE_ID_MASK GENMASK(KVM_ARM_DEVICE_ID_SHIFT + 15, \
KVM_ARM_DEVICE_ID_SHIFT)

/* Supported device IDs */
#define KVM_ARM_DEVICE_VGIC_V2 0
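The switch to GENMASK() above avoids shifting the signed 0xffff literal. A small self-contained check (the GENMASK definition below mirrors the kernel's include/linux/bits.h form for unsigned long) showing both masks cover the intended 16-bit fields:

/* Userspace re-implementation of the kernel's GENMASK(h, l). */
#include <assert.h>

#define GENMASK(h, l) \
    (((~0UL) - (1UL << (l)) + 1) & (~0UL >> (8 * sizeof(unsigned long) - 1 - (h))))

int main(void)
{
    assert(GENMASK(15, 0) == 0xffffUL);            /* TYPE_MASK: bits 15..0 */
    assert(GENMASK(16 + 15, 16) == 0xffff0000UL);  /* ID_MASK: bits 31..16 */
    return 0;
}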
@@ -757,8 +757,7 @@ static bool vcpu_mode_is_bad_32bit(struct kvm_vcpu *vcpu)
if (likely(!vcpu_mode_is_32bit(vcpu)))
return false;

return !system_supports_32bit_el0() ||
static_branch_unlikely(&arm64_mismatched_32bit_el0);
return !kvm_supports_32bit_el0();
}

/**
@@ -242,7 +242,7 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
u64 mode = (*(u64 *)valp) & PSR_AA32_MODE_MASK;
switch (mode) {
case PSR_AA32_MODE_USR:
if (!system_supports_32bit_el0())
if (!kvm_supports_32bit_el0())
return -EINVAL;
break;
case PSR_AA32_MODE_FIQ:
@@ -993,7 +993,7 @@ transparent_hugepage_adjust(struct kvm *kvm, struct kvm_memory_slot *memslot,
* THP doesn't start to split while we are adjusting the
* refcounts.
*
* We are sure this doesn't happen, because mmu_notifier_retry
* We are sure this doesn't happen, because mmu_invalidate_retry
* was successful and we are holding the mmu_lock, so if this
* THP is trying to split, it will be blocked in the mmu
* notifier before touching any of the pages, specifically
@@ -1188,9 +1188,9 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
return ret;
}

mmu_seq = vcpu->kvm->mmu_notifier_seq;
mmu_seq = vcpu->kvm->mmu_invalidate_seq;
/*
* Ensure the read of mmu_notifier_seq happens before we call
* Ensure the read of mmu_invalidate_seq happens before we call
* gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk
* the page we just got a reference to gets unmapped before we have a
* chance to grab the mmu_lock, which ensure that if the page gets
@@ -1246,7 +1246,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
else
write_lock(&kvm->mmu_lock);
pgt = vcpu->arch.hw_mmu->pgt;
if (mmu_notifier_retry(kvm, mmu_seq))
if (mmu_invalidate_retry(kvm, mmu_seq))
goto out_unlock;

/*
@@ -652,7 +652,7 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
*/
val = ((pmcr & ~ARMV8_PMU_PMCR_MASK)
| (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E);
if (!system_supports_32bit_el0())
if (!kvm_supports_32bit_el0())
val |= ARMV8_PMU_PMCR_LC;
__vcpu_sys_reg(vcpu, r->reg) = val;
}
@@ -701,7 +701,7 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
val = __vcpu_sys_reg(vcpu, PMCR_EL0);
val &= ~ARMV8_PMU_PMCR_MASK;
val |= p->regval & ARMV8_PMU_PMCR_MASK;
if (!system_supports_32bit_el0())
if (!kvm_supports_32bit_el0())
val |= ARMV8_PMU_PMCR_LC;
__vcpu_sys_reg(vcpu, PMCR_EL0) = val;
kvm_pmu_handle_pmcr(vcpu, val);
@@ -81,7 +81,6 @@ extern struct acpi_vector_group msi_group[MAX_IO_PICS];
#define GSI_MIN_PCH_IRQ LOONGSON_PCH_IRQ_BASE
#define GSI_MAX_PCH_IRQ (LOONGSON_PCH_IRQ_BASE + 256 - 1)

extern int find_pch_pic(u32 gsi);
struct acpi_madt_lio_pic;
struct acpi_madt_eio_pic;
struct acpi_madt_ht_pic;
@@ -84,8 +84,6 @@


#define KVM_MAX_VCPUS 16
/* memory slots that does not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS 0

#define KVM_HALT_POLL_NS_DEFAULT 500000
@@ -615,17 +615,17 @@ retry:
* Used to check for invalidations in progress, of the pfn that is
* returned by pfn_to_pfn_prot below.
*/
mmu_seq = kvm->mmu_notifier_seq;
mmu_seq = kvm->mmu_invalidate_seq;
/*
* Ensure the read of mmu_notifier_seq isn't reordered with PTE reads in
* gfn_to_pfn_prot() (which calls get_user_pages()), so that we don't
* Ensure the read of mmu_invalidate_seq isn't reordered with PTE reads
* in gfn_to_pfn_prot() (which calls get_user_pages()), so that we don't
* risk the page we get a reference to getting unmapped before we have a
* chance to grab the mmu_lock without mmu_notifier_retry() noticing.
* chance to grab the mmu_lock without mmu_invalidate_retry() noticing.
*
* This smp_rmb() pairs with the effective smp_wmb() of the combination
* of the pte_unmap_unlock() after the PTE is zapped, and the
* spin_lock() in kvm_mmu_notifier_invalidate_<page|range_end>() before
* mmu_notifier_seq is incremented.
* mmu_invalidate_seq is incremented.
*/
smp_rmb();
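The comment above describes the sequence-count retry scheme this series renames. A hedged userspace analogue of the snapshot/validate shape (C11 atomics stand in for the kernel primitives; names are illustrative):

/* Writers bump invalidate_seq around invalidations; readers snapshot it,
 * do the unlocked lookup, then revalidate before committing. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_ulong invalidate_seq;

static unsigned long seq_snapshot(void)
{
    /* acquire pairs with the writer's increment, like the smp_rmb() above */
    return atomic_load_explicit(&invalidate_seq, memory_order_acquire);
}

static int seq_retry(unsigned long seq)
{
    return atomic_load_explicit(&invalidate_seq, memory_order_acquire) != seq;
}

int main(void)
{
    unsigned long seq = seq_snapshot();

    /* ... the unlocked gfn -> pfn lookup would happen here ... */
    atomic_fetch_add(&invalidate_seq, 1);   /* simulate a racing invalidation */

    if (seq_retry(seq))
        puts("sequence moved: drop the page reference and retry");
    return 0;
}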
@@ -638,7 +638,7 @@ retry:

spin_lock(&kvm->mmu_lock);
/* Check if an invalidation has taken place since we got pfn */
if (mmu_notifier_retry(kvm, mmu_seq)) {
if (mmu_invalidate_retry(kvm, mmu_seq)) {
/*
* This can happen when mappings are changed asynchronously, but
* also synchronously if a COW is triggered by
@@ -146,10 +146,10 @@ menu "Processor type and features"

choice
prompt "Processor type"
default PA7000
default PA7000 if "$(ARCH)" = "parisc"

config PA7000
bool "PA7000/PA7100"
bool "PA7000/PA7100" if "$(ARCH)" = "parisc"
help
This is the processor type of your CPU. This information is
used for optimizing purposes. In order to compile a kernel
@@ -160,21 +160,21 @@ config PA7000
which is required on some machines.

config PA7100LC
bool "PA7100LC"
bool "PA7100LC" if "$(ARCH)" = "parisc"
help
Select this option for the PCX-L processor, as used in the
712, 715/64, 715/80, 715/100, 715/100XC, 725/100, 743, 748,
D200, D210, D300, D310 and E-class

config PA7200
bool "PA7200"
bool "PA7200" if "$(ARCH)" = "parisc"
help
Select this option for the PCX-T' processor, as used in the
C100, C110, J100, J110, J210XC, D250, D260, D350, D360,
K100, K200, K210, K220, K400, K410 and K420

config PA7300LC
bool "PA7300LC"
bool "PA7300LC" if "$(ARCH)" = "parisc"
help
Select this option for the PCX-L2 processor, as used in the
744, A180, B132L, B160L, B180L, C132L, C160L, C180L,
@@ -224,17 +224,8 @@ config MLONGCALLS
Enabling this option will probably slow down your kernel.

config 64BIT
bool "64-bit kernel"
def_bool "$(ARCH)" = "parisc64"
depends on PA8X00
help
Enable this if you want to support 64bit kernel on PA-RISC platform.

At the moment, only people willing to use more than 2GB of RAM,
or having a 64bit-only capable PA-RISC machine should say Y here.

Since there is no 64bit userland on PA-RISC, there is no point to
enable this option otherwise. The 64bit kernel is significantly bigger
and slower than the 32bit one.

choice
prompt "Kernel page size"
@@ -12,14 +12,6 @@
#include <asm/barrier.h>
#include <linux/atomic.h>

/* compiler build environment sanity checks: */
#if !defined(CONFIG_64BIT) && defined(__LP64__)
#error "Please use 'ARCH=parisc' to build the 32-bit kernel."
#endif
#if defined(CONFIG_64BIT) && !defined(__LP64__)
#error "Please use 'ARCH=parisc64' to build the 64-bit kernel."
#endif

/* See http://marc.theaimsgroup.com/?t=108826637900003 for discussion
* on use of volatile and __*_bit() (set/clear/change):
* *_bit() want use of volatile.
@@ -22,7 +22,7 @@
#include <linux/init.h>
#include <linux/pgtable.h>

.level PA_ASM_LEVEL
.level 1.1

__INITDATA
ENTRY(boot_args)
@@ -70,6 +70,47 @@ $bss_loop:
stw,ma %arg2,4(%r1)
stw,ma %arg3,4(%r1)

#if !defined(CONFIG_64BIT) && defined(CONFIG_PA20)
/* This 32-bit kernel was compiled for PA2.0 CPUs. Check current CPU
* and halt kernel if we detect a PA1.x CPU. */
ldi 32,%r10
mtctl %r10,%cr11
.level 2.0
mfctl,w %cr11,%r10
.level 1.1
comib,<>,n 0,%r10,$cpu_ok

load32 PA(msg1),%arg0
ldi msg1_end-msg1,%arg1
$iodc_panic:
copy %arg0, %r10
copy %arg1, %r11
load32 PA(init_stack),%sp
#define MEM_CONS 0x3A0
ldw MEM_CONS+32(%r0),%arg0 // HPA
ldi ENTRY_IO_COUT,%arg1
ldw MEM_CONS+36(%r0),%arg2 // SPA
ldw MEM_CONS+8(%r0),%arg3 // layers
load32 PA(__bss_start),%r1
stw %r1,-52(%sp) // arg4
stw %r0,-56(%sp) // arg5
stw %r10,-60(%sp) // arg6 = ptr to text
stw %r11,-64(%sp) // arg7 = len
stw %r0,-68(%sp) // arg8
load32 PA(.iodc_panic_ret), %rp
ldw MEM_CONS+40(%r0),%r1 // ENTRY_IODC
bv,n (%r1)
.iodc_panic_ret:
b . /* wait endless with ... */
or %r10,%r10,%r10 /* qemu idle sleep */
msg1: .ascii "Can't boot kernel which was built for PA8x00 CPUs on this machine.\r\n"
msg1_end:

$cpu_ok:
#endif

.level PA_ASM_LEVEL

/* Initialize startup VM. Just map first 16/32 MB of memory */
load32 PA(swapper_pg_dir),%r4
mtctl %r4,%cr24 /* Initialize kernel root pointer */
@@ -93,7 +93,7 @@
#define R1(i) (((i)>>21)&0x1f)
#define R2(i) (((i)>>16)&0x1f)
#define R3(i) ((i)&0x1f)
#define FR3(i) ((((i)<<1)&0x1f)|(((i)>>6)&1))
#define FR3(i) ((((i)&0x1f)<<1)|(((i)>>6)&1))
#define IM(i,n) (((i)>>1&((1<<(n-1))-1))|((i)&1?((0-1L)<<(n-1)):0))
#define IM5_2(i) IM((i)>>16,5)
#define IM5_3(i) IM((i),5)
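The FR3() fix above swaps shift-then-mask for mask-then-shift: shifting first pushed bit 4 of the 5-bit register field out of the 0x1f mask. A small self-contained check of both variants:

#include <assert.h>
#include <stdio.h>

#define FR3_OLD(i) ((((i) << 1) & 0x1f) | (((i) >> 6) & 1))
#define FR3_NEW(i) ((((i) & 0x1f) << 1) | (((i) >> 6) & 1))

int main(void)
{
    unsigned int i = 0x10;  /* register field with bit 4 set */

    /* old: 0x10 << 1 = 0x20, masked back to 5 bits -> the bit is lost */
    assert(FR3_OLD(i) == 0x00);
    /* new: mask first, then shift -> 0x20, the bit survives */
    assert(FR3_NEW(i) == 0x20);
    printf("old=%#x new=%#x\n", FR3_OLD(i), FR3_NEW(i));
    return 0;
}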
@@ -666,7 +666,7 @@ static inline pte_t *find_kvm_host_pte(struct kvm *kvm, unsigned long mmu_seq,
VM_WARN(!spin_is_locked(&kvm->mmu_lock),
"%s called with kvm mmu_lock not held \n", __func__);

if (mmu_notifier_retry(kvm, mmu_seq))
if (mmu_invalidate_retry(kvm, mmu_seq))
return NULL;

pte = __find_linux_pte(kvm->mm->pgd, ea, NULL, hshift);
@@ -68,10 +68,6 @@ void __init set_pci_dma_ops(const struct dma_map_ops *dma_ops)
pci_dma_ops = dma_ops;
}

/*
* This function should run under locking protection, specifically
* hose_spinlock.
*/
static int get_phb_number(struct device_node *dn)
{
int ret, phb_id = -1;
@@ -108,15 +104,20 @@ static int get_phb_number(struct device_node *dn)
if (!ret)
phb_id = (int)(prop & (MAX_PHBS - 1));

spin_lock(&hose_spinlock);

/* We need to be sure to not use the same PHB number twice. */
if ((phb_id >= 0) && !test_and_set_bit(phb_id, phb_bitmap))
return phb_id;
goto out_unlock;

/* If everything fails then fallback to dynamic PHB numbering. */
phb_id = find_first_zero_bit(phb_bitmap, MAX_PHBS);
BUG_ON(phb_id >= MAX_PHBS);
set_bit(phb_id, phb_bitmap);

out_unlock:
spin_unlock(&hose_spinlock);

return phb_id;
}

@@ -127,10 +128,13 @@ struct pci_controller *pcibios_alloc_controller(struct device_node *dev)
phb = zalloc_maybe_bootmem(sizeof(struct pci_controller), GFP_KERNEL);
if (phb == NULL)
return NULL;
spin_lock(&hose_spinlock);

phb->global_number = get_phb_number(dev);

spin_lock(&hose_spinlock);
list_add_tail(&phb->list_node, &hose_list);
spin_unlock(&hose_spinlock);

phb->dn = dev;
phb->is_dynamic = slab_is_available();
#ifdef CONFIG_PPC64
@@ -90,7 +90,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
unsigned long pfn;

/* used to check for invalidations in progress */
mmu_seq = kvm->mmu_notifier_seq;
mmu_seq = kvm->mmu_invalidate_seq;
smp_rmb();

/* Get host physical address for gpa */
@@ -151,7 +151,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
cpte = kvmppc_mmu_hpte_cache_next(vcpu);

spin_lock(&kvm->mmu_lock);
if (!cpte || mmu_notifier_retry(kvm, mmu_seq)) {
if (!cpte || mmu_invalidate_retry(kvm, mmu_seq)) {
r = -EAGAIN;
goto out_unlock;
}
@@ -578,7 +578,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_vcpu *vcpu,
return -EFAULT;

/* used to check for invalidations in progress */
mmu_seq = kvm->mmu_notifier_seq;
mmu_seq = kvm->mmu_invalidate_seq;
smp_rmb();

ret = -EFAULT;
@@ -693,7 +693,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_vcpu *vcpu,

/* Check if we might have been invalidated; let the guest retry if so */
ret = RESUME_GUEST;
if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) {
if (mmu_invalidate_retry(vcpu->kvm, mmu_seq)) {
unlock_rmap(rmap);
goto out_unlock;
}
@@ -640,7 +640,7 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
/* Check if we might have been invalidated; let the guest retry if so */
spin_lock(&kvm->mmu_lock);
ret = -EAGAIN;
if (mmu_notifier_retry(kvm, mmu_seq))
if (mmu_invalidate_retry(kvm, mmu_seq))
goto out_unlock;

/* Now traverse again under the lock and change the tree */
@@ -830,7 +830,7 @@ int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
bool large_enable;

/* used to check for invalidations in progress */
mmu_seq = kvm->mmu_notifier_seq;
mmu_seq = kvm->mmu_invalidate_seq;
smp_rmb();

/*
@@ -1191,7 +1191,7 @@ void kvmppc_radix_flush_memslot(struct kvm *kvm,
* Increase the mmu notifier sequence number to prevent any page
* fault that read the memslot earlier from writing a PTE.
*/
kvm->mmu_notifier_seq++;
kvm->mmu_invalidate_seq++;
spin_unlock(&kvm->mmu_lock);
}
@@ -1580,7 +1580,7 @@ static long int __kvmhv_nested_page_fault(struct kvm_vcpu *vcpu,
/* 2. Find the host pte for this L1 guest real address */

/* Used to check for invalidations in progress */
mmu_seq = kvm->mmu_notifier_seq;
mmu_seq = kvm->mmu_invalidate_seq;
smp_rmb();

/* See if can find translation in our partition scoped tables for L1 */
@@ -219,7 +219,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
g_ptel = ptel;

/* used later to detect if we might have been invalidated */
mmu_seq = kvm->mmu_notifier_seq;
mmu_seq = kvm->mmu_invalidate_seq;
smp_rmb();

/* Find the memslot (if any) for this address */
@@ -366,7 +366,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
rmap = real_vmalloc_addr(rmap);
lock_rmap(rmap);
/* Check for pending invalidations under the rmap chain lock */
if (mmu_notifier_retry(kvm, mmu_seq)) {
if (mmu_invalidate_retry(kvm, mmu_seq)) {
/* inval in progress, write a non-present HPTE */
pteh |= HPTE_V_ABSENT;
pteh &= ~HPTE_V_VALID;
@@ -932,7 +932,7 @@ static long kvmppc_do_h_page_init_zero(struct kvm_vcpu *vcpu,
int i;

/* Used later to detect if we might have been invalidated */
mmu_seq = kvm->mmu_notifier_seq;
mmu_seq = kvm->mmu_invalidate_seq;
smp_rmb();

arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
@@ -960,7 +960,7 @@ static long kvmppc_do_h_page_init_copy(struct kvm_vcpu *vcpu,
long ret = H_SUCCESS;

/* Used later to detect if we might have been invalidated */
mmu_seq = kvm->mmu_notifier_seq;
mmu_seq = kvm->mmu_invalidate_seq;
smp_rmb();

arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
@@ -339,7 +339,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
unsigned long flags;

/* used to check for invalidations in progress */
mmu_seq = kvm->mmu_notifier_seq;
mmu_seq = kvm->mmu_invalidate_seq;
smp_rmb();

/*
@@ -460,7 +460,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
}

spin_lock(&kvm->mmu_lock);
if (mmu_notifier_retry(kvm, mmu_seq)) {
if (mmu_invalidate_retry(kvm, mmu_seq)) {
ret = -EAGAIN;
goto out;
}
@@ -28,7 +28,7 @@ unsigned long elf_hwcap __read_mostly;
/* Host ISA bitmap */
static DECLARE_BITMAP(riscv_isa, RISCV_ISA_EXT_MAX) __read_mostly;

__ro_after_init DEFINE_STATIC_KEY_ARRAY_FALSE(riscv_isa_ext_keys, RISCV_ISA_EXT_KEY_MAX);
DEFINE_STATIC_KEY_ARRAY_FALSE(riscv_isa_ext_keys, RISCV_ISA_EXT_KEY_MAX);
EXPORT_SYMBOL(riscv_isa_ext_keys);

/**
@@ -666,7 +666,7 @@ int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
return ret;
}

mmu_seq = kvm->mmu_notifier_seq;
mmu_seq = kvm->mmu_invalidate_seq;

hfn = gfn_to_pfn_prot(kvm, gfn, is_write, &writable);
if (hfn == KVM_PFN_ERR_HWPOISON) {
@@ -686,7 +686,7 @@ int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,

spin_lock(&kvm->mmu_lock);

if (mmu_notifier_retry(kvm, mmu_seq))
if (mmu_invalidate_retry(kvm, mmu_seq))
goto out_unlock;

if (writable) {
@@ -437,7 +437,7 @@ __init int hypfs_diag_init(void)
int rc;

if (diag204_probe()) {
pr_err("The hardware system does not support hypfs\n");
pr_info("The hardware system does not support hypfs\n");
return -ENODATA;
}

@@ -496,9 +496,9 @@ fail_hypfs_sprp_exit:
hypfs_vm_exit();
fail_hypfs_diag_exit:
hypfs_diag_exit();
pr_err("Initialization of hypfs failed with rc=%i\n", rc);
fail_dbfs_exit:
hypfs_dbfs_exit();
pr_err("Initialization of hypfs failed with rc=%i\n", rc);
return rc;
}
device_initcall(hypfs_init)
@@ -65,20 +65,6 @@ extern void setup_clear_cpu_cap(unsigned int bit);

#define setup_force_cpu_bug(bit) setup_force_cpu_cap(bit)

#if defined(__clang__) && !defined(CONFIG_CC_HAS_ASM_GOTO)

/*
* Workaround for the sake of BPF compilation which utilizes kernel
* headers, but clang does not support ASM GOTO and fails the build.
*/
#ifndef __BPF_TRACING__
#warning "Compiler lacks ASM_GOTO support. Add -D __BPF_TRACING__ to your compiler arguments"
#endif

#define static_cpu_has(bit) boot_cpu_has(bit)

#else

/*
* Static testing of CPU features. Used the same as boot_cpu_has(). It
* statically patches the target code for additional performance. Use
@@ -137,7 +123,6 @@ t_no:
boot_cpu_has(bit) : \
_static_cpu_has(bit) \
)
#endif

#define cpu_has_bug(c, bit) cpu_has(c, (bit))
#define set_cpu_bug(c, bit) set_cpu_cap(c, (bit))
@@ -286,10 +286,6 @@ vdso_install:

archprepare: checkbin
checkbin:
ifndef CONFIG_CC_HAS_ASM_GOTO
@echo Compiler lacks asm-goto support.
@exit 1
endif
ifdef CONFIG_RETPOLINE
ifeq ($(RETPOLINE_CFLAGS),)
@echo "You are building kernel with non-retpoline compiler." >&2
@@ -155,20 +155,6 @@ extern void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int bit);

#define setup_force_cpu_bug(bit) setup_force_cpu_cap(bit)

#if defined(__clang__) && !defined(CONFIG_CC_HAS_ASM_GOTO)

/*
* Workaround for the sake of BPF compilation which utilizes kernel
* headers, but clang does not support ASM GOTO and fails the build.
*/
#ifndef __BPF_TRACING__
#warning "Compiler lacks ASM_GOTO support. Add -D __BPF_TRACING__ to your compiler arguments"
#endif

#define static_cpu_has(bit) boot_cpu_has(bit)

#else

/*
* Static testing of CPU features. Used the same as boot_cpu_has(). It
* statically patches the target code for additional performance. Use
@@ -208,7 +194,6 @@ t_no:
boot_cpu_has(bit) : \
_static_cpu_has(bit) \
)
#endif

#define cpu_has_bug(c, bit) cpu_has(c, (bit))
#define set_cpu_bug(c, bit) set_cpu_cap(c, (bit))
@@ -31,6 +31,16 @@

#define __noendbr __attribute__((nocf_check))

/*
* Create a dummy function pointer reference to prevent objtool from marking
* the function as needing to be "sealed" (i.e. ENDBR converted to NOP by
* apply_ibt_endbr()).
*/
#define IBT_NOSEAL(fname) \
".pushsection .discard.ibt_endbr_noseal\n\t" \
_ASM_PTR fname "\n\t" \
".popsection\n\t"

static inline __attribute_const__ u32 gen_endbr(void)
{
u32 endbr;
@@ -84,6 +94,7 @@ extern __noendbr void ibt_restore(u64 save);
#ifndef __ASSEMBLY__

#define ASM_ENDBR
#define IBT_NOSEAL(name)

#define __noendbr
@@ -53,7 +53,7 @@
#define KVM_MAX_VCPU_IDS (KVM_MAX_VCPUS * KVM_VCPU_ID_RATIO)

/* memory slots that are not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS 3
#define KVM_INTERNAL_MEM_SLOTS 3

#define KVM_HALT_POLL_NS_DEFAULT 200000
@@ -11,7 +11,7 @@

#define __CLOBBERS_MEM(clb...) "memory", ## clb

#if !defined(__GCC_ASM_FLAG_OUTPUTS__) && defined(CONFIG_CC_HAS_ASM_GOTO)
#ifndef __GCC_ASM_FLAG_OUTPUTS__

/* Use asm goto */

@@ -27,7 +27,7 @@ cc_label: c = true; \
c; \
})

#else /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CONFIG_CC_HAS_ASM_GOTO) */
#else /* defined(__GCC_ASM_FLAG_OUTPUTS__) */

/* Use flags output or a set instruction */

@@ -40,7 +40,7 @@ cc_label: c = true; \
c; \
})

#endif /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CONFIG_CC_HAS_ASM_GOTO) */
#endif /* defined(__GCC_ASM_FLAG_OUTPUTS__) */

#define GEN_UNARY_RMWcc_4(op, var, cc, arg0) \
__GEN_RMWcc(op " " arg0, var, cc, __CLOBBERS_MEM())
@@ -505,7 +505,7 @@ static void kprobe_emulate_jcc(struct kprobe *p, struct pt_regs *regs)
match = ((regs->flags & X86_EFLAGS_SF) >> X86_EFLAGS_SF_BIT) ^
((regs->flags & X86_EFLAGS_OF) >> X86_EFLAGS_OF_BIT);
if (p->ainsn.jcc.type >= 0xe)
match = match && (regs->flags & X86_EFLAGS_ZF);
match = match || (regs->flags & X86_EFLAGS_ZF);
}
__kprobe_emulate_jmp(p, regs, (match && !invert) || (!match && invert));
}
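For condition-code types 0xe/0xf (JLE/JG), the architecture takes the branch when ZF is set or SF != OF, which is why the fix combines the terms with || rather than &&. A minimal check of the two formulas:

#include <assert.h>
#include <stdbool.h>

int main(void)
{
    bool sf = 0, of = 0, zf = 1;    /* e.g. the compared values were equal */
    bool sf_ne_of = sf ^ of;

    bool old_match = sf_ne_of && zf;    /* buggy: misses the ZF-only case */
    bool new_match = sf_ne_of || zf;    /* matches the JLE definition */

    assert(!old_match && new_match);
    return 0;
}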
@@ -326,7 +326,8 @@ static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop);
".align " __stringify(FASTOP_SIZE) " \n\t" \
".type " name ", @function \n\t" \
name ":\n\t" \
ASM_ENDBR
ASM_ENDBR \
IBT_NOSEAL(name)

#define FOP_FUNC(name) \
__FOP_FUNC(#name)
@@ -446,27 +447,12 @@ static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop);
FOP_END

/* Special case for SETcc - 1 instruction per cc */

/*
* Depending on .config the SETcc functions look like:
*
* ENDBR [4 bytes; CONFIG_X86_KERNEL_IBT]
* SETcc %al [3 bytes]
* RET | JMP __x86_return_thunk [1,5 bytes; CONFIG_RETHUNK]
* INT3 [1 byte; CONFIG_SLS]
*/
#define SETCC_ALIGN 16

#define FOP_SETCC(op) \
".align " __stringify(SETCC_ALIGN) " \n\t" \
".type " #op ", @function \n\t" \
#op ": \n\t" \
ASM_ENDBR \
FOP_FUNC(op) \
#op " %al \n\t" \
__FOP_RET(#op) \
".skip " __stringify(SETCC_ALIGN) " - (.-" #op "), 0xcc \n\t"
FOP_RET(op)

__FOP_START(setcc, SETCC_ALIGN)
FOP_START(setcc)
FOP_SETCC(seto)
FOP_SETCC(setno)
FOP_SETCC(setc)
@@ -493,7 +479,7 @@ FOP_END;

/*
* XXX: inoutclob user must know where the argument is being expanded.
* Relying on CONFIG_CC_HAS_ASM_GOTO would allow us to remove _fault.
* Using asm goto would allow us to remove _fault.
*/
#define asm_safe(insn, inoutclob...) \
({ \
@@ -1079,7 +1065,7 @@ static int em_bsr_c(struct x86_emulate_ctxt *ctxt)
static __always_inline u8 test_cc(unsigned int condition, unsigned long flags)
{
u8 rc;
void (*fop)(void) = (void *)em_setcc + SETCC_ALIGN * (condition & 0xf);
void (*fop)(void) = (void *)em_setcc + FASTOP_SIZE * (condition & 0xf);

flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
asm("push %[flags]; popf; " CALL_NOSPEC
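test_cc() above relies on every SETcc stub being emitted at a fixed stride from a common base, so stub N sits at base + N * stride. A hedged C sketch of that addressing scheme (ordinary functions in an array stand in for the asm stubs; the stride is the array element size):

#include <stdio.h>

typedef int (*cc_fn)(unsigned long flags);

static int seto(unsigned long f)  { return !!(f & 0x800); }  /* 0x800 = OF bit */
static int setno(unsigned long f) { return  !(f & 0x800); }

static const cc_fn setcc_table[] = { seto, setno /* , ... 14 more stubs */ };

int main(void)
{
    unsigned int condition = 1;                     /* "no overflow" */
    cc_fn fop = setcc_table[condition & 0xf];       /* base + index * stride */

    printf("cc %u -> %d\n", condition, fop(0));
    return 0;
}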
@@ -2914,7 +2914,7 @@ static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
* If addresses are being invalidated, skip prefetching to avoid
* accidentally prefetching those addresses.
*/
if (unlikely(vcpu->kvm->mmu_notifier_count))
if (unlikely(vcpu->kvm->mmu_invalidate_in_progress))
return;

__direct_pte_prefetch(vcpu, sp, sptep);
@@ -2928,7 +2928,7 @@ static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
*
* There are several ways to safely use this helper:
*
* - Check mmu_notifier_retry_hva() after grabbing the mapping level, before
* - Check mmu_invalidate_retry_hva() after grabbing the mapping level, before
* consuming it. In this case, mmu_lock doesn't need to be held during the
* lookup, but it does need to be held while checking the MMU notifier.
*
@@ -3056,7 +3056,7 @@ void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
return;

/*
* mmu_notifier_retry() was successful and mmu_lock is held, so
* mmu_invalidate_retry() was successful and mmu_lock is held, so
* the pmd can't be split from under us.
*/
fault->goal_level = fault->req_level;
@@ -4203,7 +4203,7 @@ static bool is_page_fault_stale(struct kvm_vcpu *vcpu,
return true;

return fault->slot &&
mmu_notifier_retry_hva(vcpu->kvm, mmu_seq, fault->hva);
mmu_invalidate_retry_hva(vcpu->kvm, mmu_seq, fault->hva);
}

static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
@@ -4227,7 +4227,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
if (r)
return r;

mmu_seq = vcpu->kvm->mmu_notifier_seq;
mmu_seq = vcpu->kvm->mmu_invalidate_seq;
smp_rmb();

r = kvm_faultin_pfn(vcpu, fault);
@@ -6055,7 +6055,7 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)

write_lock(&kvm->mmu_lock);

kvm_inc_notifier_count(kvm, gfn_start, gfn_end);
kvm_mmu_invalidate_begin(kvm, gfn_start, gfn_end);

flush = kvm_rmap_zap_gfn_range(kvm, gfn_start, gfn_end);

@@ -6069,7 +6069,7 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
kvm_flush_remote_tlbs_with_address(kvm, gfn_start,
gfn_end - gfn_start);

kvm_dec_notifier_count(kvm, gfn_start, gfn_end);
kvm_mmu_invalidate_end(kvm, gfn_start, gfn_end);

write_unlock(&kvm->mmu_lock);
}
@@ -589,7 +589,7 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
* If addresses are being invalidated, skip prefetching to avoid
* accidentally prefetching those addresses.
*/
if (unlikely(vcpu->kvm->mmu_notifier_count))
if (unlikely(vcpu->kvm->mmu_invalidate_in_progress))
return;

if (sp->role.direct)
@@ -838,7 +838,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
else
fault->max_level = walker.level;

mmu_seq = vcpu->kvm->mmu_notifier_seq;
mmu_seq = vcpu->kvm->mmu_invalidate_seq;
smp_rmb();

r = kvm_faultin_pfn(vcpu, fault);
@@ -645,7 +645,7 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
pages++;
spin_lock(&init_mm.page_table_lock);

prot = __pgprot(pgprot_val(prot) | __PAGE_KERNEL_LARGE);
prot = __pgprot(pgprot_val(prot) | _PAGE_PSE);

set_pte_init((pte_t *)pud,
pfn_pte((paddr & PUD_MASK) >> PAGE_SHIFT,
@@ -2229,26 +2229,6 @@ void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs)
}
EXPORT_SYMBOL(blk_mq_delay_run_hw_queues);

/**
* blk_mq_queue_stopped() - check whether one or more hctxs have been stopped
* @q: request queue.
*
* The caller is responsible for serializing this function against
* blk_mq_{start,stop}_hw_queue().
*/
bool blk_mq_queue_stopped(struct request_queue *q)
{
struct blk_mq_hw_ctx *hctx;
unsigned long i;

queue_for_each_hw_ctx(q, hctx, i)
if (blk_mq_hctx_stopped(hctx))
return true;

return false;
}
EXPORT_SYMBOL(blk_mq_queue_stopped);

/*
* This function is often used for pausing .queue_rq() by driver when
* there isn't enough resource or some conditions aren't satisfied, and
@@ -2570,7 +2550,7 @@ static void blk_mq_plug_issue_direct(struct blk_plug *plug, bool from_schedule)
break;
case BLK_STS_RESOURCE:
case BLK_STS_DEV_RESOURCE:
blk_mq_request_bypass_insert(rq, false, last);
blk_mq_request_bypass_insert(rq, false, true);
blk_mq_commit_rqs(hctx, &queued, from_schedule);
return;
default:
@@ -2122,6 +2122,7 @@ const char *ata_get_cmd_name(u8 command)
{ ATA_CMD_WRITE_QUEUED_FUA_EXT, "WRITE DMA QUEUED FUA EXT" },
{ ATA_CMD_FPDMA_READ, "READ FPDMA QUEUED" },
{ ATA_CMD_FPDMA_WRITE, "WRITE FPDMA QUEUED" },
{ ATA_CMD_NCQ_NON_DATA, "NCQ NON-DATA" },
{ ATA_CMD_FPDMA_SEND, "SEND FPDMA QUEUED" },
{ ATA_CMD_FPDMA_RECV, "RECEIVE FPDMA QUEUED" },
{ ATA_CMD_PIO_READ, "READ SECTOR(S)" },
@@ -555,7 +555,7 @@ static inline struct ublk_uring_cmd_pdu *ublk_get_uring_cmd_pdu(
return (struct ublk_uring_cmd_pdu *)&ioucmd->pdu;
}

static bool ubq_daemon_is_dying(struct ublk_queue *ubq)
static inline bool ubq_daemon_is_dying(struct ublk_queue *ubq)
{
return ubq->ubq_daemon->flags & PF_EXITING;
}
@@ -605,8 +605,9 @@ static void ublk_complete_rq(struct request *req)
}

/*
* __ublk_fail_req() may be called from abort context or ->ubq_daemon
* context during exiting, so lock is required.
* Since __ublk_rq_task_work always fails requests immediately during
* exiting, __ublk_fail_req() is only called from abort context during
* exiting. So lock is unnecessary.
*
* Also aborting may not be started yet, keep in mind that one failed
* request may be issued by block layer again.
@@ -644,8 +645,7 @@ static inline void __ublk_rq_task_work(struct request *req)
struct ublk_device *ub = ubq->dev;
int tag = req->tag;
struct ublk_io *io = &ubq->ios[tag];
bool task_exiting = current != ubq->ubq_daemon ||
(current->flags & PF_EXITING);
bool task_exiting = current != ubq->ubq_daemon || ubq_daemon_is_dying(ubq);
unsigned int mapped_bytes;

pr_devel("%s: complete: op %d, qid %d tag %d io_flags %x addr %llx\n",
@@ -680,6 +680,11 @@ static inline void __ublk_rq_task_work(struct request *req)
* do the copy work.
*/
io->flags &= ~UBLK_IO_FLAG_NEED_GET_DATA;
/* update iod->addr because ublksrv may have passed a new io buffer */
ublk_get_iod(ubq, req->tag)->addr = io->addr;
pr_devel("%s: update iod->addr: op %d, qid %d tag %d io_flags %x addr %llx\n",
__func__, io->cmd->cmd_op, ubq->q_id, req->tag, io->flags,
ublk_get_iod(ubq, req->tag)->addr);
}

mapped_bytes = ublk_map_io(ubq, req, io);
@@ -751,9 +756,25 @@ static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
if (task_work_add(ubq->ubq_daemon, &data->work, notify_mode))
goto fail;
} else {
struct io_uring_cmd *cmd = ubq->ios[rq->tag].cmd;
struct ublk_io *io = &ubq->ios[rq->tag];
struct io_uring_cmd *cmd = io->cmd;
struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);

/*
* If the check pass, we know that this is a re-issued request aborted
* previously in monitor_work because the ubq_daemon(cmd's task) is
* PF_EXITING. We cannot call io_uring_cmd_complete_in_task() anymore
* because this ioucmd's io_uring context may be freed now if no inflight
* ioucmd exists. Otherwise we may cause null-deref in ctx->fallback_work.
*
* Note: monitor_work sets UBLK_IO_FLAG_ABORTED and ends this request(releasing
* the tag). Then the request is re-started(allocating the tag) and we are here.
* Since releasing/allocating a tag implies smp_mb(), finding UBLK_IO_FLAG_ABORTED
* guarantees that here is a re-issued request aborted previously.
*/
if ((io->flags & UBLK_IO_FLAG_ABORTED))
goto fail;

pdu->req = rq;
io_uring_cmd_complete_in_task(cmd, ublk_rq_task_work_cb);
}
@@ -1146,14 +1146,15 @@ static ssize_t bd_stat_show(struct device *dev,
static ssize_t debug_stat_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
int version = 2;
int version = 1;
struct zram *zram = dev_to_zram(dev);
ssize_t ret;

down_read(&zram->init_lock);
ret = scnprintf(buf, PAGE_SIZE,
"version: %d\n%8llu\n",
"version: %d\n%8llu %8llu\n",
version,
(u64)atomic64_read(&zram->stats.writestall),
(u64)atomic64_read(&zram->stats.miss_free));
up_read(&zram->init_lock);

@@ -1351,7 +1352,7 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
{
int ret = 0;
unsigned long alloced_pages;
unsigned long handle = 0;
unsigned long handle = -ENOMEM;
unsigned int comp_len = 0;
void *src, *dst, *mem;
struct zcomp_strm *zstrm;
@@ -1369,6 +1370,7 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
}
kunmap_atomic(mem);

compress_again:
zstrm = zcomp_stream_get(zram->comp);
src = kmap_atomic(page);
ret = zcomp_compress(zstrm, src, &comp_len);
@@ -1377,20 +1379,39 @@ static int __zram_bvec_write(struct zram *zram, struct bio_vec *bvec,
if (unlikely(ret)) {
zcomp_stream_put(zram->comp);
pr_err("Compression failed! err=%d\n", ret);
zs_free(zram->mem_pool, handle);
return ret;
}

if (comp_len >= huge_class_size)
comp_len = PAGE_SIZE;

handle = zs_malloc(zram->mem_pool, comp_len,
__GFP_KSWAPD_RECLAIM |
__GFP_NOWARN |
__GFP_HIGHMEM |
__GFP_MOVABLE);

/*
* handle allocation has 2 paths:
* a) fast path is executed with preemption disabled (for
* per-cpu streams) and has __GFP_DIRECT_RECLAIM bit clear,
* since we can't sleep;
* b) slow path enables preemption and attempts to allocate
* the page with __GFP_DIRECT_RECLAIM bit set. we have to
* put per-cpu compression stream and, thus, to re-do
* the compression once handle is allocated.
*
* if we have a 'non-null' handle here then we are coming
* from the slow path and handle has already been allocated.
*/
if (IS_ERR((void *)handle))
handle = zs_malloc(zram->mem_pool, comp_len,
__GFP_KSWAPD_RECLAIM |
__GFP_NOWARN |
__GFP_HIGHMEM |
__GFP_MOVABLE);
if (IS_ERR((void *)handle)) {
zcomp_stream_put(zram->comp);
atomic64_inc(&zram->stats.writestall);
handle = zs_malloc(zram->mem_pool, comp_len,
GFP_NOIO | __GFP_HIGHMEM |
__GFP_MOVABLE);
if (!IS_ERR((void *)handle))
goto compress_again;
return PTR_ERR((void *)handle);
}
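The restored logic above is a classic two-path allocation: a cheap non-sleeping attempt first, then a blocking retry that forces the compression to be redone. A hedged userspace sketch of the control flow (the allocator names are stand-ins):

#include <stdlib.h>

static void *alloc_fast(size_t n) { (void)n; return NULL; }   /* atomic path, may fail */
static void *alloc_slow(size_t n) { return malloc(n); }       /* may "sleep" */

int main(void)
{
    size_t comp_len = 4096;
    void *handle = NULL;

compress_again:
    /* ... (re)compress the page here; it depends on per-cpu state ... */
    if (!handle)
        handle = alloc_fast(comp_len);
    if (!handle) {
        handle = alloc_slow(comp_len);  /* preemption enabled */
        if (handle)
            goto compress_again;        /* redo compression, handle already held */
        return 1;                       /* both paths failed */
    }
    /* ... copy the compressed data into handle ... */
    free(handle);
    return 0;
}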
@@ -1948,6 +1969,7 @@ static int zram_add(void)
if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE)
blk_queue_max_write_zeroes_sectors(zram->disk->queue, UINT_MAX);

blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, zram->disk->queue);
ret = device_add_disk(NULL, zram->disk, zram_disk_groups);
if (ret)
goto out_cleanup_disk;
@@ -81,6 +81,7 @@ struct zram_stats {
atomic64_t huge_pages_since; /* no. of huge pages since zram set up */
atomic64_t pages_stored; /* no. of pages currently stored */
atomic_long_t max_used_pages; /* no. of maximum pages stored */
atomic64_t writestall; /* no. of write slow paths */
atomic64_t miss_free; /* no. of missed free */
#ifdef CONFIG_ZRAM_WRITEBACK
atomic64_t bd_count; /* no. of pages in backing device */
@@ -630,7 +630,7 @@ static int __init dmi_smbios3_present(const u8 *buf)
{
if (memcmp(buf, "_SM3_", 5) == 0 &&
buf[6] < 32 && dmi_checksum(buf, buf[6])) {
dmi_ver = get_unaligned_be32(buf + 6) & 0xFFFFFF;
dmi_ver = get_unaligned_be24(buf + 7);
dmi_num = 0; /* No longer specified */
dmi_len = get_unaligned_le32(buf + 12);
dmi_base = get_unaligned_le64(buf + 16);
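The replacement reads the same three version bytes (offsets 7..9) directly instead of masking a wider read that starts at the length byte. A self-contained check of the equivalence (be32/be24 below re-implement the unaligned big-endian helpers):

#include <assert.h>
#include <stdint.h>

static uint32_t be32(const uint8_t *p)
{
    return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
           ((uint32_t)p[2] << 8) | p[3];
}

static uint32_t be24(const uint8_t *p)
{
    return ((uint32_t)p[0] << 16) | ((uint32_t)p[1] << 8) | p[2];
}

int main(void)
{
    /* _SM3_ entry point: buf[6] = length, buf[7..9] = major/minor/docrev */
    uint8_t buf[10] = { '_', 'S', 'M', '3', '_', 0, 0x18, 3, 5, 0 };

    assert((be32(buf + 6) & 0xFFFFFF) == be24(buf + 7));  /* same 3 bytes */
    assert(be24(buf + 7) == 0x030500);                    /* version 3.5.0 */
    return 0;
}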
@@ -148,30 +148,22 @@ aldebaran_mode2_perform_reset(struct amdgpu_reset_control *reset_ctl,
struct amdgpu_reset_context *reset_context)
{
struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle;
struct list_head *reset_device_list = reset_context->reset_device_list;
struct amdgpu_device *tmp_adev = NULL;
struct list_head reset_device_list;
int r = 0;

dev_dbg(adev->dev, "aldebaran perform hw reset\n");

if (reset_device_list == NULL)
return -EINVAL;

if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2) &&
reset_context->hive == NULL) {
/* Wrong context, return error */
return -EINVAL;
}

INIT_LIST_HEAD(&reset_device_list);
if (reset_context->hive) {
list_for_each_entry (tmp_adev,
&reset_context->hive->device_list,
gmc.xgmi.head)
list_add_tail(&tmp_adev->reset_list,
&reset_device_list);
} else {
list_add_tail(&reset_context->reset_req_dev->reset_list,
&reset_device_list);
}

list_for_each_entry (tmp_adev, &reset_device_list, reset_list) {
list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
mutex_lock(&tmp_adev->reset_cntl->reset_lock);
tmp_adev->reset_cntl->active_reset = AMD_RESET_METHOD_MODE2;
}
@@ -179,7 +171,7 @@ aldebaran_mode2_perform_reset(struct amdgpu_reset_control *reset_ctl,
* Mode2 reset doesn't need any sync between nodes in XGMI hive, instead launch
* them together so that they can be completed asynchronously on multiple nodes
*/
list_for_each_entry (tmp_adev, &reset_device_list, reset_list) {
list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
/* For XGMI run all resets in parallel to speed up the process */
if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
if (!queue_work(system_unbound_wq,
@@ -197,7 +189,7 @@ aldebaran_mode2_perform_reset(struct amdgpu_reset_control *reset_ctl,

/* For XGMI wait for all resets to complete before proceed */
if (!r) {
list_for_each_entry (tmp_adev, &reset_device_list, reset_list) {
list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
flush_work(&tmp_adev->reset_cntl->reset_work);
r = tmp_adev->asic_reset_res;
@@ -207,7 +199,7 @@ aldebaran_mode2_perform_reset(struct amdgpu_reset_control *reset_ctl,
}
}

list_for_each_entry (tmp_adev, &reset_device_list, reset_list) {
list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
mutex_unlock(&tmp_adev->reset_cntl->reset_lock);
tmp_adev->reset_cntl->active_reset = AMD_RESET_METHOD_NONE;
}
@@ -339,10 +331,13 @@ static int
aldebaran_mode2_restore_hwcontext(struct amdgpu_reset_control *reset_ctl,
struct amdgpu_reset_context *reset_context)
{
struct list_head *reset_device_list = reset_context->reset_device_list;
struct amdgpu_device *tmp_adev = NULL;
struct list_head reset_device_list;
int r;

if (reset_device_list == NULL)
return -EINVAL;

if (reset_context->reset_req_dev->ip_versions[MP1_HWIP][0] ==
IP_VERSION(13, 0, 2) &&
reset_context->hive == NULL) {
@@ -350,19 +345,7 @@ aldebaran_mode2_restore_hwcontext(struct amdgpu_reset_control *reset_ctl,
return -EINVAL;
}

INIT_LIST_HEAD(&reset_device_list);
if (reset_context->hive) {
list_for_each_entry (tmp_adev,
&reset_context->hive->device_list,
gmc.xgmi.head)
list_add_tail(&tmp_adev->reset_list,
&reset_device_list);
} else {
list_add_tail(&reset_context->reset_req_dev->reset_list,
&reset_device_list);
}

list_for_each_entry (tmp_adev, &reset_device_list, reset_list) {
list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
dev_info(tmp_adev->dev,
"GPU reset succeeded, trying to resume\n");
r = aldebaran_mode2_restore_ip(tmp_adev);
@@ -317,7 +317,7 @@ enum amdgpu_kiq_irq {
AMDGPU_CP_KIQ_IRQ_DRIVER0 = 0,
AMDGPU_CP_KIQ_IRQ_LAST
};

#define SRIOV_USEC_TIMEOUT 1200000 /* wait 12 * 100ms for SRIOV */
#define MAX_KIQ_REG_WAIT 5000 /* in usecs, 5ms */
#define MAX_KIQ_REG_BAILOUT_INTERVAL 5 /* in msecs, 5ms */
#define MAX_KIQ_REG_TRY 1000
@@ -96,6 +96,7 @@ struct amdgpu_amdkfd_fence {
struct amdgpu_kfd_dev {
struct kfd_dev *dev;
uint64_t vram_used;
uint64_t vram_used_aligned;
bool init_complete;
struct work_struct reset_work;
};
@@ -40,10 +40,10 @@
#define AMDGPU_USERPTR_RESTORE_DELAY_MS 1

/*
* Align VRAM allocations to 2MB to avoid fragmentation caused by 4K allocations in the tail 2MB
* Align VRAM availability to 2MB to avoid fragmentation caused by 4K allocations in the tail 2MB
* BO chunk
*/
#define VRAM_ALLOCATION_ALIGN (1 << 21)
#define VRAM_AVAILABLITY_ALIGN (1 << 21)

/* Impose limit on how much memory KFD can use */
static struct {
@@ -149,7 +149,7 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
* to avoid fragmentation caused by 4K allocations in the tail
* 2M BO chunk.
*/
vram_needed = ALIGN(size, VRAM_ALLOCATION_ALIGN);
vram_needed = size;
} else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
system_mem_needed = size;
} else if (!(alloc_flag &
@@ -182,8 +182,10 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
*/
WARN_ONCE(vram_needed && !adev,
"adev reference can't be null when vram is used");
if (adev)
if (adev) {
adev->kfd.vram_used += vram_needed;
adev->kfd.vram_used_aligned += ALIGN(vram_needed, VRAM_AVAILABLITY_ALIGN);
}
kfd_mem_limit.system_mem_used += system_mem_needed;
kfd_mem_limit.ttm_mem_used += ttm_mem_needed;

@@ -203,8 +205,10 @@ void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev,
} else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
WARN_ONCE(!adev,
"adev reference can't be null when alloc mem flags vram is set");
if (adev)
adev->kfd.vram_used -= ALIGN(size, VRAM_ALLOCATION_ALIGN);
if (adev) {
adev->kfd.vram_used -= size;
adev->kfd.vram_used_aligned -= ALIGN(size, VRAM_AVAILABLITY_ALIGN);
}
} else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
kfd_mem_limit.system_mem_used -= size;
} else if (!(alloc_flag &
@@ -1608,15 +1612,14 @@ size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev)
uint64_t reserved_for_pt =
ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size);
size_t available;

spin_lock(&kfd_mem_limit.mem_limit_lock);
available = adev->gmc.real_vram_size
- adev->kfd.vram_used
- adev->kfd.vram_used_aligned
- atomic64_read(&adev->vram_pin_size)
- reserved_for_pt;
spin_unlock(&kfd_mem_limit.mem_limit_lock);

return ALIGN_DOWN(available, VRAM_ALLOCATION_ALIGN);
return ALIGN_DOWN(available, VRAM_AVAILABLITY_ALIGN);
}

int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
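The vram_used/vram_used_aligned split above leans on the power-of-two align helpers. A hedged userspace re-implementation (matching the kernel semantics of ALIGN/ALIGN_DOWN) showing why a 4K allocation still consumes a whole 2MB chunk of reported availability:

#include <assert.h>

#define ALIGN(x, a)      (((x) + (a) - 1) & ~((a) - 1))  /* round up */
#define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))              /* round down */
#define SZ_2M (1UL << 21)

int main(void)
{
    /* a 4K allocation accounts for a full 2M chunk of availability */
    assert(ALIGN(4096UL, SZ_2M) == SZ_2M);
    /* reported free VRAM is rounded down to whole 2M chunks */
    assert(ALIGN_DOWN(SZ_2M + 4096UL, SZ_2M) == SZ_2M);
    return 0;
}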
@@ -314,7 +314,7 @@ amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
mem_channel_number = vram_info->v30.channel_num;
mem_channel_width = vram_info->v30.channel_width;
if (vram_width)
*vram_width = mem_channel_number * mem_channel_width;
*vram_width = mem_channel_number * (1 << mem_channel_width);
break;
default:
return -EINVAL;
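The fix above treats channel_width in v3.0 tables as a log2 value. A worked example with illustrative (assumed) numbers — 16 channels with channel_width = 5, i.e. 32 bits per channel:

#include <assert.h>

int main(void)
{
    unsigned int mem_channel_number = 16, mem_channel_width = 5;

    assert(mem_channel_number * mem_channel_width == 80);             /* old, wrong */
    assert(mem_channel_number * (1u << mem_channel_width) == 512);    /* new: 512-bit bus */
    return 0;
}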
@@ -837,16 +837,12 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
continue;

r = amdgpu_vm_bo_update(adev, bo_va, false);
if (r) {
mutex_unlock(&p->bo_list->bo_list_mutex);
if (r)
return r;
}

r = amdgpu_sync_fence(&p->job->sync, bo_va->last_pt_update);
if (r) {
mutex_unlock(&p->bo_list->bo_list_mutex);
if (r)
return r;
}
}

r = amdgpu_vm_handle_moved(adev, vm);
@@ -1705,7 +1705,7 @@ static ssize_t amdgpu_reset_dump_register_list_write(struct file *f,
{
struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
char reg_offset[11];
uint32_t *new, *tmp = NULL;
uint32_t *new = NULL, *tmp = NULL;
int ret, i = 0, len = 0;

do {
@@ -1747,7 +1747,8 @@ static ssize_t amdgpu_reset_dump_register_list_write(struct file *f,
ret = size;

error_free:
kfree(tmp);
if (tmp != new)
kfree(tmp);
kfree(new);
return ret;
}
@@ -4742,6 +4742,8 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
reset_list);
amdgpu_reset_reg_dumps(tmp_adev);

reset_context->reset_device_list = device_list_handle;
r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
/* If reset handler not implemented, continue; otherwise return */
if (r == -ENOSYS)
@@ -272,10 +272,6 @@ void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
/* Signal all jobs not yet scheduled */
for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
struct drm_sched_rq *rq = &sched->sched_rq[i];

if (!rq)
continue;

spin_lock(&rq->lock);
list_for_each_entry(s_entity, &rq->entities, list) {
while ((s_job = to_drm_sched_job(spsc_queue_pop(&s_entity->job_queue)))) {
@@ -37,6 +37,7 @@ struct amdgpu_reset_context {
struct amdgpu_device *reset_req_dev;
struct amdgpu_job *job;
struct amdgpu_hive_info *hive;
struct list_head *reset_device_list;
unsigned long flags;
};
@@ -637,6 +637,8 @@ struct amdgpu_ttm_tt {
#endif
};

#define ttm_to_amdgpu_ttm_tt(ptr) container_of(ptr, struct amdgpu_ttm_tt, ttm)
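The new helper above is the usual container_of() pattern: it recovers the enclosing amdgpu_ttm_tt from a pointer to its embedded ttm member, replacing the bare (void *) casts changed throughout the rest of this file. A minimal self-contained illustration (with a simplified container_of and dummy struct layouts):

#include <assert.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct ttm_tt { int dummy; };
struct amdgpu_ttm_tt {
    struct ttm_tt ttm;      /* embedded base member */
    unsigned long userptr;
};

int main(void)
{
    struct amdgpu_ttm_tt gtt = { .userptr = 0x1000 };
    struct ttm_tt *base = &gtt.ttm;

    /* recover the container from the embedded member pointer */
    assert(container_of(base, struct amdgpu_ttm_tt, ttm) == &gtt);
    return 0;
}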
#ifdef CONFIG_DRM_AMDGPU_USERPTR
/*
* amdgpu_ttm_tt_get_user_pages - get device accessible pages that back user
@@ -648,7 +650,7 @@ struct amdgpu_ttm_tt {
int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
{
struct ttm_tt *ttm = bo->tbo.ttm;
struct amdgpu_ttm_tt *gtt = (void *)ttm;
struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
unsigned long start = gtt->userptr;
struct vm_area_struct *vma;
struct mm_struct *mm;
@@ -702,7 +704,7 @@ out_unlock:
*/
bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
{
struct amdgpu_ttm_tt *gtt = (void *)ttm;
struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
bool r = false;

if (!gtt || !gtt->userptr)
@@ -751,7 +753,7 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
struct amdgpu_ttm_tt *gtt = (void *)ttm;
struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
enum dma_data_direction direction = write ?
DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
@@ -788,7 +790,7 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
struct amdgpu_ttm_tt *gtt = (void *)ttm;
struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
enum dma_data_direction direction = write ?
DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
@@ -822,7 +824,7 @@ static void amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
{
struct amdgpu_bo *abo = ttm_to_amdgpu_bo(tbo);
struct ttm_tt *ttm = tbo->ttm;
struct amdgpu_ttm_tt *gtt = (void *)ttm;
struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);

if (amdgpu_bo_encrypted(abo))
flags |= AMDGPU_PTE_TMZ;
@@ -860,7 +862,7 @@ static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
struct ttm_resource *bo_mem)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
struct amdgpu_ttm_tt *gtt = (void*)ttm;
struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
uint64_t flags;
int r;

@@ -927,7 +929,7 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct ttm_operation_ctx ctx = { false, false };
struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(bo->ttm);
struct ttm_placement placement;
struct ttm_place placements;
struct ttm_resource *tmp;
@@ -998,7 +1000,7 @@ static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
struct amdgpu_ttm_tt *gtt = (void *)ttm;
struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);

/* if the pages have userptr pinning then clear that first */
if (gtt->userptr) {
@@ -1025,7 +1027,7 @@ static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
static void amdgpu_ttm_backend_destroy(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
struct amdgpu_ttm_tt *gtt = (void *)ttm;
struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);

if (gtt->usertask)
put_task_struct(gtt->usertask);
@@ -1079,7 +1081,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_device *bdev,
struct ttm_operation_ctx *ctx)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
struct amdgpu_ttm_tt *gtt = (void *)ttm;
struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
pgoff_t i;
int ret;

@@ -1113,7 +1115,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_device *bdev,
static void amdgpu_ttm_tt_unpopulate(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
struct amdgpu_ttm_tt *gtt = (void *)ttm;
struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
struct amdgpu_device *adev;
pgoff_t i;

@@ -1182,7 +1184,7 @@ int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
/* Set TTM_TT_FLAG_EXTERNAL before populate but after create. */
bo->ttm->page_flags |= TTM_TT_FLAG_EXTERNAL;

gtt = (void *)bo->ttm;
gtt = ttm_to_amdgpu_ttm_tt(bo->ttm);
gtt->userptr = addr;
gtt->userflags = flags;

@@ -1199,7 +1201,7 @@ int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
*/
struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
{
struct amdgpu_ttm_tt *gtt = (void *)ttm;
struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);

if (gtt == NULL)
return NULL;
@@ -1218,7 +1220,7 @@ struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
unsigned long end, unsigned long *userptr)
{
struct amdgpu_ttm_tt *gtt = (void *)ttm;
struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
|
||||
unsigned long size;
|
||||
|
||||
if (gtt == NULL || !gtt->userptr)
|
||||
@ -1241,7 +1243,7 @@ bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
|
||||
*/
|
||||
bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm)
|
||||
{
|
||||
struct amdgpu_ttm_tt *gtt = (void *)ttm;
|
||||
struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
|
||||
|
||||
if (gtt == NULL || !gtt->userptr)
|
||||
return false;
|
||||
@ -1254,7 +1256,7 @@ bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm)
|
||||
*/
|
||||
bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
|
||||
{
|
||||
struct amdgpu_ttm_tt *gtt = (void *)ttm;
|
||||
struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
|
||||
|
||||
if (gtt == NULL)
|
||||
return false;
|
||||
|
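The recurring edit in this file swaps open-coded (void *) casts for a container_of()-based helper, so the conversion stays correct even if the ttm member ever moves inside struct amdgpu_ttm_tt. A minimal self-contained sketch of the idiom, with a simplified container_of and generic type names rather than the driver's own:

#include <stddef.h>
#include <stdio.h>

/* container_of: recover the outer struct from a pointer to one of its members
 * (simplified; the kernel version also type-checks the member) */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base { int id; };

struct derived {
	struct base base;	/* embedded, not necessarily first */
	int extra;
};

#define to_derived(b) container_of(b, struct derived, base)

int main(void)
{
	struct derived d = { .base = { .id = 7 }, .extra = 42 };
	struct base *b = &d.base;

	/* unlike a raw cast, this stays correct if 'base' is reordered */
	printf("extra = %d\n", to_derived(b)->extra);
	return 0;
}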
@ -496,8 +496,7 @@ static int amdgpu_vkms_sw_init(void *handle)
	adev_to_drm(adev)->mode_config.max_height = YRES_MAX;

	adev_to_drm(adev)->mode_config.preferred_depth = 24;
	/* disable prefer shadow for now due to hibernation issues */
	adev_to_drm(adev)->mode_config.prefer_shadow = 0;
	adev_to_drm(adev)->mode_config.prefer_shadow = 1;

	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;

@ -28,13 +28,44 @@
#include "navi10_enum.h"
#include "soc15_common.h"

#define regATHUB_MISC_CNTL_V3_0_1	0x00d7
#define regATHUB_MISC_CNTL_V3_0_1_BASE_IDX	0

static uint32_t athub_v3_0_get_cg_cntl(struct amdgpu_device *adev)
{
	uint32_t data;

	switch (adev->ip_versions[ATHUB_HWIP][0]) {
	case IP_VERSION(3, 0, 1):
		data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL_V3_0_1);
		break;
	default:
		data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL);
		break;
	}
	return data;
}

static void athub_v3_0_set_cg_cntl(struct amdgpu_device *adev, uint32_t data)
{
	switch (adev->ip_versions[ATHUB_HWIP][0]) {
	case IP_VERSION(3, 0, 1):
		WREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL_V3_0_1, data);
		break;
	default:
		WREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL, data);
		break;
	}
}

static void
athub_v3_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
					    bool enable)
{
	uint32_t def, data;

	def = data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL);
	def = data = athub_v3_0_get_cg_cntl(adev);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ATHUB_MGCG))
		data |= ATHUB_MISC_CNTL__CG_ENABLE_MASK;
@ -42,7 +73,7 @@ athub_v3_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
		data &= ~ATHUB_MISC_CNTL__CG_ENABLE_MASK;

	if (def != data)
		WREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL, data);
		athub_v3_0_set_cg_cntl(adev, data);
}

static void
@ -51,7 +82,7 @@ athub_v3_0_update_medium_grain_light_sleep(struct amdgpu_device *adev,
{
	uint32_t def, data;

	def = data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL);
	def = data = athub_v3_0_get_cg_cntl(adev);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ATHUB_LS))
		data |= ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK;
@ -59,7 +90,7 @@ athub_v3_0_update_medium_grain_light_sleep(struct amdgpu_device *adev,
		data &= ~ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK;

	if (def != data)
		WREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL, data);
		athub_v3_0_set_cg_cntl(adev, data);
}

int athub_v3_0_set_clockgating(struct amdgpu_device *adev,
@ -70,6 +101,7 @@ int athub_v3_0_set_clockgating(struct amdgpu_device *adev,

	switch (adev->ip_versions[ATHUB_HWIP][0]) {
	case IP_VERSION(3, 0, 0):
	case IP_VERSION(3, 0, 1):
	case IP_VERSION(3, 0, 2):
		athub_v3_0_update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
@ -88,7 +120,7 @@ void athub_v3_0_get_clockgating(struct amdgpu_device *adev, u64 *flags)
	int data;

	/* AMD_CG_SUPPORT_ATHUB_MGCG */
	data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL);
	data = athub_v3_0_get_cg_cntl(adev);
	if (data & ATHUB_MISC_CNTL__CG_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_ATHUB_MGCG;
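The athub change routes every ATHUB_MISC_CNTL access through one get/set pair that selects the register offset by IP version, so future revisions only need two new switch cases instead of edits at every call site. A standalone sketch of that indirection, with an invented register map and revision enum:

#include <stdint.h>
#include <stdio.h>

/* fake register file standing in for MMIO (illustration only) */
static uint32_t regs[2];

enum chip_rev { REV_3_0_0, REV_3_0_1 };

/* pick the per-revision offset in exactly one place */
static unsigned int cg_cntl_offset(enum chip_rev rev)
{
	return rev == REV_3_0_1 ? 1 : 0;
}

static uint32_t get_cg_cntl(enum chip_rev rev)
{
	return regs[cg_cntl_offset(rev)];
}

static void set_cg_cntl(enum chip_rev rev, uint32_t val)
{
	regs[cg_cntl_offset(rev)] = val;
}

int main(void)
{
	set_cg_cntl(REV_3_0_1, 0x1u);
	printf("rev 3.0.1 CG_CNTL = 0x%x\n", get_cg_cntl(REV_3_0_1));
	return 0;
}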
@ -2796,8 +2796,7 @@ static int dce_v10_0_sw_init(void *handle)
	adev_to_drm(adev)->mode_config.max_height = 16384;

	adev_to_drm(adev)->mode_config.preferred_depth = 24;
	/* disable prefer shadow for now due to hibernation issues */
	adev_to_drm(adev)->mode_config.prefer_shadow = 0;
	adev_to_drm(adev)->mode_config.prefer_shadow = 1;

	adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;

@ -2914,8 +2914,7 @@ static int dce_v11_0_sw_init(void *handle)
	adev_to_drm(adev)->mode_config.max_height = 16384;

	adev_to_drm(adev)->mode_config.preferred_depth = 24;
	/* disable prefer shadow for now due to hibernation issues */
	adev_to_drm(adev)->mode_config.prefer_shadow = 0;
	adev_to_drm(adev)->mode_config.prefer_shadow = 1;

	adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;

@ -2673,8 +2673,7 @@ static int dce_v6_0_sw_init(void *handle)
	adev_to_drm(adev)->mode_config.max_width = 16384;
	adev_to_drm(adev)->mode_config.max_height = 16384;
	adev_to_drm(adev)->mode_config.preferred_depth = 24;
	/* disable prefer shadow for now due to hibernation issues */
	adev_to_drm(adev)->mode_config.prefer_shadow = 0;
	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
	adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;

@ -2693,8 +2693,11 @@ static int dce_v8_0_sw_init(void *handle)
	adev_to_drm(adev)->mode_config.max_height = 16384;

	adev_to_drm(adev)->mode_config.preferred_depth = 24;
	/* disable prefer shadow for now due to hibernation issues */
	adev_to_drm(adev)->mode_config.prefer_shadow = 0;
	if (adev->asic_type == CHIP_HAWAII)
		/* disable prefer shadow for now due to hibernation issues */
		adev_to_drm(adev)->mode_config.prefer_shadow = 0;
	else
		adev_to_drm(adev)->mode_config.prefer_shadow = 1;

	adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;

@ -4846,7 +4846,7 @@ static int gfx_v10_0_sw_init(void *handle)
	case IP_VERSION(10, 3, 3):
	case IP_VERSION(10, 3, 7):
		adev->gfx.me.num_me = 1;
		adev->gfx.me.num_pipe_per_me = 2;
		adev->gfx.me.num_pipe_per_me = 1;
		adev->gfx.me.num_queue_per_pipe = 1;
		adev->gfx.mec.num_mec = 2;
		adev->gfx.mec.num_pipe_per_mec = 4;

@ -53,6 +53,7 @@
#define GFX11_MEC_HPD_SIZE	2048

#define RLCG_UCODE_LOADING_START_ADDRESS	0x00002000L
#define RLC_PG_DELAY_3_DEFAULT_GC_11_0_1	0x1388

#define regCGTT_WD_CLK_CTRL	0x5086
#define regCGTT_WD_CLK_CTRL_BASE_IDX	1
@ -5279,6 +5280,38 @@ static const struct amdgpu_rlc_funcs gfx_v11_0_rlc_funcs = {
	.update_spm_vmid = gfx_v11_0_update_spm_vmid,
};

static void gfx_v11_cntl_power_gating(struct amdgpu_device *adev, bool enable)
{
	u32 data = RREG32_SOC15(GC, 0, regRLC_PG_CNTL);

	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG))
		data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
	else
		data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;

	WREG32_SOC15(GC, 0, regRLC_PG_CNTL, data);

	// Program RLC_PG_DELAY3 for CGPG hysteresis
	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
		switch (adev->ip_versions[GC_HWIP][0]) {
		case IP_VERSION(11, 0, 1):
			WREG32_SOC15(GC, 0, regRLC_PG_DELAY_3, RLC_PG_DELAY_3_DEFAULT_GC_11_0_1);
			break;
		default:
			break;
		}
	}
}

static void gfx_v11_cntl_pg(struct amdgpu_device *adev, bool enable)
{
	amdgpu_gfx_rlc_enter_safe_mode(adev);

	gfx_v11_cntl_power_gating(adev, enable);

	amdgpu_gfx_rlc_exit_safe_mode(adev);
}

static int gfx_v11_0_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
@ -5293,6 +5326,11 @@ static int gfx_v11_0_set_powergating_state(void *handle,
	case IP_VERSION(11, 0, 2):
		amdgpu_gfx_off_ctrl(adev, enable);
		break;
	case IP_VERSION(11, 0, 1):
		gfx_v11_cntl_pg(adev, enable);
		/* TODO: Enable this when GFXOFF is ready */
		// amdgpu_gfx_off_ctrl(adev, enable);
		break;
	default:
		break;
	}
@ -5310,6 +5348,7 @@ static int gfx_v11_0_set_clockgating_state(void *handle,

	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 2):
		gfx_v11_0_update_gfx_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
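gfx_v11_cntl_pg brackets the RLC_PG_CNTL update between enter/exit safe-mode calls so the RLC cannot be reprogrammed mid-flight. The same bracket idiom reduced to a runnable sketch; the mutex and device struct are stand-ins, not the amdgpu API:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct dev {
	pthread_mutex_t safe_mode;	/* stands in for RLC safe mode */
	bool power_gating;
};

static void cntl_power_gating(struct dev *d, bool enable)
{
	d->power_gating = enable;	/* the "register write" */
}

/* the bracket: enter safe mode, do the work, always exit */
static void cntl_pg(struct dev *d, bool enable)
{
	pthread_mutex_lock(&d->safe_mode);
	cntl_power_gating(d, enable);
	pthread_mutex_unlock(&d->safe_mode);
}

int main(void)
{
	struct dev d = { PTHREAD_MUTEX_INITIALIZER, false };

	cntl_pg(&d, true);
	printf("power gating: %s\n", d.power_gating ? "on" : "off");
	return 0;
}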
@ -419,6 +419,7 @@ static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
	uint32_t seq;
	uint16_t queried_pasid;
	bool ret;
	u32 usec_timeout = amdgpu_sriov_vf(adev) ? SRIOV_USEC_TIMEOUT : adev->usec_timeout;
	struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;

@ -437,7 +438,7 @@

		amdgpu_ring_commit(ring);
		spin_unlock(&adev->gfx.kiq.ring_lock);
		r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
		r = amdgpu_fence_wait_polling(ring, seq, usec_timeout);
		if (r < 1) {
			dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
			return -ETIME;

@ -896,6 +896,7 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
	uint32_t seq;
	uint16_t queried_pasid;
	bool ret;
	u32 usec_timeout = amdgpu_sriov_vf(adev) ? SRIOV_USEC_TIMEOUT : adev->usec_timeout;
	struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;

@ -935,7 +936,7 @@

		amdgpu_ring_commit(ring);
		spin_unlock(&adev->gfx.kiq.ring_lock);
		r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
		r = amdgpu_fence_wait_polling(ring, seq, usec_timeout);
		if (r < 1) {
			dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
			up_read(&adev->reset_domain->sem);
@ -1624,12 +1625,15 @@ static int gmc_v9_0_sw_init(void *handle)
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 47);
		else
			amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
			adev->gmc.translate_further = adev->vm_manager.num_level > 1;
		break;
	case IP_VERSION(9, 4, 1):
		adev->num_vmhubs = 3;

		/* Keep the vm size same with Vega20 */
		amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
		adev->gmc.translate_further = adev->vm_manager.num_level > 1;
		break;
	default:
		break;
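The gmc fix widens the KIQ fence poll budget when running as an SR-IOV virtual function, which can be descheduled far longer than bare metal. A toy version of choosing and spending such a budget; the constants and the fake fence are invented for illustration:

#include <stdbool.h>
#include <stdio.h>

#define BARE_METAL_TIMEOUT_US	100000	/* hypothetical */
#define SRIOV_TIMEOUT_US	1000000	/* give a VF 10x more headroom */

/* fake "fence" that signals after N polls (illustration only) */
static bool fence_signaled(unsigned int *polls_left)
{
	return (*polls_left)-- == 0;
}

static int wait_polling(unsigned int budget_us, unsigned int *polls_left)
{
	while (budget_us--) {
		if (fence_signaled(polls_left))
			return 1;	/* success */
		/* a 1 us delay would go here in real code */
	}
	return 0;			/* timed out */
}

int main(void)
{
	bool is_vf = true;
	unsigned int budget = is_vf ? SRIOV_TIMEOUT_US : BARE_METAL_TIMEOUT_US;
	unsigned int polls = 500000;

	printf("wait result: %d\n", wait_polling(budget, &polls));
	return 0;
}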
@ -40,6 +40,156 @@ static void hdp_v5_2_flush_hdp(struct amdgpu_device *adev,
			0);
}

static void hdp_v5_2_update_mem_power_gating(struct amdgpu_device *adev,
					     bool enable)
{
	uint32_t hdp_clk_cntl;
	uint32_t hdp_mem_pwr_cntl;

	if (!(adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS |
				AMD_CG_SUPPORT_HDP_DS |
				AMD_CG_SUPPORT_HDP_SD)))
		return;

	hdp_clk_cntl = RREG32_SOC15(HDP, 0, regHDP_CLK_CNTL);
	hdp_mem_pwr_cntl = RREG32_SOC15(HDP, 0, regHDP_MEM_POWER_CTRL);

	/* Before doing clock/power mode switch, forced on MEM clock */
	hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
				     ATOMIC_MEM_CLK_SOFT_OVERRIDE, 1);
	hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
				     RC_MEM_CLK_SOFT_OVERRIDE, 1);
	WREG32_SOC15(HDP, 0, regHDP_CLK_CNTL, hdp_clk_cntl);

	/* disable clock and power gating before any changing */
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 ATOMIC_MEM_POWER_CTRL_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 ATOMIC_MEM_POWER_LS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 ATOMIC_MEM_POWER_DS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 ATOMIC_MEM_POWER_SD_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_CTRL_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_LS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_DS_EN, 0);
	hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
					 RC_MEM_POWER_SD_EN, 0);
	WREG32_SOC15(HDP, 0, regHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);

	/* Already disabled above. The actions below are for "enabled" only */
	if (enable) {
		/* only one clock gating mode (LS/DS/SD) can be enabled */
		if (adev->cg_flags & AMD_CG_SUPPORT_HDP_SD) {
			hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
							 HDP_MEM_POWER_CTRL,
							 ATOMIC_MEM_POWER_SD_EN, 1);
			hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
							 HDP_MEM_POWER_CTRL,
							 RC_MEM_POWER_SD_EN, 1);
		} else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
			hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
							 HDP_MEM_POWER_CTRL,
							 ATOMIC_MEM_POWER_LS_EN, 1);
			hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
							 HDP_MEM_POWER_CTRL,
							 RC_MEM_POWER_LS_EN, 1);
		} else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_DS) {
			hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
							 HDP_MEM_POWER_CTRL,
							 ATOMIC_MEM_POWER_DS_EN, 1);
			hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
							 HDP_MEM_POWER_CTRL,
							 RC_MEM_POWER_DS_EN, 1);
		}

		/* confirmed that ATOMIC/RC_MEM_POWER_CTRL_EN have to be set for SRAM LS/DS/SD */
		if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_DS |
				      AMD_CG_SUPPORT_HDP_SD)) {
			hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
							 ATOMIC_MEM_POWER_CTRL_EN, 1);
			hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
							 RC_MEM_POWER_CTRL_EN, 1);
			WREG32_SOC15(HDP, 0, regHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);
		}
	}

	/* disable MEM clock override after clock/power mode changing */
	hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
				     ATOMIC_MEM_CLK_SOFT_OVERRIDE, 0);
	hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
				     RC_MEM_CLK_SOFT_OVERRIDE, 0);
	WREG32_SOC15(HDP, 0, regHDP_CLK_CNTL, hdp_clk_cntl);
}

static void hdp_v5_2_update_medium_grain_clock_gating(struct amdgpu_device *adev,
						      bool enable)
{
	uint32_t hdp_clk_cntl;

	if (!(adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
		return;

	hdp_clk_cntl = RREG32_SOC15(HDP, 0, regHDP_CLK_CNTL);

	if (enable) {
		hdp_clk_cntl &=
			~(uint32_t)
			(HDP_CLK_CNTL__ATOMIC_MEM_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
			 HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK);
	} else {
		hdp_clk_cntl |= HDP_CLK_CNTL__ATOMIC_MEM_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
			HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK;
	}

	WREG32_SOC15(HDP, 0, regHDP_CLK_CNTL, hdp_clk_cntl);
}

static void hdp_v5_2_get_clockgating_state(struct amdgpu_device *adev,
					   u64 *flags)
{
	uint32_t tmp;

	/* AMD_CG_SUPPORT_HDP_MGCG */
	tmp = RREG32_SOC15(HDP, 0, regHDP_CLK_CNTL);
	if (!(tmp & (HDP_CLK_CNTL__ATOMIC_MEM_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
		     HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK)))
		*flags |= AMD_CG_SUPPORT_HDP_MGCG;

	/* AMD_CG_SUPPORT_HDP_LS/DS/SD */
	tmp = RREG32_SOC15(HDP, 0, regHDP_MEM_POWER_CTRL);
	if (tmp & HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_LS;
	else if (tmp & HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_DS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_DS;
	else if (tmp & HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_SD_EN_MASK)
		*flags |= AMD_CG_SUPPORT_HDP_SD;
}

static void hdp_v5_2_update_clock_gating(struct amdgpu_device *adev,
					 bool enable)
{
	hdp_v5_2_update_mem_power_gating(adev, enable);
	hdp_v5_2_update_medium_grain_clock_gating(adev, enable);
}

const struct amdgpu_hdp_funcs hdp_v5_2_funcs = {
	.flush_hdp = hdp_v5_2_flush_hdp,
	.update_clock_gating = hdp_v5_2_update_clock_gating,
	.get_clock_gating_state = hdp_v5_2_get_clockgating_state,
};
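REG_SET_FIELD is a read-modify-write helper: clear the field's mask out of the register value, then OR in the new value shifted into position. An approximation with explicit mask/shift parameters instead of the kernel's generated per-field macros:

#include <stdint.h>
#include <stdio.h>

/* clear the field, then insert the new value at its shift */
static uint32_t reg_set_field(uint32_t reg, uint32_t mask,
			      unsigned int shift, uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

#define CLK_SOFT_OVERRIDE_MASK	0x00000030u	/* hypothetical field, bits 5:4 */
#define CLK_SOFT_OVERRIDE_SHIFT	4

int main(void)
{
	uint32_t reg = 0xdeadbe0fu;

	reg = reg_set_field(reg, CLK_SOFT_OVERRIDE_MASK,
			    CLK_SOFT_OVERRIDE_SHIFT, 0x3);
	printf("reg = 0x%08x\n", reg);	/* only bits 5:4 changed */
	return 0;
}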
@ -727,6 +727,7 @@ static const struct amd_ip_funcs ih_v6_0_ip_funcs = {
static const struct amdgpu_ih_funcs ih_v6_0_funcs = {
	.get_wptr = ih_v6_0_get_wptr,
	.decode_iv = amdgpu_ih_decode_iv_helper,
	.decode_iv_ts = amdgpu_ih_decode_iv_ts_helper,
	.set_rptr = ih_v6_0_set_rptr
};

@ -518,18 +518,41 @@ static u64 mmhub_v3_0_1_get_mc_fb_offset(struct amdgpu_device *adev)
static void mmhub_v3_0_1_update_medium_grain_clock_gating(struct amdgpu_device *adev,
							  bool enable)
{
	//TODO
	uint32_t def, data;

	def = data = RREG32_SOC15(MMHUB, 0, regMM_ATC_L2_MISC_CG);

	if (enable)
		data |= MM_ATC_L2_MISC_CG__ENABLE_MASK;
	else
		data &= ~MM_ATC_L2_MISC_CG__ENABLE_MASK;

	if (def != data)
		WREG32_SOC15(MMHUB, 0, regMM_ATC_L2_MISC_CG, data);
}

static void mmhub_v3_0_1_update_medium_grain_light_sleep(struct amdgpu_device *adev,
							 bool enable)
{
	//TODO
	uint32_t def, data;

	def = data = RREG32_SOC15(MMHUB, 0, regMM_ATC_L2_MISC_CG);

	if (enable)
		data |= MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
	else
		data &= ~MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;

	if (def != data)
		WREG32_SOC15(MMHUB, 0, regMM_ATC_L2_MISC_CG, data);
}

static int mmhub_v3_0_1_set_clockgating(struct amdgpu_device *adev,
					enum amd_clockgating_state state)
{
	if (amdgpu_sriov_vf(adev))
		return 0;

	mmhub_v3_0_1_update_medium_grain_clock_gating(adev,
			state == AMD_CG_STATE_GATE);
	mmhub_v3_0_1_update_medium_grain_light_sleep(adev,
@ -539,7 +562,20 @@ static int mmhub_v3_0_1_set_clockgating(struct amdgpu_device *adev,

static void mmhub_v3_0_1_get_clockgating(struct amdgpu_device *adev, u64 *flags)
{
	//TODO
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	data = RREG32_SOC15(MMHUB, 0, regMM_ATC_L2_MISC_CG);

	/* AMD_CG_SUPPORT_MC_MGCG */
	if (data & MM_ATC_L2_MISC_CG__ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_MC_MGCG;

	/* AMD_CG_SUPPORT_MC_LS */
	if (data & MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_MC_LS;
}

const struct amdgpu_mmhub_funcs mmhub_v3_0_1_funcs = {

@ -409,9 +409,11 @@ static u32 navi10_ih_get_wptr(struct amdgpu_device *adev,
	u32 wptr, tmp;
	struct amdgpu_ih_regs *ih_regs;

	if (ih == &adev->irq.ih) {
	if (ih == &adev->irq.ih || ih == &adev->irq.ih_soft) {
		/* Only ring0 supports writeback. On other rings fall back
		 * to register-based code with overflow checking below.
		 * ih_soft ring doesn't have any backing hardware registers,
		 * update wptr and return.
		 */
		wptr = le32_to_cpu(*ih->wptr_cpu);

@ -483,6 +485,9 @@ static void navi10_ih_set_rptr(struct amdgpu_device *adev,
{
	struct amdgpu_ih_regs *ih_regs;

	if (ih == &adev->irq.ih_soft)
		return;

	if (ih->use_doorbell) {
		/* XXX check if swapping is necessary on BE */
		*ih->rptr_cpu = ih->rptr;
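The ih_soft fixes all follow one rule: the software ring has no backing hardware registers, so its write pointer is read from the memory copy and the read-pointer writeback becomes a no-op. A toy model of that split with invented types, not the amdgpu API:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct ih_ring {
	bool soft;		/* software-only ring: no backing registers */
	uint32_t wptr_cpu;	/* memory copy, written by the producer */
	uint32_t wptr_reg;	/* pretend MMIO register */
	uint32_t rptr;
};

static uint32_t ih_get_wptr(struct ih_ring *ih)
{
	/* soft rings only ever have the memory copy */
	return ih->soft ? ih->wptr_cpu : ih->wptr_reg;
}

static void ih_set_rptr(struct ih_ring *ih)
{
	if (ih->soft)
		return;	/* nothing to write back to hardware */
	/* real rings would update a doorbell or register here */
}

int main(void)
{
	struct ih_ring soft = { .soft = true, .wptr_cpu = 12 };

	soft.rptr = ih_get_wptr(&soft);	/* consume everything */
	ih_set_rptr(&soft);
	printf("rptr = %u\n", soft.rptr);
	return 0;
}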
@ -101,6 +101,16 @@ static int psp_v12_0_init_microcode(struct psp_context *psp)
	adev->psp.dtm_context.context.bin_desc.start_addr =
		(uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
		le32_to_cpu(ta_hdr->dtm.offset_bytes);

	if (adev->apu_flags & AMD_APU_IS_RENOIR) {
		adev->psp.securedisplay_context.context.bin_desc.fw_version =
			le32_to_cpu(ta_hdr->securedisplay.fw_version);
		adev->psp.securedisplay_context.context.bin_desc.size_bytes =
			le32_to_cpu(ta_hdr->securedisplay.size_bytes);
		adev->psp.securedisplay_context.context.bin_desc.start_addr =
			(uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
			le32_to_cpu(ta_hdr->securedisplay.offset_bytes);
	}
}

	return 0;

@ -20,7 +20,6 @@
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/dev_printk.h>
#include <drm/drm_drv.h>
#include <linux/vmalloc.h>
#include "amdgpu.h"

@ -546,8 +546,10 @@ static int soc21_common_early_init(void *handle)
	case IP_VERSION(11, 0, 0):
		adev->cg_flags = AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
#if 0
			AMD_CG_SUPPORT_GFX_3D_CGCG |
			AMD_CG_SUPPORT_GFX_3D_CGLS |
#endif
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_REPEATER_FGCG |
			AMD_CG_SUPPORT_GFX_FGCG |
@ -575,7 +577,9 @@ static int soc21_common_early_init(void *handle)
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS;
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_HDP_SD;
		adev->pg_flags =
			AMD_PG_SUPPORT_VCN |
			AMD_PG_SUPPORT_VCN_DPG |
@ -586,9 +590,23 @@ static int soc21_common_early_init(void *handle)
		break;
	case IP_VERSION(11, 0, 1):
		adev->cg_flags =
			AMD_CG_SUPPORT_GFX_CGCG |
			AMD_CG_SUPPORT_GFX_CGLS |
			AMD_CG_SUPPORT_GFX_MGCG |
			AMD_CG_SUPPORT_GFX_FGCG |
			AMD_CG_SUPPORT_REPEATER_FGCG |
			AMD_CG_SUPPORT_GFX_PERF_CLK |
			AMD_CG_SUPPORT_MC_MGCG |
			AMD_CG_SUPPORT_MC_LS |
			AMD_CG_SUPPORT_HDP_MGCG |
			AMD_CG_SUPPORT_HDP_LS |
			AMD_CG_SUPPORT_ATHUB_MGCG |
			AMD_CG_SUPPORT_ATHUB_LS |
			AMD_CG_SUPPORT_IH_CG |
			AMD_CG_SUPPORT_VCN_MGCG |
			AMD_CG_SUPPORT_JPEG_MGCG;
		adev->pg_flags =
			AMD_PG_SUPPORT_GFX_PG |
			AMD_PG_SUPPORT_JPEG;
		adev->external_rev_id = adev->rev_id + 0x1;
		break;
@ -683,6 +701,7 @@ static int soc21_common_set_clockgating_state(void *handle,

	switch (adev->ip_versions[NBIO_HWIP][0]) {
	case IP_VERSION(4, 3, 0):
	case IP_VERSION(4, 3, 1):
		adev->nbio.funcs->update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		adev->nbio.funcs->update_medium_grain_light_sleep(adev,
@ -690,6 +709,10 @@
		adev->hdp.funcs->update_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		break;
	case IP_VERSION(7, 7, 0):
		adev->hdp.funcs->update_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}

@ -1115,7 +1115,7 @@ static int vcn_v4_0_start(struct amdgpu_device *adev)
 *
 * Stop VCN block with dpg mode
 */
static int vcn_v4_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
static void vcn_v4_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
{
	uint32_t tmp;

@ -1133,7 +1133,6 @@ static int vcn_v4_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
	/* disable dynamic power gating mode */
	WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 0,
		 ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
	return 0;
}

/**
@ -1154,7 +1153,7 @@ static int vcn_v4_0_stop(struct amdgpu_device *adev)
		fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;

		if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
			r = vcn_v4_0_stop_dpg_mode(adev, i);
			vcn_v4_0_stop_dpg_mode(adev, i);
			continue;
		}

@ -334,9 +334,11 @@ static u32 vega10_ih_get_wptr(struct amdgpu_device *adev,
	u32 wptr, tmp;
	struct amdgpu_ih_regs *ih_regs;

	if (ih == &adev->irq.ih) {
	if (ih == &adev->irq.ih || ih == &adev->irq.ih_soft) {
		/* Only ring0 supports writeback. On other rings fall back
		 * to register-based code with overflow checking below.
		 * ih_soft ring doesn't have any backing hardware registers,
		 * update wptr and return.
		 */
		wptr = le32_to_cpu(*ih->wptr_cpu);

@ -409,6 +411,9 @@ static void vega10_ih_set_rptr(struct amdgpu_device *adev,
{
	struct amdgpu_ih_regs *ih_regs;

	if (ih == &adev->irq.ih_soft)
		return;

	if (ih->use_doorbell) {
		/* XXX check if swapping is necessary on BE */
		*ih->rptr_cpu = ih->rptr;

@ -385,9 +385,11 @@ static u32 vega20_ih_get_wptr(struct amdgpu_device *adev,
	u32 wptr, tmp;
	struct amdgpu_ih_regs *ih_regs;

	if (ih == &adev->irq.ih) {
	if (ih == &adev->irq.ih || ih == &adev->irq.ih_soft) {
		/* Only ring0 supports writeback. On other rings fall back
		 * to register-based code with overflow checking below.
		 * ih_soft ring doesn't have any backing hardware registers,
		 * update wptr and return.
		 */
		wptr = le32_to_cpu(*ih->wptr_cpu);

@ -461,6 +463,9 @@ static void vega20_ih_set_rptr(struct amdgpu_device *adev,
{
	struct amdgpu_ih_regs *ih_regs;

	if (ih == &adev->irq.ih_soft)
		return;

	if (ih->use_doorbell) {
		/* XXX check if swapping is necessary on BE */
		*ih->rptr_cpu = ih->rptr;

@ -874,7 +874,7 @@ static int kfd_ioctl_wait_events(struct file *filp, struct kfd_process *p,
	err = kfd_wait_on_events(p, args->num_events,
			(void __user *)args->events_ptr,
			(args->wait_for_all != 0),
			args->timeout, &args->wait_result);
			&args->timeout, &args->wait_result);

	return err;
}

@ -102,13 +102,18 @@ static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd)

	switch (sdma_version) {
	case IP_VERSION(6, 0, 0):
	case IP_VERSION(6, 0, 1):
	case IP_VERSION(6, 0, 2):
		/* Reserve 1 for paging and 1 for gfx */
		kfd->device_info.num_reserved_sdma_queues_per_engine = 2;
		/* BIT(0)=engine-0 queue-0; BIT(1)=engine-1 queue-0; BIT(2)=engine-0 queue-1; ... */
		kfd->device_info.reserved_sdma_queues_bitmap = 0xFULL;
		break;
	case IP_VERSION(6, 0, 1):
		/* Reserve 1 for paging and 1 for gfx */
		kfd->device_info.num_reserved_sdma_queues_per_engine = 2;
		/* BIT(0)=engine-0 queue-0; BIT(1)=engine-0 queue-1; ... */
		kfd->device_info.reserved_sdma_queues_bitmap = 0x3ULL;
		break;
	default:
		break;
	}

@ -894,7 +894,8 @@ static long user_timeout_to_jiffies(uint32_t user_timeout_ms)
	return msecs_to_jiffies(user_timeout_ms) + 1;
}

static void free_waiters(uint32_t num_events, struct kfd_event_waiter *waiters)
static void free_waiters(uint32_t num_events, struct kfd_event_waiter *waiters,
			 bool undo_auto_reset)
{
	uint32_t i;

@ -903,6 +904,9 @@ static void free_waiters(uint32_t num_events, struct kfd_event_waiter *waiters)
		spin_lock(&waiters[i].event->lock);
		remove_wait_queue(&waiters[i].event->wq,
				  &waiters[i].wait);
		if (undo_auto_reset && waiters[i].activated &&
		    waiters[i].event && waiters[i].event->auto_reset)
			set_event(waiters[i].event);
		spin_unlock(&waiters[i].event->lock);
	}

@ -911,7 +915,7 @@ static void free_waiters(uint32_t num_events, struct kfd_event_waiter *waiters)

int kfd_wait_on_events(struct kfd_process *p,
		       uint32_t num_events, void __user *data,
		       bool all, uint32_t user_timeout_ms,
		       bool all, uint32_t *user_timeout_ms,
		       uint32_t *wait_result)
{
	struct kfd_event_data __user *events =
@ -920,7 +924,7 @@ int kfd_wait_on_events(struct kfd_process *p,
	int ret = 0;

	struct kfd_event_waiter *event_waiters = NULL;
	long timeout = user_timeout_to_jiffies(user_timeout_ms);
	long timeout = user_timeout_to_jiffies(*user_timeout_ms);

	event_waiters = alloc_event_waiters(num_events);
	if (!event_waiters) {
@ -970,15 +974,11 @@ int kfd_wait_on_events(struct kfd_process *p,
		}

		if (signal_pending(current)) {
			/*
			 * This is wrong when a nonzero, non-infinite timeout
			 * is specified. We need to use
			 * ERESTARTSYS_RESTARTBLOCK, but struct restart_block
			 * contains a union with data for each user and it's
			 * in generic kernel code that I don't want to
			 * touch yet.
			 */
			ret = -ERESTARTSYS;
			if (*user_timeout_ms != KFD_EVENT_TIMEOUT_IMMEDIATE &&
			    *user_timeout_ms != KFD_EVENT_TIMEOUT_INFINITE)
				*user_timeout_ms = jiffies_to_msecs(
					max(0l, timeout-1));
			break;
		}

@ -1019,7 +1019,7 @@ int kfd_wait_on_events(struct kfd_process *p,
				event_waiters, events);

out_unlock:
	free_waiters(num_events, event_waiters);
	free_waiters(num_events, event_waiters, ret == -ERESTARTSYS);
	mutex_unlock(&p->event_mutex);
out:
	if (ret)
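The kfd_events rework makes the wait restartable: when a signal interrupts it, the remaining time is written back through the user_timeout_ms pointer, so a restarted call does not wait the full interval again. A user-space analogue of that bookkeeping using clock_gettime; the "interrupt" is simulated:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

/* returns 0 on completion, -1 if "interrupted"; *timeout_ms is updated */
static int wait_events(uint32_t *timeout_ms, int interrupt_after_ms)
{
	uint64_t start = now_ms();
	uint64_t elapsed;

	for (;;) {
		elapsed = now_ms() - start;
		if (elapsed >= *timeout_ms) {
			*timeout_ms = 0;
			return 0;		/* timed out normally */
		}
		if ((int)elapsed >= interrupt_after_ms) {
			/* write back what is left so a retry picks it up */
			*timeout_ms -= (uint32_t)elapsed;
			return -1;		/* pretend -ERESTARTSYS */
		}
	}
}

int main(void)
{
	uint32_t timeout = 50;

	while (wait_events(&timeout, 10) == -1)
		printf("interrupted, %u ms left\n", timeout);
	return 0;
}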
@ -1317,7 +1317,7 @@ void kfd_event_free_process(struct kfd_process *p);
int kfd_event_mmap(struct kfd_process *process, struct vm_area_struct *vma);
int kfd_wait_on_events(struct kfd_process *p,
		       uint32_t num_events, void __user *data,
		       bool all, uint32_t user_timeout_ms,
		       bool all, uint32_t *user_timeout_ms,
		       uint32_t *wait_result);
void kfd_signal_event_interrupt(u32 pasid, uint32_t partial_id,
				uint32_t valid_id_bits);

@ -541,7 +541,6 @@ svm_range_vram_node_new(struct amdgpu_device *adev, struct svm_range *prange,
		kfree(svm_bo);
		return -ESRCH;
	}
	svm_bo->svms = prange->svms;
	svm_bo->eviction_fence =
		amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
					   mm,
@ -3273,7 +3272,6 @@ int svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence *fence)
static void svm_range_evict_svm_bo_worker(struct work_struct *work)
{
	struct svm_range_bo *svm_bo;
	struct kfd_process *p;
	struct mm_struct *mm;
	int r = 0;

@ -3281,13 +3279,12 @@ static void svm_range_evict_svm_bo_worker(struct work_struct *work)
	if (!svm_bo_ref_unless_zero(svm_bo))
		return; /* svm_bo was freed while eviction was pending */

	/* svm_range_bo_release destroys this worker thread. So during
	 * the lifetime of this thread, kfd_process and mm will be valid.
	 */
	p = container_of(svm_bo->svms, struct kfd_process, svms);
	mm = p->mm;
	if (!mm)
	if (mmget_not_zero(svm_bo->eviction_fence->mm)) {
		mm = svm_bo->eviction_fence->mm;
	} else {
		svm_range_bo_unref(svm_bo);
		return;
	}

	mmap_read_lock(mm);
	spin_lock(&svm_bo->list_lock);
@ -3305,8 +3302,7 @@ static void svm_range_evict_svm_bo_worker(struct work_struct *work)

		mutex_lock(&prange->migrate_mutex);
		do {
			r = svm_migrate_vram_to_ram(prange,
						    svm_bo->eviction_fence->mm,
			r = svm_migrate_vram_to_ram(prange, mm,
						    KFD_MIGRATE_TRIGGER_TTM_EVICTION);
		} while (!r && prange->actual_loc && --retries);

@ -3324,6 +3320,7 @@ static void svm_range_evict_svm_bo_worker(struct work_struct *work)
	}
	spin_unlock(&svm_bo->list_lock);
	mmap_read_unlock(mm);
	mmput(mm);

	dma_fence_signal(&svm_bo->eviction_fence->base);
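The eviction-worker fix stops trusting a kfd_process looked up indirectly and instead pins the mm itself with mmget_not_zero()/mmput(). The underlying idiom is "take a reference only while the count is still non-zero"; a standalone sketch with C11 atomics and a generic object in place of the kernel's mm_struct:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct obj { atomic_int users; };

/* take a reference only if the object is still live */
static bool get_not_zero(struct obj *o)
{
	int c = atomic_load(&o->users);

	while (c != 0) {
		if (atomic_compare_exchange_weak(&o->users, &c, c + 1))
			return true;	/* we now hold a reference */
	}
	return false;			/* already dead; don't touch it */
}

static void put(struct obj *o)
{
	if (atomic_fetch_sub(&o->users, 1) == 1)
		printf("last reference dropped, free here\n");
}

int main(void)
{
	struct obj o = { 1 };

	if (get_not_zero(&o))		/* worker pins the object */
		put(&o);		/* worker done */
	put(&o);			/* original owner */
	return 0;
}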
@ -46,7 +46,6 @@ struct svm_range_bo {
	spinlock_t list_lock;
	struct amdgpu_amdkfd_fence *eviction_fence;
	struct work_struct eviction_work;
	struct svm_range_list *svms;
	uint32_t evicting;
	struct work_struct release_work;
};

@ -1392,8 +1392,8 @@ static int kfd_build_p2p_node_entry(struct kfd_topology_device *dev,

static int kfd_create_indirect_link_prop(struct kfd_topology_device *kdev, int gpu_node)
{
	struct kfd_iolink_properties *gpu_link, *tmp_link, *cpu_link;
	struct kfd_iolink_properties *props = NULL, *props2 = NULL;
	struct kfd_iolink_properties *gpu_link, *cpu_link;
	struct kfd_topology_device *cpu_dev;
	int ret = 0;
	int i, num_cpu;
@ -1416,16 +1416,19 @@ static int kfd_create_indirect_link_prop(struct kfd_topology_device *kdev, int g
			continue;

		/* find CPU <--> CPU links */
		cpu_link = NULL;
		cpu_dev = kfd_topology_device_by_proximity_domain(i);
		if (cpu_dev) {
			list_for_each_entry(cpu_link,
			list_for_each_entry(tmp_link,
					    &cpu_dev->io_link_props, list) {
				if (cpu_link->node_to == gpu_link->node_to)
				if (tmp_link->node_to == gpu_link->node_to) {
					cpu_link = tmp_link;
					break;
				}
			}
		}

		if (cpu_link->node_to != gpu_link->node_to)
		if (!cpu_link)
			return -ENOMEM;

		/* CPU <--> CPU <--> GPU, GPU node*/
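The topology fix addresses a classic list_for_each_entry pitfall: after the loop the iterator does not point at a valid element, so testing it post-loop dereferences garbage when nothing matched. The safe shape keeps a separate "found" pointer, sketched here with a plain singly linked list:

#include <stddef.h>
#include <stdio.h>

struct link {
	int node_to;
	struct link *next;
};

/* return the matching element, or NULL; never test the iterator
 * after the loop, it is past the end by then */
static struct link *find_link(struct link *head, int node_to)
{
	struct link *found = NULL;
	struct link *tmp;

	for (tmp = head; tmp; tmp = tmp->next) {
		if (tmp->node_to == node_to) {
			found = tmp;
			break;
		}
	}
	return found;
}

int main(void)
{
	struct link c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
	struct link *hit = find_link(&a, 2);

	printf(hit ? "found %d\n" : "no match\n", hit ? hit->node_to : 0);
	return 0;
}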
@ -3825,8 +3825,11 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
	adev_to_drm(adev)->mode_config.max_height = 16384;

	adev_to_drm(adev)->mode_config.preferred_depth = 24;
	/* disable prefer shadow for now due to hibernation issues */
	adev_to_drm(adev)->mode_config.prefer_shadow = 0;
	if (adev->asic_type == CHIP_HAWAII)
		/* disable prefer shadow for now due to hibernation issues */
		adev_to_drm(adev)->mode_config.prefer_shadow = 0;
	else
		adev_to_drm(adev)->mode_config.prefer_shadow = 1;
	/* indicates support for immediate flip */
	adev_to_drm(adev)->mode_config.async_page_flip = true;

@ -4135,6 +4138,7 @@ static void register_backlight_device(struct amdgpu_display_manager *dm,
	}
}

static void amdgpu_set_panel_orientation(struct drm_connector *connector);

/*
 * In this architecture, the association
@ -4326,6 +4330,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
				adev_to_drm(adev)->vblank_disable_immediate = false;
			}
		}
		amdgpu_set_panel_orientation(&aconnector->base);
	}

	/* Software is initialized. Now we can register interrupt handlers. */
@ -6684,6 +6689,10 @@ static void amdgpu_set_panel_orientation(struct drm_connector *connector)
	    connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
		return;

	mutex_lock(&connector->dev->mode_config.mutex);
	amdgpu_dm_connector_get_modes(connector);
	mutex_unlock(&connector->dev->mode_config.mutex);

	encoder = amdgpu_dm_connector_to_encoder(connector);
	if (!encoder)
		return;
@ -6728,8 +6737,6 @@ static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
		 * restored here.
		 */
		amdgpu_dm_update_freesync_caps(connector, edid);

		amdgpu_set_panel_orientation(connector);
	} else {
		amdgpu_dm_connector->num_modes = 0;
	}

@ -660,7 +660,7 @@ static int get_plane_modifiers(struct amdgpu_device *adev, unsigned int plane_ty
		add_gfx10_1_modifiers(adev, mods, &size, &capacity);
		break;
	case AMDGPU_FAMILY_GC_11_0_0:
	case AMDGPU_FAMILY_GC_11_0_2:
	case AMDGPU_FAMILY_GC_11_0_1:
		add_gfx11_modifiers(adev, mods, &size, &capacity);
		break;
	}
@ -1412,7 +1412,7 @@ static bool dm_plane_format_mod_supported(struct drm_plane *plane,
		}
		break;
	case AMDGPU_FAMILY_GC_11_0_0:
	case AMDGPU_FAMILY_GC_11_0_2:
	case AMDGPU_FAMILY_GC_11_0_1:
		switch (AMD_FMT_MOD_GET(TILE, modifier)) {
		case AMD_FMT_MOD_TILE_GFX11_256K_R_X:
		case AMD_FMT_MOD_TILE_GFX9_64K_R_X:

@ -100,3 +100,24 @@ void convert_float_matrix(
		matrix[i] = (uint16_t)reg_value;
	}
}

static uint32_t find_gcd(uint32_t a, uint32_t b)
{
	uint32_t remainder = 0;
	while (b != 0) {
		remainder = a % b;
		a = b;
		b = remainder;
	}
	return a;
}

void reduce_fraction(uint32_t num, uint32_t den,
		uint32_t *out_num, uint32_t *out_den)
{
	uint32_t gcd = 0;

	gcd = find_gcd(num, den);
	*out_num = num / gcd;
	*out_den = den / gcd;
}
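find_gcd above is the iterative Euclidean algorithm, and reduce_fraction divides both terms by the result. A small usage sketch, restating the two routines outside the driver plus a main; it assumes num and den are not both zero:

#include <stdint.h>
#include <stdio.h>

/* iterative Euclid: gcd(a, b) = gcd(b, a mod b) until b hits 0 */
static uint32_t find_gcd(uint32_t a, uint32_t b)
{
	while (b != 0) {
		uint32_t remainder = a % b;

		a = b;
		b = remainder;
	}
	return a;
}

static void reduce_fraction(uint32_t num, uint32_t den,
			    uint32_t *out_num, uint32_t *out_den)
{
	uint32_t gcd = find_gcd(num, den);

	*out_num = num / gcd;
	*out_den = den / gcd;
}

int main(void)
{
	uint32_t n, d;

	reduce_fraction(48000, 18000, &n, &d);	/* gcd is 6000 */
	printf("48000/18000 -> %u/%u\n", n, d);	/* prints 8/3 */
	return 0;
}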
@ -38,6 +38,9 @@ void convert_float_matrix(
	struct fixed31_32 *flt,
	uint32_t buffer_size);

void reduce_fraction(uint32_t num, uint32_t den,
		uint32_t *out_num, uint32_t *out_den);

static inline unsigned int log_2(unsigned int num)
{
	return ilog2(num);

@ -337,7 +337,7 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
		break;
	}

	case AMDGPU_FAMILY_GC_11_0_2: {
	case AMDGPU_FAMILY_GC_11_0_1: {
		struct clk_mgr_dcn314 *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);

		if (clk_mgr == NULL) {
@ -397,7 +397,7 @@ void dc_destroy_clk_mgr(struct clk_mgr *clk_mgr_base)
		dcn32_clk_mgr_destroy(clk_mgr);
		break;

	case AMDGPU_FAMILY_GC_11_0_2:
	case AMDGPU_FAMILY_GC_11_0_1:
		dcn314_clk_mgr_destroy(clk_mgr);
		break;

@ -24,10 +24,9 @@
 */

#include "dccg.h"
#include "clk_mgr_internal.h"
#include "rn_clk_mgr.h"

#include "dcn20/dcn20_clk_mgr.h"
#include "rn_clk_mgr.h"
#include "dml/dcn20/dcn20_fpu.h"

#include "dce100/dce_clk_mgr.h"

@ -28,6 +28,7 @@

#include "clk_mgr.h"
#include "dm_pp_smu.h"
#include "clk_mgr_internal.h"

extern struct wm_table ddr4_wm_table_gs;
extern struct wm_table lpddr4_wm_table_gs;

@ -307,16 +307,6 @@ static void dcn314_enable_pme_wa(struct clk_mgr *clk_mgr_base)
	dcn314_smu_enable_pme_wa(clk_mgr);
}

void dcn314_init_clocks(struct clk_mgr *clk_mgr)
{
	memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
	// Assumption is that boot state always supports pstate
	clk_mgr->clks.p_state_change_support = true;
	clk_mgr->clks.prev_p_state_change_support = true;
	clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN;
	clk_mgr->clks.zstate_support = DCN_ZSTATE_SUPPORT_UNKNOWN;
}

bool dcn314_are_clock_states_equal(struct dc_clocks *a,
		struct dc_clocks *b)
{
@ -425,7 +415,7 @@ static struct wm_table lpddr5_wm_table = {
	}
};

static DpmClocks_t dummy_clocks;
static DpmClocks314_t dummy_clocks;

static struct dcn314_watermarks dummy_wms = { 0 };

@ -510,7 +500,7 @@ static void dcn314_notify_wm_ranges(struct clk_mgr *clk_mgr_base)
static void dcn314_get_dpm_table_from_smu(struct clk_mgr_internal *clk_mgr,
		struct dcn314_smu_dpm_clks *smu_dpm_clks)
{
	DpmClocks_t *table = smu_dpm_clks->dpm_clks;
	DpmClocks314_t *table = smu_dpm_clks->dpm_clks;

	if (!clk_mgr->smu_ver)
		return;
@ -527,6 +517,26 @@ static void dcn314_get_dpm_table_from_smu(struct clk_mgr_internal *clk_mgr,
	dcn314_smu_transfer_dpm_table_smu_2_dram(clk_mgr);
}

static inline bool is_valid_clock_value(uint32_t clock_value)
{
	return clock_value > 1 && clock_value < 100000;
}

static unsigned int convert_wck_ratio(uint8_t wck_ratio)
{
	switch (wck_ratio) {
	case WCK_RATIO_1_2:
		return 2;

	case WCK_RATIO_1_4:
		return 4;

	default:
		break;
	}
	return 1;
}

static uint32_t find_max_clk_value(const uint32_t clocks[], uint32_t num_clocks)
{
	uint32_t max = 0;
@ -540,89 +550,127 @@ static uint32_t find_max_clk_value(const uint32_t clocks[], uint32_t num_clocks)
	return max;
}

static unsigned int find_clk_for_voltage(
		const DpmClocks_t *clock_table,
		const uint32_t clocks[],
		unsigned int voltage)
{
	int i;
	int max_voltage = 0;
	int clock = 0;

	for (i = 0; i < NUM_SOC_VOLTAGE_LEVELS; i++) {
		if (clock_table->SocVoltage[i] == voltage) {
			return clocks[i];
		} else if (clock_table->SocVoltage[i] >= max_voltage &&
				clock_table->SocVoltage[i] < voltage) {
			max_voltage = clock_table->SocVoltage[i];
			clock = clocks[i];
		}
	}

	ASSERT(clock);
	return clock;
}

static void dcn314_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk_mgr,
		struct integrated_info *bios_info,
		const DpmClocks_t *clock_table)
		const DpmClocks314_t *clock_table)
{
	int i, j;
	struct clk_bw_params *bw_params = clk_mgr->base.bw_params;
	uint32_t max_dispclk = 0, max_dppclk = 0;
	struct clk_limit_table_entry def_max = bw_params->clk_table.entries[bw_params->clk_table.num_entries - 1];
	uint32_t max_pstate = 0, max_fclk = 0, min_pstate = 0, max_dispclk = 0, max_dppclk = 0;
	int i;

	j = -1;

	ASSERT(NUM_DF_PSTATE_LEVELS <= MAX_NUM_DPM_LVL);

	/* Find lowest DPM, FCLK is filled in reverse order*/

	for (i = NUM_DF_PSTATE_LEVELS - 1; i >= 0; i--) {
		if (clock_table->DfPstateTable[i].FClk != 0) {
			j = i;
			break;
	/* Find highest valid fclk pstate */
	for (i = 0; i < clock_table->NumDfPstatesEnabled; i++) {
		if (is_valid_clock_value(clock_table->DfPstateTable[i].FClk) &&
		    clock_table->DfPstateTable[i].FClk > max_fclk) {
			max_fclk = clock_table->DfPstateTable[i].FClk;
			max_pstate = i;
		}
	}

	if (j == -1) {
		/* clock table is all 0s, just use our own hardcode */
		ASSERT(0);
		return;
	}
	/* We expect the table to contain at least one valid fclk entry. */
	ASSERT(is_valid_clock_value(max_fclk));

	bw_params->clk_table.num_entries = j + 1;

	/* dispclk and dppclk can be max at any voltage, same number of levels for both */
	/* Dispclk and dppclk can be max at any voltage, same number of levels for both */
	if (clock_table->NumDispClkLevelsEnabled <= NUM_DISPCLK_DPM_LEVELS &&
	    clock_table->NumDispClkLevelsEnabled <= NUM_DPPCLK_DPM_LEVELS) {
		max_dispclk = find_max_clk_value(clock_table->DispClocks, clock_table->NumDispClkLevelsEnabled);
		max_dppclk = find_max_clk_value(clock_table->DppClocks, clock_table->NumDispClkLevelsEnabled);
	} else {
		/* Invalid number of entries in the table from PMFW. */
		ASSERT(0);
	}

	for (i = 0; i < bw_params->clk_table.num_entries; i++, j--) {
		bw_params->clk_table.entries[i].fclk_mhz = clock_table->DfPstateTable[j].FClk;
		bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[j].MemClk;
		bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[j].Voltage;
		switch (clock_table->DfPstateTable[j].WckRatio) {
		case WCK_RATIO_1_2:
			bw_params->clk_table.entries[i].wck_ratio = 2;
			break;
		case WCK_RATIO_1_4:
			bw_params->clk_table.entries[i].wck_ratio = 4;
			break;
		default:
			bw_params->clk_table.entries[i].wck_ratio = 1;
	/* Base the clock table on dcfclk, need at least one entry regardless of pmfw table */
	for (i = 0; i < clock_table->NumDcfClkLevelsEnabled; i++) {
		uint32_t min_fclk = clock_table->DfPstateTable[0].FClk;
		int j;

		for (j = 1; j < clock_table->NumDfPstatesEnabled; j++) {
			if (is_valid_clock_value(clock_table->DfPstateTable[j].FClk) &&
			    clock_table->DfPstateTable[j].FClk < min_fclk &&
			    clock_table->DfPstateTable[j].Voltage <= clock_table->SocVoltage[i]) {
				min_fclk = clock_table->DfPstateTable[j].FClk;
				min_pstate = j;
			}
		}
		bw_params->clk_table.entries[i].dcfclk_mhz = find_clk_for_voltage(clock_table, clock_table->DcfClocks, clock_table->DfPstateTable[j].Voltage);
		bw_params->clk_table.entries[i].socclk_mhz = find_clk_for_voltage(clock_table, clock_table->SocClocks, clock_table->DfPstateTable[j].Voltage);

		/* First search defaults for the clocks we don't read using closest lower or equal default dcfclk */
		for (j = bw_params->clk_table.num_entries - 1; j > 0; j--)
			if (bw_params->clk_table.entries[j].dcfclk_mhz <= clock_table->DcfClocks[i])
				break;

		bw_params->clk_table.entries[i].phyclk_mhz = bw_params->clk_table.entries[j].phyclk_mhz;
		bw_params->clk_table.entries[i].phyclk_d18_mhz = bw_params->clk_table.entries[j].phyclk_d18_mhz;
		bw_params->clk_table.entries[i].dtbclk_mhz = bw_params->clk_table.entries[j].dtbclk_mhz;

		/* Now update clocks we do read */
		bw_params->clk_table.entries[i].fclk_mhz = min_fclk;
		bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[min_pstate].MemClk;
		bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[min_pstate].Voltage;
		bw_params->clk_table.entries[i].dcfclk_mhz = clock_table->DcfClocks[i];
		bw_params->clk_table.entries[i].socclk_mhz = clock_table->SocClocks[i];
		bw_params->clk_table.entries[i].dispclk_mhz = max_dispclk;
		bw_params->clk_table.entries[i].dppclk_mhz = max_dppclk;
	}
		bw_params->clk_table.entries[i].wck_ratio = convert_wck_ratio(
			clock_table->DfPstateTable[min_pstate].WckRatio);
	};

	/* Make sure to include at least one entry at highest pstate */
	if (max_pstate != min_pstate || i == 0) {
		if (i > MAX_NUM_DPM_LVL - 1)
			i = MAX_NUM_DPM_LVL - 1;

		bw_params->clk_table.entries[i].fclk_mhz = max_fclk;
		bw_params->clk_table.entries[i].memclk_mhz = clock_table->DfPstateTable[max_pstate].MemClk;
		bw_params->clk_table.entries[i].voltage = clock_table->DfPstateTable[max_pstate].Voltage;
		bw_params->clk_table.entries[i].dcfclk_mhz = find_max_clk_value(clock_table->DcfClocks, NUM_DCFCLK_DPM_LEVELS);
		bw_params->clk_table.entries[i].socclk_mhz = find_max_clk_value(clock_table->SocClocks, NUM_SOCCLK_DPM_LEVELS);
		bw_params->clk_table.entries[i].dispclk_mhz = max_dispclk;
		bw_params->clk_table.entries[i].dppclk_mhz = max_dppclk;
		bw_params->clk_table.entries[i].wck_ratio = convert_wck_ratio(
			clock_table->DfPstateTable[max_pstate].WckRatio);
		i++;
	}
	bw_params->clk_table.num_entries = i--;

	/* Make sure all highest clocks are included*/
	bw_params->clk_table.entries[i].socclk_mhz = find_max_clk_value(clock_table->SocClocks, NUM_SOCCLK_DPM_LEVELS);
	bw_params->clk_table.entries[i].dispclk_mhz = find_max_clk_value(clock_table->DispClocks, NUM_DISPCLK_DPM_LEVELS);
	bw_params->clk_table.entries[i].dppclk_mhz = find_max_clk_value(clock_table->DppClocks, NUM_DPPCLK_DPM_LEVELS);
	ASSERT(clock_table->DcfClocks[i] == find_max_clk_value(clock_table->DcfClocks, NUM_DCFCLK_DPM_LEVELS));
	bw_params->clk_table.entries[i].phyclk_mhz = def_max.phyclk_mhz;
	bw_params->clk_table.entries[i].phyclk_d18_mhz = def_max.phyclk_d18_mhz;
	bw_params->clk_table.entries[i].dtbclk_mhz = def_max.dtbclk_mhz;

	/*
	 * Set any 0 clocks to max default setting. Not an issue for
	 * power since we aren't doing switching in such case anyway
	 */
	for (i = 0; i < bw_params->clk_table.num_entries; i++) {
		if (!bw_params->clk_table.entries[i].fclk_mhz) {
			bw_params->clk_table.entries[i].fclk_mhz = def_max.fclk_mhz;
			bw_params->clk_table.entries[i].memclk_mhz = def_max.memclk_mhz;
			bw_params->clk_table.entries[i].voltage = def_max.voltage;
		}
		if (!bw_params->clk_table.entries[i].dcfclk_mhz)
			bw_params->clk_table.entries[i].dcfclk_mhz = def_max.dcfclk_mhz;
		if (!bw_params->clk_table.entries[i].socclk_mhz)
			bw_params->clk_table.entries[i].socclk_mhz = def_max.socclk_mhz;
		if (!bw_params->clk_table.entries[i].dispclk_mhz)
			bw_params->clk_table.entries[i].dispclk_mhz = def_max.dispclk_mhz;
		if (!bw_params->clk_table.entries[i].dppclk_mhz)
			bw_params->clk_table.entries[i].dppclk_mhz = def_max.dppclk_mhz;
		if (!bw_params->clk_table.entries[i].phyclk_mhz)
			bw_params->clk_table.entries[i].phyclk_mhz = def_max.phyclk_mhz;
		if (!bw_params->clk_table.entries[i].phyclk_d18_mhz)
			bw_params->clk_table.entries[i].phyclk_d18_mhz = def_max.phyclk_d18_mhz;
		if (!bw_params->clk_table.entries[i].dtbclk_mhz)
			bw_params->clk_table.entries[i].dtbclk_mhz = def_max.dtbclk_mhz;
	}
	ASSERT(bw_params->clk_table.entries[i-1].dcfclk_mhz);
	bw_params->vram_type = bios_info->memory_type;
	bw_params->num_channels = bios_info->ma_channel_number;
	bw_params->num_channels = bios_info->ma_channel_number ? bios_info->ma_channel_number : 4;

	for (i = 0; i < WM_SET_COUNT; i++) {
		bw_params->wm_table.entries[i].wm_inst = i;
@ -641,7 +689,7 @@ static struct clk_mgr_funcs dcn314_funcs = {
	.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
	.get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz,
	.update_clocks = dcn314_update_clocks,
	.init_clocks = dcn314_init_clocks,
	.init_clocks = dcn31_init_clocks,
	.enable_pme_wa = dcn314_enable_pme_wa,
	.are_clock_states_equal = dcn314_are_clock_states_equal,
	.notify_wm_ranges = dcn314_notify_wm_ranges
@ -681,10 +729,10 @@ void dcn314_clk_mgr_construct(
	}
	ASSERT(clk_mgr->smu_wm_set.wm_set);

	smu_dpm_clks.dpm_clks = (DpmClocks_t *)dm_helpers_allocate_gpu_mem(
	smu_dpm_clks.dpm_clks = (DpmClocks314_t *)dm_helpers_allocate_gpu_mem(
			clk_mgr->base.base.ctx,
			DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
			sizeof(DpmClocks_t),
			sizeof(DpmClocks314_t),
			&smu_dpm_clks.mc_address.quad_part);

	if (smu_dpm_clks.dpm_clks == NULL) {
@ -729,7 +777,7 @@ void dcn314_clk_mgr_construct(
	if (clk_mgr->base.base.ctx->dc->debug.pstate_enabled) {
		dcn314_get_dpm_table_from_smu(&clk_mgr->base, &smu_dpm_clks);

		if (ctx->dc_bios && ctx->dc_bios->integrated_info) {
		if (ctx->dc_bios && ctx->dc_bios->integrated_info && ctx->dc->config.use_default_clock_table == false) {
			dcn314_clk_mgr_helper_populate_bw_params(
					&clk_mgr->base,
					ctx->dc_bios->integrated_info,
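dcn314_clk_mgr_helper_populate_bw_params treats PMFW data defensively: is_valid_clock_value() rejects sentinel and absurd values, and any entries still zero afterwards are back-filled from known-good defaults. A compact sketch of that validate-then-backfill pattern with made-up bounds and table:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_LEVELS 4

/* reject sentinel and out-of-range values from firmware */
static bool is_valid_clock_value(uint32_t mhz)
{
	return mhz > 1 && mhz < 100000;
}

int main(void)
{
	uint32_t fw_table[NUM_LEVELS] = { 0, 400, 1, 800 };	/* raw firmware data */
	uint32_t def_max = 1000;				/* known-good default */
	int i;

	for (i = 0; i < NUM_LEVELS; i++) {
		if (!is_valid_clock_value(fw_table[i]))
			fw_table[i] = def_max;			/* back-fill */
		printf("level %d: %u MHz\n", i, fw_table[i]);
	}
	return 0;
}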
@ -42,7 +42,7 @@ struct clk_mgr_dcn314 {
|
||||
|
||||
bool dcn314_are_clock_states_equal(struct dc_clocks *a,
|
||||
struct dc_clocks *b);
|
||||
void dcn314_init_clocks(struct clk_mgr *clk_mgr);
|
||||
|
||||
void dcn314_update_clocks(struct clk_mgr *clk_mgr_base,
|
||||
struct dc_state *context,
|
||||
bool safe_to_lower);
|
||||
|
@@ -36,6 +36,37 @@ typedef enum {
 	WCK_RATIO_MAX
 } WCK_RATIO_e;

+typedef struct {
+	uint32_t FClk;
+	uint32_t MemClk;
+	uint32_t Voltage;
+	uint8_t  WckRatio;
+	uint8_t  Spare[3];
+} DfPstateTable314_t;
+
+//Freq in MHz
+//Voltage in milli volts with 2 fractional bits
+typedef struct {
+	uint32_t DcfClocks[NUM_DCFCLK_DPM_LEVELS];
+	uint32_t DispClocks[NUM_DISPCLK_DPM_LEVELS];
+	uint32_t DppClocks[NUM_DPPCLK_DPM_LEVELS];
+	uint32_t SocClocks[NUM_SOCCLK_DPM_LEVELS];
+	uint32_t VClocks[NUM_VCN_DPM_LEVELS];
+	uint32_t DClocks[NUM_VCN_DPM_LEVELS];
+	uint32_t SocVoltage[NUM_SOC_VOLTAGE_LEVELS];
+	DfPstateTable314_t DfPstateTable[NUM_DF_PSTATE_LEVELS];
+
+	uint8_t  NumDcfClkLevelsEnabled;
+	uint8_t  NumDispClkLevelsEnabled; //Applies to both Dispclk and Dppclk
+	uint8_t  NumSocClkLevelsEnabled;
+	uint8_t  VcnClkLevelsEnabled;     //Applies to both Vclk and Dclk
+	uint8_t  NumDfPstatesEnabled;
+	uint8_t  spare[3];
+
+	uint32_t MinGfxClk;
+	uint32_t MaxGfxClk;
+} DpmClocks314_t;
+
 struct dcn314_watermarks {
 	// Watermarks
 	WatermarkRowGeneric_t WatermarkRow[WM_COUNT][NUM_WM_RANGES];
@@ -43,7 +74,7 @@ struct dcn314_watermarks {
 };

 struct dcn314_smu_dpm_clks {
-	DpmClocks_t *dpm_clks;
+	DpmClocks314_t *dpm_clks;
 	union large_integer mc_address;
 };

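DpmClocks314_t is the shared-memory layout the SMU populates; only the first Num*Enabled rows of each array are meaningful, and Voltage carries two fractional bits (quarter-millivolt units). A sketch of how a consumer might walk the DF P-state rows, with a simplified stand-in struct and level count:

/* Sketch of consuming a table shaped like DpmClocks314_t: the SMU
 * fills shared memory and only the first Num*Enabled rows are valid.
 * The row struct and level count are simplified stand-ins for the
 * real firmware layout. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_DF_PSTATE_LEVELS 4

typedef struct {
	uint32_t FClk;     /* MHz */
	uint32_t MemClk;   /* MHz */
	uint32_t Voltage;  /* millivolts with 2 fractional bits */
	uint8_t  WckRatio;
	uint8_t  Spare[3];
} DfPstateRow;

int main(void)
{
	DfPstateRow table[NUM_DF_PSTATE_LEVELS] = {
		{  400,  800, 2800, 0, { 0 } },
		{ 1800, 1600, 3600, 1, { 0 } },
	};
	uint8_t num_enabled = 2; /* NumDfPstatesEnabled analogue */

	for (uint8_t i = 0; i < num_enabled; i++)
		printf("pstate %u: fclk=%" PRIu32 " MHz, voltage=%" PRIu32
		       ".%02" PRIu32 " mV\n",
		       (unsigned)i, table[i].FClk,
		       table[i].Voltage / 4, (table[i].Voltage % 4) * 25);
	return 0;
}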
@@ -1074,8 +1074,15 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
 		struct dc_stream_state *old_stream =
 				dc->current_state->res_ctx.pipe_ctx[i].stream;
 		bool should_disable = true;
-		bool pipe_split_change =
-			context->res_ctx.pipe_ctx[i].top_pipe != dc->current_state->res_ctx.pipe_ctx[i].top_pipe;
+		bool pipe_split_change = false;
+
+		if ((context->res_ctx.pipe_ctx[i].top_pipe) &&
+			(dc->current_state->res_ctx.pipe_ctx[i].top_pipe))
+			pipe_split_change = context->res_ctx.pipe_ctx[i].top_pipe->pipe_idx !=
+				dc->current_state->res_ctx.pipe_ctx[i].top_pipe->pipe_idx;
+		else
+			pipe_split_change = context->res_ctx.pipe_ctx[i].top_pipe !=
+				dc->current_state->res_ctx.pipe_ctx[i].top_pipe;

 		for (j = 0; j < context->stream_count; j++) {
 			if (old_stream == context->streams[j]) {
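The old initializer compared top_pipe pointers across two different states, but pipe_ctx arrays are per-state, so the raw addresses can differ even when the topology is identical; comparing pipe_idx when both sides exist tests the actual split configuration. A condensed sketch of the fixed comparison, with stand-in types:

/* Sketch of the comparison fix: pipe_ctx structs live in per-state
 * arrays, so pointers into two different states need not compare
 * equal. Comparing stable indices (falling back to the pointers only
 * when a side is NULL) tests topology rather than addresses. */
#include <stdbool.h>
#include <stdio.h>

struct pipe {
	int pipe_idx;
	struct pipe *top_pipe;
};

static bool split_changed(const struct pipe *new_p, const struct pipe *old_p)
{
	if (new_p->top_pipe && old_p->top_pipe)
		return new_p->top_pipe->pipe_idx != old_p->top_pipe->pipe_idx;
	/* NULL vs non-NULL (or both NULL) is still meaningful. */
	return new_p->top_pipe != old_p->top_pipe;
}

int main(void)
{
	struct pipe old_top = { 0, NULL }, new_top = { 0, NULL };
	struct pipe old_p = { 1, &old_top }, new_p = { 1, &new_top };

	/* Same topology, different state copies: no change reported. */
	printf("changed=%d\n", split_changed(&new_p, &old_p)); /* 0 */
	return 0;
}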
@@ -3229,7 +3236,7 @@ static void commit_planes_for_stream(struct dc *dc,
 				odm_pipe->ttu_regs.min_ttu_vblank = MAX_TTU;
 		}

-	if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed) {
+	if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
 		if (top_pipe_to_program &&
 			top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
 			if (should_use_dmub_lock(stream->link)) {
@@ -3247,7 +3254,6 @@ static void commit_planes_for_stream(struct dc *dc,
 				top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable(
 						top_pipe_to_program->stream_res.tg);
 		}
-	}

 	if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
 		if (dc->hwss.subvp_pipe_control_lock)
@@ -3466,7 +3472,7 @@ static void commit_planes_for_stream(struct dc *dc,
 			dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
 	}

-	if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed) {
+	if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
 		if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
 			top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
 				top_pipe_to_program->stream_res.tg,
@@ -3493,21 +3499,19 @@ static void commit_planes_for_stream(struct dc *dc,
 					top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_disable(
 						top_pipe_to_program->stream_res.tg);
 			}
 		}

-	if (update_type != UPDATE_TYPE_FAST) {
+	if (update_type != UPDATE_TYPE_FAST)
 		dc->hwss.post_unlock_program_front_end(dc, context);

-		/* Since phantom pipe programming is moved to post_unlock_program_front_end,
-		 * move the SubVP lock to after the phantom pipes have been setup
-		 */
-		if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
-			if (dc->hwss.subvp_pipe_control_lock)
-				dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
-		} else {
-			if (dc->hwss.subvp_pipe_control_lock)
-				dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use);
-		}
-	}
+	/* Since phantom pipe programming is moved to post_unlock_program_front_end,
+	 * move the SubVP lock to after the phantom pipes have been setup
+	 */
+	if (should_lock_all_pipes && dc->hwss.interdependent_update_lock) {
+		if (dc->hwss.subvp_pipe_control_lock)
+			dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, NULL, subvp_prev_use);
+	} else {
+		if (dc->hwss.subvp_pipe_control_lock)
+			dc->hwss.subvp_pipe_control_lock(dc, context, false, should_lock_all_pipes, top_pipe_to_program, subvp_prev_use);
+	}

 	// Fire manual trigger only when bottom plane is flipped
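The brace removal hoists the SubVP unlock out of the slow-path-only branch: the front end is still programmed only for non-fast updates, but the SubVP pipe-control lock is now released afterwards for every update type, once the phantom pipes are set up. An order-of-operations sketch, with illustrative function names rather than the DC hardware-sequencer API:

/* Order-of-operations sketch for the hunk above: the SubVP "unlock"
 * must follow front-end programming and must run for every update
 * type, so it was hoisted out of the slow-path-only branch. */
#include <stdbool.h>
#include <stdio.h>

enum update_type { UPDATE_TYPE_FAST, UPDATE_TYPE_FULL };

static void program_front_end(void) { printf("program front end\n"); }
static void subvp_lock(bool lock) { printf("subvp lock=%d\n", lock); }

static void commit(enum update_type type)
{
	if (type != UPDATE_TYPE_FAST)
		program_front_end();

	/* Runs unconditionally now: phantom pipes are set up above,
	 * so the SubVP lock is dropped for fast updates too. */
	subvp_lock(false);
}

int main(void)
{
	commit(UPDATE_TYPE_FAST);
	commit(UPDATE_TYPE_FULL);
	return 0;
}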
@@ -4292,7 +4296,7 @@ bool dc_is_dmub_outbox_supported(struct dc *dc)
 	    !dc->debug.dpia_debug.bits.disable_dpia)
 		return true;

-	if (dc->ctx->asic_id.chip_family == AMDGPU_FAMILY_GC_11_0_2 &&
+	if (dc->ctx->asic_id.chip_family == AMDGPU_FAMILY_GC_11_0_1 &&
 	    !dc->debug.dpia_debug.bits.disable_dpia)
 		return true;

@@ -4340,6 +4344,7 @@ void dc_enable_dmub_outbox(struct dc *dc)
 	struct dc_context *dc_ctx = dc->ctx;

 	dmub_enable_outbox_notification(dc_ctx->dmub_srv);
+	DC_LOG_DC("%s: dmub outbox notifications enabled\n", __func__);
 }

 /**
@@ -3372,7 +3372,7 @@ bool dc_link_setup_psr(struct dc_link *link,
 		switch(link->ctx->asic_id.chip_family) {
 		case FAMILY_YELLOW_CARP:
 		case AMDGPU_FAMILY_GC_10_3_6:
-		case AMDGPU_FAMILY_GC_11_0_2:
+		case AMDGPU_FAMILY_GC_11_0_1:
 			if(!dc->debug.disable_z10)
 				psr_context->psr_level.bits.SKIP_CRTC_DISABLE = false;
 			break;
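The PSR path gets the same family retarget; on this branch the driver clears SKIP_CRTC_DISABLE unless Z10 is debug-disabled, so the CRTC is allowed to power down for the deeper idle state. A sketch of the level-bit selection, with stand-in types mirroring the bitfield in the diff:

/* Sketch of the PSR-level decision in this hunk: on families with
 * Z10, SKIP_CRTC_DISABLE is cleared (unless Z10 is debug-disabled)
 * so the CRTC can actually power down. Types are stand-ins. */
#include <stdbool.h>
#include <stdio.h>

union psr_level {
	struct {
		unsigned SKIP_CRTC_DISABLE : 1;
	} bits;
	unsigned u32;
};

int main(void)
{
	bool disable_z10 = false; /* dc->debug.disable_z10 analogue */
	union psr_level level = { .bits = { .SKIP_CRTC_DISABLE = 1 } };

	if (!disable_z10)
		level.bits.SKIP_CRTC_DISABLE = false;

	printf("skip crtc disable: %u\n", level.bits.SKIP_CRTC_DISABLE);
	return 0;
}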
@@ -169,7 +169,7 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
 		if (ASICREV_IS_GC_11_0_2(asic_id.hw_internal_rev))
 			dc_version = DCN_VERSION_3_21;
 		break;
-	case AMDGPU_FAMILY_GC_11_0_2:
+	case AMDGPU_FAMILY_GC_11_0_1:
 		dc_version = DCN_VERSION_3_14;
 		break;
 	default:
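Taken together, the DMUB-outbox, PSR, and version-parsing hunks are one fix: DCN 3.1.4 parts report family GC 11.0.1, not GC 11.0.2, so every chip_family test keyed on the old value is retargeted. A condensed sketch of the resulting mapping; the enum values are illustrative, not the amdgpu ABI:

/* Condensed sketch of the mapping these hunks fix: DCN 3.1.4 keys
 * off family GC 11.0.1. Enum values here are illustrative only. */
#include <stdio.h>

enum family { AMDGPU_FAMILY_GC_11_0_1 = 148, OTHER_FAMILY = 0 };
enum dcn_version { DCN_VERSION_3_14, DCN_VERSION_UNKNOWN };

static enum dcn_version parse_family(enum family f)
{
	switch (f) {
	case AMDGPU_FAMILY_GC_11_0_1:
		return DCN_VERSION_3_14; /* DCN 3.1.4 */
	default:
		return DCN_VERSION_UNKNOWN;
	}
}

int main(void)
{
	printf("GC 11.0.1 -> %s\n",
	       parse_family(AMDGPU_FAMILY_GC_11_0_1) == DCN_VERSION_3_14
	       ? "DCN 3.1.4" : "unknown");
	return 0;
}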
Some files were not shown because too many files have changed in this diff.