Merge branch 'topic/memalloc-cleanup' into for-next

ALSA: Drop hackish GFP giveaway for CONTINUOUS pages

This is a series of cleanup patches for dropping the current hackish way
of passing the GFP_* flags for CONTINUOUS and VMALLOC memory allocations.
There are only three users of this legacy feature, and all of them seem
superfluous. If any driver requires a memory restriction in the future,
it can now pass the proper device pointer for specifying the DMA mask.

Link: https://lore.kernel.org/r/20220823115740.14123-1-tiwai@suse.de
Signed-off-by: Takashi Iwai <tiwai@suse.de>
commit 384c687fb4
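For orientation, here is roughly what this cleanup means at a driver call
site. This is a hedged sketch, not code from the series itself: the pcm
object, the card->dev device and the buffer sizes are placeholder
assumptions. Previously a GFP restriction was smuggled through the
device-pointer argument; now it is expressed via the device's DMA mask:

    /* Before: GFP flags encoded into the "device pointer" argument (hackish) */
    snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
                                   snd_dma_continuous_data(GFP_KERNEL | __GFP_DMA32),
                                   64 * 1024, 64 * 1024);

    /* After: set the coherent DMA mask on the real device and pass that
     * device; NULL still means a plain GFP_KERNEL allocation with no
     * restriction. (Error handling of dma_set_mask_and_coherent() is
     * omitted for brevity.)
     */
    dma_set_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
    snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
                                   card->dev, 64 * 1024, 64 * 1024);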
@@ -59,7 +59,7 @@ Like with atomic_t, the rule of thumb is:
 - RMW operations that have a return value are fully ordered.

 - RMW operations that are conditional are unordered on FAILURE,
-otherwise the above rules apply. In the case of test_and_{}_bit() operations,
+otherwise the above rules apply. In the case of test_and_set_bit_lock(),
 if the bit in memory is unchanged by the operation then it is deemed to have
 failed.
@@ -233,6 +233,7 @@ allOf:
 - allwinner,sun8i-a83t-tcon-lcd
 - allwinner,sun8i-v3s-tcon
 - allwinner,sun9i-a80-tcon-lcd
+- allwinner,sun20i-d1-tcon-lcd

 then:
 properties:
@@ -252,6 +253,7 @@ allOf:
 - allwinner,sun8i-a83t-tcon-tv
 - allwinner,sun8i-r40-tcon-tv
 - allwinner,sun9i-a80-tcon-tv
+- allwinner,sun20i-d1-tcon-tv

 then:
 properties:
@@ -278,6 +280,7 @@ allOf:
 - allwinner,sun9i-a80-tcon-lcd
 - allwinner,sun4i-a10-tcon
 - allwinner,sun8i-a83t-tcon-lcd
+- allwinner,sun20i-d1-tcon-lcd

 then:
 required:
@@ -294,6 +297,7 @@ allOf:
 - allwinner,sun8i-a23-tcon
 - allwinner,sun8i-a33-tcon
 - allwinner,sun8i-a83t-tcon-lcd
+- allwinner,sun20i-d1-tcon-lcd

 then:
 properties:
@@ -14,7 +14,7 @@ MAC node:
 - mac-address : The 6-byte MAC address. If present, it is the default
 MAC address.
 - internal-phy : phandle to the internal PHY node
-- phy-handle : phandle the external PHY node
+- phy-handle : phandle to the external PHY node

 Internal PHY node:
 - compatible : Should be "qcom,fsm9900-emac-sgmii" or "qcom,qdf2432-emac-sgmii".
@@ -47,12 +47,6 @@ properties:
 description:
 Properties for single LDO regulator.

-properties:
-regulator-name:
-pattern: "^LDO[1-5]$"
-description:
-should be "LDO1", ..., "LDO5"
-
 unevaluatedProperties: false

 "^BUCK[1-6]$":
@@ -62,11 +56,6 @@ properties:
 Properties for single BUCK regulator.

-properties:
-regulator-name:
-pattern: "^BUCK[1-6]$"
-description:
-should be "BUCK1", ..., "BUCK6"

 nxp,dvs-run-voltage:
 $ref: "/schemas/types.yaml#/definitions/uint32"
 minimum: 600000
@@ -10,7 +10,7 @@ description:
 See spi-peripheral-props.yaml for more info.

 maintainers:
-- Pratyush Yadav <p.yadav@ti.com>
+- Vaishnav Achath <vaishnav.a@ti.com>

 properties:
 # cdns,qspi-nor.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Cadence Quad SPI controller

 maintainers:
-- Pratyush Yadav <p.yadav@ti.com>
+- Vaishnav Achath <vaishnav.a@ti.com>

 allOf:
 - $ref: spi-controller.yaml#
@@ -16,7 +16,7 @@ description:
 their own separate schema that should be referenced from here.

 maintainers:
-- Pratyush Yadav <p.yadav@ti.com>
+- Mark Brown <broonie@kernel.org>

 properties:
 reg:
@@ -42,7 +42,7 @@ properties:
 description:
 Address ranges of the thermal registers. If more then one range is given
 the first one must be the common registers followed by each sensor
-according the datasheet.
+according to the datasheet.
 minItems: 1
 maxItems: 4
@@ -525,8 +525,8 @@ followed by a test macro::
 If you need to expose a compiler capability to makefiles and/or C source files,
 `CC_HAS_` is the recommended prefix for the config option::

-config CC_HAS_ASM_GOTO
-	def_bool $(success,$(srctree)/scripts/gcc-goto.sh $(CC))
+config CC_HAS_FOO
+	def_bool $(success,$(srctree)/scripts/cc-check-foo.sh $(CC))

 Build as module only
 ~~~~~~~~~~~~~~~~~~~~
@@ -3565,13 +3565,17 @@ given size.
 The second argument (type) and the third argument (device pointer) are
 dependent on the bus. For normal devices, pass the device pointer
 (typically identical as ``card->dev``) to the third argument with
-``SNDRV_DMA_TYPE_DEV`` type. For the continuous buffer unrelated to the
+``SNDRV_DMA_TYPE_DEV`` type.
+
+For the continuous buffer unrelated to the
 bus can be pre-allocated with ``SNDRV_DMA_TYPE_CONTINUOUS`` type.
 You can pass NULL to the device pointer in that case, which is the
 default mode implying to allocate with ``GFP_KERNEL`` flag.
-If you need a different GFP flag, you can pass it by encoding the flag
-into the device pointer via a special macro
-:c:func:`snd_dma_continuous_data()`.
+If you need a restricted (lower) address, set up the coherent DMA mask
+bits for the device, and pass the device pointer, like the normal
+device memory allocations. For this type, it's still allowed to pass
+NULL to the device pointer, too, if no address restriction is needed.

 For the scatter-gather buffers, use ``SNDRV_DMA_TYPE_DEV_SG`` with the
 device pointer (see the `Non-Contiguous Buffers`_ section).
@@ -3811,15 +3815,6 @@ arguments here. Since each vmalloc call should succeed at any time,
 we don't need to pre-allocate the buffers like other continuous
 pages.

-If you need the 32bit DMA allocation, pass the device pointer encoded
-by :c:func:`snd_dma_continuous_data()` with ``GFP_KERNEL|__GFP_DMA32``
-argument.
-
-::
-
-  snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_VMALLOC,
-          snd_dma_continuous_data(GFP_KERNEL | __GFP_DMA32), 0, 0);
-
 Proc Interface
 ==============
@@ -33,7 +33,7 @@ EXAMPLE
 =======
 In the example below, **rtla timerlat hist** is set to run for *10* minutes,
 in the cpus *0-4*, *skipping zero* only lines. Moreover, **rtla timerlat
-hist** will change the priority of the *timelat* threads to run under
+hist** will change the priority of the *timerlat* threads to run under
 *SCHED_DEADLINE* priority, with a *10us* runtime every *1ms* period. The
 *1ms* period is also passed to the *timerlat* tracer::
@@ -2178,7 +2178,7 @@ M: Jean-Marie Verdun <verdun@hpe.com>
 M: Nick Hawkins <nick.hawkins@hpe.com>
 S: Maintained
 F: Documentation/devicetree/bindings/arm/hpe,gxp.yaml
-F: Documentation/devicetree/bindings/spi/hpe,gxp-spi.yaml
+F: Documentation/devicetree/bindings/spi/hpe,gxp-spifi.yaml
 F: Documentation/devicetree/bindings/timer/hpe,gxp-timer.yaml
 F: arch/arm/boot/dts/hpe-bmc*
 F: arch/arm/boot/dts/hpe-gxp*
@@ -5145,6 +5145,7 @@ T: git git://git.samba.org/sfrench/cifs-2.6.git
 F: Documentation/admin-guide/cifs/
 F: fs/cifs/
+F: fs/smbfs_common/
 F: include/uapi/linux/cifs

 COMPACTPCI HOTPLUG CORE
 M: Scott Murray <scott@spiteful.org>
Makefile
@@ -2,7 +2,7 @@
 VERSION = 6
 PATCHLEVEL = 0
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
 NAME = Hurr durr I'ma ninja sloth

 # *DOCUMENTATION*
@@ -1113,13 +1113,11 @@ vmlinux-alldirs := $(sort $(vmlinux-dirs) Documentation \
 $(patsubst %/,%,$(filter %/, $(core-) \
 $(drivers-) $(libs-))))

-subdir-modorder := $(addsuffix modules.order,$(filter %/, \
-$(core-y) $(core-m) $(libs-y) $(libs-m) \
-$(drivers-y) $(drivers-m)))
-
 build-dirs := $(vmlinux-dirs)
 clean-dirs := $(vmlinux-alldirs)

+subdir-modorder := $(addsuffix /modules.order, $(build-dirs))
+
 # Externally visible symbols (used by link-vmlinux.sh)
 KBUILD_VMLINUX_OBJS := $(head-y) $(patsubst %/,%/built-in.a, $(core-y))
 KBUILD_VMLINUX_OBJS += $(addsuffix built-in.a, $(filter %/, $(libs-y)))
@@ -53,7 +53,6 @@ config KPROBES
 config JUMP_LABEL
 bool "Optimize very unlikely/likely branches"
 depends on HAVE_ARCH_JUMP_LABEL
-depends on CC_HAS_ASM_GOTO
 select OBJTOOL if HAVE_JUMP_LABEL_HACK
 help
 This option enables a transparent branch optimization that
@@ -1361,7 +1360,7 @@ config HAVE_PREEMPT_DYNAMIC_CALL

 config HAVE_PREEMPT_DYNAMIC_KEY
 bool
-depends on HAVE_ARCH_JUMP_LABEL && CC_HAS_ASM_GOTO
+depends on HAVE_ARCH_JUMP_LABEL
 select HAVE_PREEMPT_DYNAMIC
 help
 An architecture should select this if it can handle the preemption
@@ -929,6 +929,10 @@ bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);
 (system_supports_mte() && \
 test_bit(KVM_ARCH_FLAG_MTE_ENABLED, &(kvm)->arch.flags))

+#define kvm_supports_32bit_el0() \
+(system_supports_32bit_el0() && \
+!static_branch_unlikely(&arm64_mismatched_32bit_el0))
+
 int kvm_trng_call(struct kvm_vcpu *vcpu);
 #ifdef CONFIG_KVM
 extern phys_addr_t hyp_mem_base;
@@ -75,9 +75,11 @@ struct kvm_regs {

 /* KVM_ARM_SET_DEVICE_ADDR ioctl id encoding */
 #define KVM_ARM_DEVICE_TYPE_SHIFT 0
-#define KVM_ARM_DEVICE_TYPE_MASK (0xffff << KVM_ARM_DEVICE_TYPE_SHIFT)
+#define KVM_ARM_DEVICE_TYPE_MASK GENMASK(KVM_ARM_DEVICE_TYPE_SHIFT + 15, \
+KVM_ARM_DEVICE_TYPE_SHIFT)
 #define KVM_ARM_DEVICE_ID_SHIFT 16
-#define KVM_ARM_DEVICE_ID_MASK (0xffff << KVM_ARM_DEVICE_ID_SHIFT)
+#define KVM_ARM_DEVICE_ID_MASK GENMASK(KVM_ARM_DEVICE_ID_SHIFT + 15, \
+KVM_ARM_DEVICE_ID_SHIFT)

 /* Supported device IDs */
 #define KVM_ARM_DEVICE_VGIC_V2 0
@@ -757,8 +757,7 @@ static bool vcpu_mode_is_bad_32bit(struct kvm_vcpu *vcpu)
 if (likely(!vcpu_mode_is_32bit(vcpu)))
 return false;

-return !system_supports_32bit_el0() ||
-static_branch_unlikely(&arm64_mismatched_32bit_el0);
+return !kvm_supports_32bit_el0();
 }

 /**
@@ -242,7 +242,7 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
 u64 mode = (*(u64 *)valp) & PSR_AA32_MODE_MASK;
 switch (mode) {
 case PSR_AA32_MODE_USR:
-if (!system_supports_32bit_el0())
+if (!kvm_supports_32bit_el0())
 return -EINVAL;
 break;
 case PSR_AA32_MODE_FIQ:
@@ -993,7 +993,7 @@ transparent_hugepage_adjust(struct kvm *kvm, struct kvm_memory_slot *memslot,
 * THP doesn't start to split while we are adjusting the
 * refcounts.
 *
-* We are sure this doesn't happen, because mmu_notifier_retry
+* We are sure this doesn't happen, because mmu_invalidate_retry
 * was successful and we are holding the mmu_lock, so if this
 * THP is trying to split, it will be blocked in the mmu
 * notifier before touching any of the pages, specifically
@@ -1188,9 +1188,9 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 return ret;
 }

-mmu_seq = vcpu->kvm->mmu_notifier_seq;
+mmu_seq = vcpu->kvm->mmu_invalidate_seq;
 /*
-* Ensure the read of mmu_notifier_seq happens before we call
+* Ensure the read of mmu_invalidate_seq happens before we call
 * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk
 * the page we just got a reference to gets unmapped before we have a
 * chance to grab the mmu_lock, which ensure that if the page gets
@@ -1246,7 +1246,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 else
 write_lock(&kvm->mmu_lock);
 pgt = vcpu->arch.hw_mmu->pgt;
-if (mmu_notifier_retry(kvm, mmu_seq))
+if (mmu_invalidate_retry(kvm, mmu_seq))
 goto out_unlock;

 /*
@@ -652,7 +652,7 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 */
 val = ((pmcr & ~ARMV8_PMU_PMCR_MASK)
 | (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E);
-if (!system_supports_32bit_el0())
+if (!kvm_supports_32bit_el0())
 val |= ARMV8_PMU_PMCR_LC;
 __vcpu_sys_reg(vcpu, r->reg) = val;
 }
@@ -701,7 +701,7 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 val = __vcpu_sys_reg(vcpu, PMCR_EL0);
 val &= ~ARMV8_PMU_PMCR_MASK;
 val |= p->regval & ARMV8_PMU_PMCR_MASK;
-if (!system_supports_32bit_el0())
+if (!kvm_supports_32bit_el0())
 val |= ARMV8_PMU_PMCR_LC;
 __vcpu_sys_reg(vcpu, PMCR_EL0) = val;
 kvm_pmu_handle_pmcr(vcpu, val);
@@ -81,7 +81,6 @@ extern struct acpi_vector_group msi_group[MAX_IO_PICS];
 #define GSI_MIN_PCH_IRQ LOONGSON_PCH_IRQ_BASE
 #define GSI_MAX_PCH_IRQ (LOONGSON_PCH_IRQ_BASE + 256 - 1)

-extern int find_pch_pic(u32 gsi);
 struct acpi_madt_lio_pic;
 struct acpi_madt_eio_pic;
 struct acpi_madt_ht_pic;
@@ -84,8 +84,6 @@


 #define KVM_MAX_VCPUS 16
-/* memory slots that does not exposed to userspace */
-#define KVM_PRIVATE_MEM_SLOTS 0

 #define KVM_HALT_POLL_NS_DEFAULT 500000
@@ -615,17 +615,17 @@ retry:
 * Used to check for invalidations in progress, of the pfn that is
 * returned by pfn_to_pfn_prot below.
 */
-mmu_seq = kvm->mmu_notifier_seq;
+mmu_seq = kvm->mmu_invalidate_seq;
 /*
-* Ensure the read of mmu_notifier_seq isn't reordered with PTE reads in
-* gfn_to_pfn_prot() (which calls get_user_pages()), so that we don't
+* Ensure the read of mmu_invalidate_seq isn't reordered with PTE reads
+* in gfn_to_pfn_prot() (which calls get_user_pages()), so that we don't
 * risk the page we get a reference to getting unmapped before we have a
-* chance to grab the mmu_lock without mmu_notifier_retry() noticing.
+* chance to grab the mmu_lock without mmu_invalidate_retry() noticing.
 *
 * This smp_rmb() pairs with the effective smp_wmb() of the combination
 * of the pte_unmap_unlock() after the PTE is zapped, and the
 * spin_lock() in kvm_mmu_notifier_invalidate_<page|range_end>() before
-* mmu_notifier_seq is incremented.
+* mmu_invalidate_seq is incremented.
 */
 smp_rmb();

@@ -638,7 +638,7 @@ retry:

 spin_lock(&kvm->mmu_lock);
 /* Check if an invalidation has taken place since we got pfn */
-if (mmu_notifier_retry(kvm, mmu_seq)) {
+if (mmu_invalidate_retry(kvm, mmu_seq)) {
 /*
 * This can happen when mappings are changed asynchronously, but
 * also synchronously if a COW is triggered by
@@ -50,7 +50,8 @@
 stw r13, PT_R13(sp)
 stw r14, PT_R14(sp)
 stw r15, PT_R15(sp)
-stw r2, PT_ORIG_R2(sp)
+movi r24, -1
+stw r24, PT_ORIG_R2(sp)
 stw r7, PT_ORIG_R7(sp)

 stw ra, PT_RA(sp)
@@ -74,6 +74,8 @@ extern void show_regs(struct pt_regs *);
 ((struct pt_regs *)((unsigned long)current_thread_info() + THREAD_SIZE)\
 - 1)

+#define force_successful_syscall_return() (current_pt_regs()->orig_r2 = -1)
+
 int do_syscall_trace_enter(void);
 void do_syscall_trace_exit(void);
 #endif /* __ASSEMBLY__ */
@@ -185,6 +185,7 @@ ENTRY(handle_system_call)
 ldw r5, PT_R5(sp)

 local_restart:
+stw r2, PT_ORIG_R2(sp)
 /* Check that the requested system call is within limits */
 movui r1, __NR_syscalls
 bgeu r2, r1, ret_invsyscall
@@ -192,7 +193,6 @@ local_restart:
 movhi r11, %hiadj(sys_call_table)
 add r1, r1, r11
 ldw r1, %lo(sys_call_table)(r1)
-beq r1, r0, ret_invsyscall

 /* Check if we are being traced */
 GET_THREAD_INFO r11
@@ -213,6 +213,9 @@ local_restart:
 translate_rc_and_ret:
 movi r1, 0
 bge r2, zero, 3f
+ldw r1, PT_ORIG_R2(sp)
+addi r1, r1, 1
+beq r1, zero, 3f
 sub r2, zero, r2
 movi r1, 1
 3:
@@ -255,9 +258,9 @@ traced_system_call:
 ldw r6, PT_R6(sp)
 ldw r7, PT_R7(sp)

-/* Fetch the syscall function, we don't need to check the boundaries
- * since this is already done.
- */
+/* Fetch the syscall function. */
+movui r1, __NR_syscalls
+bgeu r2, r1, traced_invsyscall
 slli r1, r2, 2
 movhi r11,%hiadj(sys_call_table)
 add r1, r1, r11
@@ -276,6 +279,9 @@ traced_system_call:
 translate_rc_and_ret2:
 movi r1, 0
 bge r2, zero, 4f
+ldw r1, PT_ORIG_R2(sp)
+addi r1, r1, 1
+beq r1, zero, 4f
 sub r2, zero, r2
 movi r1, 1
 4:
@@ -287,6 +293,11 @@ end_translate_rc_and_ret2:
 RESTORE_SWITCH_STACK
 br ret_from_exception

+/* If the syscall number was invalid return ENOSYS */
+traced_invsyscall:
+movi r2, -ENOSYS
+br translate_rc_and_ret2
+
 Luser_return:
 GET_THREAD_INFO r11 /* get thread_info pointer */
 ldw r10, TI_FLAGS(r11) /* get thread_info->flags */
@@ -336,9 +347,6 @@ external_interrupt:
 /* skip if no interrupt is pending */
 beq r12, r0, ret_from_interrupt

-movi r24, -1
-stw r24, PT_ORIG_R2(sp)
-
 /*
 * Process an external hardware interrupt.
 */
@@ -242,7 +242,7 @@ static int do_signal(struct pt_regs *regs)
 /*
 * If we were from a system call, check for system call restarting...
 */
-if (regs->orig_r2 >= 0) {
+if (regs->orig_r2 >= 0 && regs->r1) {
 continue_addr = regs->ea;
 restart_addr = continue_addr - 4;
 retval = regs->r2;
@@ -264,6 +264,7 @@ static int do_signal(struct pt_regs *regs)
 regs->ea = restart_addr;
 break;
 }
+regs->orig_r2 = -1;
 }

 if (get_signal(&ksig)) {
@@ -13,5 +13,6 @@
 #define __SYSCALL(nr, call) [nr] = (call),

 void *sys_call_table[__NR_syscalls] = {
+[0 ... __NR_syscalls-1] = sys_ni_syscall,
 #include <asm/unistd.h>
 };
@@ -666,7 +666,7 @@ static inline pte_t *find_kvm_host_pte(struct kvm *kvm, unsigned long mmu_seq,
 VM_WARN(!spin_is_locked(&kvm->mmu_lock),
 "%s called with kvm mmu_lock not held \n", __func__);

-if (mmu_notifier_retry(kvm, mmu_seq))
+if (mmu_invalidate_retry(kvm, mmu_seq))
 return NULL;

 pte = __find_linux_pte(kvm->mm->pgd, ea, NULL, hshift);
@@ -68,10 +68,6 @@ void __init set_pci_dma_ops(const struct dma_map_ops *dma_ops)
 pci_dma_ops = dma_ops;
 }

-/*
- * This function should run under locking protection, specifically
- * hose_spinlock.
- */
 static int get_phb_number(struct device_node *dn)
 {
 int ret, phb_id = -1;
@@ -108,15 +104,20 @@ static int get_phb_number(struct device_node *dn)
 if (!ret)
 phb_id = (int)(prop & (MAX_PHBS - 1));

+spin_lock(&hose_spinlock);
+
 /* We need to be sure to not use the same PHB number twice. */
 if ((phb_id >= 0) && !test_and_set_bit(phb_id, phb_bitmap))
-return phb_id;
+goto out_unlock;

 /* If everything fails then fallback to dynamic PHB numbering. */
 phb_id = find_first_zero_bit(phb_bitmap, MAX_PHBS);
 BUG_ON(phb_id >= MAX_PHBS);
 set_bit(phb_id, phb_bitmap);

+out_unlock:
+spin_unlock(&hose_spinlock);
+
 return phb_id;
 }

@@ -127,10 +128,13 @@ struct pci_controller *pcibios_alloc_controller(struct device_node *dev)
 phb = zalloc_maybe_bootmem(sizeof(struct pci_controller), GFP_KERNEL);
 if (phb == NULL)
 return NULL;
-spin_lock(&hose_spinlock);
+
 phb->global_number = get_phb_number(dev);
+
+spin_lock(&hose_spinlock);
 list_add_tail(&phb->list_node, &hose_list);
 spin_unlock(&hose_spinlock);

 phb->dn = dev;
 phb->is_dynamic = slab_is_available();
 #ifdef CONFIG_PPC64
@@ -90,7 +90,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
 unsigned long pfn;

 /* used to check for invalidations in progress */
-mmu_seq = kvm->mmu_notifier_seq;
+mmu_seq = kvm->mmu_invalidate_seq;
 smp_rmb();

 /* Get host physical address for gpa */
@@ -151,7 +151,7 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
 cpte = kvmppc_mmu_hpte_cache_next(vcpu);

 spin_lock(&kvm->mmu_lock);
-if (!cpte || mmu_notifier_retry(kvm, mmu_seq)) {
+if (!cpte || mmu_invalidate_retry(kvm, mmu_seq)) {
 r = -EAGAIN;
 goto out_unlock;
 }
@@ -578,7 +578,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_vcpu *vcpu,
 return -EFAULT;

 /* used to check for invalidations in progress */
-mmu_seq = kvm->mmu_notifier_seq;
+mmu_seq = kvm->mmu_invalidate_seq;
 smp_rmb();

 ret = -EFAULT;
@@ -693,7 +693,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_vcpu *vcpu,

 /* Check if we might have been invalidated; let the guest retry if so */
 ret = RESUME_GUEST;
-if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) {
+if (mmu_invalidate_retry(vcpu->kvm, mmu_seq)) {
 unlock_rmap(rmap);
 goto out_unlock;
 }
@@ -640,7 +640,7 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
 /* Check if we might have been invalidated; let the guest retry if so */
 spin_lock(&kvm->mmu_lock);
 ret = -EAGAIN;
-if (mmu_notifier_retry(kvm, mmu_seq))
+if (mmu_invalidate_retry(kvm, mmu_seq))
 goto out_unlock;

 /* Now traverse again under the lock and change the tree */
@@ -830,7 +830,7 @@ int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
 bool large_enable;

 /* used to check for invalidations in progress */
-mmu_seq = kvm->mmu_notifier_seq;
+mmu_seq = kvm->mmu_invalidate_seq;
 smp_rmb();

 /*
@@ -1191,7 +1191,7 @@ void kvmppc_radix_flush_memslot(struct kvm *kvm,
 * Increase the mmu notifier sequence number to prevent any page
 * fault that read the memslot earlier from writing a PTE.
 */
-kvm->mmu_notifier_seq++;
+kvm->mmu_invalidate_seq++;
 spin_unlock(&kvm->mmu_lock);
 }

@@ -1580,7 +1580,7 @@ static long int __kvmhv_nested_page_fault(struct kvm_vcpu *vcpu,
 /* 2. Find the host pte for this L1 guest real address */

 /* Used to check for invalidations in progress */
-mmu_seq = kvm->mmu_notifier_seq;
+mmu_seq = kvm->mmu_invalidate_seq;
 smp_rmb();

 /* See if can find translation in our partition scoped tables for L1 */
@@ -219,7 +219,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 g_ptel = ptel;

 /* used later to detect if we might have been invalidated */
-mmu_seq = kvm->mmu_notifier_seq;
+mmu_seq = kvm->mmu_invalidate_seq;
 smp_rmb();

 /* Find the memslot (if any) for this address */
@@ -366,7 +366,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 rmap = real_vmalloc_addr(rmap);
 lock_rmap(rmap);
 /* Check for pending invalidations under the rmap chain lock */
-if (mmu_notifier_retry(kvm, mmu_seq)) {
+if (mmu_invalidate_retry(kvm, mmu_seq)) {
 /* inval in progress, write a non-present HPTE */
 pteh |= HPTE_V_ABSENT;
 pteh &= ~HPTE_V_VALID;
@@ -932,7 +932,7 @@ static long kvmppc_do_h_page_init_zero(struct kvm_vcpu *vcpu,
 int i;

 /* Used later to detect if we might have been invalidated */
-mmu_seq = kvm->mmu_notifier_seq;
+mmu_seq = kvm->mmu_invalidate_seq;
 smp_rmb();

 arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
@@ -960,7 +960,7 @@ static long kvmppc_do_h_page_init_copy(struct kvm_vcpu *vcpu,
 long ret = H_SUCCESS;

 /* Used later to detect if we might have been invalidated */
-mmu_seq = kvm->mmu_notifier_seq;
+mmu_seq = kvm->mmu_invalidate_seq;
 smp_rmb();

 arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
@@ -339,7 +339,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 unsigned long flags;

 /* used to check for invalidations in progress */
-mmu_seq = kvm->mmu_notifier_seq;
+mmu_seq = kvm->mmu_invalidate_seq;
 smp_rmb();

 /*
@@ -460,7 +460,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 }

 spin_lock(&kvm->mmu_lock);
-if (mmu_notifier_retry(kvm, mmu_seq)) {
+if (mmu_invalidate_retry(kvm, mmu_seq)) {
 ret = -EAGAIN;
 goto out;
 }
@@ -28,7 +28,7 @@ unsigned long elf_hwcap __read_mostly;
 /* Host ISA bitmap */
 static DECLARE_BITMAP(riscv_isa, RISCV_ISA_EXT_MAX) __read_mostly;

-__ro_after_init DEFINE_STATIC_KEY_ARRAY_FALSE(riscv_isa_ext_keys, RISCV_ISA_EXT_KEY_MAX);
+DEFINE_STATIC_KEY_ARRAY_FALSE(riscv_isa_ext_keys, RISCV_ISA_EXT_KEY_MAX);
 EXPORT_SYMBOL(riscv_isa_ext_keys);

 /**
@@ -666,7 +666,7 @@ int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
 return ret;
 }

-mmu_seq = kvm->mmu_notifier_seq;
+mmu_seq = kvm->mmu_invalidate_seq;

 hfn = gfn_to_pfn_prot(kvm, gfn, is_write, &writable);
 if (hfn == KVM_PFN_ERR_HWPOISON) {
@@ -686,7 +686,7 @@ int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,

 spin_lock(&kvm->mmu_lock);

-if (mmu_notifier_retry(kvm, mmu_seq))
+if (mmu_invalidate_retry(kvm, mmu_seq))
 goto out_unlock;

 if (writable) {
@@ -437,7 +437,7 @@ __init int hypfs_diag_init(void)
 int rc;

 if (diag204_probe()) {
-pr_err("The hardware system does not support hypfs\n");
+pr_info("The hardware system does not support hypfs\n");
 return -ENODATA;
 }

@@ -496,9 +496,9 @@ fail_hypfs_sprp_exit:
 hypfs_vm_exit();
 fail_hypfs_diag_exit:
 hypfs_diag_exit();
+pr_err("Initialization of hypfs failed with rc=%i\n", rc);
 fail_dbfs_exit:
 hypfs_dbfs_exit();
-pr_err("Initialization of hypfs failed with rc=%i\n", rc);
 return rc;
 }
 device_initcall(hypfs_init)
@@ -1011,7 +1011,7 @@ error_kzalloc:

 static int vu_find_vqs(struct virtio_device *vdev, unsigned nvqs,
 struct virtqueue *vqs[], vq_callback_t *callbacks[],
-const char * const names[], u32 sizes[], const bool *ctx,
+const char * const names[], const bool *ctx,
 struct irq_affinity *desc)
 {
 struct virtio_uml_device *vu_dev = to_virtio_uml_device(vdev);
@@ -65,20 +65,6 @@ extern void setup_clear_cpu_cap(unsigned int bit);

 #define setup_force_cpu_bug(bit) setup_force_cpu_cap(bit)

-#if defined(__clang__) && !defined(CONFIG_CC_HAS_ASM_GOTO)
-
-/*
- * Workaround for the sake of BPF compilation which utilizes kernel
- * headers, but clang does not support ASM GOTO and fails the build.
- */
-#ifndef __BPF_TRACING__
-#warning "Compiler lacks ASM_GOTO support. Add -D __BPF_TRACING__ to your compiler arguments"
-#endif
-
-#define static_cpu_has(bit) boot_cpu_has(bit)
-
-#else
-
 /*
 * Static testing of CPU features. Used the same as boot_cpu_has(). It
 * statically patches the target code for additional performance. Use
@@ -137,7 +123,6 @@ t_no:
 boot_cpu_has(bit) : \
 _static_cpu_has(bit) \
 )
-#endif

 #define cpu_has_bug(c, bit) cpu_has(c, (bit))
 #define set_cpu_bug(c, bit) set_cpu_cap(c, (bit))
@@ -286,10 +286,6 @@ vdso_install:

 archprepare: checkbin
 checkbin:
-ifndef CONFIG_CC_HAS_ASM_GOTO
-	@echo Compiler lacks asm-goto support.
-	@exit 1
-endif
 ifdef CONFIG_RETPOLINE
 ifeq ($(RETPOLINE_CFLAGS),)
 	@echo "You are building kernel with non-retpoline compiler." >&2
@@ -155,20 +155,6 @@ extern void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int bit);

 #define setup_force_cpu_bug(bit) setup_force_cpu_cap(bit)

-#if defined(__clang__) && !defined(CONFIG_CC_HAS_ASM_GOTO)
-
-/*
- * Workaround for the sake of BPF compilation which utilizes kernel
- * headers, but clang does not support ASM GOTO and fails the build.
- */
-#ifndef __BPF_TRACING__
-#warning "Compiler lacks ASM_GOTO support. Add -D __BPF_TRACING__ to your compiler arguments"
-#endif
-
-#define static_cpu_has(bit) boot_cpu_has(bit)
-
-#else
-
 /*
 * Static testing of CPU features. Used the same as boot_cpu_has(). It
 * statically patches the target code for additional performance. Use
@@ -208,7 +194,6 @@ t_no:
 boot_cpu_has(bit) : \
 _static_cpu_has(bit) \
 )
-#endif

 #define cpu_has_bug(c, bit) cpu_has(c, (bit))
 #define set_cpu_bug(c, bit) set_cpu_cap(c, (bit))
@@ -64,4 +64,6 @@
 #define EX_TYPE_UCOPY_LEN4 (EX_TYPE_UCOPY_LEN | EX_DATA_IMM(4))
 #define EX_TYPE_UCOPY_LEN8 (EX_TYPE_UCOPY_LEN | EX_DATA_IMM(8))

+#define EX_TYPE_ZEROPAD 20 /* longword load with zeropad on fault */
+
 #endif
@@ -31,6 +31,16 @@

 #define __noendbr __attribute__((nocf_check))

+/*
+ * Create a dummy function pointer reference to prevent objtool from marking
+ * the function as needing to be "sealed" (i.e. ENDBR converted to NOP by
+ * apply_ibt_endbr()).
+ */
+#define IBT_NOSEAL(fname) \
+".pushsection .discard.ibt_endbr_noseal\n\t" \
+_ASM_PTR fname "\n\t" \
+".popsection\n\t"
+
 static inline __attribute_const__ u32 gen_endbr(void)
 {
 u32 endbr;
@@ -84,6 +94,7 @@ extern __noendbr void ibt_restore(u64 save);
 #ifndef __ASSEMBLY__

 #define ASM_ENDBR
+#define IBT_NOSEAL(name)

 #define __noendbr
@@ -53,7 +53,7 @@
 #define KVM_MAX_VCPU_IDS (KVM_MAX_VCPUS * KVM_VCPU_ID_RATIO)

 /* memory slots that are not exposed to userspace */
-#define KVM_PRIVATE_MEM_SLOTS 3
+#define KVM_INTERNAL_MEM_SLOTS 3

 #define KVM_HALT_POLL_NS_DEFAULT 200000
@@ -11,7 +11,7 @@

 #define __CLOBBERS_MEM(clb...) "memory", ## clb

-#if !defined(__GCC_ASM_FLAG_OUTPUTS__) && defined(CONFIG_CC_HAS_ASM_GOTO)
+#ifndef __GCC_ASM_FLAG_OUTPUTS__

 /* Use asm goto */

@@ -27,7 +27,7 @@ cc_label: c = true; \
 c; \
 })

-#else /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CONFIG_CC_HAS_ASM_GOTO) */
+#else /* defined(__GCC_ASM_FLAG_OUTPUTS__) */

 /* Use flags output or a set instruction */

@@ -40,7 +40,7 @@ cc_label: c = true; \
 c; \
 })

-#endif /* defined(__GCC_ASM_FLAG_OUTPUTS__) || !defined(CONFIG_CC_HAS_ASM_GOTO) */
+#endif /* defined(__GCC_ASM_FLAG_OUTPUTS__) */

 #define GEN_UNARY_RMWcc_4(op, var, cc, arg0) \
 __GEN_RMWcc(op " " arg0, var, cc, __CLOBBERS_MEM())
@@ -77,58 +77,18 @@ static inline unsigned long find_zero(unsigned long mask)
 * and the next page not being mapped, take the exception and
 * return zeroes in the non-existing part.
 */
-#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
-
 static inline unsigned long load_unaligned_zeropad(const void *addr)
 {
-unsigned long offset, data;
 unsigned long ret;

-asm_volatile_goto(
+asm volatile(
 "1: mov %[mem], %[ret]\n"
-
-_ASM_EXTABLE(1b, %l[do_exception])
-
-: [ret] "=r" (ret)
-: [mem] "m" (*(unsigned long *)addr)
-: : do_exception);
-
-return ret;
-
-do_exception:
-offset = (unsigned long)addr & (sizeof(long) - 1);
-addr = (void *)((unsigned long)addr & ~(sizeof(long) - 1));
-data = *(unsigned long *)addr;
-ret = data >> offset * 8;
-
-return ret;
-}
-
-#else /* !CONFIG_CC_HAS_ASM_GOTO_OUTPUT */
-
-static inline unsigned long load_unaligned_zeropad(const void *addr)
-{
-unsigned long offset, data;
-unsigned long ret, err = 0;
-
-asm( "1: mov %[mem], %[ret]\n"
 "2:\n"
-
-_ASM_EXTABLE_FAULT(1b, 2b)
-
-: [ret] "=&r" (ret), "+a" (err)
+_ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_ZEROPAD)
+: [ret] "=r" (ret)
 : [mem] "m" (*(unsigned long *)addr));

-if (unlikely(err)) {
-offset = (unsigned long)addr & (sizeof(long) - 1);
-addr = (void *)((unsigned long)addr & ~(sizeof(long) - 1));
-data = *(unsigned long *)addr;
-ret = data >> offset * 8;
-}
-
 return ret;
 }

-#endif /* CONFIG_CC_HAS_ASM_GOTO_OUTPUT */
-
 #endif /* _ASM_WORD_AT_A_TIME_H */
@@ -505,7 +505,7 @@ static void kprobe_emulate_jcc(struct kprobe *p, struct pt_regs *regs)
 match = ((regs->flags & X86_EFLAGS_SF) >> X86_EFLAGS_SF_BIT) ^
 ((regs->flags & X86_EFLAGS_OF) >> X86_EFLAGS_OF_BIT);
 if (p->ainsn.jcc.type >= 0xe)
-match = match && (regs->flags & X86_EFLAGS_ZF);
+match = match || (regs->flags & X86_EFLAGS_ZF);
 }
 __kprobe_emulate_jmp(p, regs, (match && !invert) || (!match && invert));
 }
@@ -326,7 +326,8 @@ static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop);
 ".align " __stringify(FASTOP_SIZE) " \n\t" \
 ".type " name ", @function \n\t" \
 name ":\n\t" \
-ASM_ENDBR
+ASM_ENDBR \
+IBT_NOSEAL(name)

 #define FOP_FUNC(name) \
 __FOP_FUNC(#name)
@@ -446,27 +447,12 @@ static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop);
 FOP_END

 /* Special case for SETcc - 1 instruction per cc */

-/*
- * Depending on .config the SETcc functions look like:
- *
- * ENDBR [4 bytes; CONFIG_X86_KERNEL_IBT]
- * SETcc %al [3 bytes]
- * RET | JMP __x86_return_thunk [1,5 bytes; CONFIG_RETHUNK]
- * INT3 [1 byte; CONFIG_SLS]
- */
-#define SETCC_ALIGN 16
-
 #define FOP_SETCC(op) \
-".align " __stringify(SETCC_ALIGN) " \n\t" \
-".type " #op ", @function \n\t" \
-#op ": \n\t" \
-ASM_ENDBR \
+FOP_FUNC(op) \
 #op " %al \n\t" \
-__FOP_RET(#op) \
-".skip " __stringify(SETCC_ALIGN) " - (.-" #op "), 0xcc \n\t"
+FOP_RET(op)

-__FOP_START(setcc, SETCC_ALIGN)
+FOP_START(setcc)
 FOP_SETCC(seto)
 FOP_SETCC(setno)
 FOP_SETCC(setc)
@@ -493,7 +479,7 @@ FOP_END;

 /*
 * XXX: inoutclob user must know where the argument is being expanded.
-* Relying on CONFIG_CC_HAS_ASM_GOTO would allow us to remove _fault.
+* Using asm goto would allow us to remove _fault.
 */
 #define asm_safe(insn, inoutclob...) \
 ({ \
@@ -1079,7 +1065,7 @@ static int em_bsr_c(struct x86_emulate_ctxt *ctxt)
 static __always_inline u8 test_cc(unsigned int condition, unsigned long flags)
 {
 u8 rc;
-void (*fop)(void) = (void *)em_setcc + SETCC_ALIGN * (condition & 0xf);
+void (*fop)(void) = (void *)em_setcc + FASTOP_SIZE * (condition & 0xf);

 flags = (flags & EFLAGS_MASK) | X86_EFLAGS_IF;
 asm("push %[flags]; popf; " CALL_NOSPEC
@@ -2914,7 +2914,7 @@ static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
 * If addresses are being invalidated, skip prefetching to avoid
 * accidentally prefetching those addresses.
 */
-if (unlikely(vcpu->kvm->mmu_notifier_count))
+if (unlikely(vcpu->kvm->mmu_invalidate_in_progress))
 return;

 __direct_pte_prefetch(vcpu, sp, sptep);
@@ -2928,7 +2928,7 @@ static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
 *
 * There are several ways to safely use this helper:
 *
-* - Check mmu_notifier_retry_hva() after grabbing the mapping level, before
+* - Check mmu_invalidate_retry_hva() after grabbing the mapping level, before
 * consuming it. In this case, mmu_lock doesn't need to be held during the
 * lookup, but it does need to be held while checking the MMU notifier.
 *
@@ -3056,7 +3056,7 @@ void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
 return;

 /*
-* mmu_notifier_retry() was successful and mmu_lock is held, so
+* mmu_invalidate_retry() was successful and mmu_lock is held, so
 * the pmd can't be split from under us.
 */
 fault->goal_level = fault->req_level;
@@ -4203,7 +4203,7 @@ static bool is_page_fault_stale(struct kvm_vcpu *vcpu,
 return true;

 return fault->slot &&
-mmu_notifier_retry_hva(vcpu->kvm, mmu_seq, fault->hva);
+mmu_invalidate_retry_hva(vcpu->kvm, mmu_seq, fault->hva);
 }

 static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
@@ -4227,7 +4227,7 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
 if (r)
 return r;

-mmu_seq = vcpu->kvm->mmu_notifier_seq;
+mmu_seq = vcpu->kvm->mmu_invalidate_seq;
 smp_rmb();

 r = kvm_faultin_pfn(vcpu, fault);
@@ -6055,7 +6055,7 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)

 write_lock(&kvm->mmu_lock);

-kvm_inc_notifier_count(kvm, gfn_start, gfn_end);
+kvm_mmu_invalidate_begin(kvm, gfn_start, gfn_end);

 flush = kvm_rmap_zap_gfn_range(kvm, gfn_start, gfn_end);

@@ -6069,7 +6069,7 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
 kvm_flush_remote_tlbs_with_address(kvm, gfn_start,
 gfn_end - gfn_start);

-kvm_dec_notifier_count(kvm, gfn_start, gfn_end);
+kvm_mmu_invalidate_end(kvm, gfn_start, gfn_end);

 write_unlock(&kvm->mmu_lock);
 }
@@ -589,7 +589,7 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
 * If addresses are being invalidated, skip prefetching to avoid
 * accidentally prefetching those addresses.
 */
-if (unlikely(vcpu->kvm->mmu_notifier_count))
+if (unlikely(vcpu->kvm->mmu_invalidate_in_progress))
 return;

 if (sp->role.direct)
@@ -838,7 +838,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault
 else
 fault->max_level = walker.level;

-mmu_seq = vcpu->kvm->mmu_notifier_seq;
+mmu_seq = vcpu->kvm->mmu_invalidate_seq;
 smp_rmb();

 r = kvm_faultin_pfn(vcpu, fault);
@@ -41,6 +41,59 @@ static bool ex_handler_default(const struct exception_table_entry *e,
 return true;
 }

+/*
+ * This is the *very* rare case where we do a "load_unaligned_zeropad()"
+ * and it's a page crosser into a non-existent page.
+ *
+ * This happens when we optimistically load a pathname a word-at-a-time
+ * and the name is less than the full word and the next page is not
+ * mapped. Typically that only happens for CONFIG_DEBUG_PAGEALLOC.
+ *
+ * NOTE! The faulting address is always a 'mov mem,reg' type instruction
+ * of size 'long', and the exception fixup must always point to right
+ * after the instruction.
+ */
+static bool ex_handler_zeropad(const struct exception_table_entry *e,
+struct pt_regs *regs,
+unsigned long fault_addr)
+{
+struct insn insn;
+const unsigned long mask = sizeof(long) - 1;
+unsigned long offset, addr, next_ip, len;
+unsigned long *reg;
+
+next_ip = ex_fixup_addr(e);
+len = next_ip - regs->ip;
+if (len > MAX_INSN_SIZE)
+return false;
+
+if (insn_decode(&insn, (void *) regs->ip, len, INSN_MODE_KERN))
+return false;
+if (insn.length != len)
+return false;
+
+if (insn.opcode.bytes[0] != 0x8b)
+return false;
+if (insn.opnd_bytes != sizeof(long))
+return false;
+
+addr = (unsigned long) insn_get_addr_ref(&insn, regs);
+if (addr == ~0ul)
+return false;
+
+offset = addr & mask;
+addr = addr & ~mask;
+if (fault_addr != addr + sizeof(long))
+return false;
+
+reg = insn_get_modrm_reg_ptr(&insn, regs);
+if (!reg)
+return false;
+
+*reg = *(unsigned long *)addr >> (offset * 8);
+return ex_handler_default(e, regs);
+}
+
 static bool ex_handler_fault(const struct exception_table_entry *fixup,
 struct pt_regs *regs, int trapnr)
 {
@@ -217,6 +270,8 @@ int fixup_exception(struct pt_regs *regs, int trapnr, unsigned long error_code,
 return ex_handler_sgx(e, regs, trapnr);
 case EX_TYPE_UCOPY_LEN:
 return ex_handler_ucopy_len(e, regs, trapnr, reg, imm);
+case EX_TYPE_ZEROPAD:
+return ex_handler_zeropad(e, regs, fault_addr);
 }
 BUG();
 }
@@ -645,7 +645,7 @@ phys_pud_init(pud_t *pud_page, unsigned long paddr, unsigned long paddr_end,
 pages++;
 spin_lock(&init_mm.page_table_lock);

-prot = __pgprot(pgprot_val(prot) | __PAGE_KERNEL_LARGE);
+prot = __pgprot(pgprot_val(prot) | _PAGE_PSE);

 set_pte_init((pte_t *)pud,
 pfn_pte((paddr & PUD_MASK) >> PAGE_SHIFT,
@@ -2229,26 +2229,6 @@ void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs)
 }
 EXPORT_SYMBOL(blk_mq_delay_run_hw_queues);

-/**
- * blk_mq_queue_stopped() - check whether one or more hctxs have been stopped
- * @q: request queue.
- *
- * The caller is responsible for serializing this function against
- * blk_mq_{start,stop}_hw_queue().
- */
-bool blk_mq_queue_stopped(struct request_queue *q)
-{
-struct blk_mq_hw_ctx *hctx;
-unsigned long i;
-
-queue_for_each_hw_ctx(q, hctx, i)
-if (blk_mq_hctx_stopped(hctx))
-return true;
-
-return false;
-}
-EXPORT_SYMBOL(blk_mq_queue_stopped);
-
 /*
 * This function is often used for pausing .queue_rq() by driver when
 * there isn't enough resource or some conditions aren't satisfied, and
@@ -2570,7 +2550,7 @@ static void blk_mq_plug_issue_direct(struct blk_plug *plug, bool from_schedule)
 break;
 case BLK_STS_RESOURCE:
 case BLK_STS_DEV_RESOURCE:
-blk_mq_request_bypass_insert(rq, false, last);
+blk_mq_request_bypass_insert(rq, false, true);
 blk_mq_commit_rqs(hctx, &queued, from_schedule);
 return;
 default:
@@ -2122,6 +2122,7 @@ const char *ata_get_cmd_name(u8 command)
 { ATA_CMD_WRITE_QUEUED_FUA_EXT, "WRITE DMA QUEUED FUA EXT" },
 { ATA_CMD_FPDMA_READ, "READ FPDMA QUEUED" },
 { ATA_CMD_FPDMA_WRITE, "WRITE FPDMA QUEUED" },
+{ ATA_CMD_NCQ_NON_DATA, "NCQ NON-DATA" },
 { ATA_CMD_FPDMA_SEND, "SEND FPDMA QUEUED" },
 { ATA_CMD_FPDMA_RECV, "RECEIVE FPDMA QUEUED" },
 { ATA_CMD_PIO_READ, "READ SECTOR(S)" },
@@ -555,7 +555,7 @@ static inline struct ublk_uring_cmd_pdu *ublk_get_uring_cmd_pdu(
 return (struct ublk_uring_cmd_pdu *)&ioucmd->pdu;
 }

-static bool ubq_daemon_is_dying(struct ublk_queue *ubq)
+static inline bool ubq_daemon_is_dying(struct ublk_queue *ubq)
 {
 return ubq->ubq_daemon->flags & PF_EXITING;
 }
@@ -605,8 +605,9 @@ static void ublk_complete_rq(struct request *req)
 }

 /*
-* __ublk_fail_req() may be called from abort context or ->ubq_daemon
-* context during exiting, so lock is required.
+* Since __ublk_rq_task_work always fails requests immediately during
+* exiting, __ublk_fail_req() is only called from abort context during
+* exiting. So lock is unnecessary.
 *
 * Also aborting may not be started yet, keep in mind that one failed
 * request may be issued by block layer again.
@@ -644,8 +645,7 @@ static inline void __ublk_rq_task_work(struct request *req)
 struct ublk_device *ub = ubq->dev;
 int tag = req->tag;
 struct ublk_io *io = &ubq->ios[tag];
-bool task_exiting = current != ubq->ubq_daemon ||
-(current->flags & PF_EXITING);
+bool task_exiting = current != ubq->ubq_daemon || ubq_daemon_is_dying(ubq);
 unsigned int mapped_bytes;

 pr_devel("%s: complete: op %d, qid %d tag %d io_flags %x addr %llx\n",
@@ -680,6 +680,11 @@ static inline void __ublk_rq_task_work(struct request *req)
 * do the copy work.
 */
 io->flags &= ~UBLK_IO_FLAG_NEED_GET_DATA;
+/* update iod->addr because ublksrv may have passed a new io buffer */
+ublk_get_iod(ubq, req->tag)->addr = io->addr;
+pr_devel("%s: update iod->addr: op %d, qid %d tag %d io_flags %x addr %llx\n",
+__func__, io->cmd->cmd_op, ubq->q_id, req->tag, io->flags,
+ublk_get_iod(ubq, req->tag)->addr);
 }

 mapped_bytes = ublk_map_io(ubq, req, io);
@@ -751,9 +756,25 @@ static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx,
 if (task_work_add(ubq->ubq_daemon, &data->work, notify_mode))
 goto fail;
 } else {
-struct io_uring_cmd *cmd = ubq->ios[rq->tag].cmd;
+struct ublk_io *io = &ubq->ios[rq->tag];
+struct io_uring_cmd *cmd = io->cmd;
 struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd);

+/*
+ * If the check pass, we know that this is a re-issued request aborted
+ * previously in monitor_work because the ubq_daemon(cmd's task) is
+ * PF_EXITING. We cannot call io_uring_cmd_complete_in_task() anymore
+ * because this ioucmd's io_uring context may be freed now if no inflight
+ * ioucmd exists. Otherwise we may cause null-deref in ctx->fallback_work.
+ *
+ * Note: monitor_work sets UBLK_IO_FLAG_ABORTED and ends this request(releasing
+ * the tag). Then the request is re-started(allocating the tag) and we are here.
+ * Since releasing/allocating a tag implies smp_mb(), finding UBLK_IO_FLAG_ABORTED
+ * guarantees that here is a re-issued request aborted previously.
+ */
+if ((io->flags & UBLK_IO_FLAG_ABORTED))
+goto fail;
+
 pdu->req = rq;
 io_uring_cmd_complete_in_task(cmd, ublk_rq_task_work_cb);
 }
@@ -148,30 +148,22 @@ aldebaran_mode2_perform_reset(struct amdgpu_reset_control *reset_ctl,
 struct amdgpu_reset_context *reset_context)
 {
 struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle;
+struct list_head *reset_device_list = reset_context->reset_device_list;
 struct amdgpu_device *tmp_adev = NULL;
-struct list_head reset_device_list;
 int r = 0;

 dev_dbg(adev->dev, "aldebaran perform hw reset\n");

+if (reset_device_list == NULL)
+return -EINVAL;
+
 if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2) &&
 reset_context->hive == NULL) {
 /* Wrong context, return error */
 return -EINVAL;
 }

-INIT_LIST_HEAD(&reset_device_list);
-if (reset_context->hive) {
-list_for_each_entry (tmp_adev,
-&reset_context->hive->device_list,
-gmc.xgmi.head)
-list_add_tail(&tmp_adev->reset_list,
-&reset_device_list);
-} else {
-list_add_tail(&reset_context->reset_req_dev->reset_list,
-&reset_device_list);
-}
-
-list_for_each_entry (tmp_adev, &reset_device_list, reset_list) {
+list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
 mutex_lock(&tmp_adev->reset_cntl->reset_lock);
 tmp_adev->reset_cntl->active_reset = AMD_RESET_METHOD_MODE2;
 }
@@ -179,7 +171,7 @@ aldebaran_mode2_perform_reset(struct amdgpu_reset_control *reset_ctl,
 * Mode2 reset doesn't need any sync between nodes in XGMI hive, instead launch
 * them together so that they can be completed asynchronously on multiple nodes
 */
-list_for_each_entry (tmp_adev, &reset_device_list, reset_list) {
+list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
 /* For XGMI run all resets in parallel to speed up the process */
 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
 if (!queue_work(system_unbound_wq,
@@ -197,7 +189,7 @@ aldebaran_mode2_perform_reset(struct amdgpu_reset_control *reset_ctl,

 /* For XGMI wait for all resets to complete before proceed */
 if (!r) {
-list_for_each_entry (tmp_adev, &reset_device_list, reset_list) {
+list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
 flush_work(&tmp_adev->reset_cntl->reset_work);
 r = tmp_adev->asic_reset_res;
@@ -207,7 +199,7 @@ aldebaran_mode2_perform_reset(struct amdgpu_reset_control *reset_ctl,
 }
 }

-list_for_each_entry (tmp_adev, &reset_device_list, reset_list) {
+list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
 mutex_unlock(&tmp_adev->reset_cntl->reset_lock);
 tmp_adev->reset_cntl->active_reset = AMD_RESET_METHOD_NONE;
 }
@@ -339,10 +331,13 @@ static int
 aldebaran_mode2_restore_hwcontext(struct amdgpu_reset_control *reset_ctl,
 struct amdgpu_reset_context *reset_context)
 {
+struct list_head *reset_device_list = reset_context->reset_device_list;
 struct amdgpu_device *tmp_adev = NULL;
-struct list_head reset_device_list;
 int r;

+if (reset_device_list == NULL)
+return -EINVAL;
+
 if (reset_context->reset_req_dev->ip_versions[MP1_HWIP][0] ==
 IP_VERSION(13, 0, 2) &&
 reset_context->hive == NULL) {
@@ -350,19 +345,7 @@ aldebaran_mode2_restore_hwcontext(struct amdgpu_reset_control *reset_ctl,
 return -EINVAL;
 }

-INIT_LIST_HEAD(&reset_device_list);
-if (reset_context->hive) {
-list_for_each_entry (tmp_adev,
-&reset_context->hive->device_list,
-gmc.xgmi.head)
-list_add_tail(&tmp_adev->reset_list,
-&reset_device_list);
-} else {
-list_add_tail(&reset_context->reset_req_dev->reset_list,
-&reset_device_list);
-}
-
-list_for_each_entry (tmp_adev, &reset_device_list, reset_list) {
+list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
 dev_info(tmp_adev->dev,
 "GPU reset succeeded, trying to resume\n");
 r = aldebaran_mode2_restore_ip(tmp_adev);
@@ -317,7 +317,7 @@ enum amdgpu_kiq_irq {
 AMDGPU_CP_KIQ_IRQ_DRIVER0 = 0,
 AMDGPU_CP_KIQ_IRQ_LAST
 };

+#define SRIOV_USEC_TIMEOUT 1200000 /* wait 12 * 100ms for SRIOV */
 #define MAX_KIQ_REG_WAIT 5000 /* in usecs, 5ms */
 #define MAX_KIQ_REG_BAILOUT_INTERVAL 5 /* in msecs, 5ms */
 #define MAX_KIQ_REG_TRY 1000
@@ -96,6 +96,7 @@ struct amdgpu_amdkfd_fence {
 struct amdgpu_kfd_dev {
 struct kfd_dev *dev;
 uint64_t vram_used;
+uint64_t vram_used_aligned;
 bool init_complete;
 struct work_struct reset_work;
 };
@@ -40,10 +40,10 @@
 #define AMDGPU_USERPTR_RESTORE_DELAY_MS 1

 /*
-* Align VRAM allocations to 2MB to avoid fragmentation caused by 4K allocations in the tail 2MB
+* Align VRAM availability to 2MB to avoid fragmentation caused by 4K allocations in the tail 2MB
 * BO chunk
 */
-#define VRAM_ALLOCATION_ALIGN (1 << 21)
+#define VRAM_AVAILABLITY_ALIGN (1 << 21)

 /* Impose limit on how much memory KFD can use */
 static struct {
@@ -149,7 +149,7 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
 * to avoid fragmentation caused by 4K allocations in the tail
 * 2M BO chunk.
 */
-vram_needed = ALIGN(size, VRAM_ALLOCATION_ALIGN);
+vram_needed = size;
 } else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
 system_mem_needed = size;
 } else if (!(alloc_flag &
@@ -182,8 +182,10 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
 */
 WARN_ONCE(vram_needed && !adev,
 "adev reference can't be null when vram is used");
-if (adev)
+if (adev) {
 adev->kfd.vram_used += vram_needed;
+adev->kfd.vram_used_aligned += ALIGN(vram_needed, VRAM_AVAILABLITY_ALIGN);
+}
 kfd_mem_limit.system_mem_used += system_mem_needed;
 kfd_mem_limit.ttm_mem_used += ttm_mem_needed;

@@ -203,8 +205,10 @@ void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev,
 } else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
 WARN_ONCE(!adev,
 "adev reference can't be null when alloc mem flags vram is set");
-if (adev)
-adev->kfd.vram_used -= ALIGN(size, VRAM_ALLOCATION_ALIGN);
+if (adev) {
+adev->kfd.vram_used -= size;
+adev->kfd.vram_used_aligned -= ALIGN(size, VRAM_AVAILABLITY_ALIGN);
+}
 } else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
 kfd_mem_limit.system_mem_used -= size;
 } else if (!(alloc_flag &
@@ -1608,15 +1612,14 @@ size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev)
 uint64_t reserved_for_pt =
 ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size);
 size_t available;

 spin_lock(&kfd_mem_limit.mem_limit_lock);
 available = adev->gmc.real_vram_size
-- adev->kfd.vram_used
+- adev->kfd.vram_used_aligned
 - atomic64_read(&adev->vram_pin_size)
 - reserved_for_pt;
 spin_unlock(&kfd_mem_limit.mem_limit_lock);

-return ALIGN_DOWN(available, VRAM_ALLOCATION_ALIGN);
+return ALIGN_DOWN(available, VRAM_AVAILABLITY_ALIGN);
 }

 int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
@@ -314,7 +314,7 @@ amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
 mem_channel_number = vram_info->v30.channel_num;
 mem_channel_width = vram_info->v30.channel_width;
 if (vram_width)
-*vram_width = mem_channel_number * mem_channel_width;
+*vram_width = mem_channel_number * (1 << mem_channel_width);
 break;
 default:
 return -EINVAL;
@@ -837,17 +837,13 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
 continue;

 r = amdgpu_vm_bo_update(adev, bo_va, false);
-if (r) {
-mutex_unlock(&p->bo_list->bo_list_mutex);
+if (r)
 return r;
-}

 r = amdgpu_sync_fence(&p->job->sync, bo_va->last_pt_update);
-if (r) {
-mutex_unlock(&p->bo_list->bo_list_mutex);
+if (r)
 return r;
-}
 }

 r = amdgpu_vm_handle_moved(adev, vm);
 if (r)
@ -1705,7 +1705,7 @@ static ssize_t amdgpu_reset_dump_register_list_write(struct file *f,
{
    struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private;
    char reg_offset[11];
    uint32_t *new, *tmp = NULL;
    uint32_t *new = NULL, *tmp = NULL;
    int ret, i = 0, len = 0;

    do {
@ -1747,6 +1747,7 @@ static ssize_t amdgpu_reset_dump_register_list_write(struct file *f,
    ret = size;

error_free:
    if (tmp != new)
        kfree(tmp);
    kfree(new);
    return ret;

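A minimal user-space analogue of the error path above (an assumption about the surrounding control flow, not the kernel code itself): the destination pointer must start NULL so a failure before the first reallocation frees nothing, and the alias check avoids a double free once the source pointer has been updated to the reallocated block.

#include <stdlib.h>

static int grow_list(unsigned int **out, size_t n)
{
    unsigned int *src = NULL, *dst = NULL;
    size_t i;

    for (i = 0; i < n; i++) {
        dst = realloc(src, (i + 1) * sizeof(*src));
        if (!dst)
            goto error_free;    /* src may be non-NULL, dst is NULL */
        src = dst;              /* from here on src == dst */
        src[i] = 0;
    }
    *out = src;
    return 0;

error_free:
    if (src != dst)
        free(src);              /* only when they don't alias */
    free(dst);                  /* free(NULL) is a no-op */
    return -1;
}
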
@ -4742,6 +4742,8 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
    tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
                                reset_list);
    amdgpu_reset_reg_dumps(tmp_adev);

    reset_context->reset_device_list = device_list_handle;
    r = amdgpu_reset_perform_reset(tmp_adev, reset_context);
    /* If reset handler not implemented, continue; otherwise return */
    if (r == -ENOSYS)

@ -272,10 +272,6 @@ void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
    /* Signal all jobs not yet scheduled */
    for (i = DRM_SCHED_PRIORITY_COUNT - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) {
        struct drm_sched_rq *rq = &sched->sched_rq[i];

        if (!rq)
            continue;

        spin_lock(&rq->lock);
        list_for_each_entry(s_entity, &rq->entities, list) {
            while ((s_job = to_drm_sched_job(spsc_queue_pop(&s_entity->job_queue)))) {

@ -37,6 +37,7 @@ struct amdgpu_reset_context {
    struct amdgpu_device *reset_req_dev;
    struct amdgpu_job *job;
    struct amdgpu_hive_info *hive;
    struct list_head *reset_device_list;
    unsigned long flags;
};

@ -637,6 +637,8 @@ struct amdgpu_ttm_tt {
#endif
};

#define ttm_to_amdgpu_ttm_tt(ptr)  container_of(ptr, struct amdgpu_ttm_tt, ttm)

#ifdef CONFIG_DRM_AMDGPU_USERPTR
/*
 * amdgpu_ttm_tt_get_user_pages - get device accessible pages that back user
@ -648,7 +650,7 @@ struct amdgpu_ttm_tt {
int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
{
    struct ttm_tt *ttm = bo->tbo.ttm;
    struct amdgpu_ttm_tt *gtt = (void *)ttm;
    struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
    unsigned long start = gtt->userptr;
    struct vm_area_struct *vma;
    struct mm_struct *mm;
@ -702,7 +704,7 @@ out_unlock:
 */
bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
{
    struct amdgpu_ttm_tt *gtt = (void *)ttm;
    struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
    bool r = false;

    if (!gtt || !gtt->userptr)
@ -751,7 +753,7 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_device *bdev,
                                     struct ttm_tt *ttm)
{
    struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
    struct amdgpu_ttm_tt *gtt = (void *)ttm;
    struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
    int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
    enum dma_data_direction direction = write ?
        DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
@ -788,7 +790,7 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_device *bdev,
                                        struct ttm_tt *ttm)
{
    struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
    struct amdgpu_ttm_tt *gtt = (void *)ttm;
    struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
    int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
    enum dma_data_direction direction = write ?
        DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
@ -822,7 +824,7 @@ static void amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
{
    struct amdgpu_bo *abo = ttm_to_amdgpu_bo(tbo);
    struct ttm_tt *ttm = tbo->ttm;
    struct amdgpu_ttm_tt *gtt = (void *)ttm;
    struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);

    if (amdgpu_bo_encrypted(abo))
        flags |= AMDGPU_PTE_TMZ;
@ -860,7 +862,7 @@ static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
                                   struct ttm_resource *bo_mem)
{
    struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
    struct amdgpu_ttm_tt *gtt = (void*)ttm;
    struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
    uint64_t flags;
    int r;

@ -927,7 +929,7 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
{
    struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
    struct ttm_operation_ctx ctx = { false, false };
    struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
    struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(bo->ttm);
    struct ttm_placement placement;
    struct ttm_place placements;
    struct ttm_resource *tmp;
@ -998,7 +1000,7 @@ static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
                                      struct ttm_tt *ttm)
{
    struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
    struct amdgpu_ttm_tt *gtt = (void *)ttm;
    struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);

    /* if the pages have userptr pinning then clear that first */
    if (gtt->userptr) {
@ -1025,7 +1027,7 @@ static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
static void amdgpu_ttm_backend_destroy(struct ttm_device *bdev,
                                       struct ttm_tt *ttm)
{
    struct amdgpu_ttm_tt *gtt = (void *)ttm;
    struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);

    if (gtt->usertask)
        put_task_struct(gtt->usertask);
@ -1079,7 +1081,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_device *bdev,
                                  struct ttm_operation_ctx *ctx)
{
    struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
    struct amdgpu_ttm_tt *gtt = (void *)ttm;
    struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
    pgoff_t i;
    int ret;

@ -1113,7 +1115,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_device *bdev,
static void amdgpu_ttm_tt_unpopulate(struct ttm_device *bdev,
                                     struct ttm_tt *ttm)
{
    struct amdgpu_ttm_tt *gtt = (void *)ttm;
    struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
    struct amdgpu_device *adev;
    pgoff_t i;

@ -1182,7 +1184,7 @@ int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
    /* Set TTM_TT_FLAG_EXTERNAL before populate but after create. */
    bo->ttm->page_flags |= TTM_TT_FLAG_EXTERNAL;

    gtt = (void *)bo->ttm;
    gtt = ttm_to_amdgpu_ttm_tt(bo->ttm);
    gtt->userptr = addr;
    gtt->userflags = flags;

@ -1199,7 +1201,7 @@ int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
 */
struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
{
    struct amdgpu_ttm_tt *gtt = (void *)ttm;
    struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);

    if (gtt == NULL)
        return NULL;
@ -1218,7 +1220,7 @@ struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
                                  unsigned long end, unsigned long *userptr)
{
    struct amdgpu_ttm_tt *gtt = (void *)ttm;
    struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
    unsigned long size;

    if (gtt == NULL || !gtt->userptr)
@ -1241,7 +1243,7 @@ bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
 */
bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm)
{
    struct amdgpu_ttm_tt *gtt = (void *)ttm;
    struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);

    if (gtt == NULL || !gtt->userptr)
        return false;
@ -1254,7 +1256,7 @@ bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm)
 */
bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
{
    struct amdgpu_ttm_tt *gtt = (void *)ttm;
    struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);

    if (gtt == NULL)
        return false;

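These hunks replace open-coded `(void *)ttm` casts with a container_of-based helper. The bare cast is only safe while `ttm` happens to be the first member of the wrapper struct; container_of keeps working regardless of layout. A simplified sketch of the pattern (the kernel's real macro adds type checking):

#include <stddef.h>

#define container_of_sketch(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct ttm_tt_stub { int dummy; };

struct amdgpu_tt_stub {
    long other_state;        /* member placed before 'ttm': a bare cast would now be wrong */
    struct ttm_tt_stub ttm;
};

static struct amdgpu_tt_stub *to_wrapper(struct ttm_tt_stub *ttm)
{
    /* recover the wrapper from a pointer to its member */
    return container_of_sketch(ttm, struct amdgpu_tt_stub, ttm);
}
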
@ -496,8 +496,7 @@ static int amdgpu_vkms_sw_init(void *handle)
    adev_to_drm(adev)->mode_config.max_height = YRES_MAX;

    adev_to_drm(adev)->mode_config.preferred_depth = 24;
    /* disable prefer shadow for now due to hibernation issues */
    adev_to_drm(adev)->mode_config.prefer_shadow = 0;
    adev_to_drm(adev)->mode_config.prefer_shadow = 1;

    adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;

@ -28,13 +28,44 @@
#include "navi10_enum.h"
#include "soc15_common.h"

#define regATHUB_MISC_CNTL_V3_0_1          0x00d7
#define regATHUB_MISC_CNTL_V3_0_1_BASE_IDX 0


static uint32_t athub_v3_0_get_cg_cntl(struct amdgpu_device *adev)
{
    uint32_t data;

    switch (adev->ip_versions[ATHUB_HWIP][0]) {
    case IP_VERSION(3, 0, 1):
        data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL_V3_0_1);
        break;
    default:
        data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL);
        break;
    }
    return data;
}

static void athub_v3_0_set_cg_cntl(struct amdgpu_device *adev, uint32_t data)
{
    switch (adev->ip_versions[ATHUB_HWIP][0]) {
    case IP_VERSION(3, 0, 1):
        WREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL_V3_0_1, data);
        break;
    default:
        WREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL, data);
        break;
    }
}

static void
athub_v3_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
                                            bool enable)
{
    uint32_t def, data;

    def = data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL);
    def = data = athub_v3_0_get_cg_cntl(adev);

    if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ATHUB_MGCG))
        data |= ATHUB_MISC_CNTL__CG_ENABLE_MASK;
@ -42,7 +73,7 @@ athub_v3_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
        data &= ~ATHUB_MISC_CNTL__CG_ENABLE_MASK;

    if (def != data)
        WREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL, data);
        athub_v3_0_set_cg_cntl(adev, data);
}

static void
@ -51,7 +82,7 @@ athub_v3_0_update_medium_grain_light_sleep(struct amdgpu_device *adev,
{
    uint32_t def, data;

    def = data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL);
    def = data = athub_v3_0_get_cg_cntl(adev);

    if (enable && (adev->cg_flags & AMD_CG_SUPPORT_ATHUB_LS))
        data |= ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK;
@ -59,7 +90,7 @@ athub_v3_0_update_medium_grain_light_sleep(struct amdgpu_device *adev,
        data &= ~ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK;

    if (def != data)
        WREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL, data);
        athub_v3_0_set_cg_cntl(adev, data);
}

int athub_v3_0_set_clockgating(struct amdgpu_device *adev,
@ -70,6 +101,7 @@ int athub_v3_0_set_clockgating(struct amdgpu_device *adev,

    switch (adev->ip_versions[ATHUB_HWIP][0]) {
    case IP_VERSION(3, 0, 0):
    case IP_VERSION(3, 0, 1):
    case IP_VERSION(3, 0, 2):
        athub_v3_0_update_medium_grain_clock_gating(adev,
                state == AMD_CG_STATE_GATE);
@ -88,7 +120,7 @@ void athub_v3_0_get_clockgating(struct amdgpu_device *adev, u64 *flags)
    int data;

    /* AMD_CG_SUPPORT_ATHUB_MGCG */
    data = RREG32_SOC15(ATHUB, 0, regATHUB_MISC_CNTL);
    data = athub_v3_0_get_cg_cntl(adev);
    if (data & ATHUB_MISC_CNTL__CG_ENABLE_MASK)
        *flags |= AMD_CG_SUPPORT_ATHUB_MGCG;

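The pattern here is worth noting: once a register moves between hardware revisions, funnelling every access through one get/set pair keeps the version switch in a single place, and the callers keep the usual read-modify-write-only-if-changed shape. A standalone sketch (hypothetical offsets and a stand-in register array, not the driver's MMIO accessors):

#include <stdint.h>
#include <stdio.h>

#define REG_OFF_DEFAULT 0x00d8  /* hypothetical default offset */
#define REG_OFF_V3_0_1  0x00d7  /* relocated offset, as in the define above */

static uint32_t regs[0x100];    /* stand-in for MMIO space */

static uint32_t cg_cntl_read(int is_v3_0_1)
{
    return regs[is_v3_0_1 ? REG_OFF_V3_0_1 : REG_OFF_DEFAULT];
}

static void cg_cntl_write(int is_v3_0_1, uint32_t val)
{
    regs[is_v3_0_1 ? REG_OFF_V3_0_1 : REG_OFF_DEFAULT] = val;
}

int main(void)
{
    /* same shape as the driver: only write back when a bit changed */
    uint32_t def, data;

    def = data = cg_cntl_read(1);
    data |= 1u;                  /* e.g. a CG_ENABLE-style bit */
    if (def != data)
        cg_cntl_write(1, data);
    printf("0x%08x\n", cg_cntl_read(1));
    return 0;
}
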
@ -2796,8 +2796,7 @@ static int dce_v10_0_sw_init(void *handle)
    adev_to_drm(adev)->mode_config.max_height = 16384;

    adev_to_drm(adev)->mode_config.preferred_depth = 24;
    /* disable prefer shadow for now due to hibernation issues */
    adev_to_drm(adev)->mode_config.prefer_shadow = 0;
    adev_to_drm(adev)->mode_config.prefer_shadow = 1;

    adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;

@ -2914,8 +2914,7 @@ static int dce_v11_0_sw_init(void *handle)
    adev_to_drm(adev)->mode_config.max_height = 16384;

    adev_to_drm(adev)->mode_config.preferred_depth = 24;
    /* disable prefer shadow for now due to hibernation issues */
    adev_to_drm(adev)->mode_config.prefer_shadow = 0;
    adev_to_drm(adev)->mode_config.prefer_shadow = 1;

    adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;

@ -2673,8 +2673,7 @@ static int dce_v6_0_sw_init(void *handle)
    adev_to_drm(adev)->mode_config.max_width = 16384;
    adev_to_drm(adev)->mode_config.max_height = 16384;
    adev_to_drm(adev)->mode_config.preferred_depth = 24;
    /* disable prefer shadow for now due to hibernation issues */
    adev_to_drm(adev)->mode_config.prefer_shadow = 0;
    adev_to_drm(adev)->mode_config.prefer_shadow = 1;
    adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;
    adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;

@ -2693,8 +2693,11 @@ static int dce_v8_0_sw_init(void *handle)
    adev_to_drm(adev)->mode_config.max_height = 16384;

    adev_to_drm(adev)->mode_config.preferred_depth = 24;
    if (adev->asic_type == CHIP_HAWAII)
        /* disable prefer shadow for now due to hibernation issues */
        adev_to_drm(adev)->mode_config.prefer_shadow = 0;
    else
        adev_to_drm(adev)->mode_config.prefer_shadow = 1;

    adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;

@ -4846,7 +4846,7 @@ static int gfx_v10_0_sw_init(void *handle)
    case IP_VERSION(10, 3, 3):
    case IP_VERSION(10, 3, 7):
        adev->gfx.me.num_me = 1;
        adev->gfx.me.num_pipe_per_me = 2;
        adev->gfx.me.num_pipe_per_me = 1;
        adev->gfx.me.num_queue_per_pipe = 1;
        adev->gfx.mec.num_mec = 2;
        adev->gfx.mec.num_pipe_per_mec = 4;

@ -53,6 +53,7 @@
#define GFX11_MEC_HPD_SIZE 2048

#define RLCG_UCODE_LOADING_START_ADDRESS    0x00002000L
#define RLC_PG_DELAY_3_DEFAULT_GC_11_0_1    0x1388

#define regCGTT_WD_CLK_CTRL          0x5086
#define regCGTT_WD_CLK_CTRL_BASE_IDX 1
@ -5279,6 +5280,38 @@ static const struct amdgpu_rlc_funcs gfx_v11_0_rlc_funcs = {
    .update_spm_vmid = gfx_v11_0_update_spm_vmid,
};

static void gfx_v11_cntl_power_gating(struct amdgpu_device *adev, bool enable)
{
    u32 data = RREG32_SOC15(GC, 0, regRLC_PG_CNTL);

    if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG))
        data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
    else
        data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;

    WREG32_SOC15(GC, 0, regRLC_PG_CNTL, data);

    // Program RLC_PG_DELAY3 for CGPG hysteresis
    if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
        switch (adev->ip_versions[GC_HWIP][0]) {
        case IP_VERSION(11, 0, 1):
            WREG32_SOC15(GC, 0, regRLC_PG_DELAY_3, RLC_PG_DELAY_3_DEFAULT_GC_11_0_1);
            break;
        default:
            break;
        }
    }
}

static void gfx_v11_cntl_pg(struct amdgpu_device *adev, bool enable)
{
    amdgpu_gfx_rlc_enter_safe_mode(adev);

    gfx_v11_cntl_power_gating(adev, enable);

    amdgpu_gfx_rlc_exit_safe_mode(adev);
}

static int gfx_v11_0_set_powergating_state(void *handle,
                                           enum amd_powergating_state state)
{
@ -5293,6 +5326,11 @@ static int gfx_v11_0_set_powergating_state(void *handle,
    case IP_VERSION(11, 0, 2):
        amdgpu_gfx_off_ctrl(adev, enable);
        break;
    case IP_VERSION(11, 0, 1):
        gfx_v11_cntl_pg(adev, enable);
        /* TODO: Enable this when GFXOFF is ready */
        // amdgpu_gfx_off_ctrl(adev, enable);
        break;
    default:
        break;
    }
@ -5310,6 +5348,7 @@ static int gfx_v11_0_set_clockgating_state(void *handle,

    switch (adev->ip_versions[GC_HWIP][0]) {
    case IP_VERSION(11, 0, 0):
    case IP_VERSION(11, 0, 1):
    case IP_VERSION(11, 0, 2):
        gfx_v11_0_update_gfx_clock_gating(adev,
                state == AMD_CG_STATE_GATE);

@ -419,6 +419,7 @@ static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
    uint32_t seq;
    uint16_t queried_pasid;
    bool ret;
    u32 usec_timeout = amdgpu_sriov_vf(adev) ? SRIOV_USEC_TIMEOUT : adev->usec_timeout;
    struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
    struct amdgpu_kiq *kiq = &adev->gfx.kiq;

@ -437,7 +438,7 @@ static int gmc_v10_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,

    amdgpu_ring_commit(ring);
    spin_unlock(&adev->gfx.kiq.ring_lock);
    r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
    r = amdgpu_fence_wait_polling(ring, seq, usec_timeout);
    if (r < 1) {
        dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
        return -ETIME;

@ -896,6 +896,7 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
    uint32_t seq;
    uint16_t queried_pasid;
    bool ret;
    u32 usec_timeout = amdgpu_sriov_vf(adev) ? SRIOV_USEC_TIMEOUT : adev->usec_timeout;
    struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
    struct amdgpu_kiq *kiq = &adev->gfx.kiq;

@ -935,7 +936,7 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,

    amdgpu_ring_commit(ring);
    spin_unlock(&adev->gfx.kiq.ring_lock);
    r = amdgpu_fence_wait_polling(ring, seq, adev->usec_timeout);
    r = amdgpu_fence_wait_polling(ring, seq, usec_timeout);
    if (r < 1) {
        dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
        up_read(&adev->reset_domain->sem);
@ -1624,12 +1625,15 @@ static int gmc_v9_0_sw_init(void *handle)
            amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 47);
        else
            amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
        if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
            adev->gmc.translate_further = adev->vm_manager.num_level > 1;
        break;
    case IP_VERSION(9, 4, 1):
        adev->num_vmhubs = 3;

        /* Keep the vm size same with Vega20 */
        amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
        adev->gmc.translate_further = adev->vm_manager.num_level > 1;
        break;
    default:
        break;

@ -40,6 +40,156 @@ static void hdp_v5_2_flush_hdp(struct amdgpu_device *adev,
            0);
}

static void hdp_v5_2_update_mem_power_gating(struct amdgpu_device *adev,
                                             bool enable)
{
    uint32_t hdp_clk_cntl;
    uint32_t hdp_mem_pwr_cntl;

    if (!(adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS |
                            AMD_CG_SUPPORT_HDP_DS |
                            AMD_CG_SUPPORT_HDP_SD)))
        return;

    hdp_clk_cntl = RREG32_SOC15(HDP, 0, regHDP_CLK_CNTL);
    hdp_mem_pwr_cntl = RREG32_SOC15(HDP, 0, regHDP_MEM_POWER_CTRL);

    /* Before doing clock/power mode switch, forced on MEM clock */
    hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
                                 ATOMIC_MEM_CLK_SOFT_OVERRIDE, 1);
    hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
                                 RC_MEM_CLK_SOFT_OVERRIDE, 1);
    WREG32_SOC15(HDP, 0, regHDP_CLK_CNTL, hdp_clk_cntl);

    /* disable clock and power gating before any changing */
    hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
                                     ATOMIC_MEM_POWER_CTRL_EN, 0);
    hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
                                     ATOMIC_MEM_POWER_LS_EN, 0);
    hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
                                     ATOMIC_MEM_POWER_DS_EN, 0);
    hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
                                     ATOMIC_MEM_POWER_SD_EN, 0);
    hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
                                     RC_MEM_POWER_CTRL_EN, 0);
    hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
                                     RC_MEM_POWER_LS_EN, 0);
    hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
                                     RC_MEM_POWER_DS_EN, 0);
    hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
                                     RC_MEM_POWER_SD_EN, 0);
    WREG32_SOC15(HDP, 0, regHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);

    /* Already disabled above. The actions below are for "enabled" only */
    if (enable) {
        /* only one clock gating mode (LS/DS/SD) can be enabled */
        if (adev->cg_flags & AMD_CG_SUPPORT_HDP_SD) {
            hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
                                             HDP_MEM_POWER_CTRL,
                                             ATOMIC_MEM_POWER_SD_EN, 1);
            hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
                                             HDP_MEM_POWER_CTRL,
                                             RC_MEM_POWER_SD_EN, 1);
        } else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS) {
            hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
                                             HDP_MEM_POWER_CTRL,
                                             ATOMIC_MEM_POWER_LS_EN, 1);
            hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
                                             HDP_MEM_POWER_CTRL,
                                             RC_MEM_POWER_LS_EN, 1);
        } else if (adev->cg_flags & AMD_CG_SUPPORT_HDP_DS) {
            hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
                                             HDP_MEM_POWER_CTRL,
                                             ATOMIC_MEM_POWER_DS_EN, 1);
            hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl,
                                             HDP_MEM_POWER_CTRL,
                                             RC_MEM_POWER_DS_EN, 1);
        }

        /* confirmed that ATOMIC/RC_MEM_POWER_CTRL_EN have to be set for SRAM LS/DS/SD */
        if (adev->cg_flags & (AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_HDP_DS |
                              AMD_CG_SUPPORT_HDP_SD)) {
            hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
                                             ATOMIC_MEM_POWER_CTRL_EN, 1);
            hdp_mem_pwr_cntl = REG_SET_FIELD(hdp_mem_pwr_cntl, HDP_MEM_POWER_CTRL,
                                             RC_MEM_POWER_CTRL_EN, 1);
            WREG32_SOC15(HDP, 0, regHDP_MEM_POWER_CTRL, hdp_mem_pwr_cntl);
        }
    }

    /* disable MEM clock override after clock/power mode changing */
    hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
                                 ATOMIC_MEM_CLK_SOFT_OVERRIDE, 0);
    hdp_clk_cntl = REG_SET_FIELD(hdp_clk_cntl, HDP_CLK_CNTL,
                                 RC_MEM_CLK_SOFT_OVERRIDE, 0);
    WREG32_SOC15(HDP, 0, regHDP_CLK_CNTL, hdp_clk_cntl);
}

static void hdp_v5_2_update_medium_grain_clock_gating(struct amdgpu_device *adev,
                                                      bool enable)
{
    uint32_t hdp_clk_cntl;

    if (!(adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
        return;

    hdp_clk_cntl = RREG32_SOC15(HDP, 0, regHDP_CLK_CNTL);

    if (enable) {
        hdp_clk_cntl &=
            ~(uint32_t)
            (HDP_CLK_CNTL__ATOMIC_MEM_CLK_SOFT_OVERRIDE_MASK |
             HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
             HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
             HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
             HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
             HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK);
    } else {
        hdp_clk_cntl |= HDP_CLK_CNTL__ATOMIC_MEM_CLK_SOFT_OVERRIDE_MASK |
            HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
            HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
            HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
            HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
            HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK;
    }

    WREG32_SOC15(HDP, 0, regHDP_CLK_CNTL, hdp_clk_cntl);
}

static void hdp_v5_2_get_clockgating_state(struct amdgpu_device *adev,
                                           u64 *flags)
{
    uint32_t tmp;

    /* AMD_CG_SUPPORT_HDP_MGCG */
    tmp = RREG32_SOC15(HDP, 0, regHDP_CLK_CNTL);
    if (!(tmp & (HDP_CLK_CNTL__ATOMIC_MEM_CLK_SOFT_OVERRIDE_MASK |
                 HDP_CLK_CNTL__RC_MEM_CLK_SOFT_OVERRIDE_MASK |
                 HDP_CLK_CNTL__DBUS_CLK_SOFT_OVERRIDE_MASK |
                 HDP_CLK_CNTL__DYN_CLK_SOFT_OVERRIDE_MASK |
                 HDP_CLK_CNTL__XDP_REG_CLK_SOFT_OVERRIDE_MASK |
                 HDP_CLK_CNTL__HDP_REG_CLK_SOFT_OVERRIDE_MASK)))
        *flags |= AMD_CG_SUPPORT_HDP_MGCG;

    /* AMD_CG_SUPPORT_HDP_LS/DS/SD */
    tmp = RREG32_SOC15(HDP, 0, regHDP_MEM_POWER_CTRL);
    if (tmp & HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_LS_EN_MASK)
        *flags |= AMD_CG_SUPPORT_HDP_LS;
    else if (tmp & HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_DS_EN_MASK)
        *flags |= AMD_CG_SUPPORT_HDP_DS;
    else if (tmp & HDP_MEM_POWER_CTRL__ATOMIC_MEM_POWER_SD_EN_MASK)
        *flags |= AMD_CG_SUPPORT_HDP_SD;
}

static void hdp_v5_2_update_clock_gating(struct amdgpu_device *adev,
                                         bool enable)
{
    hdp_v5_2_update_mem_power_gating(adev, enable);
    hdp_v5_2_update_medium_grain_clock_gating(adev, enable);
}

const struct amdgpu_hdp_funcs hdp_v5_2_funcs = {
    .flush_hdp = hdp_v5_2_flush_hdp,
    .update_clock_gating = hdp_v5_2_update_clock_gating,
    .get_clock_gating_state = hdp_v5_2_get_clockgating_state,
};

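The HDP code above leans heavily on REG_SET_FIELD. A minimal sketch of the mask/shift arithmetic behind it, assuming the usual generated <REG>__<FIELD>__SHIFT and _MASK constant pairs (hypothetical field placed at bits [3:2] here, not a real HDP field):

#include <stdint.h>

#define MY_FIELD__SHIFT 2
#define MY_FIELD_MASK   0x0000000CU

/* clear the field in 'reg', then insert 'val' at the field's position */
static inline uint32_t reg_set_field(uint32_t reg, uint32_t val)
{
    return (reg & ~MY_FIELD_MASK) | ((val << MY_FIELD__SHIFT) & MY_FIELD_MASK);
}

The driver chains several such updates on a local copy and performs a single register write at the end, which is why the sequence reads as many REG_SET_FIELD calls followed by one WREG32_SOC15.
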
@ -727,6 +727,7 @@ static const struct amd_ip_funcs ih_v6_0_ip_funcs = {
static const struct amdgpu_ih_funcs ih_v6_0_funcs = {
    .get_wptr = ih_v6_0_get_wptr,
    .decode_iv = amdgpu_ih_decode_iv_helper,
    .decode_iv_ts = amdgpu_ih_decode_iv_ts_helper,
    .set_rptr = ih_v6_0_set_rptr
};

@ -518,18 +518,41 @@ static u64 mmhub_v3_0_1_get_mc_fb_offset(struct amdgpu_device *adev)
static void mmhub_v3_0_1_update_medium_grain_clock_gating(struct amdgpu_device *adev,
                                                          bool enable)
{
    //TODO
    uint32_t def, data;

    def = data = RREG32_SOC15(MMHUB, 0, regMM_ATC_L2_MISC_CG);

    if (enable)
        data |= MM_ATC_L2_MISC_CG__ENABLE_MASK;
    else
        data &= ~MM_ATC_L2_MISC_CG__ENABLE_MASK;

    if (def != data)
        WREG32_SOC15(MMHUB, 0, regMM_ATC_L2_MISC_CG, data);
}

static void mmhub_v3_0_1_update_medium_grain_light_sleep(struct amdgpu_device *adev,
                                                         bool enable)
{
    //TODO
    uint32_t def, data;

    def = data = RREG32_SOC15(MMHUB, 0, regMM_ATC_L2_MISC_CG);

    if (enable)
        data |= MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
    else
        data &= ~MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;

    if (def != data)
        WREG32_SOC15(MMHUB, 0, regMM_ATC_L2_MISC_CG, data);
}

static int mmhub_v3_0_1_set_clockgating(struct amdgpu_device *adev,
                                        enum amd_clockgating_state state)
{
    if (amdgpu_sriov_vf(adev))
        return 0;

    mmhub_v3_0_1_update_medium_grain_clock_gating(adev,
            state == AMD_CG_STATE_GATE);
    mmhub_v3_0_1_update_medium_grain_light_sleep(adev,
@ -539,7 +562,20 @@ static int mmhub_v3_0_1_set_clockgating(struct amdgpu_device *adev,

static void mmhub_v3_0_1_get_clockgating(struct amdgpu_device *adev, u64 *flags)
{
    //TODO
    int data;

    if (amdgpu_sriov_vf(adev))
        *flags = 0;

    data = RREG32_SOC15(MMHUB, 0, regMM_ATC_L2_MISC_CG);

    /* AMD_CG_SUPPORT_MC_MGCG */
    if (data & MM_ATC_L2_MISC_CG__ENABLE_MASK)
        *flags |= AMD_CG_SUPPORT_MC_MGCG;

    /* AMD_CG_SUPPORT_MC_LS */
    if (data & MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK)
        *flags |= AMD_CG_SUPPORT_MC_LS;
}

const struct amdgpu_mmhub_funcs mmhub_v3_0_1_funcs = {

@ -409,9 +409,11 @@ static u32 navi10_ih_get_wptr(struct amdgpu_device *adev,
    u32 wptr, tmp;
    struct amdgpu_ih_regs *ih_regs;

    if (ih == &adev->irq.ih) {
    if (ih == &adev->irq.ih || ih == &adev->irq.ih_soft) {
        /* Only ring0 supports writeback. On other rings fall back
         * to register-based code with overflow checking below.
         * ih_soft ring doesn't have any backing hardware registers,
         * update wptr and return.
         */
        wptr = le32_to_cpu(*ih->wptr_cpu);

@ -483,6 +485,9 @@ static void navi10_ih_set_rptr(struct amdgpu_device *adev,
{
    struct amdgpu_ih_regs *ih_regs;

    if (ih == &adev->irq.ih_soft)
        return;

    if (ih->use_doorbell) {
        /* XXX check if swapping is necessary on BE */
        *ih->rptr_cpu = ih->rptr;

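The shape of this fix, restated as a sketch (simplified names, an assumption about the surrounding driver structure): a software-only ring like ih_soft has its write pointer in a CPU-visible slot and no backing doorbell or MMIO register, so the read path shares the writeback branch while the rptr path must bail out early.

#include <stdint.h>
#include <stdbool.h>

struct ih_ring {
    bool is_soft;                   /* like adev->irq.ih_soft */
    volatile uint32_t *wptr_cpu;    /* CPU-visible writeback slot */
    uint32_t rptr;
};

static uint32_t ring_get_wptr(struct ih_ring *ih)
{
    /* ring0 and the software ring both read the CPU copy */
    return *ih->wptr_cpu;
}

static void ring_set_rptr(struct ih_ring *ih)
{
    if (ih->is_soft)
        return;     /* no backing hardware register to update */
    /* ... doorbell or MMIO write for hardware rings ... */
}
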
@ -101,6 +101,16 @@ static int psp_v12_0_init_microcode(struct psp_context *psp)
    adev->psp.dtm_context.context.bin_desc.start_addr =
        (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
        le32_to_cpu(ta_hdr->dtm.offset_bytes);

    if (adev->apu_flags & AMD_APU_IS_RENOIR) {
        adev->psp.securedisplay_context.context.bin_desc.fw_version =
            le32_to_cpu(ta_hdr->securedisplay.fw_version);
        adev->psp.securedisplay_context.context.bin_desc.size_bytes =
            le32_to_cpu(ta_hdr->securedisplay.size_bytes);
        adev->psp.securedisplay_context.context.bin_desc.start_addr =
            (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
            le32_to_cpu(ta_hdr->securedisplay.offset_bytes);
    }
}

    return 0;

@ -20,7 +20,6 @@
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/dev_printk.h>
#include <drm/drm_drv.h>
#include <linux/vmalloc.h>
#include "amdgpu.h"

@ -546,8 +546,10 @@ static int soc21_common_early_init(void *handle)
    case IP_VERSION(11, 0, 0):
        adev->cg_flags = AMD_CG_SUPPORT_GFX_CGCG |
            AMD_CG_SUPPORT_GFX_CGLS |
#if 0
            AMD_CG_SUPPORT_GFX_3D_CGCG |
            AMD_CG_SUPPORT_GFX_3D_CGLS |
#endif
            AMD_CG_SUPPORT_GFX_MGCG |
            AMD_CG_SUPPORT_REPEATER_FGCG |
            AMD_CG_SUPPORT_GFX_FGCG |
@ -575,7 +577,9 @@ static int soc21_common_early_init(void *handle)
            AMD_CG_SUPPORT_VCN_MGCG |
            AMD_CG_SUPPORT_JPEG_MGCG |
            AMD_CG_SUPPORT_ATHUB_MGCG |
            AMD_CG_SUPPORT_ATHUB_LS;
            AMD_CG_SUPPORT_ATHUB_LS |
            AMD_CG_SUPPORT_IH_CG |
            AMD_CG_SUPPORT_HDP_SD;
        adev->pg_flags =
            AMD_PG_SUPPORT_VCN |
            AMD_PG_SUPPORT_VCN_DPG |
@ -586,9 +590,23 @@ static int soc21_common_early_init(void *handle)
        break;
    case IP_VERSION(11, 0, 1):
        adev->cg_flags =
            AMD_CG_SUPPORT_GFX_CGCG |
            AMD_CG_SUPPORT_GFX_CGLS |
            AMD_CG_SUPPORT_GFX_MGCG |
            AMD_CG_SUPPORT_GFX_FGCG |
            AMD_CG_SUPPORT_REPEATER_FGCG |
            AMD_CG_SUPPORT_GFX_PERF_CLK |
            AMD_CG_SUPPORT_MC_MGCG |
            AMD_CG_SUPPORT_MC_LS |
            AMD_CG_SUPPORT_HDP_MGCG |
            AMD_CG_SUPPORT_HDP_LS |
            AMD_CG_SUPPORT_ATHUB_MGCG |
            AMD_CG_SUPPORT_ATHUB_LS |
            AMD_CG_SUPPORT_IH_CG |
            AMD_CG_SUPPORT_VCN_MGCG |
            AMD_CG_SUPPORT_JPEG_MGCG;
        adev->pg_flags =
            AMD_PG_SUPPORT_GFX_PG |
            AMD_PG_SUPPORT_JPEG;
        adev->external_rev_id = adev->rev_id + 0x1;
        break;
@ -683,6 +701,7 @@ static int soc21_common_set_clockgating_state(void *handle,

    switch (adev->ip_versions[NBIO_HWIP][0]) {
    case IP_VERSION(4, 3, 0):
    case IP_VERSION(4, 3, 1):
        adev->nbio.funcs->update_medium_grain_clock_gating(adev,
                state == AMD_CG_STATE_GATE);
        adev->nbio.funcs->update_medium_grain_light_sleep(adev,
@ -690,6 +709,10 @@ static int soc21_common_set_clockgating_state(void *handle,
        adev->hdp.funcs->update_clock_gating(adev,
                state == AMD_CG_STATE_GATE);
        break;
    case IP_VERSION(7, 7, 0):
        adev->hdp.funcs->update_clock_gating(adev,
                state == AMD_CG_STATE_GATE);
        break;
    default:
        break;
    }

@ -1115,7 +1115,7 @@ static int vcn_v4_0_start(struct amdgpu_device *adev)
 *
 * Stop VCN block with dpg mode
 */
static int vcn_v4_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
static void vcn_v4_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
{
    uint32_t tmp;

@ -1133,7 +1133,6 @@ static int vcn_v4_0_stop_dpg_mode(struct amdgpu_device *adev, int inst_idx)
    /* disable dynamic power gating mode */
    WREG32_P(SOC15_REG_OFFSET(VCN, inst_idx, regUVD_POWER_STATUS), 0,
             ~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
    return 0;
}

/**
@ -1154,7 +1153,7 @@ static int vcn_v4_0_stop(struct amdgpu_device *adev)
        fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF;

        if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) {
            r = vcn_v4_0_stop_dpg_mode(adev, i);
            vcn_v4_0_stop_dpg_mode(adev, i);
            continue;
        }

@ -334,9 +334,11 @@ static u32 vega10_ih_get_wptr(struct amdgpu_device *adev,
    u32 wptr, tmp;
    struct amdgpu_ih_regs *ih_regs;

    if (ih == &adev->irq.ih) {
    if (ih == &adev->irq.ih || ih == &adev->irq.ih_soft) {
        /* Only ring0 supports writeback. On other rings fall back
         * to register-based code with overflow checking below.
         * ih_soft ring doesn't have any backing hardware registers,
         * update wptr and return.
         */
        wptr = le32_to_cpu(*ih->wptr_cpu);

@ -409,6 +411,9 @@ static void vega10_ih_set_rptr(struct amdgpu_device *adev,
{
    struct amdgpu_ih_regs *ih_regs;

    if (ih == &adev->irq.ih_soft)
        return;

    if (ih->use_doorbell) {
        /* XXX check if swapping is necessary on BE */
        *ih->rptr_cpu = ih->rptr;

@ -385,9 +385,11 @@ static u32 vega20_ih_get_wptr(struct amdgpu_device *adev,
    u32 wptr, tmp;
    struct amdgpu_ih_regs *ih_regs;

    if (ih == &adev->irq.ih) {
    if (ih == &adev->irq.ih || ih == &adev->irq.ih_soft) {
        /* Only ring0 supports writeback. On other rings fall back
         * to register-based code with overflow checking below.
         * ih_soft ring doesn't have any backing hardware registers,
         * update wptr and return.
         */
        wptr = le32_to_cpu(*ih->wptr_cpu);

@ -461,6 +463,9 @@ static void vega20_ih_set_rptr(struct amdgpu_device *adev,
{
    struct amdgpu_ih_regs *ih_regs;

    if (ih == &adev->irq.ih_soft)
        return;

    if (ih->use_doorbell) {
        /* XXX check if swapping is necessary on BE */
        *ih->rptr_cpu = ih->rptr;

@ -874,7 +874,7 @@ static int kfd_ioctl_wait_events(struct file *filp, struct kfd_process *p,
    err = kfd_wait_on_events(p, args->num_events,
            (void __user *)args->events_ptr,
            (args->wait_for_all != 0),
            args->timeout, &args->wait_result);
            &args->timeout, &args->wait_result);

    return err;
}

@ -102,13 +102,18 @@ static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd)

    switch (sdma_version) {
    case IP_VERSION(6, 0, 0):
    case IP_VERSION(6, 0, 1):
    case IP_VERSION(6, 0, 2):
        /* Reserve 1 for paging and 1 for gfx */
        kfd->device_info.num_reserved_sdma_queues_per_engine = 2;
        /* BIT(0)=engine-0 queue-0; BIT(1)=engine-1 queue-0; BIT(2)=engine-0 queue-1; ... */
        kfd->device_info.reserved_sdma_queues_bitmap = 0xFULL;
        break;
    case IP_VERSION(6, 0, 1):
        /* Reserve 1 for paging and 1 for gfx */
        kfd->device_info.num_reserved_sdma_queues_per_engine = 2;
        /* BIT(0)=engine-0 queue-0; BIT(1)=engine-0 queue-1; ... */
        kfd->device_info.reserved_sdma_queues_bitmap = 0x3ULL;
        break;
    default:
        break;
    }

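Restating the two comments as bit-index arithmetic (derived from the comments above, an inference rather than quoted kernel code): with two SDMA engines the reserved bits interleave engines, so 0xF covers queues 0 and 1 on both engines; with a single engine the queue index maps straight to the bit index, so 0x3 covers queues 0 and 1 of engine 0.

/* two engines: BIT(0)=e0q0, BIT(1)=e1q0, BIT(2)=e0q1, ... */
static unsigned int reserved_bit_two_engines(unsigned int engine, unsigned int queue)
{
    return queue * 2 + engine;
}

/* one engine: BIT(0)=e0q0, BIT(1)=e0q1, ... */
static unsigned int reserved_bit_one_engine(unsigned int queue)
{
    return queue;
}
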
@ -894,7 +894,8 @@ static long user_timeout_to_jiffies(uint32_t user_timeout_ms)
    return msecs_to_jiffies(user_timeout_ms) + 1;
}

static void free_waiters(uint32_t num_events, struct kfd_event_waiter *waiters)
static void free_waiters(uint32_t num_events, struct kfd_event_waiter *waiters,
                         bool undo_auto_reset)
{
    uint32_t i;

@ -903,6 +904,9 @@ static void free_waiters(uint32_t num_events, struct kfd_event_waiter *waiters)
            spin_lock(&waiters[i].event->lock);
            remove_wait_queue(&waiters[i].event->wq,
                              &waiters[i].wait);
            if (undo_auto_reset && waiters[i].activated &&
                waiters[i].event && waiters[i].event->auto_reset)
                set_event(waiters[i].event);
            spin_unlock(&waiters[i].event->lock);
        }

@ -911,7 +915,7 @@ static void free_waiters(uint32_t num_events, struct kfd_event_waiter *waiters)

int kfd_wait_on_events(struct kfd_process *p,
                       uint32_t num_events, void __user *data,
                       bool all, uint32_t user_timeout_ms,
                       bool all, uint32_t *user_timeout_ms,
                       uint32_t *wait_result)
{
    struct kfd_event_data __user *events =
@ -920,7 +924,7 @@ int kfd_wait_on_events(struct kfd_process *p,
    int ret = 0;

    struct kfd_event_waiter *event_waiters = NULL;
    long timeout = user_timeout_to_jiffies(user_timeout_ms);
    long timeout = user_timeout_to_jiffies(*user_timeout_ms);

    event_waiters = alloc_event_waiters(num_events);
    if (!event_waiters) {
@ -970,15 +974,11 @@ int kfd_wait_on_events(struct kfd_process *p,
        }

        if (signal_pending(current)) {
            /*
             * This is wrong when a nonzero, non-infinite timeout
             * is specified. We need to use
             * ERESTARTSYS_RESTARTBLOCK, but struct restart_block
             * contains a union with data for each user and it's
             * in generic kernel code that I don't want to
             * touch yet.
             */
            ret = -ERESTARTSYS;
            if (*user_timeout_ms != KFD_EVENT_TIMEOUT_IMMEDIATE &&
                *user_timeout_ms != KFD_EVENT_TIMEOUT_INFINITE)
                *user_timeout_ms = jiffies_to_msecs(
                        max(0l, timeout-1));
            break;
        }

@ -1019,7 +1019,7 @@ int kfd_wait_on_events(struct kfd_process *p,
                       event_waiters, events);

out_unlock:
    free_waiters(num_events, event_waiters);
    free_waiters(num_events, event_waiters, ret == -ERESTARTSYS);
    mutex_unlock(&p->event_mutex);
out:
    if (ret)

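Passing the timeout by pointer is what makes the wait restartable: when a signal interrupts it, the remaining time is written back so the restarted syscall does not wait the full interval again. A user-space sketch of the same bookkeeping (the sentinel values are assumptions modeled on KFD_EVENT_TIMEOUT_IMMEDIATE/INFINITE):

#include <stdint.h>

#define TIMEOUT_IMMEDIATE 0u
#define TIMEOUT_INFINITE  0xFFFFFFFFu

/* shrink the caller-visible timeout to what is left of the wait;
 * sentinel values (poll once / wait forever) are left untouched */
static void update_remaining(uint32_t *timeout_ms, long remaining_ms)
{
    if (*timeout_ms != TIMEOUT_IMMEDIATE && *timeout_ms != TIMEOUT_INFINITE)
        *timeout_ms = remaining_ms > 0 ? (uint32_t)remaining_ms : 0;
}

The undo_auto_reset flag in free_waiters complements this: auto-reset events consumed by the interrupted wait are re-signalled, so the restarted wait observes them again.
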
@ -1317,7 +1317,7 @@ void kfd_event_free_process(struct kfd_process *p);
int kfd_event_mmap(struct kfd_process *process, struct vm_area_struct *vma);
int kfd_wait_on_events(struct kfd_process *p,
                       uint32_t num_events, void __user *data,
                       bool all, uint32_t user_timeout_ms,
                       bool all, uint32_t *user_timeout_ms,
                       uint32_t *wait_result);
void kfd_signal_event_interrupt(u32 pasid, uint32_t partial_id,
                                uint32_t valid_id_bits);

@ -541,7 +541,6 @@ svm_range_vram_node_new(struct amdgpu_device *adev, struct svm_range *prange,
        kfree(svm_bo);
        return -ESRCH;
    }
    svm_bo->svms = prange->svms;
    svm_bo->eviction_fence =
        amdgpu_amdkfd_fence_create(dma_fence_context_alloc(1),
                                   mm,
@ -3273,7 +3272,6 @@ int svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence *fence)
static void svm_range_evict_svm_bo_worker(struct work_struct *work)
{
    struct svm_range_bo *svm_bo;
    struct kfd_process *p;
    struct mm_struct *mm;
    int r = 0;

@ -3281,13 +3279,12 @@ static void svm_range_evict_svm_bo_worker(struct work_struct *work)
    if (!svm_bo_ref_unless_zero(svm_bo))
        return; /* svm_bo was freed while eviction was pending */

    /* svm_range_bo_release destroys this worker thread. So during
     * the lifetime of this thread, kfd_process and mm will be valid.
     */
    p = container_of(svm_bo->svms, struct kfd_process, svms);
    mm = p->mm;
    if (!mm)
    if (mmget_not_zero(svm_bo->eviction_fence->mm)) {
        mm = svm_bo->eviction_fence->mm;
    } else {
        svm_range_bo_unref(svm_bo);
        return;
    }

    mmap_read_lock(mm);
    spin_lock(&svm_bo->list_lock);
@ -3305,8 +3302,7 @@ static void svm_range_evict_svm_bo_worker(struct work_struct *work)

        mutex_lock(&prange->migrate_mutex);
        do {
            r = svm_migrate_vram_to_ram(prange,
                    svm_bo->eviction_fence->mm,
            r = svm_migrate_vram_to_ram(prange, mm,
                    KFD_MIGRATE_TRIGGER_TTM_EVICTION);
        } while (!r && prange->actual_loc && --retries);

@ -3324,6 +3320,7 @@ static void svm_range_evict_svm_bo_worker(struct work_struct *work)
    }
    spin_unlock(&svm_bo->list_lock);
    mmap_read_unlock(mm);
    mmput(mm);

    dma_fence_signal(&svm_bo->eviction_fence->base);

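Instead of trusting that p->mm stays valid for the worker's lifetime, the worker now takes its own reference via mmget_not_zero and drops it with mmput on exit. A generic, standalone sketch of that take-reference-or-bail pattern (illustration only, not the kernel's mm refcounting code):

#include <stdatomic.h>
#include <stdbool.h>

struct obj { atomic_int users; };

static bool get_not_zero(struct obj *o)
{
    int v = atomic_load(&o->users);

    /* only take a reference while the count is still non-zero */
    while (v != 0)
        if (atomic_compare_exchange_weak(&o->users, &v, v + 1))
            return true;
    return false;   /* already dead: caller must bail out */
}

static void put_ref(struct obj *o)
{
    atomic_fetch_sub(&o->users, 1); /* real code would free at zero */
}
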
@ -46,7 +46,6 @@ struct svm_range_bo {
    spinlock_t list_lock;
    struct amdgpu_amdkfd_fence *eviction_fence;
    struct work_struct eviction_work;
    struct svm_range_list *svms;
    uint32_t evicting;
    struct work_struct release_work;
};

@ -1392,8 +1392,8 @@ static int kfd_build_p2p_node_entry(struct kfd_topology_device *dev,

static int kfd_create_indirect_link_prop(struct kfd_topology_device *kdev, int gpu_node)
{
    struct kfd_iolink_properties *gpu_link, *cpu_link;
    struct kfd_iolink_properties *gpu_link, *tmp_link, *cpu_link;
    struct kfd_iolink_properties *props = NULL, *props2 = NULL;
    struct kfd_topology_device *cpu_dev;
    int ret = 0;
    int i, num_cpu;
@ -1416,16 +1416,19 @@ static int kfd_create_indirect_link_prop(struct kfd_topology_device *kdev, int g
            continue;

        /* find CPU <--> CPU links */
        cpu_link = NULL;
        cpu_dev = kfd_topology_device_by_proximity_domain(i);
        if (cpu_dev) {
            list_for_each_entry(cpu_link,
            list_for_each_entry(tmp_link,
                                &cpu_dev->io_link_props, list) {
                if (cpu_link->node_to == gpu_link->node_to)
                if (tmp_link->node_to == gpu_link->node_to) {
                    cpu_link = tmp_link;
                    break;
                }
            }
        }

        if (cpu_link->node_to != gpu_link->node_to)
        if (!cpu_link)
            return -ENOMEM;

        /* CPU <--> CPU <--> GPU, GPU node*/

@ -3825,8 +3825,11 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
    adev_to_drm(adev)->mode_config.max_height = 16384;

    adev_to_drm(adev)->mode_config.preferred_depth = 24;
    if (adev->asic_type == CHIP_HAWAII)
        /* disable prefer shadow for now due to hibernation issues */
        adev_to_drm(adev)->mode_config.prefer_shadow = 0;
    else
        adev_to_drm(adev)->mode_config.prefer_shadow = 1;
    /* indicates support for immediate flip */
    adev_to_drm(adev)->mode_config.async_page_flip = true;

@ -4135,6 +4138,7 @@ static void register_backlight_device(struct amdgpu_display_manager *dm,
    }
}

static void amdgpu_set_panel_orientation(struct drm_connector *connector);

/*
 * In this architecture, the association
@ -4326,6 +4330,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
                adev_to_drm(adev)->vblank_disable_immediate = false;
            }
        }
        amdgpu_set_panel_orientation(&aconnector->base);
    }

    /* Software is initialized. Now we can register interrupt handlers. */
@ -6684,6 +6689,10 @@ static void amdgpu_set_panel_orientation(struct drm_connector *connector)
        connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
        return;

    mutex_lock(&connector->dev->mode_config.mutex);
    amdgpu_dm_connector_get_modes(connector);
    mutex_unlock(&connector->dev->mode_config.mutex);

    encoder = amdgpu_dm_connector_to_encoder(connector);
    if (!encoder)
        return;
@ -6728,8 +6737,6 @@ static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
         * restored here.
         */
        amdgpu_dm_update_freesync_caps(connector, edid);

        amdgpu_set_panel_orientation(connector);
    } else {
        amdgpu_dm_connector->num_modes = 0;
    }

@ -660,7 +660,7 @@ static int get_plane_modifiers(struct amdgpu_device *adev, unsigned int plane_ty
        add_gfx10_1_modifiers(adev, mods, &size, &capacity);
        break;
    case AMDGPU_FAMILY_GC_11_0_0:
    case AMDGPU_FAMILY_GC_11_0_2:
    case AMDGPU_FAMILY_GC_11_0_1:
        add_gfx11_modifiers(adev, mods, &size, &capacity);
        break;
    }
@ -1412,7 +1412,7 @@ static bool dm_plane_format_mod_supported(struct drm_plane *plane,
        }
        break;
    case AMDGPU_FAMILY_GC_11_0_0:
    case AMDGPU_FAMILY_GC_11_0_2:
    case AMDGPU_FAMILY_GC_11_0_1:
        switch (AMD_FMT_MOD_GET(TILE, modifier)) {
        case AMD_FMT_MOD_TILE_GFX11_256K_R_X:
        case AMD_FMT_MOD_TILE_GFX9_64K_R_X:

@ -100,3 +100,24 @@ void convert_float_matrix(
        matrix[i] = (uint16_t)reg_value;
    }
}

static uint32_t find_gcd(uint32_t a, uint32_t b)
{
    uint32_t remainder = 0;
    while (b != 0) {
        remainder = a % b;
        a = b;
        b = remainder;
    }
    return a;
}

void reduce_fraction(uint32_t num, uint32_t den,
                     uint32_t *out_num, uint32_t *out_den)
{
    uint32_t gcd = 0;

    gcd = find_gcd(num, den);
    *out_num = num / gcd;
    *out_den = den / gcd;
}

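find_gcd is the iterative Euclidean algorithm: it repeatedly replaces the pair (a, b) with (b, a mod b) until b reaches zero, at which point a holds the greatest common divisor. A usage example with illustrative values (typical display ratios; not taken from a caller in this diff):

uint32_t num, den;

reduce_fraction(60000, 1000, &num, &den);   /* gcd is 1000: num = 60, den = 1 */
reduce_fraction(24000, 1001, &num, &den);   /* gcd is 1: already in lowest terms */
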
@ -38,6 +38,9 @@ void convert_float_matrix(
    struct fixed31_32 *flt,
    uint32_t buffer_size);

void reduce_fraction(uint32_t num, uint32_t den,
                     uint32_t *out_num, uint32_t *out_den);

static inline unsigned int log_2(unsigned int num)
{
    return ilog2(num);

Some files were not shown because too many files have changed in this diff.