Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Cross-merge networking fixes after downstream PR.

Conflicts:

drivers/net/ethernet/faraday/ftgmac100.c
  4186c8d9e6 ("net: ftgmac100: Ensure tx descriptor updates are visible")
  e24a6c8746 ("net: ftgmac100: Get link speed and duplex for NC-SI")
https://lore.kernel.org/0b851ec5-f91d-4dd3-99da-e81b98c9ed28@kernel.org

net/ipv4/tcp.c
  bac76cf898 ("tcp: fix forever orphan socket caused by tcp_abort")
  edefba66d9 ("tcp: rstreason: introduce SK_RST_REASON_TCP_STATE for active reset")
https://lore.kernel.org/20240828112207.5c199d41@canb.auug.org.au

No adjacent changes.

Link: https://patch.msgid.link/20240829130829.39148-1-pabeni@redhat.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit 3cbd2090d3
.mailmap

@@ -614,6 +614,7 @@ Simon Kelley <simon@thekelleys.org.uk>
 Sricharan Ramabadhran <quic_srichara@quicinc.com> <sricharan@codeaurora.org>
 Srinivas Ramana <quic_sramana@quicinc.com> <sramana@codeaurora.org>
 Sriram R <quic_srirrama@quicinc.com> <srirrama@codeaurora.org>
+Sriram Yagnaraman <sriram.yagnaraman@ericsson.com> <sriram.yagnaraman@est.tech>
 Stanislav Fomichev <sdf@fomichev.me> <sdf@google.com>
 Stefan Wahren <wahrenst@gmx.net> <stefan.wahren@i2se.com>
 Stéphane Witzmann <stephane.witzmann@ubpmes.univ-bpclermont.fr>
@@ -260,7 +260,7 @@ Some users depend on strict execution ordering where only one work item
 is in flight at any given time and the work items are processed in
 queueing order. While the combination of ``@max_active`` of 1 and
 ``WQ_UNBOUND`` used to achieve this behavior, this is no longer the
-case. Use ``alloc_ordered_queue()`` instead.
+case. Use alloc_ordered_workqueue() instead.
 
 
 Example Execution Scenarios
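(Aside, not part of the diff: a minimal sketch of the recommended API; the workqueue name and init function are illustrative, not from the patch.)

	#include <linux/workqueue.h>

	static struct workqueue_struct *frob_wq;	/* hypothetical ordered queue */

	static int __init frob_init(void)
	{
		/* One work item in flight at a time, executed in queueing
		 * order, replacing the old @max_active == 1 + WQ_UNBOUND idiom.
		 */
		frob_wq = alloc_ordered_workqueue("frob_wq", 0);
		if (!frob_wq)
			return -ENOMEM;
		return 0;
	}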
@@ -42,6 +42,7 @@ properties:
           - focaltech,ft5426
           - focaltech,ft5452
           - focaltech,ft6236
+          - focaltech,ft8201
           - focaltech,ft8719
 
   reg:
@@ -629,18 +629,6 @@ The preferred style for long (multi-line) comments is:
  * with beginning and ending almost-blank lines.
  */
 
-For files in net/ and drivers/net/ the preferred style for long (multi-line)
-comments is a little different.
-
-.. code-block:: c
-
-	/* The preferred comment style for files in net/ and drivers/net
-	 * looks like this.
-	 *
-	 * It is nearly the same as the generally preferred comment style,
-	 * but there is no initial almost-blank line.
-	 */
-
 It's also important to comment data, whether they are basic types or derived
 types. To this end, use just one data declaration per line (no commas for
 multiple data declarations). This leaves you room for a small comment on each
@@ -355,23 +355,6 @@ just do it. As a result, a sequence of smaller series gets merged quicker and
 with better review coverage. Re-posting large series also increases the mailing
 list traffic.
 
-Multi-line comments
-~~~~~~~~~~~~~~~~~~~
-
-Comment style convention is slightly different for networking and most of
-the tree. Instead of this::
-
-  /*
-   * foobar blah blah blah
-   * another line of text
-   */
-
-it is requested that you make it look like this::
-
-  /* foobar blah blah blah
-   * another line of text
-   */
-
 Local variable ordering ("reverse xmas tree", "RCS")
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
MAINTAINERS

@@ -10176,7 +10176,7 @@ F:	Documentation/devicetree/bindings/infiniband/hisilicon-hns-roce.txt
 F:	drivers/infiniband/hw/hns/
 
 HISILICON SAS Controller
-M:	Xiang Chen <chenxiang66@hisilicon.com>
+M:	Yihang Li <liyihang9@huawei.com>
 S:	Supported
 W:	http://www.hisilicon.com
 F:	Documentation/devicetree/bindings/scsi/hisilicon-sas.txt
@@ -12168,7 +12168,7 @@ KERNEL NFSD, SUNRPC, AND LOCKD SERVERS
 M:	Chuck Lever <chuck.lever@oracle.com>
 M:	Jeff Layton <jlayton@kernel.org>
 R:	Neil Brown <neilb@suse.de>
-R:	Olga Kornievskaia <kolga@netapp.com>
+R:	Olga Kornievskaia <okorniev@redhat.com>
 R:	Dai Ngo <Dai.Ngo@oracle.com>
 R:	Tom Talpey <tom@talpey.com>
 L:	linux-nfs@vger.kernel.org
@@ -18547,7 +18547,6 @@ F:	drivers/crypto/intel/qat/
 
 QCOM AUDIO (ASoC) DRIVERS
 M:	Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
-M:	Banajit Goswami <bgoswami@quicinc.com>
 L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
 L:	linux-arm-msm@vger.kernel.org
 S:	Supported
@@ -20377,6 +20376,7 @@ F:	Documentation/devicetree/bindings/scsi/
 F:	drivers/scsi/
+F:	drivers/ufs/
 F:	include/scsi/
 F:	include/uapi/scsi/
 
 SCSI TAPE DRIVER
 M:	Kai Mäkisara <Kai.Makisara@kolumbus.fi>
Makefile

@@ -2,7 +2,7 @@
 VERSION = 6
 PATCHLEVEL = 11
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc5
 NAME = Baby Opossum Posse
 
 # *DOCUMENTATION*
@@ -1540,8 +1540,15 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 		vma_pagesize = min(vma_pagesize, (long)max_map_size);
 	}
 
-	if (vma_pagesize == PMD_SIZE || vma_pagesize == PUD_SIZE)
+	/*
+	 * Both the canonical IPA and fault IPA must be hugepage-aligned to
+	 * ensure we find the right PFN and lay down the mapping in the right
+	 * place.
+	 */
+	if (vma_pagesize == PMD_SIZE || vma_pagesize == PUD_SIZE) {
 		fault_ipa &= ~(vma_pagesize - 1);
+		ipa &= ~(vma_pagesize - 1);
+	}
 
 	gfn = ipa >> PAGE_SHIFT;
 	mte_allowed = kvm_vma_mte_allowed(vma);
@@ -33,6 +33,7 @@
 #include <trace/events/kvm.h>
 
 #include "sys_regs.h"
+#include "vgic/vgic.h"
 
 #include "trace.h"
 
@@ -435,6 +436,11 @@ static bool access_gic_sgi(struct kvm_vcpu *vcpu,
 {
 	bool g1;
 
+	if (!kvm_has_gicv3(vcpu->kvm)) {
+		kvm_inject_undefined(vcpu);
+		return false;
+	}
+
 	if (!p->is_write)
 		return read_from_write_only(vcpu, p, r);
 
@@ -85,7 +85,7 @@ static void iter_unmark_lpis(struct kvm *kvm)
 	struct vgic_irq *irq;
 	unsigned long intid;
 
-	xa_for_each(&dist->lpi_xa, intid, irq) {
+	xa_for_each_marked(&dist->lpi_xa, intid, irq, LPI_XA_MARK_DEBUG_ITER) {
 		xa_clear_mark(&dist->lpi_xa, intid, LPI_XA_MARK_DEBUG_ITER);
 		vgic_put_irq(kvm, irq);
 	}
@@ -417,10 +417,8 @@ static void __kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
 	kfree(vgic_cpu->private_irqs);
 	vgic_cpu->private_irqs = NULL;
 
-	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
-		vgic_unregister_redist_iodev(vcpu);
+	if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
 		vgic_cpu->rd_iodev.base_addr = VGIC_ADDR_UNDEF;
-	}
 }
 
 void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)

@@ -448,6 +446,11 @@ void kvm_vgic_destroy(struct kvm *kvm)
 	kvm_vgic_dist_destroy(kvm);
 
 	mutex_unlock(&kvm->arch.config_lock);
+
+	if (kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
+		kvm_for_each_vcpu(i, vcpu, kvm)
+			vgic_unregister_redist_iodev(vcpu);
+
 	mutex_unlock(&kvm->slots_lock);
 }
@@ -36,6 +36,11 @@ struct vgic_global kvm_vgic_global_state __ro_after_init = {
 * we have to disable IRQs before taking this lock and everything lower
 * than it.
 *
+ * The config_lock has additional ordering requirements:
+ * kvm->slots_lock
+ *   kvm->srcu
+ *     kvm->arch.config_lock
+ *
 * If you need to take multiple locks, always take the upper lock first,
 * then the lower ones, e.g. first take the its_lock, then the irq_lock.
 * If you are already holding a lock and need to take a higher one, you
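(Aside, not part of the diff: a minimal sketch of the acquisition order the new comment documents — upper locks before lower ones; the function itself is hypothetical.)

	static void example_reconfig(struct kvm *kvm)
	{
		mutex_lock(&kvm->slots_lock);		/* upper lock first */
		mutex_lock(&kvm->arch.config_lock);	/* then the lower lock */

		/* ... modify vGIC configuration ... */

		mutex_unlock(&kvm->arch.config_lock);	/* release in reverse order */
		mutex_unlock(&kvm->slots_lock);
	}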
@@ -346,4 +346,11 @@ void vgic_v4_configure_vsgis(struct kvm *kvm);
 void vgic_v4_get_vlpi_state(struct vgic_irq *irq, bool *val);
 int vgic_v4_request_vpe_irq(struct kvm_vcpu *vcpu, int irq);
 
+static inline bool kvm_has_gicv3(struct kvm *kvm)
+{
+	return (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif) &&
+		irqchip_in_kernel(kvm) &&
+		kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3);
+}
+
 #endif
@@ -1,11 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
- */
-#ifndef _LOONGARCH_DMA_DIRECT_H
-#define _LOONGARCH_DMA_DIRECT_H
-
-dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
-phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr);
-
-#endif /* _LOONGARCH_DMA_DIRECT_H */
@@ -9,6 +9,8 @@
 
 extern atomic_t irq_err_count;
 
+#define ARCH_IRQ_INIT_FLAGS	IRQ_NOPROBE
+
 /*
  * interrupt-retrigger: NOP for now. This may not be appropriate for all
  * machines, we'll see ...
@@ -76,7 +76,6 @@ static inline void kvm_restore_lasx(struct loongarch_fpu *fpu) { }
 #endif
 
 void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long hz);
-void kvm_reset_timer(struct kvm_vcpu *vcpu);
 void kvm_save_timer(struct kvm_vcpu *vcpu);
 void kvm_restore_timer(struct kvm_vcpu *vcpu);
 
@@ -530,6 +530,10 @@ SYM_FUNC_END(_restore_lasx_context)
 
 #ifdef CONFIG_CPU_HAS_LBT
 STACK_FRAME_NON_STANDARD _restore_fp
+#ifdef CONFIG_CPU_HAS_LSX
 STACK_FRAME_NON_STANDARD _restore_lsx
+#endif
+#ifdef CONFIG_CPU_HAS_LASX
 STACK_FRAME_NON_STANDARD _restore_lasx
+#endif
 #endif
@@ -102,9 +102,6 @@ void __init init_IRQ(void)
 	mp_ops.init_ipi();
 #endif
 
-	for (i = 0; i < NR_IRQS; i++)
-		irq_set_noprobe(i);
-
 	for_each_possible_cpu(i) {
 		page = alloc_pages_node(cpu_to_node(i), GFP_KERNEL, order);
 
@@ -277,6 +277,10 @@ SYM_DATA(kvm_enter_guest_size, .quad kvm_enter_guest_end - kvm_enter_guest)
 
 #ifdef CONFIG_CPU_HAS_LBT
 STACK_FRAME_NON_STANDARD kvm_restore_fpu
+#ifdef CONFIG_CPU_HAS_LSX
 STACK_FRAME_NON_STANDARD kvm_restore_lsx
+#endif
+#ifdef CONFIG_CPU_HAS_LASX
 STACK_FRAME_NON_STANDARD kvm_restore_lasx
+#endif
 #endif
@@ -188,10 +188,3 @@ void kvm_save_timer(struct kvm_vcpu *vcpu)
 	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ESTAT);
 	preempt_enable();
 }
-
-void kvm_reset_timer(struct kvm_vcpu *vcpu)
-{
-	write_gcsr_timercfg(0);
-	kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_TCFG, 0);
-	hrtimer_cancel(&vcpu->arch.swtimer);
-}
@@ -647,7 +647,7 @@ static int kvm_set_one_reg(struct kvm_vcpu *vcpu,
 		vcpu->kvm->arch.time_offset = (signed long)(v - drdtime());
 		break;
 	case KVM_REG_LOONGARCH_VCPU_RESET:
-		kvm_reset_timer(vcpu);
+		vcpu->arch.st.guest_addr = 0;
 		memset(&vcpu->arch.irq_pending, 0, sizeof(vcpu->arch.irq_pending));
 		memset(&vcpu->arch.irq_clear, 0, sizeof(vcpu->arch.irq_clear));
 		break;
@@ -303,13 +303,6 @@ int r4k_clockevent_init(void)
 	if (!c0_compare_int_usable())
 		return -ENXIO;
 
-	/*
-	 * With vectored interrupts things are getting platform specific.
-	 * get_c0_compare_int is a hook to allow a platform to return the
-	 * interrupt number of its liking.
-	 */
-	irq = get_c0_compare_int();
-
 	cd = &per_cpu(mips_clockevent_device, cpu);
 
 	cd->name		= "MIPS";
@@ -320,7 +313,6 @@ int r4k_clockevent_init(void)
 	min_delta = calculate_min_delta();
 
 	cd->rating		= 300;
-	cd->irq			= irq;
 	cd->cpumask		= cpumask_of(cpu);
 	cd->set_next_event	= mips_next_event;
 	cd->event_handler	= mips_event_handler;
@@ -332,6 +324,13 @@ int r4k_clockevent_init(void)
 
 	cp0_timer_irq_installed = 1;
 
+	/*
+	 * With vectored interrupts things are getting platform specific.
+	 * get_c0_compare_int is a hook to allow a platform to return the
+	 * interrupt number of its liking.
+	 */
+	irq = get_c0_compare_int();
+
 	if (request_irq(irq, c0_compare_interrupt, flags, "timer",
 			c0_compare_interrupt))
 		pr_err("Failed to request irq %d (timer)\n", irq);
@@ -1724,12 +1724,16 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
 		c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_CAM |
 			MIPS_ASE_LOONGSON_EXT | MIPS_ASE_LOONGSON_EXT2);
 		c->ases &= ~MIPS_ASE_VZ; /* VZ of Loongson-3A2000/3000 is incomplete */
+		change_c0_config6(LOONGSON_CONF6_EXTIMER | LOONGSON_CONF6_INTIMER,
+				  LOONGSON_CONF6_INTIMER);
 		break;
 	case PRID_IMP_LOONGSON_64G:
 		__cpu_name[cpu] = "ICT Loongson-3";
 		set_elf_platform(cpu, "loongson3a");
 		set_isa(c, MIPS_CPU_ISA_M64R2);
 		decode_cpucfg(c);
+		change_c0_config6(LOONGSON_CONF6_EXTIMER | LOONGSON_CONF6_INTIMER,
+				  LOONGSON_CONF6_INTIMER);
 		break;
 	default:
 		panic("Unknown Loongson Processor ID!");
@@ -604,6 +604,19 @@ config RANDOMIZE_BASE
 	  as a security feature that deters exploit attempts relying on
 	  knowledge of the location of kernel internals.
 
+config RANDOMIZE_IDENTITY_BASE
+	bool "Randomize the address of the identity mapping base"
+	depends on RANDOMIZE_BASE
+	default DEBUG_VM
+	help
+	  The identity mapping base address is pinned to zero by default.
+	  Allow randomization of that base to expose otherwise missed
+	  notion of physical and virtual addresses of data structures.
+	  That does not have any impact on the base address at which the
+	  kernel image is loaded.
+
+	  If unsure, say N
+
 config KERNEL_IMAGE_BASE
 	hex "Kernel image base address"
 	range 0x100000 0x1FFFFFE0000000 if !KASAN
@@ -162,7 +162,7 @@ static void kaslr_adjust_relocs(unsigned long min_addr, unsigned long max_addr,
 		loc = (long)*reloc + phys_offset;
 		if (loc < min_addr || loc > max_addr)
 			error("64-bit relocation outside of kernel!\n");
-		*(u64 *)loc += offset - __START_KERNEL;
+		*(u64 *)loc += offset;
 	}
 }
 
@@ -177,7 +177,7 @@ static void kaslr_adjust_got(unsigned long offset)
 	 */
 	for (entry = (u64 *)vmlinux.got_start; entry < (u64 *)vmlinux.got_end; entry++) {
 		if (*entry)
-			*entry += offset - __START_KERNEL;
+			*entry += offset;
 	}
 }
@@ -252,7 +252,7 @@ static unsigned long setup_kernel_memory_layout(unsigned long kernel_size)
 	vmemmap_size = SECTION_ALIGN_UP(pages) * sizeof(struct page);
 
 	/* choose kernel address space layout: 4 or 3 levels. */
-	BUILD_BUG_ON(!IS_ALIGNED(__START_KERNEL, THREAD_SIZE));
+	BUILD_BUG_ON(!IS_ALIGNED(TEXT_OFFSET, THREAD_SIZE));
 	BUILD_BUG_ON(!IS_ALIGNED(__NO_KASLR_START_KERNEL, THREAD_SIZE));
 	BUILD_BUG_ON(__NO_KASLR_END_KERNEL > _REGION1_SIZE);
 	vsize = get_vmem_size(ident_map_size, vmemmap_size, vmalloc_size, _REGION3_SIZE);
@@ -341,7 +341,8 @@ static unsigned long setup_kernel_memory_layout(unsigned long kernel_size)
 	BUILD_BUG_ON(MAX_DCSS_ADDR > (1UL << MAX_PHYSMEM_BITS));
 	max_mappable = max(ident_map_size, MAX_DCSS_ADDR);
 	max_mappable = min(max_mappable, vmemmap_start);
-	__identity_base = round_down(vmemmap_start - max_mappable, rte_size);
+	if (IS_ENABLED(CONFIG_RANDOMIZE_IDENTITY_BASE))
+		__identity_base = round_down(vmemmap_start - max_mappable, rte_size);
 
 	return asce_limit;
 }
@@ -388,31 +389,25 @@ static void kaslr_adjust_vmlinux_info(long offset)
 #endif
 }
 
-static void fixup_vmlinux_info(void)
-{
-	vmlinux.entry -= __START_KERNEL;
-	kaslr_adjust_vmlinux_info(-__START_KERNEL);
-}
-
 void startup_kernel(void)
 {
-	unsigned long kernel_size = vmlinux.image_size + vmlinux.bss_size;
-	unsigned long nokaslr_offset_phys, kaslr_large_page_offset;
-	unsigned long amode31_lma = 0;
+	unsigned long vmlinux_size = vmlinux.image_size + vmlinux.bss_size;
+	unsigned long nokaslr_text_lma, text_lma = 0, amode31_lma = 0;
+	unsigned long kernel_size = TEXT_OFFSET + vmlinux_size;
+	unsigned long kaslr_large_page_offset;
 	unsigned long max_physmem_end;
 	unsigned long asce_limit;
 	unsigned long safe_addr;
 	psw_t psw;
 
-	fixup_vmlinux_info();
 	setup_lpp();
 
 	/*
 	 * Non-randomized kernel physical start address must be _SEGMENT_SIZE
 	 * aligned (see below).
 	 */
-	nokaslr_offset_phys = ALIGN(mem_safe_offset(), _SEGMENT_SIZE);
-	safe_addr = PAGE_ALIGN(nokaslr_offset_phys + kernel_size);
+	nokaslr_text_lma = ALIGN(mem_safe_offset(), _SEGMENT_SIZE);
+	safe_addr = PAGE_ALIGN(nokaslr_text_lma + vmlinux_size);
 
 	/*
 	 * Reserve decompressor memory together with decompression heap,
@@ -456,16 +451,27 @@ void startup_kernel(void)
 	 */
 	kaslr_large_page_offset = __kaslr_offset & ~_SEGMENT_MASK;
 	if (kaslr_enabled()) {
-		unsigned long end = ident_map_size - kaslr_large_page_offset;
+		unsigned long size = vmlinux_size + kaslr_large_page_offset;
 
-		__kaslr_offset_phys = randomize_within_range(kernel_size, _SEGMENT_SIZE, 0, end);
+		text_lma = randomize_within_range(size, _SEGMENT_SIZE, TEXT_OFFSET, ident_map_size);
 	}
-	if (!__kaslr_offset_phys)
-		__kaslr_offset_phys = nokaslr_offset_phys;
-	__kaslr_offset_phys |= kaslr_large_page_offset;
+	if (!text_lma)
+		text_lma = nokaslr_text_lma;
+	text_lma |= kaslr_large_page_offset;
+
+	/*
+	 * [__kaslr_offset_phys..__kaslr_offset_phys + TEXT_OFFSET] region is
+	 * never accessed via the kernel image mapping as per the linker script:
+	 *
+	 *	. = TEXT_OFFSET;
+	 *
+	 * Therefore, this region could be used for something else and does
+	 * not need to be reserved. See how it is skipped in setup_vmem().
+	 */
+	__kaslr_offset_phys = text_lma - TEXT_OFFSET;
 	kaslr_adjust_vmlinux_info(__kaslr_offset_phys);
-	physmem_reserve(RR_VMLINUX, __kaslr_offset_phys, kernel_size);
-	deploy_kernel((void *)__kaslr_offset_phys);
+	physmem_reserve(RR_VMLINUX, text_lma, vmlinux_size);
+	deploy_kernel((void *)text_lma);
 
 	/* vmlinux decompression is done, shrink reserved low memory */
 	physmem_reserve(RR_DECOMPRESSOR, 0, (unsigned long)_decompressor_end);
@@ -488,7 +494,7 @@ void startup_kernel(void)
 		amode31_lma = randomize_within_range(vmlinux.amode31_size, PAGE_SIZE, amode31_min, SZ_2G);
 	}
 	if (!amode31_lma)
-		amode31_lma = __kaslr_offset_phys - vmlinux.amode31_size;
+		amode31_lma = text_lma - vmlinux.amode31_size;
 	physmem_reserve(RR_AMODE31, amode31_lma, vmlinux.amode31_size);
 
 	/*
@@ -504,8 +510,8 @@ void startup_kernel(void)
 	 * - copy_bootdata() must follow setup_vmem() to propagate changes
 	 *   to bootdata made by setup_vmem()
 	 */
-	clear_bss_section(__kaslr_offset_phys);
-	kaslr_adjust_relocs(__kaslr_offset_phys, __kaslr_offset_phys + vmlinux.image_size,
+	clear_bss_section(text_lma);
+	kaslr_adjust_relocs(text_lma, text_lma + vmlinux.image_size,
 			    __kaslr_offset, __kaslr_offset_phys);
 	kaslr_adjust_got(__kaslr_offset);
 	setup_vmem(__kaslr_offset, __kaslr_offset + kernel_size, asce_limit);
@@ -90,7 +90,7 @@ static void kasan_populate_shadow(unsigned long kernel_start, unsigned long kern
 		}
 		memgap_start = end;
 	}
-	kasan_populate(kernel_start, kernel_end, POPULATE_KASAN_MAP_SHADOW);
+	kasan_populate(kernel_start + TEXT_OFFSET, kernel_end, POPULATE_KASAN_MAP_SHADOW);
 	kasan_populate(0, (unsigned long)__identity_va(0), POPULATE_KASAN_ZERO_SHADOW);
 	kasan_populate(AMODE31_START, AMODE31_END, POPULATE_KASAN_ZERO_SHADOW);
 	if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
@@ -475,7 +475,17 @@ void setup_vmem(unsigned long kernel_start, unsigned long kernel_end, unsigned l
 				 (unsigned long)__identity_va(end),
 				 POPULATE_IDENTITY);
 	}
-	pgtable_populate(kernel_start, kernel_end, POPULATE_KERNEL);
+
+	/*
+	 * [kernel_start..kernel_start + TEXT_OFFSET] region is never
+	 * accessed as per the linker script:
+	 *
+	 *	. = TEXT_OFFSET;
+	 *
+	 * Therefore, skip mapping TEXT_OFFSET bytes to prevent access to
+	 * [__kaslr_offset_phys..__kaslr_offset_phys + TEXT_OFFSET] region.
+	 */
+	pgtable_populate(kernel_start + TEXT_OFFSET, kernel_end, POPULATE_KERNEL);
 	pgtable_populate(AMODE31_START, AMODE31_END, POPULATE_DIRECT);
 	pgtable_populate(__abs_lowcore, __abs_lowcore + sizeof(struct lowcore),
 			 POPULATE_ABS_LOWCORE);
@@ -109,7 +109,12 @@ SECTIONS
 #ifdef CONFIG_KERNEL_UNCOMPRESSED
 	. = ALIGN(PAGE_SIZE);
 	. += AMODE31_SIZE;		/* .amode31 section */
-	. = ALIGN(1 << 20);		/* _SEGMENT_SIZE */
+
+	/*
+	 * Make sure the location counter is not less than TEXT_OFFSET.
+	 * _SEGMENT_SIZE is not available, use ALIGN(1 << 20) instead.
+	 */
+	. = MAX(TEXT_OFFSET, ALIGN(1 << 20));
 #else
 	. = ALIGN(8);
 #endif
@@ -279,8 +279,9 @@ static inline unsigned long virt_to_pfn(const void *kaddr)
 #define AMODE31_SIZE		(3 * PAGE_SIZE)
 
 #define KERNEL_IMAGE_SIZE	(512 * 1024 * 1024)
-#define __START_KERNEL		0x100000
 #define __NO_KASLR_START_KERNEL	CONFIG_KERNEL_IMAGE_BASE
 #define __NO_KASLR_END_KERNEL	(__NO_KASLR_START_KERNEL + KERNEL_IMAGE_SIZE)
 
+#define TEXT_OFFSET		0x100000
+
 #endif /* _S390_PAGE_H */
@@ -734,7 +734,23 @@ static void __init memblock_add_physmem_info(void)
 }
 
 /*
- * Reserve memory used for lowcore/command line/kernel image.
+ * Reserve memory used for lowcore.
+ */
+static void __init reserve_lowcore(void)
+{
+	void *lowcore_start = get_lowcore();
+	void *lowcore_end = lowcore_start + sizeof(struct lowcore);
+	void *start, *end;
+
+	if ((void *)__identity_base < lowcore_end) {
+		start = max(lowcore_start, (void *)__identity_base);
+		end = min(lowcore_end, (void *)(__identity_base + ident_map_size));
+		memblock_reserve(__pa(start), __pa(end));
+	}
+}
+
+/*
+ * Reserve memory used for absolute lowcore/command line/kernel image.
 */
 static void __init reserve_kernel(void)
 {
@@ -918,6 +934,7 @@ void __init setup_arch(char **cmdline_p)
 
 	/* Do some memory reservations *before* memory is added to memblock */
 	reserve_pgtables();
+	reserve_lowcore();
 	reserve_kernel();
 	reserve_initrd();
 	reserve_certificate_list();
@@ -39,7 +39,7 @@ PHDRS {
 
 SECTIONS
 {
-	. = __START_KERNEL;
+	. = TEXT_OFFSET;
 	.text : {
 		_stext = .;		/* Start of text section */
 		_text = .;		/* Text and read-only data */
@@ -280,7 +280,7 @@ static int do_reloc(struct section *sec, Elf_Rel *rel)
 	case R_390_GOTOFF64:
 		break;
 	case R_390_64:
-		add_reloc(&relocs64, offset - ehdr.e_entry);
+		add_reloc(&relocs64, offset);
 		break;
 	default:
 		die("Unsupported relocation type: %d\n", r_type);
@@ -111,13 +111,20 @@ static sector_t bio_write_zeroes_limit(struct block_device *bdev)
 		(UINT_MAX >> SECTOR_SHIFT) & ~bs_mask);
 }
 
+/*
+ * There is no reliable way for the SCSI subsystem to determine whether a
+ * device supports a WRITE SAME operation without actually performing a write
+ * to media. As a result, write_zeroes is enabled by default and will be
+ * disabled if a zeroing operation subsequently fails. This means that this
+ * queue limit is likely to change at runtime.
+ */
 static void __blkdev_issue_write_zeroes(struct block_device *bdev,
 		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
-		struct bio **biop, unsigned flags)
+		struct bio **biop, unsigned flags, sector_t limit)
 {
+
 	while (nr_sects) {
-		unsigned int len = min_t(sector_t, nr_sects,
-				bio_write_zeroes_limit(bdev));
+		unsigned int len = min(nr_sects, limit);
 		struct bio *bio;
 
 		if ((flags & BLKDEV_ZERO_KILLABLE) &&
@@ -141,12 +148,14 @@ static void __blkdev_issue_write_zeroes(struct block_device *bdev,
 static int blkdev_issue_write_zeroes(struct block_device *bdev, sector_t sector,
 		sector_t nr_sects, gfp_t gfp, unsigned flags)
 {
+	sector_t limit = bio_write_zeroes_limit(bdev);
 	struct bio *bio = NULL;
 	struct blk_plug plug;
 	int ret = 0;
 
 	blk_start_plug(&plug);
-	__blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp, &bio, flags);
+	__blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp, &bio,
+			flags, limit);
 	if (bio) {
 		if ((flags & BLKDEV_ZERO_KILLABLE) &&
 		    fatal_signal_pending(current)) {
@@ -165,7 +174,7 @@ static int blkdev_issue_write_zeroes(struct block_device *bdev, sector_t sector,
 	 * on an I/O error, in which case we'll turn any error into
 	 * "not supported" here.
 	 */
-	if (ret && !bdev_write_zeroes_sectors(bdev))
+	if (ret && !limit)
 		return -EOPNOTSUPP;
 	return ret;
 }
@@ -265,12 +274,14 @@ int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
 		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
 		unsigned flags)
 {
+	sector_t limit = bio_write_zeroes_limit(bdev);
+
 	if (bdev_read_only(bdev))
 		return -EPERM;
 
-	if (bdev_write_zeroes_sectors(bdev)) {
+	if (limit) {
 		__blkdev_issue_write_zeroes(bdev, sector, nr_sects,
-				gfp_mask, biop, flags);
+				gfp_mask, biop, flags, limit);
 	} else {
 		if (flags & BLKDEV_ZERO_NOFALLBACK)
 			return -EOPNOTSUPP;
@@ -54,6 +54,8 @@ static void acpi_video_parse_cmdline(void)
 		acpi_backlight_cmdline = acpi_backlight_nvidia_wmi_ec;
 	if (!strcmp("apple_gmux", acpi_video_backlight_string))
 		acpi_backlight_cmdline = acpi_backlight_apple_gmux;
+	if (!strcmp("dell_uart", acpi_video_backlight_string))
+		acpi_backlight_cmdline = acpi_backlight_dell_uart;
 	if (!strcmp("none", acpi_video_backlight_string))
 		acpi_backlight_cmdline = acpi_backlight_none;
 }
@@ -821,6 +823,21 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
 		},
 	},
 
+	/*
+	 * Dell AIO (All in Ones) which advertise an UART attached backlight
+	 * controller board in their ACPI tables (and may even have one), but
+	 * which need native backlight control nevertheless.
+	 */
+	{
+		/* https://bugzilla.redhat.com/show_bug.cgi?id=2303936 */
+		.callback = video_detect_force_native,
+		/* Dell OptiPlex 7760 AIO */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 7760 AIO"),
+		},
+	},
+
 	/*
 	 * Models which have nvidia-ec-wmi support, but should not use it.
 	 * Note this indicates a likely firmware bug on these models and should
@@ -918,6 +935,7 @@ enum acpi_backlight_type __acpi_video_get_backlight_type(bool native, bool *auto
 	static DEFINE_MUTEX(init_mutex);
 	static bool nvidia_wmi_ec_present;
 	static bool apple_gmux_present;
+	static bool dell_uart_present;
 	static bool native_available;
 	static bool init_done;
 	static long video_caps;
@@ -932,6 +950,7 @@ enum acpi_backlight_type __acpi_video_get_backlight_type(bool native, bool *auto
 				    &video_caps, NULL);
 		nvidia_wmi_ec_present = nvidia_wmi_ec_supported();
 		apple_gmux_present = apple_gmux_detect(NULL, NULL);
+		dell_uart_present = acpi_dev_present("DELL0501", NULL, -1);
 		init_done = true;
 	}
 	if (native)
@@ -962,6 +981,9 @@ enum acpi_backlight_type __acpi_video_get_backlight_type(bool native, bool *auto
 	if (apple_gmux_present)
 		return acpi_backlight_apple_gmux;
 
+	if (dell_uart_present)
+		return acpi_backlight_dell_uart;
+
 	/* Use ACPI video if available, except when native should be preferred. */
 	if ((video_caps & ACPI_VIDEO_BACKLIGHT) &&
 	    !(native_available && prefer_native_over_acpi_video()))
@@ -208,6 +208,19 @@ static const char* macio_ata_names[] = {
 /* Don't let a DMA segment go all the way to 64K */
 #define MAX_DBDMA_SEG		0xff00
 
+#ifdef CONFIG_PAGE_SIZE_64KB
+/*
+ * The SCSI core requires the segment size to cover at least a page, so
+ * for 64K page size kernels it must be at least 64K. However the
+ * hardware can't handle 64K, so pata_macio_qc_prep() will split large
+ * requests. To handle the split requests the tablesize must be halved.
+ */
+#define PATA_MACIO_MAX_SEGMENT_SIZE	SZ_64K
+#define PATA_MACIO_SG_TABLESIZE		(MAX_DCMDS / 2)
+#else
+#define PATA_MACIO_MAX_SEGMENT_SIZE	MAX_DBDMA_SEG
+#define PATA_MACIO_SG_TABLESIZE		MAX_DCMDS
+#endif
 
 /*
  * Wait 1s for disk to answer on IDE bus after a hard reset
@@ -541,7 +554,8 @@ static enum ata_completion_errors pata_macio_qc_prep(struct ata_queued_cmd *qc)
 
 		while (sg_len) {
 			/* table overflow should never happen */
-			BUG_ON (pi++ >= MAX_DCMDS);
+			if (WARN_ON_ONCE(pi >= MAX_DCMDS))
+				return AC_ERR_SYSTEM;
 
 			len = (sg_len < MAX_DBDMA_SEG) ? sg_len : MAX_DBDMA_SEG;
 			table->command = cpu_to_le16(write ? OUTPUT_MORE: INPUT_MORE);
@@ -553,11 +567,13 @@ static enum ata_completion_errors pata_macio_qc_prep(struct ata_queued_cmd *qc)
 			addr += len;
 			sg_len -= len;
 			++table;
+			++pi;
 		}
 	}
 
 	/* Should never happen according to Tejun */
-	BUG_ON(!pi);
+	if (WARN_ON_ONCE(!pi))
+		return AC_ERR_SYSTEM;
 
 	/* Convert the last command to an input/output */
 	table--;
@@ -912,16 +928,10 @@ static int pata_macio_do_resume(struct pata_macio_priv *priv)
 
 static const struct scsi_host_template pata_macio_sht = {
 	__ATA_BASE_SHT(DRV_NAME),
-	.sg_tablesize		= MAX_DCMDS,
+	.sg_tablesize		= PATA_MACIO_SG_TABLESIZE,
 	/* We may not need that strict one */
 	.dma_boundary		= ATA_DMA_BOUNDARY,
-	/*
-	 * The SCSI core requires the segment size to cover at least a page, so
-	 * for 64K page size kernels this must be at least 64K. However the
-	 * hardware can't handle 64K, so pata_macio_qc_prep() will split large
-	 * requests.
-	 */
-	.max_segment_size	= SZ_64K,
+	.max_segment_size	= PATA_MACIO_MAX_SEGMENT_SIZE,
 	.device_configure	= pata_macio_device_configure,
 	.sdev_groups		= ata_common_sdev_groups,
 	.can_queue		= ATA_DEF_QUEUE,
@@ -12,6 +12,7 @@
 #include <linux/acpi.h>
 #include <acpi/acpi_bus.h>
 #include <asm/unaligned.h>
+#include <linux/efi.h>
 
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
@@ -26,6 +27,8 @@
 #define ECDSA_OFFSET		644
 #define ECDSA_HEADER_LEN	320
 
+#define BTINTEL_EFI_DSBR	L"UefiCnvCommonDSBR"
+
 enum {
 	DSM_SET_WDISABLE2_DELAY = 1,
 	DSM_SET_RESET_METHOD = 3,
@@ -2616,6 +2619,120 @@ static u8 btintel_classify_pkt_type(struct hci_dev *hdev, struct sk_buff *skb)
 	return hci_skb_pkt_type(skb);
 }
 
+/*
+ * UefiCnvCommonDSBR UEFI variable provides information from the OEM platforms
+ * if they have replaced the BRI (Bluetooth Radio Interface) resistor to
+ * overcome the potential STEP errors on their designs. Based on the
+ * configuration, bluetooth firmware shall adjust the BRI response line drive
+ * strength. The below structure represents DSBR data.
+ * struct {
+ *	u8 header;
+ *	u32 dsbr;
+ * } __packed;
+ *
+ * header - defines revision number of the structure
+ * dsbr - defines drive strength BRI response
+ *	bit0
+ *		0 - instructs bluetooth firmware to use default values
+ *		1 - instructs bluetooth firmware to override default values
+ *	bit3:1
+ *		Reserved
+ *	bit7:4
+ *		DSBR override values (only if bit0 is set). Default value is 0xF
+ *	bit31:7
+ *		Reserved
+ * Expected values for dsbr field:
+ *	1. 0xF1 - indicates that the resistor on board is 33 Ohm
+ *	2. 0x00 or 0xB1 - indicates that the resistor on board is 10 Ohm
+ *	3. Non existing UEFI variable or invalid (none of the above) - indicates
+ *	   that the resistor on board is 10 Ohm
+ * Even if uefi variable is not present, driver shall send 0xfc0a command to
+ * firmware to use default values.
+ */
+static int btintel_uefi_get_dsbr(u32 *dsbr_var)
+{
+	struct btintel_dsbr {
+		u8 header;
+		u32 dsbr;
+	} __packed data;
+
+	efi_status_t status;
+	unsigned long data_size = 0;
+	efi_guid_t guid = EFI_GUID(0xe65d8884, 0xd4af, 0x4b20, 0x8d, 0x03,
+				   0x77, 0x2e, 0xcc, 0x3d, 0xa5, 0x31);
+
+	if (!IS_ENABLED(CONFIG_EFI))
+		return -EOPNOTSUPP;
+
+	if (!efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE))
+		return -EOPNOTSUPP;
+
+	status = efi.get_variable(BTINTEL_EFI_DSBR, &guid, NULL, &data_size,
+				  NULL);
+
+	if (status != EFI_BUFFER_TOO_SMALL || !data_size)
+		return -EIO;
+
+	status = efi.get_variable(BTINTEL_EFI_DSBR, &guid, NULL, &data_size,
+				  &data);
+
+	if (status != EFI_SUCCESS)
+		return -ENXIO;
+
+	*dsbr_var = data.dsbr;
+	return 0;
+}
+
+static int btintel_set_dsbr(struct hci_dev *hdev, struct intel_version_tlv *ver)
+{
+	struct btintel_dsbr_cmd {
+		u8 enable;
+		u8 dsbr;
+	} __packed;
+
+	struct btintel_dsbr_cmd cmd;
+	struct sk_buff *skb;
+	u8 status;
+	u32 dsbr;
+	bool apply_dsbr;
+	int err;
+
+	/* DSBR command needs to be sent for BlazarI + B0 step product after
+	 * downloading IML image.
+	 */
+	apply_dsbr = (ver->img_type == BTINTEL_IMG_IML &&
+		((ver->cnvi_top & 0xfff) == BTINTEL_CNVI_BLAZARI) &&
+		INTEL_CNVX_TOP_STEP(ver->cnvi_top) == 0x01);
+
+	if (!apply_dsbr)
+		return 0;
+
+	dsbr = 0;
+	err = btintel_uefi_get_dsbr(&dsbr);
+	if (err < 0)
+		bt_dev_dbg(hdev, "Error reading efi: %ls (%d)",
+			   BTINTEL_EFI_DSBR, err);
+
+	cmd.enable = dsbr & BIT(0);
+	cmd.dsbr = dsbr >> 4 & 0xF;
+
+	bt_dev_info(hdev, "dsbr: enable: 0x%2.2x value: 0x%2.2x", cmd.enable,
+		    cmd.dsbr);
+
+	skb = __hci_cmd_sync(hdev, 0xfc0a, sizeof(cmd), &cmd, HCI_CMD_TIMEOUT);
+	if (IS_ERR(skb))
+		return -bt_to_errno(PTR_ERR(skb));
+
+	status = skb->data[0];
+	kfree_skb(skb);
+
+	if (status)
+		return -bt_to_errno(status);
+
+	return 0;
+}
+
 int btintel_bootloader_setup_tlv(struct hci_dev *hdev,
 				 struct intel_version_tlv *ver)
 {
@@ -2650,6 +2767,13 @@ int btintel_bootloader_setup_tlv(struct hci_dev *hdev,
 	if (err)
 		return err;
 
+	/* set drive strength of BRI response */
+	err = btintel_set_dsbr(hdev, ver);
+	if (err) {
+		bt_dev_err(hdev, "Failed to send dsbr command (%d)", err);
+		return err;
+	}
+
 	/* If image type returned is BTINTEL_IMG_IML, then controller supports
	 * intermediate loader image
	 */
@@ -449,6 +449,23 @@ static bool ps_wakeup(struct btnxpuart_dev *nxpdev)
 	return false;
 }
 
+static void ps_cleanup(struct btnxpuart_dev *nxpdev)
+{
+	struct ps_data *psdata = &nxpdev->psdata;
+	u8 ps_state;
+
+	mutex_lock(&psdata->ps_lock);
+	ps_state = psdata->ps_state;
+	mutex_unlock(&psdata->ps_lock);
+
+	if (ps_state != PS_STATE_AWAKE)
+		ps_control(psdata->hdev, PS_STATE_AWAKE);
+
+	ps_cancel_timer(nxpdev);
+	cancel_work_sync(&psdata->work);
+	mutex_destroy(&psdata->ps_lock);
+}
+
 static int send_ps_cmd(struct hci_dev *hdev, void *data)
 {
 	struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
@@ -1363,7 +1380,6 @@ static int btnxpuart_close(struct hci_dev *hdev)
 {
 	struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev);
 
-	ps_wakeup(nxpdev);
 	serdev_device_close(nxpdev->serdev);
 	skb_queue_purge(&nxpdev->txq);
 	if (!IS_ERR_OR_NULL(nxpdev->rx_skb)) {
@@ -1516,8 +1532,8 @@ static void nxp_serdev_remove(struct serdev_device *serdev)
 			nxpdev->new_baudrate = nxpdev->fw_init_baudrate;
 			nxp_set_baudrate_cmd(hdev, NULL);
 		}
-		ps_cancel_timer(nxpdev);
 	}
+	ps_cleanup(nxpdev);
 	hci_unregister_dev(hdev);
 	hci_free_dev(hdev);
 }
@@ -698,6 +698,10 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
 		rc = tpm2_get_cc_attrs_tbl(chip);
 		if (rc)
 			goto init_irq_cleanup;
+
+		rc = tpm2_sessions_init(chip);
+		if (rc)
+			goto init_irq_cleanup;
 	}
 
 	return tpm_chip_register(chip);
@@ -278,7 +278,7 @@ static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
 			msg = RREG32(mmMP0_SMN_C2PMSG_33);
 			if (msg & 0x80000000)
 				break;
-			usleep_range(1000, 1100);
+			msleep(1);
 		}
 	}
 
@@ -166,6 +166,9 @@ static ssize_t ta_if_load_debugfs_write(struct file *fp, const char *buf, size_t
 	if (ret)
 		return -EFAULT;
 
+	if (ta_bin_len > PSP_1_MEG)
+		return -EINVAL;
+
 	copy_pos += sizeof(uint32_t);
 
 	ta_bin = kzalloc(ta_bin_len, GFP_KERNEL);
@@ -4116,6 +4116,7 @@ static void gfx_v10_0_check_gfxoff_flag(struct amdgpu_device *adev)
 
 static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
 {
+	char fw_name[53];
 	char ucode_prefix[30];
 	const char *wks = "";
 	int err;
@@ -4149,8 +4150,8 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
 	amdgpu_gfx_cp_init_microcode(adev, AMDGPU_UCODE_ID_CP_CE);
 
 	if (!amdgpu_sriov_vf(adev)) {
-		err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw,
-					   "amdgpu/%s_rlc.bin", ucode_prefix);
+		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", ucode_prefix);
+		err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
 		if (err)
 			goto out;
 
@@ -176,14 +176,16 @@ static void sdma_v5_2_ring_set_wptr(struct amdgpu_ring *ring)
 		DRM_DEBUG("calling WDOORBELL64(0x%08x, 0x%016llx)\n",
 				ring->doorbell_index, ring->wptr << 2);
 		WDOORBELL64(ring->doorbell_index, ring->wptr << 2);
-		/* SDMA seems to miss doorbells sometimes when powergating kicks in.
-		 * Updating the wptr directly will wake it. This is only safe because
-		 * we disallow gfxoff in begin_use() and then allow it again in end_use().
-		 */
-		WREG32(sdma_v5_2_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR),
-		       lower_32_bits(ring->wptr << 2));
-		WREG32(sdma_v5_2_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI),
-		       upper_32_bits(ring->wptr << 2));
+		if (amdgpu_ip_version(adev, SDMA0_HWIP, 0) == IP_VERSION(5, 2, 1)) {
+			/* SDMA seems to miss doorbells sometimes when powergating kicks in.
+			 * Updating the wptr directly will wake it. This is only safe because
+			 * we disallow gfxoff in begin_use() and then allow it again in end_use().
+			 */
+			WREG32(sdma_v5_2_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR),
+			       lower_32_bits(ring->wptr << 2));
+			WREG32(sdma_v5_2_get_reg_offset(adev, ring->me, mmSDMA0_GFX_RB_WPTR_HI),
+			       upper_32_bits(ring->wptr << 2));
+		}
 	} else {
 		DRM_DEBUG("Not using doorbell -- "
 				"mmSDMA%i_GFX_RB_WPTR == 0x%08x "
@@ -39,7 +39,9 @@ static u32 transcoder_to_stream_enc_status(enum transcoder cpu_transcoder)
 static void intel_dp_hdcp_wait_for_cp_irq(struct intel_connector *connector,
 					  int timeout)
 {
-	struct intel_hdcp *hdcp = &connector->hdcp;
+	struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+	struct intel_dp *dp = &dig_port->dp;
+	struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
 	long ret;
 
 #define C (hdcp->cp_irq_count_cached != atomic_read(&hdcp->cp_irq_count))
@@ -99,7 +99,7 @@ static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname,
 		 * was a bad idea, and is only provided for backwards
 		 * compatibility for older targets.
 		 */
-		return -ENODEV;
+		return -ENOENT;
 	}
 
 	if (IS_ERR(fw)) {
@@ -1171,8 +1171,6 @@ static void dpu_encoder_virt_atomic_mode_set(struct drm_encoder *drm_enc,
 
 	cstate->num_mixers = num_lm;
 
-	dpu_enc->connector = conn_state->connector;
-
 	for (i = 0; i < dpu_enc->num_phys_encs; i++) {
 		struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
 
@@ -1270,6 +1268,8 @@ static void dpu_encoder_virt_atomic_enable(struct drm_encoder *drm_enc,
 
 	dpu_enc->commit_done_timedout = false;
 
+	dpu_enc->connector = drm_atomic_get_new_connector_for_encoder(state, drm_enc);
+
 	cur_mode = &dpu_enc->base.crtc->state->adjusted_mode;
 
 	dpu_enc->wide_bus_en = dpu_encoder_is_widebus_enabled(drm_enc);
|
||||
{ \
|
||||
.maxdwnscale = SSPP_UNITY_SCALE, \
|
||||
.maxupscale = SSPP_UNITY_SCALE, \
|
||||
.format_list = plane_formats_yuv, \
|
||||
.num_formats = ARRAY_SIZE(plane_formats_yuv), \
|
||||
.format_list = plane_formats, \
|
||||
.num_formats = ARRAY_SIZE(plane_formats), \
|
||||
.virt_format_list = plane_formats, \
|
||||
.virt_num_formats = ARRAY_SIZE(plane_formats), \
|
||||
}
|
||||
|
@@ -31,24 +31,14 @@
 * @fmt: Pointer to format string
 */
 #define DPU_DEBUG(fmt, ...) \
-	do { \
-		if (drm_debug_enabled(DRM_UT_KMS)) \
-			DRM_DEBUG(fmt, ##__VA_ARGS__); \
-		else \
-			pr_debug(fmt, ##__VA_ARGS__); \
-	} while (0)
+	DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__)
 
 /**
 * DPU_DEBUG_DRIVER - macro for hardware driver logging
 * @fmt: Pointer to format string
 */
 #define DPU_DEBUG_DRIVER(fmt, ...) \
-	do { \
-		if (drm_debug_enabled(DRM_UT_DRIVER)) \
-			DRM_ERROR(fmt, ##__VA_ARGS__); \
-		else \
-			pr_debug(fmt, ##__VA_ARGS__); \
-	} while (0)
+	DRM_DEBUG_DRIVER(fmt, ##__VA_ARGS__)
 
 #define DPU_ERROR(fmt, ...) pr_err("[dpu error]" fmt, ##__VA_ARGS__)
 #define DPU_ERROR_RATELIMITED(fmt, ...) pr_err_ratelimited("[dpu error]" fmt, ##__VA_ARGS__)
@@ -681,6 +681,9 @@ static int dpu_plane_prepare_fb(struct drm_plane *plane,
 			new_state->fb, &layout);
 	if (ret) {
 		DPU_ERROR_PLANE(pdpu, "failed to get format layout, %d\n", ret);
+		if (pstate->aspace)
+			msm_framebuffer_cleanup(new_state->fb, pstate->aspace,
+						pstate->needs_dirtyfb);
 		return ret;
 	}
 
@@ -744,10 +747,9 @@ static int dpu_plane_atomic_check_pipe(struct dpu_plane *pdpu,
 	min_src_size = MSM_FORMAT_IS_YUV(fmt) ? 2 : 1;
 
 	if (MSM_FORMAT_IS_YUV(fmt) &&
-	    (!pipe->sspp->cap->sblk->scaler_blk.len ||
-	     !pipe->sspp->cap->sblk->csc_blk.len)) {
+	    !pipe->sspp->cap->sblk->csc_blk.len) {
 		DPU_DEBUG_PLANE(pdpu,
-				"plane doesn't have scaler/csc for yuv\n");
+				"plane doesn't have csc for yuv\n");
 		return -EINVAL;
 	}
 
@@ -864,6 +866,10 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,
 
 	max_linewidth = pdpu->catalog->caps->max_linewidth;
 
+	drm_rect_rotate(&pipe_cfg->src_rect,
+			new_plane_state->fb->width, new_plane_state->fb->height,
+			new_plane_state->rotation);
+
 	if ((drm_rect_width(&pipe_cfg->src_rect) > max_linewidth) ||
 	    _dpu_plane_calc_clk(&crtc_state->adjusted_mode, pipe_cfg) > max_mdp_clk_rate) {
 		/*
@@ -913,6 +919,14 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,
 		r_pipe_cfg->dst_rect.x1 = pipe_cfg->dst_rect.x2;
 	}
 
+	drm_rect_rotate_inv(&pipe_cfg->src_rect,
+			    new_plane_state->fb->width, new_plane_state->fb->height,
+			    new_plane_state->rotation);
+	if (r_pipe->sspp)
+		drm_rect_rotate_inv(&r_pipe_cfg->src_rect,
+				    new_plane_state->fb->width, new_plane_state->fb->height,
+				    new_plane_state->rotation);
+
 	ret = dpu_plane_atomic_check_pipe(pdpu, pipe, pipe_cfg, fmt, &crtc_state->adjusted_mode);
 	if (ret)
 		return ret;
@@ -1286,6 +1286,8 @@ static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl,
 	link_info.rate = ctrl->link->link_params.rate;
 	link_info.capabilities = DP_LINK_CAP_ENHANCED_FRAMING;
 
+	dp_link_reset_phy_params_vx_px(ctrl->link);
+
 	dp_aux_link_configure(ctrl->aux, &link_info);
 
 	if (drm_dp_max_downspread(dpcd))
@@ -90,22 +90,22 @@ static int dp_panel_read_dpcd(struct dp_panel *dp_panel)
 static u32 dp_panel_get_supported_bpp(struct dp_panel *dp_panel,
 				      u32 mode_edid_bpp, u32 mode_pclk_khz)
 {
-	struct dp_link_info *link_info;
+	const struct dp_link_info *link_info;
 	const u32 max_supported_bpp = 30, min_supported_bpp = 18;
-	u32 bpp = 0, data_rate_khz = 0;
+	u32 bpp, data_rate_khz;
 
-	bpp = min_t(u32, mode_edid_bpp, max_supported_bpp);
+	bpp = min(mode_edid_bpp, max_supported_bpp);
 
 	link_info = &dp_panel->link_info;
 	data_rate_khz = link_info->num_lanes * link_info->rate * 8;
 
-	while (bpp > min_supported_bpp) {
+	do {
 		if (mode_pclk_khz * bpp <= data_rate_khz)
-			break;
+			return bpp;
 		bpp -= 6;
-	}
+	} while (bpp > min_supported_bpp);
 
-	return bpp;
+	return min_supported_bpp;
 }
 
 int dp_panel_read_sink_caps(struct dp_panel *dp_panel,
@@ -423,8 +423,9 @@ int dp_panel_init_panel_info(struct dp_panel *dp_panel)
 		drm_mode->clock);
 	drm_dbg_dp(panel->drm_dev, "bpp = %d\n", dp_panel->dp_mode.bpp);
 
-	dp_panel->dp_mode.bpp = max_t(u32, 18,
-				      min_t(u32, dp_panel->dp_mode.bpp, 30));
+	dp_panel->dp_mode.bpp = dp_panel_get_mode_bpp(dp_panel, dp_panel->dp_mode.bpp,
+						      dp_panel->dp_mode.drm_mode.clock);
+
 	drm_dbg_dp(panel->drm_dev, "updated bpp = %d\n",
		   dp_panel->dp_mode.bpp);
 
@@ -577,7 +577,7 @@ static const struct msm_mdss_data sc7180_data = {
 	.ubwc_enc_version = UBWC_2_0,
 	.ubwc_dec_version = UBWC_2_0,
 	.ubwc_static = 0x1e,
-	.highest_bank_bit = 0x3,
+	.highest_bank_bit = 0x1,
 	.reg_bus_bw = 76800,
 };
@@ -205,7 +205,8 @@ nvkm_firmware_dtor(struct nvkm_firmware *fw)
 		break;
 	case NVKM_FIRMWARE_IMG_DMA:
 		nvkm_memory_unref(&memory);
-		dma_free_coherent(fw->device->dev, sg_dma_len(&fw->mem.sgl), fw->img, fw->phys);
+		dma_free_noncoherent(fw->device->dev, sg_dma_len(&fw->mem.sgl),
+				     fw->img, fw->phys, DMA_TO_DEVICE);
 		break;
 	case NVKM_FIRMWARE_IMG_SGT:
 		nvkm_memory_unref(&memory);
@@ -236,10 +237,12 @@ nvkm_firmware_ctor(const struct nvkm_firmware_func *func, const char *name,
 		break;
 	case NVKM_FIRMWARE_IMG_DMA: {
 		dma_addr_t addr;
 
 		len = ALIGN(fw->len, PAGE_SIZE);
 
-		fw->img = dma_alloc_coherent(fw->device->dev, len, &addr, GFP_KERNEL);
+		fw->img = dma_alloc_noncoherent(fw->device->dev,
+						len, &addr,
+						DMA_TO_DEVICE,
+						GFP_KERNEL);
 		if (fw->img) {
 			memcpy(fw->img, src, fw->len);
 			fw->phys = addr;
@@ -89,6 +89,12 @@ nvkm_falcon_fw_boot(struct nvkm_falcon_fw *fw, struct nvkm_subdev *user,
 			nvkm_falcon_fw_dtor_sigs(fw);
 	}
 
+	/* after last write to the img, sync dma mappings */
+	dma_sync_single_for_device(fw->fw.device->dev,
+				   fw->fw.phys,
+				   sg_dma_len(&fw->fw.mem.sgl),
+				   DMA_TO_DEVICE);
+
 	FLCNFW_DBG(fw, "resetting");
 	fw->func->reset(fw);
 
@@ -25,12 +25,14 @@ $(obj)/generated/%_wa_oob.c $(obj)/generated/%_wa_oob.h: $(obj)/xe_gen_wa_oob \
 
 uses_generated_oob := \
 	$(obj)/xe_ggtt.o \
+	$(obj)/xe_device.o \
+	$(obj)/xe_gsc.o \
 	$(obj)/xe_gt.o \
 	$(obj)/xe_guc.o \
 	$(obj)/xe_guc_ads.o \
 	$(obj)/xe_guc_pc.o \
 	$(obj)/xe_migrate.o \
 	$(obj)/xe_pat.o \
 	$(obj)/xe_ring_ops.o \
 	$(obj)/xe_vm.o \
 	$(obj)/xe_wa.o \
@@ -132,6 +132,7 @@ static void xe_display_fini_noirq(void *arg)
 		return;
 
 	intel_display_driver_remove_noirq(xe);
+	intel_opregion_cleanup(xe);
 }
 
 int xe_display_init_noirq(struct xe_device *xe)
@@ -157,8 +158,10 @@ int xe_display_init_noirq(struct xe_device *xe)
 	intel_display_device_info_runtime_init(xe);
 
 	err = intel_display_driver_probe_noirq(xe);
-	if (err)
+	if (err) {
+		intel_opregion_cleanup(xe);
 		return err;
+	}
 
 	return devm_add_action_or_reset(xe->drm.dev, xe_display_fini_noirq, xe);
 }
@@ -280,6 +283,27 @@ static bool suspend_to_idle(void)
 	return false;
 }
 
+static void xe_display_flush_cleanup_work(struct xe_device *xe)
+{
+	struct intel_crtc *crtc;
+
+	for_each_intel_crtc(&xe->drm, crtc) {
+		struct drm_crtc_commit *commit;
+
+		spin_lock(&crtc->base.commit_lock);
+		commit = list_first_entry_or_null(&crtc->base.commit_list,
+						  struct drm_crtc_commit, commit_entry);
+		if (commit)
+			drm_crtc_commit_get(commit);
+		spin_unlock(&crtc->base.commit_lock);
+
+		if (commit) {
+			wait_for_completion(&commit->cleanup_done);
+			drm_crtc_commit_put(commit);
+		}
+	}
+}
+
 void xe_display_pm_suspend(struct xe_device *xe, bool runtime)
 {
 	bool s2idle = suspend_to_idle();
@@ -297,6 +321,8 @@ void xe_display_pm_suspend(struct xe_device *xe, bool runtime)
 	if (!runtime)
 		intel_display_driver_suspend(xe);
 
+	xe_display_flush_cleanup_work(xe);
+
 	intel_dp_mst_suspend(xe);
 
 	intel_hpd_cancel_work(xe);
@@ -7,6 +7,8 @@
 #include "intel_display_types.h"
 #include "intel_dsb_buffer.h"
 #include "xe_bo.h"
+#include "xe_device.h"
+#include "xe_device_types.h"
 #include "xe_gt.h"
 
 u32 intel_dsb_buffer_ggtt_offset(struct intel_dsb_buffer *dsb_buf)
@@ -16,7 +18,10 @@ u32 intel_dsb_buffer_ggtt_offset(struct intel_dsb_buffer *dsb_buf)
 
 void intel_dsb_buffer_write(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val)
 {
+	struct xe_device *xe = dsb_buf->vma->bo->tile->xe;
+
 	iosys_map_wr(&dsb_buf->vma->bo->vmap, idx * 4, u32, val);
+	xe_device_l2_flush(xe);
 }
 
 u32 intel_dsb_buffer_read(struct intel_dsb_buffer *dsb_buf, u32 idx)
@@ -26,9 +31,12 @@ u32 intel_dsb_buffer_read(struct intel_dsb_buffer *dsb_buf, u32 idx)
 
 void intel_dsb_buffer_memset(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val, size_t size)
 {
+	struct xe_device *xe = dsb_buf->vma->bo->tile->xe;
+
 	WARN_ON(idx > (dsb_buf->buf_size - size) / sizeof(*dsb_buf->cmd_buf));
 
 	iosys_map_memset(&dsb_buf->vma->bo->vmap, idx * 4, val, size);
+	xe_device_l2_flush(xe);
 }
 
 bool intel_dsb_buffer_create(struct intel_crtc *crtc, struct intel_dsb_buffer *dsb_buf, size_t size)
@@ -10,6 +10,7 @@
 #include "intel_fb.h"
 #include "intel_fb_pin.h"
 #include "xe_bo.h"
+#include "xe_device.h"
 #include "xe_ggtt.h"
 #include "xe_gt.h"
 #include "xe_pm.h"
@@ -304,6 +305,8 @@ static struct i915_vma *__xe_pin_fb_vma(const struct intel_framebuffer *fb,
 	if (ret)
 		goto err_unpin;
 
+	/* Ensure DPT writes are flushed */
+	xe_device_l2_flush(xe);
 	return vma;
 
 err_unpin:
@@ -80,6 +80,9 @@
 #define   LE_CACHEABILITY_MASK			REG_GENMASK(1, 0)
 #define   LE_CACHEABILITY(value)		REG_FIELD_PREP(LE_CACHEABILITY_MASK, value)
 
+#define XE2_GAMREQSTRM_CTRL			XE_REG(0x4194)
+#define   CG_DIS_CNTLBUS			REG_BIT(6)
+
 #define CCS_AUX_INV				XE_REG(0x4208)
 
 #define VD0_AUX_INV				XE_REG(0x4218)
@@ -372,6 +375,11 @@
 
 #define XEHPC_L3CLOS_MASK(i)			XE_REG_MCR(0xb194 + (i) * 8)
 
+#define XE2_GLOBAL_INVAL			XE_REG(0xb404)
+
+#define SCRATCH1LPFC				XE_REG(0xb474)
+#define   EN_L3_RW_CCS_CACHE_FLUSH		REG_BIT(0)
+
 #define XE2LPM_L3SQCREG5			XE_REG_MCR(0xb658)
 
 #define XE2_TDF_CTRL				XE_REG(0xb418)
@@ -429,6 +437,7 @@
 #define   DIS_FIX_EOT1_FLUSH			REG_BIT(9)
 
 #define TDL_TSL_CHICKEN				XE_REG_MCR(0xe4c4, XE_REG_OPTION_MASKED)
+#define   STK_ID_RESTRICT			REG_BIT(12)
 #define   SLM_WMTP_RESTORE			REG_BIT(11)
 
 #define ROW_CHICKEN				XE_REG_MCR(0xe4f0, XE_REG_OPTION_MASKED)
@@ -1575,7 +1575,7 @@ struct xe_bo *xe_bo_create_from_data(struct xe_device *xe, struct xe_tile *tile,
 	return bo;
 }
 
-static void __xe_bo_unpin_map_no_vm(struct drm_device *drm, void *arg)
+static void __xe_bo_unpin_map_no_vm(void *arg)
 {
 	xe_bo_unpin_map_no_vm(arg);
 }
@@ -1590,7 +1590,7 @@ struct xe_bo *xe_managed_bo_create_pin_map(struct xe_device *xe, struct xe_tile
 	if (IS_ERR(bo))
 		return bo;
 
-	ret = drmm_add_action_or_reset(&xe->drm, __xe_bo_unpin_map_no_vm, bo);
+	ret = devm_add_action_or_reset(xe->drm.dev, __xe_bo_unpin_map_no_vm, bo);
 	if (ret)
 		return ERR_PTR(ret);
 
@@ -1638,7 +1638,7 @@ int xe_managed_bo_reinit_in_vram(struct xe_device *xe, struct xe_tile *tile, str
 	if (IS_ERR(bo))
 		return PTR_ERR(bo);
 
-	drmm_release_action(&xe->drm, __xe_bo_unpin_map_no_vm, *src);
+	devm_release_action(xe->drm.dev, __xe_bo_unpin_map_no_vm, *src);
 	*src = bo;
 
 	return 0;
@@ -54,6 +54,9 @@
 #include "xe_vm.h"
 #include "xe_vram.h"
 #include "xe_wait_user_fence.h"
+#include "xe_wa.h"
 
+#include <generated/xe_wa_oob.h>
+
 static int xe_file_open(struct drm_device *dev, struct drm_file *file)
 {
@@ -820,6 +823,11 @@ void xe_device_td_flush(struct xe_device *xe)
 	if (!IS_DGFX(xe) || GRAPHICS_VER(xe) < 20)
 		return;
 
+	if (XE_WA(xe_root_mmio_gt(xe), 16023588340)) {
+		xe_device_l2_flush(xe);
+		return;
+	}
+
 	for_each_gt(gt, xe, id) {
 		if (xe_gt_is_media_type(gt))
 			continue;
@@ -843,6 +851,30 @@ void xe_device_td_flush(struct xe_device *xe)
 	}
 }
 
+void xe_device_l2_flush(struct xe_device *xe)
+{
+	struct xe_gt *gt;
+	int err;
+
+	gt = xe_root_mmio_gt(xe);
+
+	if (!XE_WA(gt, 16023588340))
+		return;
+
+	err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+	if (err)
+		return;
+
+	spin_lock(&gt->global_invl_lock);
+	xe_mmio_write32(gt, XE2_GLOBAL_INVAL, 0x1);
+
+	if (xe_mmio_wait32(gt, XE2_GLOBAL_INVAL, 0x1, 0x0, 150, NULL, true))
+		xe_gt_err_once(gt, "Global invalidation timeout\n");
+	spin_unlock(&gt->global_invl_lock);
+
+	xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
+}
+
 u32 xe_device_ccs_bytes(struct xe_device *xe, u64 size)
 {
 	return xe_device_has_flat_ccs(xe) ?
@@ -162,6 +162,7 @@ u64 xe_device_canonicalize_addr(struct xe_device *xe, u64 address);
 u64 xe_device_uncanonicalize_addr(struct xe_device *xe, u64 address);
 
 void xe_device_td_flush(struct xe_device *xe);
+void xe_device_l2_flush(struct xe_device *xe);
 
 static inline bool xe_device_wedged(struct xe_device *xe)
 {
@@ -105,22 +105,35 @@ static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe,
 
 static int __xe_exec_queue_init(struct xe_exec_queue *q)
 {
+	struct xe_vm *vm = q->vm;
 	int i, err;
 
+	if (vm) {
+		err = xe_vm_lock(vm, true);
+		if (err)
+			return err;
+	}
+
 	for (i = 0; i < q->width; ++i) {
 		q->lrc[i] = xe_lrc_create(q->hwe, q->vm, SZ_16K);
 		if (IS_ERR(q->lrc[i])) {
 			err = PTR_ERR(q->lrc[i]);
-			goto err_lrc;
+			goto err_unlock;
 		}
 	}
 
+	if (vm)
+		xe_vm_unlock(vm);
+
 	err = q->ops->init(q);
 	if (err)
 		goto err_lrc;
 
 	return 0;
 
+err_unlock:
+	if (vm)
+		xe_vm_unlock(vm);
 err_lrc:
 	for (i = i - 1; i >= 0; --i)
 		xe_lrc_put(q->lrc[i]);

@@ -140,15 +153,7 @@ struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *v
 	if (IS_ERR(q))
 		return q;
 
-	if (vm) {
-		err = xe_vm_lock(vm, true);
-		if (err)
-			goto err_post_alloc;
-	}
-
 	err = __xe_exec_queue_init(q);
-	if (vm)
-		xe_vm_unlock(vm);
 	if (err)
 		goto err_post_alloc;
 

@@ -638,7 +643,6 @@ int xe_exec_queue_create_ioctl(struct drm_device *dev, void *data,
 
 	if (xe_vm_in_preempt_fence_mode(vm)) {
 		q->lr.context = dma_fence_context_alloc(1);
-		spin_lock_init(&q->lr.lock);
 
 		err = xe_vm_add_compute_exec_queue(vm, q);
 		if (XE_IOCTL_DBG(xe, err))
@@ -126,8 +126,6 @@ struct xe_exec_queue {
 		u32 seqno;
 		/** @lr.link: link into VM's list of exec queues */
 		struct list_head link;
-		/** @lr.lock: preemption fences lock */
-		spinlock_t lock;
 	} lr;
 
 	/** @ops: submission backend exec queue operations */
@@ -260,7 +260,7 @@ static int gsc_upload_and_init(struct xe_gsc *gsc)
 	struct xe_tile *tile = gt_to_tile(gt);
 	int ret;
 
-	if (XE_WA(gt, 14018094691)) {
+	if (XE_WA(tile->primary_gt, 14018094691)) {
 		ret = xe_force_wake_get(gt_to_fw(tile->primary_gt), XE_FORCEWAKE_ALL);
 
 		/*

@@ -278,7 +278,7 @@ static int gsc_upload_and_init(struct xe_gsc *gsc)
 
 	ret = gsc_upload(gsc);
 
-	if (XE_WA(gt, 14018094691))
+	if (XE_WA(tile->primary_gt, 14018094691))
 		xe_force_wake_put(gt_to_fw(tile->primary_gt), XE_FORCEWAKE_ALL);
 
 	if (ret)

@@ -437,7 +437,7 @@ out:
 	return ret;
 }
 
-static void free_resources(struct drm_device *drm, void *arg)
+static void free_resources(void *arg)
 {
 	struct xe_gsc *gsc = arg;
 

@@ -501,7 +501,7 @@ int xe_gsc_init_post_hwconfig(struct xe_gsc *gsc)
 	gsc->q = q;
 	gsc->wq = wq;
 
-	err = drmm_add_action_or_reset(&xe->drm, free_resources, gsc);
+	err = devm_add_action_or_reset(xe->drm.dev, free_resources, gsc);
 	if (err)
 		return err;
 
@@ -11,6 +11,8 @@
 
 #include <drm/xe_drm.h>
 
+#include <generated/xe_wa_oob.h>
+
 #include "instructions/xe_gfxpipe_commands.h"
 #include "instructions/xe_mi_commands.h"
 #include "regs/xe_gt_regs.h"

@@ -95,6 +97,51 @@ void xe_gt_sanitize(struct xe_gt *gt)
 	gt->uc.guc.submission_state.enabled = false;
 }
 
+static void xe_gt_enable_host_l2_vram(struct xe_gt *gt)
+{
+	u32 reg;
+	int err;
+
+	if (!XE_WA(gt, 16023588340))
+		return;
+
+	err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+	if (WARN_ON(err))
+		return;
+
+	if (!xe_gt_is_media_type(gt)) {
+		xe_mmio_write32(gt, SCRATCH1LPFC, EN_L3_RW_CCS_CACHE_FLUSH);
+		reg = xe_mmio_read32(gt, XE2_GAMREQSTRM_CTRL);
+		reg |= CG_DIS_CNTLBUS;
+		xe_mmio_write32(gt, XE2_GAMREQSTRM_CTRL, reg);
+	}
+
+	xe_gt_mcr_multicast_write(gt, XEHPC_L3CLOS_MASK(3), 0x3);
+	xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
+}
+
+static void xe_gt_disable_host_l2_vram(struct xe_gt *gt)
+{
+	u32 reg;
+	int err;
+
+	if (!XE_WA(gt, 16023588340))
+		return;
+
+	if (xe_gt_is_media_type(gt))
+		return;
+
+	err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
+	if (WARN_ON(err))
+		return;
+
+	reg = xe_mmio_read32(gt, XE2_GAMREQSTRM_CTRL);
+	reg &= ~CG_DIS_CNTLBUS;
+	xe_mmio_write32(gt, XE2_GAMREQSTRM_CTRL, reg);
+
+	xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
+}
+
 /**
  * xe_gt_remove() - Clean up the GT structures before driver removal
  * @gt: the GT object

@@ -111,6 +158,8 @@ void xe_gt_remove(struct xe_gt *gt)
 
 	for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i)
 		xe_hw_fence_irq_finish(&gt->fence_irq[i]);
+
+	xe_gt_disable_host_l2_vram(gt);
 }
 
 static void gt_reset_worker(struct work_struct *w);

@@ -339,6 +388,7 @@ int xe_gt_init_early(struct xe_gt *gt)
 
 	xe_force_wake_init_gt(gt, gt_to_fw(gt));
 	xe_pcode_init(gt);
+	spin_lock_init(&gt->global_invl_lock);
 
 	return 0;
 }

@@ -508,6 +558,7 @@ int xe_gt_init_hwconfig(struct xe_gt *gt)
 
 	xe_gt_mcr_init_early(gt);
 	xe_pat_init(gt);
+	xe_gt_enable_host_l2_vram(gt);
 
 	err = xe_uc_init(&gt->uc);
 	if (err)

@@ -643,6 +694,8 @@ static int do_gt_restart(struct xe_gt *gt)
 
 	xe_pat_init(gt);
 
+	xe_gt_enable_host_l2_vram(gt);
+
 	xe_gt_mcr_set_implicit_defaults(gt);
 	xe_reg_sr_apply_mmio(&gt->reg_sr, gt);
 

@@ -796,6 +849,8 @@ int xe_gt_suspend(struct xe_gt *gt)
 
 	xe_gt_idle_disable_pg(gt);
 
+	xe_gt_disable_host_l2_vram(gt);
+
 	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
 	xe_gt_dbg(gt, "suspended\n");
 
@@ -382,6 +382,18 @@ static void pf_queue_work_func(struct work_struct *w)
 
 static void acc_queue_work_func(struct work_struct *w);
 
+static void pagefault_fini(void *arg)
+{
+	struct xe_gt *gt = arg;
+	struct xe_device *xe = gt_to_xe(gt);
+
+	if (!xe->info.has_usm)
+		return;
+
+	destroy_workqueue(gt->usm.acc_wq);
+	destroy_workqueue(gt->usm.pf_wq);
+}
+
 int xe_gt_pagefault_init(struct xe_gt *gt)
 {
 	struct xe_device *xe = gt_to_xe(gt);

@@ -409,10 +421,12 @@ int xe_gt_pagefault_init(struct xe_gt *gt)
 	gt->usm.acc_wq = alloc_workqueue("xe_gt_access_counter_work_queue",
 					 WQ_UNBOUND | WQ_HIGHPRI,
 					 NUM_ACC_QUEUE);
-	if (!gt->usm.acc_wq)
+	if (!gt->usm.acc_wq) {
+		destroy_workqueue(gt->usm.pf_wq);
 		return -ENOMEM;
+	}
 
-	return 0;
+	return devm_add_action_or_reset(xe->drm.dev, pagefault_fini, gt);
 }
 
 void xe_gt_pagefault_reset(struct xe_gt *gt)
@@ -362,6 +362,12 @@ struct xe_gt {
 	 */
 	spinlock_t mcr_lock;
 
+	/**
+	 * @global_invl_lock: protects the register for the duration
+	 *    of a global invalidation of l2 cache
+	 */
+	spinlock_t global_invl_lock;
+
 	/** @wa_active: keep track of active workarounds */
 	struct {
 		/** @wa_active.gt: bitmap with active GT workarounds */
@@ -284,7 +284,7 @@ static void guc_submit_fini(struct drm_device *drm, void *arg)
 	free_submit_wq(guc);
 }
 
-static void guc_submit_wedged_fini(struct drm_device *drm, void *arg)
+static void guc_submit_wedged_fini(void *arg)
 {
 	struct xe_guc *guc = arg;
 	struct xe_exec_queue *q;

@@ -877,7 +877,7 @@ void xe_guc_submit_wedge(struct xe_guc *guc)
 
 	xe_gt_assert(guc_to_gt(guc), guc_to_xe(guc)->wedged.mode);
 
-	err = drmm_add_action_or_reset(&guc_to_xe(guc)->drm,
+	err = devm_add_action_or_reset(guc_to_xe(guc)->drm.dev,
 				       guc_submit_wedged_fini, guc);
 	if (err) {
 		drm_err(&xe->drm, "Failed to register xe_guc_submit clean-up on wedged.mode=2. Although device is wedged.\n");
@@ -148,20 +148,20 @@ static const char *xe_hw_fence_get_driver_name(struct dma_fence *dma_fence)
 {
 	struct xe_hw_fence *fence = to_xe_hw_fence(dma_fence);
 
-	return dev_name(gt_to_xe(fence->ctx->gt)->drm.dev);
+	return dev_name(fence->xe->drm.dev);
 }
 
 static const char *xe_hw_fence_get_timeline_name(struct dma_fence *dma_fence)
 {
 	struct xe_hw_fence *fence = to_xe_hw_fence(dma_fence);
 
-	return fence->ctx->name;
+	return fence->name;
 }
 
 static bool xe_hw_fence_signaled(struct dma_fence *dma_fence)
 {
 	struct xe_hw_fence *fence = to_xe_hw_fence(dma_fence);
-	struct xe_device *xe = gt_to_xe(fence->ctx->gt);
+	struct xe_device *xe = fence->xe;
 	u32 seqno = xe_map_rd(xe, &fence->seqno_map, 0, u32);
 
 	return dma_fence->error ||

@@ -253,7 +253,8 @@ void xe_hw_fence_init(struct dma_fence *fence, struct xe_hw_fence_ctx *ctx,
 	struct xe_hw_fence *hw_fence =
 		container_of(fence, typeof(*hw_fence), dma);
 
-	hw_fence->ctx = ctx;
+	hw_fence->xe = gt_to_xe(ctx->gt);
+	snprintf(hw_fence->name, sizeof(hw_fence->name), "%s", ctx->name);
 	hw_fence->seqno_map = seqno_map;
 	INIT_LIST_HEAD(&hw_fence->irq_link);
 
@@ -12,6 +12,7 @@
 #include <linux/list.h>
 #include <linux/spinlock.h>
 
+struct xe_device;
 struct xe_gt;
 
 /**

@@ -61,8 +62,10 @@ struct xe_hw_fence_ctx {
 struct xe_hw_fence {
 	/** @dma: base dma fence for hardware fence context */
 	struct dma_fence dma;
-	/** @ctx: hardware fence context */
-	struct xe_hw_fence_ctx *ctx;
+	/** @xe: Xe device for hw fence driver name */
+	struct xe_device *xe;
+	/** @name: name of hardware fence context */
+	char name[MAX_FENCE_NAME_LEN];
 	/** @seqno_map: I/O map for seqno */
 	struct iosys_map seqno_map;
 	/** @irq_link: Link in struct xe_hw_fence_irq.pending */
@@ -30,7 +30,8 @@ static void tiles_fini(void *arg)
 	int id;
 
 	for_each_tile(tile, xe, id)
-		tile->mmio.regs = NULL;
+		if (tile != xe_device_get_root_tile(xe))
+			tile->mmio.regs = NULL;
 }
 
 int xe_mmio_probe_tiles(struct xe_device *xe)

@@ -91,9 +92,11 @@ add_mmio_ext:
 static void mmio_fini(void *arg)
 {
 	struct xe_device *xe = arg;
+	struct xe_tile *root_tile = xe_device_get_root_tile(xe);
 
 	pci_iounmap(to_pci_dev(xe->drm.dev), xe->mmio.regs);
 	xe->mmio.regs = NULL;
+	root_tile->mmio.regs = NULL;
 }
 
 int xe_mmio_init(struct xe_device *xe)

@@ -121,12 +124,29 @@ int xe_mmio_init(struct xe_device *xe)
 	return devm_add_action_or_reset(xe->drm.dev, mmio_fini, xe);
 }
 
+static void mmio_flush_pending_writes(struct xe_gt *gt)
+{
+#define DUMMY_REG_OFFSET	0x130030
+	struct xe_tile *tile = gt_to_tile(gt);
+	int i;
+
+	if (tile->xe->info.platform != XE_LUNARLAKE)
+		return;
+
+	/* 4 dummy writes */
+	for (i = 0; i < 4; i++)
+		writel(0, tile->mmio.regs + DUMMY_REG_OFFSET);
+}
+
 u8 xe_mmio_read8(struct xe_gt *gt, struct xe_reg reg)
 {
 	struct xe_tile *tile = gt_to_tile(gt);
 	u32 addr = xe_mmio_adjusted_addr(gt, reg.addr);
 	u8 val;
 
+	/* Wa_15015404425 */
+	mmio_flush_pending_writes(gt);
+
 	val = readb((reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + addr);
 	trace_xe_reg_rw(gt, false, addr, val, sizeof(val));
 

@@ -139,6 +159,9 @@ u16 xe_mmio_read16(struct xe_gt *gt, struct xe_reg reg)
 	u32 addr = xe_mmio_adjusted_addr(gt, reg.addr);
 	u16 val;
 
+	/* Wa_15015404425 */
+	mmio_flush_pending_writes(gt);
+
 	val = readw((reg.ext ? tile->mmio_ext.regs : tile->mmio.regs) + addr);
 	trace_xe_reg_rw(gt, false, addr, val, sizeof(val));
 

@@ -160,6 +183,9 @@ u32 xe_mmio_read32(struct xe_gt *gt, struct xe_reg reg)
 	u32 addr = xe_mmio_adjusted_addr(gt, reg.addr);
 	u32 val;
 
+	/* Wa_15015404425 */
+	mmio_flush_pending_writes(gt);
+
 	if (!reg.vf && IS_SRIOV_VF(gt_to_xe(gt)))
 		val = xe_gt_sriov_vf_read32(gt, reg);
 	else
@@ -66,7 +66,6 @@ static struct ctl_table observation_ctl_table[] = {
 	 .extra1 = SYSCTL_ZERO,
 	 .extra2 = SYSCTL_ONE,
 	 },
-	{}
 };
 
 /**
@@ -7,6 +7,8 @@
 
 #include <drm/xe_drm.h>
 
+#include <generated/xe_wa_oob.h>
+
 #include "regs/xe_reg_defs.h"
 #include "xe_assert.h"
 #include "xe_device.h"

@@ -15,6 +17,7 @@
 #include "xe_gt_mcr.h"
 #include "xe_mmio.h"
 #include "xe_sriov.h"
+#include "xe_wa.h"
 
 #define _PAT_ATS				0x47fc
 #define _PAT_INDEX(index)			_PICK_EVEN_2RANGES(index, 8, \

@@ -382,7 +385,13 @@ void xe_pat_init_early(struct xe_device *xe)
 	if (GRAPHICS_VER(xe) == 20) {
 		xe->pat.ops = &xe2_pat_ops;
 		xe->pat.table = xe2_pat_table;
-		xe->pat.n_entries = ARRAY_SIZE(xe2_pat_table);
+
+		/* Wa_16023588340. XXX: Should use XE_WA */
+		if (GRAPHICS_VERx100(xe) == 2001)
+			xe->pat.n_entries = 28; /* Disable CLOS3 */
+		else
+			xe->pat.n_entries = ARRAY_SIZE(xe2_pat_table);
+
 		xe->pat.idx[XE_CACHE_NONE] = 3;
 		xe->pat.idx[XE_CACHE_WT] = 15;
 		xe->pat.idx[XE_CACHE_WB] = 2;
@@ -91,13 +91,13 @@ int xe_pm_suspend(struct xe_device *xe)
 	for_each_gt(gt, xe, id)
 		xe_gt_suspend_prepare(gt);
 
-	xe_display_pm_suspend(xe, false);
-
 	/* FIXME: Super racey... */
 	err = xe_bo_evict_all(xe);
 	if (err)
 		goto err;
 
+	xe_display_pm_suspend(xe, false);
+
 	for_each_gt(gt, xe, id) {
 		err = xe_gt_suspend(gt);
 		if (err) {

@@ -151,11 +151,11 @@ int xe_pm_resume(struct xe_device *xe)
 
 	xe_irq_resume(xe);
 
-	xe_display_pm_resume(xe, false);
-
 	for_each_gt(gt, xe, id)
 		xe_gt_resume(gt);
 
+	xe_display_pm_resume(xe, false);
+
 	err = xe_bo_restore_user(xe);
 	if (err)
 		goto err;

@@ -363,10 +363,11 @@ int xe_pm_runtime_suspend(struct xe_device *xe)
 	mutex_unlock(&xe->mem_access.vram_userfault.lock);
 
 	if (xe->d3cold.allowed) {
-		xe_display_pm_suspend(xe, true);
-
 		err = xe_bo_evict_all(xe);
 		if (err)
 			goto out;
+		xe_display_pm_suspend(xe, true);
 	}
 
 	for_each_gt(gt, xe, id) {
@@ -128,8 +128,9 @@ xe_preempt_fence_arm(struct xe_preempt_fence *pfence, struct xe_exec_queue *q,
 {
 	list_del_init(&pfence->link);
 	pfence->q = xe_exec_queue_get(q);
+	spin_lock_init(&pfence->lock);
 	dma_fence_init(&pfence->base, &preempt_fence_ops,
-		       &q->lr.lock, context, seqno);
+		       &pfence->lock, context, seqno);
 
 	return &pfence->base;
 }
@@ -25,6 +25,8 @@ struct xe_preempt_fence {
 	struct xe_exec_queue *q;
 	/** @preempt_work: work struct which issues preemption */
 	struct work_struct preempt_work;
+	/** @lock: dma-fence fence lock */
+	spinlock_t lock;
 	/** @error: preempt fence is in error state */
 	int error;
 };
@@ -171,12 +171,13 @@ void xe_sched_job_destroy(struct kref *ref)
 	struct xe_sched_job *job =
 		container_of(ref, struct xe_sched_job, refcount);
+	struct xe_device *xe = job_to_xe(job);
+	struct xe_exec_queue *q = job->q;
 
 	xe_sched_job_free_fences(job);
-	xe_exec_queue_put(job->q);
 	dma_fence_put(job->fence);
 	drm_sched_job_cleanup(&job->drm);
 	job_free(job);
+	xe_exec_queue_put(q);
 	xe_pm_runtime_put(xe);
 }
 
@@ -309,7 +309,7 @@ DECLARE_EVENT_CLASS(xe_hw_fence,
 		    TP_ARGS(fence),
 
 		    TP_STRUCT__entry(
-			     __string(dev, __dev_name_gt(fence->ctx->gt))
+			     __string(dev, __dev_name_xe(fence->xe))
 			     __field(u64, ctx)
 			     __field(u32, seqno)
 			     __field(struct xe_hw_fence *, fence)
@@ -486,6 +486,10 @@ static const struct xe_rtp_entry_sr engine_was[] = {
 	  XE_RTP_RULES(GRAPHICS_VERSION(2004), FUNC(xe_rtp_match_first_render_or_compute)),
 	  XE_RTP_ACTIONS(SET(TDL_TSL_CHICKEN, SLM_WMTP_RESTORE))
 	},
+	{ XE_RTP_NAME("14021402888"),
+	  XE_RTP_RULES(GRAPHICS_VERSION(2004), ENGINE_CLASS(RENDER)),
+	  XE_RTP_ACTIONS(SET(HALF_SLICE_CHICKEN7, CLEAR_OPTIMIZATION_DISABLE))
+	},
 
 	/* Xe2_HPG */
 

@@ -538,6 +542,20 @@ static const struct xe_rtp_entry_sr engine_was[] = {
 	  XE_RTP_RULES(GRAPHICS_VERSION(2001), ENGINE_CLASS(RENDER)),
 	  XE_RTP_ACTIONS(SET(HALF_SLICE_CHICKEN7, CLEAR_OPTIMIZATION_DISABLE))
 	},
+	{ XE_RTP_NAME("14021821874"),
+	  XE_RTP_RULES(GRAPHICS_VERSION(2001), FUNC(xe_rtp_match_first_render_or_compute)),
+	  XE_RTP_ACTIONS(SET(TDL_TSL_CHICKEN, STK_ID_RESTRICT))
+	},
+
+	/* Xe2_LPM */
+
+	{ XE_RTP_NAME("16021639441"),
+	  XE_RTP_RULES(MEDIA_VERSION(2000)),
+	  XE_RTP_ACTIONS(SET(CSFE_CHICKEN1(0),
+			     GHWSP_CSB_REPORT_DIS |
+			     PPHWSP_CSB_AND_TIMESTAMP_REPORT_DIS,
+			     XE_RTP_ACTION_FLAG(ENGINE_BASE)))
+	},
 
 	/* Xe2_HPM */
 
@@ -29,3 +29,4 @@
 13011645652	GRAPHICS_VERSION(2004)
 22019338487	MEDIA_VERSION(2000)
 		GRAPHICS_VERSION(2001)
+16023588340	GRAPHICS_VERSION(2001)
@@ -182,8 +182,11 @@ static int adc_joystick_set_axes(struct device *dev, struct adc_joystick *joy)
 			swap(range[0], range[1]);
 		}
 
-		fwnode_property_read_u32(child, "abs-fuzz", &fuzz);
-		fwnode_property_read_u32(child, "abs-flat", &flat);
+		if (fwnode_property_read_u32(child, "abs-fuzz", &fuzz))
+			fuzz = 0;
+
+		if (fwnode_property_read_u32(child, "abs-flat", &flat))
+			flat = 0;
 
 		input_set_abs_params(joy->input, axes[i].code,
 				     range[0], range[1], fuzz, flat);
@@ -417,6 +417,20 @@ static int uinput_validate_absinfo(struct input_dev *dev, unsigned int code,
 		return -EINVAL;
 	}
 
+	/*
+	 * Limit number of contacts to a reasonable value (100). This
+	 * ensures that we need less than 2 pages for struct input_mt
+	 * (we are not using in-kernel slot assignment so not going to
+	 * allocate memory for the "red" table), and we should have no
+	 * trouble getting this much memory.
+	 */
+	if (code == ABS_MT_SLOT && max > 99) {
+		printk(KERN_DEBUG
+		       "%s: unreasonably large number of slots requested: %d\n",
+		       UINPUT_NAME, max);
+		return -EINVAL;
+	}
+
 	return 0;
 }
 
@@ -189,6 +189,7 @@ static const char * const smbus_pnp_ids[] = {
 	"LEN2054", /* E480 */
 	"LEN2055", /* E580 */
 	"LEN2068", /* T14 Gen 1 */
+	"SYN3015", /* HP EliteBook 840 G2 */
 	"SYN3052", /* HP EliteBook 840 G4 */
 	"SYN3221", /* HP 15-ay000 */
 	"SYN323d", /* HP Spectre X360 13-w013dx */
@@ -83,6 +83,7 @@ static inline void i8042_write_command(int val)
 #define SERIO_QUIRK_KBDRESET		BIT(12)
 #define SERIO_QUIRK_DRITEK		BIT(13)
 #define SERIO_QUIRK_NOPNP		BIT(14)
+#define SERIO_QUIRK_FORCENORESTORE	BIT(15)
 
 /* Quirk table for different mainboards. Options similar or identical to i8042
  * module parameters.

@@ -626,6 +627,15 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
 		},
 		.driver_data = (void *)(SERIO_QUIRK_NOMUX)
 	},
+	{
+		/* Fujitsu Lifebook E756 */
+		/* https://bugzilla.suse.com/show_bug.cgi?id=1229056 */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E756"),
+		},
+		.driver_data = (void *)(SERIO_QUIRK_NOMUX)
+	},
 	{
 		/* Fujitsu Lifebook E5411 */
 		.matches = {

@@ -1149,18 +1159,10 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = {
 					SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
 	},
 	{
-		/*
-		 * Setting SERIO_QUIRK_NOMUX or SERIO_QUIRK_RESET_ALWAYS makes
-		 * the keyboard very laggy for ~5 seconds after boot and
-		 * sometimes also after resume.
-		 * However both are required for the keyboard to not fail
-		 * completely sometimes after boot or resume.
-		 */
 		.matches = {
 			DMI_MATCH(DMI_BOARD_NAME, "N150CU"),
 		},
-		.driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_ALWAYS |
-					SERIO_QUIRK_NOLOOP | SERIO_QUIRK_NOPNP)
+		.driver_data = (void *)(SERIO_QUIRK_FORCENORESTORE)
 	},
 	{
 		.matches = {

@@ -1685,6 +1687,8 @@ static void __init i8042_check_quirks(void)
 	if (quirks & SERIO_QUIRK_NOPNP)
 		i8042_nopnp = true;
 #endif
+	if (quirks & SERIO_QUIRK_FORCENORESTORE)
+		i8042_forcenorestore = true;
 }
 #else
 static inline void i8042_check_quirks(void) {}

@@ -1718,7 +1722,7 @@ static int __init i8042_platform_init(void)
 
 	i8042_check_quirks();
 
-	pr_debug("Active quirks (empty means none):%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
+	pr_debug("Active quirks (empty means none):%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
 		i8042_nokbd ? " nokbd" : "",
 		i8042_noaux ? " noaux" : "",
 		i8042_nomux ? " nomux" : "",

@@ -1738,10 +1742,11 @@ static int __init i8042_platform_init(void)
 		"",
 #endif
 #ifdef CONFIG_PNP
-		i8042_nopnp ? " nopnp" : "");
+		i8042_nopnp ? " nopnp" : "",
 #else
-		"");
+		"",
 #endif
+		i8042_forcenorestore ? " forcenorestore" : "");
 
 	retval = i8042_pnp_init();
 	if (retval)
@@ -115,6 +115,10 @@ module_param_named(nopnp, i8042_nopnp, bool, 0);
 MODULE_PARM_DESC(nopnp, "Do not use PNP to detect controller settings");
 #endif
 
+static bool i8042_forcenorestore;
+module_param_named(forcenorestore, i8042_forcenorestore, bool, 0);
+MODULE_PARM_DESC(forcenorestore, "Force no restore on s3 resume, copying s2idle behaviour");
+
 #define DEBUG
 #ifdef DEBUG
 static bool i8042_debug;

@@ -1232,7 +1236,7 @@ static int i8042_pm_suspend(struct device *dev)
 {
 	int i;
 
-	if (pm_suspend_via_firmware())
+	if (!i8042_forcenorestore && pm_suspend_via_firmware())
 		i8042_controller_reset(true);
 
 	/* Set up serio interrupts for system wakeup. */

@@ -1248,7 +1252,7 @@ static int i8042_pm_suspend(struct device *dev)
 
 static int i8042_pm_resume_noirq(struct device *dev)
 {
-	if (!pm_resume_via_firmware())
+	if (i8042_forcenorestore || !pm_resume_via_firmware())
 		i8042_interrupt(0, NULL);
 
 	return 0;

@@ -1271,7 +1275,7 @@ static int i8042_pm_resume(struct device *dev)
 	 * not restore the controller state to whatever it had been at boot
 	 * time, so we do not need to do anything.
 	 */
-	if (!pm_suspend_via_firmware())
+	if (i8042_forcenorestore || !pm_suspend_via_firmware())
 		return 0;
 
 	/*
@@ -824,7 +824,7 @@ static void ads7846_read_state(struct ads7846 *ts)
 		m = &ts->msg[msg_idx];
 		error = spi_sync(ts->spi, m);
 		if (error) {
-			dev_err(&ts->spi->dev, "spi_sync --> %d\n", error);
+			dev_err_ratelimited(&ts->spi->dev, "spi_sync --> %d\n", error);
 			packet->ignore = true;
 			return;
 		}
@@ -1474,6 +1474,10 @@ static const struct edt_i2c_chip_data edt_ft6236_data = {
 	.max_support_points = 2,
 };
 
+static const struct edt_i2c_chip_data edt_ft8201_data = {
+	.max_support_points = 10,
+};
+
 static const struct edt_i2c_chip_data edt_ft8719_data = {
 	.max_support_points = 10,
 };

@@ -1485,6 +1489,7 @@ static const struct i2c_device_id edt_ft5x06_ts_id[] = {
 	{ .name = "ft5452", .driver_data = (long)&edt_ft5452_data },
 	/* Note no edt- prefix for compatibility with the ft6236.c driver */
 	{ .name = "ft6236", .driver_data = (long)&edt_ft6236_data },
+	{ .name = "ft8201", .driver_data = (long)&edt_ft8201_data },
 	{ .name = "ft8719", .driver_data = (long)&edt_ft8719_data },
 	{ /* sentinel */ }
 };

@@ -1500,6 +1505,7 @@ static const struct of_device_id edt_ft5x06_of_match[] = {
 	{ .compatible = "focaltech,ft5452", .data = &edt_ft5452_data },
 	/* Note focaltech vendor prefix for compatibility with ft6236.c */
 	{ .compatible = "focaltech,ft6236", .data = &edt_ft6236_data },
+	{ .compatible = "focaltech,ft8201", .data = &edt_ft8201_data },
 	{ .compatible = "focaltech,ft8719", .data = &edt_ft8719_data },
 	{ /* sentinel */ }
 };
@@ -130,17 +130,6 @@ static int himax_bus_read(struct himax_ts_data *ts, u32 address, void *dst,
 	return 0;
 }
 
-static int himax_read_mcu(struct himax_ts_data *ts, u32 address, u32 *dst)
-{
-	int error;
-
-	error = himax_bus_read(ts, address, dst, sizeof(dst));
-	if (error)
-		return error;
-
-	return 0;
-}
-
 static void himax_reset(struct himax_ts_data *ts)
 {
 	gpiod_set_value_cansleep(ts->gpiod_rst, 1);

@@ -160,7 +149,8 @@ static int himax_read_product_id(struct himax_ts_data *ts, u32 *product_id)
 {
 	int error;
 
-	error = himax_read_mcu(ts, HIMAX_REG_ADDR_ICID, product_id);
+	error = himax_bus_read(ts, HIMAX_REG_ADDR_ICID, product_id,
+			       sizeof(*product_id));
 	if (error)
 		return error;
 
@@ -3125,13 +3125,13 @@ static ssize_t mtf_test_write(struct file *file, const char __user *buf,
 	test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL);
 #ifdef CONFIG_HIGHMEM
 	test->highmem = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, BUFFER_ORDER);
+	if (!test->highmem) {
+		count = -ENOMEM;
+		goto free_test_buffer;
+	}
 #endif
 
-#ifdef CONFIG_HIGHMEM
-	if (test->buffer && test->highmem) {
-#else
 	if (test->buffer) {
-#endif
 		mutex_lock(&mmc_test_lock);
 		mmc_test_run(test, testcase);
 		mutex_unlock(&mmc_test_lock);

@@ -3139,6 +3139,7 @@ static ssize_t mtf_test_write(struct file *file, const char __user *buf,
 
 #ifdef CONFIG_HIGHMEM
 	__free_pages(test->highmem, BUFFER_ORDER);
+free_test_buffer:
 #endif
 	kfree(test->buffer);
 	kfree(test);
@@ -3299,6 +3299,10 @@ int dw_mci_probe(struct dw_mci *host)
 	host->biu_clk = devm_clk_get(host->dev, "biu");
 	if (IS_ERR(host->biu_clk)) {
 		dev_dbg(host->dev, "biu clock not available\n");
+		ret = PTR_ERR(host->biu_clk);
+		if (ret == -EPROBE_DEFER)
+			return ret;
+
 	} else {
 		ret = clk_prepare_enable(host->biu_clk);
 		if (ret) {

@@ -3310,6 +3314,10 @@ int dw_mci_probe(struct dw_mci *host)
 	host->ciu_clk = devm_clk_get(host->dev, "ciu");
 	if (IS_ERR(host->ciu_clk)) {
 		dev_dbg(host->dev, "ciu clock not available\n");
+		ret = PTR_ERR(host->ciu_clk);
+		if (ret == -EPROBE_DEFER)
+			goto err_clk_biu;
+
 		host->bus_hz = host->pdata->bus_hz;
 	} else {
 		ret = clk_prepare_enable(host->ciu_clk);
@@ -1230,7 +1230,7 @@ static bool msdc_cmd_done(struct msdc_host *host, int events,
 	}
 
 	if (!sbc_error && !(events & MSDC_INT_CMDRDY)) {
-		if (events & MSDC_INT_CMDTMO ||
+		if ((events & MSDC_INT_CMDTMO && !host->hs400_tuning) ||
 		    (!mmc_op_tuning(cmd->opcode) && !host->hs400_tuning))
 			/*
 			 * should not clear fifo/interrupt as the tune data

@@ -1323,9 +1323,9 @@ static void msdc_start_command(struct msdc_host *host,
 static void msdc_cmd_next(struct msdc_host *host,
 		struct mmc_request *mrq, struct mmc_command *cmd)
 {
-	if ((cmd->error &&
-	     !(cmd->error == -EILSEQ &&
-	       (mmc_op_tuning(cmd->opcode) || host->hs400_tuning))) ||
+	if ((cmd->error && !host->hs400_tuning &&
+	     !(cmd->error == -EILSEQ &&
+	       mmc_op_tuning(cmd->opcode))) ||
 	    (mrq->sbc && mrq->sbc->error))
 		msdc_request_done(host, mrq);
 	else if (cmd == mrq->sbc)
@@ -427,6 +427,8 @@ static int bond_ipsec_add_sa(struct xfrm_state *xs,
 			     struct netlink_ext_ack *extack)
 {
 	struct net_device *bond_dev = xs->xso.dev;
+	struct net_device *real_dev;
+	netdevice_tracker tracker;
 	struct bond_ipsec *ipsec;
 	struct bonding *bond;
 	struct slave *slave;

@@ -438,74 +440,80 @@ static int bond_ipsec_add_sa(struct xfrm_state *xs,
 	rcu_read_lock();
 	bond = netdev_priv(bond_dev);
 	slave = rcu_dereference(bond->curr_active_slave);
-	if (!slave) {
-		rcu_read_unlock();
-		return -ENODEV;
+	real_dev = slave ? slave->dev : NULL;
+	netdev_hold(real_dev, &tracker, GFP_ATOMIC);
+	rcu_read_unlock();
+	if (!real_dev) {
+		err = -ENODEV;
+		goto out;
 	}
 
-	if (!slave->dev->xfrmdev_ops ||
-	    !slave->dev->xfrmdev_ops->xdo_dev_state_add ||
-	    netif_is_bond_master(slave->dev)) {
+	if (!real_dev->xfrmdev_ops ||
+	    !real_dev->xfrmdev_ops->xdo_dev_state_add ||
+	    netif_is_bond_master(real_dev)) {
 		NL_SET_ERR_MSG_MOD(extack, "Slave does not support ipsec offload");
-		rcu_read_unlock();
-		return -EINVAL;
+		err = -EINVAL;
+		goto out;
 	}
 
-	ipsec = kmalloc(sizeof(*ipsec), GFP_ATOMIC);
+	ipsec = kmalloc(sizeof(*ipsec), GFP_KERNEL);
 	if (!ipsec) {
-		rcu_read_unlock();
-		return -ENOMEM;
+		err = -ENOMEM;
+		goto out;
 	}
-	xs->xso.real_dev = slave->dev;
 
-	err = slave->dev->xfrmdev_ops->xdo_dev_state_add(xs, extack);
+	xs->xso.real_dev = real_dev;
+	err = real_dev->xfrmdev_ops->xdo_dev_state_add(xs, extack);
 	if (!err) {
 		ipsec->xs = xs;
 		INIT_LIST_HEAD(&ipsec->list);
-		spin_lock_bh(&bond->ipsec_lock);
+		mutex_lock(&bond->ipsec_lock);
 		list_add(&ipsec->list, &bond->ipsec_list);
-		spin_unlock_bh(&bond->ipsec_lock);
+		mutex_unlock(&bond->ipsec_lock);
 	} else {
 		kfree(ipsec);
 	}
-	rcu_read_unlock();
+out:
+	netdev_put(real_dev, &tracker);
 	return err;
 }
 
 static void bond_ipsec_add_sa_all(struct bonding *bond)
 {
 	struct net_device *bond_dev = bond->dev;
+	struct net_device *real_dev;
 	struct bond_ipsec *ipsec;
 	struct slave *slave;
 
-	rcu_read_lock();
-	slave = rcu_dereference(bond->curr_active_slave);
-	if (!slave)
-		goto out;
+	slave = rtnl_dereference(bond->curr_active_slave);
+	real_dev = slave ? slave->dev : NULL;
+	if (!real_dev)
+		return;
 
-	if (!slave->dev->xfrmdev_ops ||
-	    !slave->dev->xfrmdev_ops->xdo_dev_state_add ||
-	    netif_is_bond_master(slave->dev)) {
-		spin_lock_bh(&bond->ipsec_lock);
+	mutex_lock(&bond->ipsec_lock);
+	if (!real_dev->xfrmdev_ops ||
+	    !real_dev->xfrmdev_ops->xdo_dev_state_add ||
+	    netif_is_bond_master(real_dev)) {
 		if (!list_empty(&bond->ipsec_list))
-			slave_warn(bond_dev, slave->dev,
+			slave_warn(bond_dev, real_dev,
 				   "%s: no slave xdo_dev_state_add\n",
 				   __func__);
-		spin_unlock_bh(&bond->ipsec_lock);
 		goto out;
 	}
 
-	spin_lock_bh(&bond->ipsec_lock);
 	list_for_each_entry(ipsec, &bond->ipsec_list, list) {
-		ipsec->xs->xso.real_dev = slave->dev;
-		if (slave->dev->xfrmdev_ops->xdo_dev_state_add(ipsec->xs, NULL)) {
-			slave_warn(bond_dev, slave->dev, "%s: failed to add SA\n", __func__);
+		/* If new state is added before ipsec_lock acquired */
+		if (ipsec->xs->xso.real_dev == real_dev)
+			continue;
+
+		ipsec->xs->xso.real_dev = real_dev;
+		if (real_dev->xfrmdev_ops->xdo_dev_state_add(ipsec->xs, NULL)) {
+			slave_warn(bond_dev, real_dev, "%s: failed to add SA\n", __func__);
 			ipsec->xs->xso.real_dev = NULL;
 		}
 	}
-	spin_unlock_bh(&bond->ipsec_lock);
 out:
-	rcu_read_unlock();
+	mutex_unlock(&bond->ipsec_lock);
 }
|
||||
static void bond_ipsec_del_sa(struct xfrm_state *xs)
|
||||
{
|
||||
struct net_device *bond_dev = xs->xso.dev;
|
||||
struct net_device *real_dev;
|
||||
netdevice_tracker tracker;
|
||||
struct bond_ipsec *ipsec;
|
||||
struct bonding *bond;
|
||||
struct slave *slave;
|
||||
@ -525,6 +535,9 @@ static void bond_ipsec_del_sa(struct xfrm_state *xs)
|
||||
rcu_read_lock();
|
||||
bond = netdev_priv(bond_dev);
|
||||
slave = rcu_dereference(bond->curr_active_slave);
|
||||
real_dev = slave ? slave->dev : NULL;
|
||||
netdev_hold(real_dev, &tracker, GFP_ATOMIC);
|
||||
rcu_read_unlock();
|
||||
|
||||
if (!slave)
|
||||
goto out;
|
||||
@ -532,18 +545,19 @@ static void bond_ipsec_del_sa(struct xfrm_state *xs)
|
||||
if (!xs->xso.real_dev)
|
||||
goto out;
|
||||
|
||||
WARN_ON(xs->xso.real_dev != slave->dev);
|
||||
WARN_ON(xs->xso.real_dev != real_dev);
|
||||
|
||||
if (!slave->dev->xfrmdev_ops ||
|
||||
!slave->dev->xfrmdev_ops->xdo_dev_state_delete ||
|
||||
netif_is_bond_master(slave->dev)) {
|
||||
slave_warn(bond_dev, slave->dev, "%s: no slave xdo_dev_state_delete\n", __func__);
|
||||
if (!real_dev->xfrmdev_ops ||
|
||||
!real_dev->xfrmdev_ops->xdo_dev_state_delete ||
|
||||
netif_is_bond_master(real_dev)) {
|
||||
slave_warn(bond_dev, real_dev, "%s: no slave xdo_dev_state_delete\n", __func__);
|
||||
goto out;
|
||||
}
|
||||
|
||||
slave->dev->xfrmdev_ops->xdo_dev_state_delete(xs);
|
||||
real_dev->xfrmdev_ops->xdo_dev_state_delete(xs);
|
||||
out:
|
||||
spin_lock_bh(&bond->ipsec_lock);
|
||||
netdev_put(real_dev, &tracker);
|
||||
mutex_lock(&bond->ipsec_lock);
|
||||
list_for_each_entry(ipsec, &bond->ipsec_list, list) {
|
||||
if (ipsec->xs == xs) {
|
||||
list_del(&ipsec->list);
|
||||
@ -551,40 +565,72 @@ out:
|
||||
break;
|
||||
}
|
||||
}
|
||||
spin_unlock_bh(&bond->ipsec_lock);
|
||||
rcu_read_unlock();
|
||||
mutex_unlock(&bond->ipsec_lock);
|
||||
}
|
||||
|
||||
static void bond_ipsec_del_sa_all(struct bonding *bond)
|
||||
{
|
||||
struct net_device *bond_dev = bond->dev;
|
||||
struct net_device *real_dev;
|
||||
struct bond_ipsec *ipsec;
|
||||
struct slave *slave;
|
||||
|
||||
rcu_read_lock();
|
||||
slave = rcu_dereference(bond->curr_active_slave);
|
||||
if (!slave) {
|
||||
rcu_read_unlock();
|
||||
slave = rtnl_dereference(bond->curr_active_slave);
|
||||
real_dev = slave ? slave->dev : NULL;
|
||||
if (!real_dev)
|
||||
return;
|
||||
}
|
||||
|
||||
spin_lock_bh(&bond->ipsec_lock);
|
||||
mutex_lock(&bond->ipsec_lock);
|
||||
list_for_each_entry(ipsec, &bond->ipsec_list, list) {
|
||||
if (!ipsec->xs->xso.real_dev)
|
||||
continue;
|
||||
|
||||
if (!slave->dev->xfrmdev_ops ||
|
||||
!slave->dev->xfrmdev_ops->xdo_dev_state_delete ||
|
||||
netif_is_bond_master(slave->dev)) {
|
||||
slave_warn(bond_dev, slave->dev,
|
||||
if (!real_dev->xfrmdev_ops ||
|
||||
!real_dev->xfrmdev_ops->xdo_dev_state_delete ||
|
||||
netif_is_bond_master(real_dev)) {
|
||||
slave_warn(bond_dev, real_dev,
|
||||
"%s: no slave xdo_dev_state_delete\n",
|
||||
__func__);
|
||||
} else {
|
||||
slave->dev->xfrmdev_ops->xdo_dev_state_delete(ipsec->xs);
|
||||
real_dev->xfrmdev_ops->xdo_dev_state_delete(ipsec->xs);
|
||||
if (real_dev->xfrmdev_ops->xdo_dev_state_free)
|
||||
real_dev->xfrmdev_ops->xdo_dev_state_free(ipsec->xs);
|
||||
}
|
||||
}
|
||||
spin_unlock_bh(&bond->ipsec_lock);
|
||||
mutex_unlock(&bond->ipsec_lock);
|
||||
}
|
||||
|
||||
static void bond_ipsec_free_sa(struct xfrm_state *xs)
|
||||
{
|
||||
struct net_device *bond_dev = xs->xso.dev;
|
||||
struct net_device *real_dev;
|
||||
netdevice_tracker tracker;
|
||||
struct bonding *bond;
|
||||
struct slave *slave;
|
||||
|
||||
if (!bond_dev)
|
||||
return;
|
||||
|
||||
rcu_read_lock();
|
||||
bond = netdev_priv(bond_dev);
|
||||
slave = rcu_dereference(bond->curr_active_slave);
|
||||
real_dev = slave ? slave->dev : NULL;
|
||||
netdev_hold(real_dev, &tracker, GFP_ATOMIC);
|
||||
rcu_read_unlock();
|
||||
|
||||
if (!slave)
|
||||
goto out;
|
||||
|
||||
if (!xs->xso.real_dev)
|
||||
goto out;
|
||||
|
||||
WARN_ON(xs->xso.real_dev != real_dev);
|
||||
|
||||
if (real_dev && real_dev->xfrmdev_ops &&
|
||||
real_dev->xfrmdev_ops->xdo_dev_state_free)
|
||||
real_dev->xfrmdev_ops->xdo_dev_state_free(xs);
|
||||
out:
|
||||
netdev_put(real_dev, &tracker);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -627,6 +673,7 @@ out:
|
||||
static const struct xfrmdev_ops bond_xfrmdev_ops = {
|
||||
.xdo_dev_state_add = bond_ipsec_add_sa,
|
||||
.xdo_dev_state_delete = bond_ipsec_del_sa,
|
||||
.xdo_dev_state_free = bond_ipsec_free_sa,
|
||||
.xdo_dev_offload_ok = bond_ipsec_offload_ok,
|
||||
};
|
||||
#endif /* CONFIG_XFRM_OFFLOAD */
|
||||
@ -5877,7 +5924,7 @@ void bond_setup(struct net_device *bond_dev)
|
||||
/* set up xfrm device ops (only supported in active-backup right now) */
|
||||
bond_dev->xfrmdev_ops = &bond_xfrmdev_ops;
|
||||
INIT_LIST_HEAD(&bond->ipsec_list);
|
||||
spin_lock_init(&bond->ipsec_lock);
|
||||
mutex_init(&bond->ipsec_lock);
|
||||
#endif /* CONFIG_XFRM_OFFLOAD */
|
||||
|
||||
/* don't acquire bond device's netif_tx_lock when transmitting */
|
||||
@ -5926,6 +5973,10 @@ static void bond_uninit(struct net_device *bond_dev)
|
||||
__bond_release_one(bond_dev, slave->dev, true, true);
|
||||
netdev_info(bond_dev, "Released all slaves\n");
|
||||
|
||||
#ifdef CONFIG_XFRM_OFFLOAD
|
||||
mutex_destroy(&bond->ipsec_lock);
|
||||
#endif /* CONFIG_XFRM_OFFLOAD */
|
||||
|
||||
bond_set_slave_arr(bond, NULL, NULL);
|
||||
|
||||
list_del_rcu(&bond->bond_list);
|
||||
|
@@ -582,7 +582,7 @@ static bool ftgmac100_rx_packet(struct ftgmac100 *priv, int *processed)
 	(*processed)++;
 	return true;
 
- drop:
+drop:
 	/* Clean rxdes0 (which resets own bit) */
 	rxdes->rxdes0 = cpu_to_le32(status & priv->rxdes0_edorr_mask);
 	priv->rx_pointer = ftgmac100_next_rx_pointer(priv, pointer);

@@ -666,6 +666,11 @@ static bool ftgmac100_tx_complete_packet(struct ftgmac100 *priv)
 	ftgmac100_free_tx_packet(priv, pointer, skb, txdes, ctl_stat);
 	txdes->txdes0 = cpu_to_le32(ctl_stat & priv->txdes0_edotr_mask);
 
+	/* Ensure the descriptor config is visible before setting the tx
+	 * pointer.
+	 */
+	smp_wmb();
+
 	priv->tx_clean_pointer = ftgmac100_next_tx_pointer(priv, pointer);
 
 	return true;

@@ -819,6 +824,11 @@ static netdev_tx_t ftgmac100_hard_start_xmit(struct sk_buff *skb,
 	dma_wmb();
 	first->txdes0 = cpu_to_le32(f_ctl_stat);
 
+	/* Ensure the descriptor config is visible before setting the tx
+	 * pointer.
+	 */
+	smp_wmb();
+
 	/* Update next TX pointer */
 	priv->tx_pointer = pointer;
 

@@ -839,7 +849,7 @@ static netdev_tx_t ftgmac100_hard_start_xmit(struct sk_buff *skb,
 
 	return NETDEV_TX_OK;
 
- dma_err:
+dma_err:
 	if (net_ratelimit())
 		netdev_err(netdev, "map tx fragment failed\n");
 

@@ -861,7 +871,7 @@ static netdev_tx_t ftgmac100_hard_start_xmit(struct sk_buff *skb,
 	 * last fragment, so we know ftgmac100_free_tx_packet()
 	 * hasn't freed the skb yet.
 	 */
- drop:
+drop:
 	/* Drop the packet */
 	dev_kfree_skb_any(skb);
 	netdev->stats.tx_dropped++;

@@ -1354,7 +1364,7 @@ static void ftgmac100_reset(struct ftgmac100 *priv)
 	ftgmac100_init_all(priv, true);
 
 	netdev_dbg(netdev, "Reset done !\n");
- bail:
+bail:
 	if (priv->mii_bus)
 		mutex_unlock(&priv->mii_bus->mdio_lock);
 	if (netdev->phydev)

@@ -1554,16 +1564,16 @@ static int ftgmac100_open(struct net_device *netdev)
 
 	return 0;
 
- err_ncsi:
+err_ncsi:
 	phy_stop(netdev->phydev);
 	napi_disable(&priv->napi);
 	netif_stop_queue(netdev);
- err_alloc:
+err_alloc:
 	ftgmac100_free_buffers(priv);
 	free_irq(netdev->irq, netdev);
- err_irq:
+err_irq:
 	netif_napi_del(&priv->napi);
- err_hw:
+err_hw:
 	iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
 	ftgmac100_free_rings(priv);
 	return err;
@@ -52,32 +52,6 @@ static int mana_hwc_verify_resp_msg(const struct hwc_caller_ctx *caller_ctx,
 	return 0;
 }
 
-static void mana_hwc_handle_resp(struct hw_channel_context *hwc, u32 resp_len,
-				 const struct gdma_resp_hdr *resp_msg)
-{
-	struct hwc_caller_ctx *ctx;
-	int err;
-
-	if (!test_bit(resp_msg->response.hwc_msg_id,
-		      hwc->inflight_msg_res.map)) {
-		dev_err(hwc->dev, "hwc_rx: invalid msg_id = %u\n",
-			resp_msg->response.hwc_msg_id);
-		return;
-	}
-
-	ctx = hwc->caller_ctx + resp_msg->response.hwc_msg_id;
-	err = mana_hwc_verify_resp_msg(ctx, resp_msg, resp_len);
-	if (err)
-		goto out;
-
-	ctx->status_code = resp_msg->status;
-
-	memcpy(ctx->output_buf, resp_msg, resp_len);
-out:
-	ctx->error = err;
-	complete(&ctx->comp_event);
-}
-
 static int mana_hwc_post_rx_wqe(const struct hwc_wq *hwc_rxq,
 				struct hwc_work_request *req)
 {

@@ -101,6 +75,40 @@ static int mana_hwc_post_rx_wqe(const struct hwc_wq *hwc_rxq,
 	return err;
 }
 
+static void mana_hwc_handle_resp(struct hw_channel_context *hwc, u32 resp_len,
+				 struct hwc_work_request *rx_req)
+{
+	const struct gdma_resp_hdr *resp_msg = rx_req->buf_va;
+	struct hwc_caller_ctx *ctx;
+	int err;
+
+	if (!test_bit(resp_msg->response.hwc_msg_id,
+		      hwc->inflight_msg_res.map)) {
+		dev_err(hwc->dev, "hwc_rx: invalid msg_id = %u\n",
+			resp_msg->response.hwc_msg_id);
+		mana_hwc_post_rx_wqe(hwc->rxq, rx_req);
+		return;
+	}
+
+	ctx = hwc->caller_ctx + resp_msg->response.hwc_msg_id;
+	err = mana_hwc_verify_resp_msg(ctx, resp_msg, resp_len);
+	if (err)
+		goto out;
+
+	ctx->status_code = resp_msg->status;
+
+	memcpy(ctx->output_buf, resp_msg, resp_len);
+out:
+	ctx->error = err;
+
+	/* Must post rx wqe before complete(), otherwise the next rx may
+	 * hit no_wqe error.
+	 */
+	mana_hwc_post_rx_wqe(hwc->rxq, rx_req);
+
+	complete(&ctx->comp_event);
+}
+
 static void mana_hwc_init_event_handler(void *ctx, struct gdma_queue *q_self,
 					struct gdma_event *event)
 {

@@ -235,14 +243,12 @@ static void mana_hwc_rx_event_handler(void *ctx, u32 gdma_rxq_id,
 		return;
 	}
 
-	mana_hwc_handle_resp(hwc, rx_oob->tx_oob_data_size, resp);
+	mana_hwc_handle_resp(hwc, rx_oob->tx_oob_data_size, rx_req);
 
-	/* Do no longer use 'resp', because the buffer is posted to the HW
-	 * in the below mana_hwc_post_rx_wqe().
+	/* Can no longer use 'resp', because the buffer is posted to the HW
+	 * in mana_hwc_handle_resp() above.
 	 */
 	resp = NULL;
-
-	mana_hwc_post_rx_wqe(hwc_rxq, rx_req);
 }
 
 static void mana_hwc_tx_event_handler(void *ctx, u32 gdma_txq_id,
@@ -32,7 +32,7 @@
 #define IONIC_ADMIN_DOORBELL_DEADLINE	(HZ / 2)	/* 500ms */
 #define IONIC_TX_DOORBELL_DEADLINE	(HZ / 100)	/* 10ms */
 #define IONIC_RX_MIN_DOORBELL_DEADLINE	(HZ / 100)	/* 10ms */
-#define IONIC_RX_MAX_DOORBELL_DEADLINE	(HZ * 5)	/* 5s */
+#define IONIC_RX_MAX_DOORBELL_DEADLINE	(HZ * 4)	/* 4s */
 
 struct ionic_dev_bar {
 	void __iomem *vaddr;
@@ -3220,7 +3220,7 @@ int ionic_lif_alloc(struct ionic *ionic)
 	netdev->netdev_ops = &ionic_netdev_ops;
 	ionic_ethtool_set_ops(netdev);
 
-	netdev->watchdog_timeo = 2 * HZ;
+	netdev->watchdog_timeo = 5 * HZ;
 	netif_carrier_off(netdev);
 
 	lif->identity = lid;
@@ -1459,6 +1459,7 @@ static const struct prueth_pdata am654_icssg_pdata = {
 
 static const struct prueth_pdata am64x_icssg_pdata = {
 	.fdqring_mode = K3_RINGACC_RING_MODE_RING,
+	.quirk_10m_link_issue = 1,
 	.switch_mode = 1,
 };
 
@@ -1653,7 +1653,7 @@ static struct sock *gtp_encap_enable_socket(int fd, int type,
 	sock = sockfd_lookup(fd, &err);
 	if (!sock) {
 		pr_debug("gtp socket fd=%d not found\n", fd);
-		return NULL;
+		return ERR_PTR(err);
 	}
 
 	sk = sock->sk;
@@ -725,22 +725,25 @@ int iwl_acpi_get_wgds_table(struct iwl_fw_runtime *fwrt)
 			entry = &wifi_pkg->package.elements[entry_idx];
 			entry_idx++;
 			if (entry->type != ACPI_TYPE_INTEGER ||
-			    entry->integer.value > num_profiles) {
+			    entry->integer.value > num_profiles ||
+			    entry->integer.value <
+				rev_data[idx].min_profiles) {
 				ret = -EINVAL;
 				goto out_free;
 			}
-			num_profiles = entry->integer.value;
 
 			/*
-			 * this also validates >= min_profiles since we
-			 * otherwise wouldn't have gotten the data when
-			 * looking up in ACPI
+			 * Check to see if we received package count
+			 * same as max # of profiles
 			 */
 			if (wifi_pkg->package.count !=
 			    hdr_size + profile_size * num_profiles) {
 				ret = -EINVAL;
 				goto out_free;
 			}
+
+			/* Number of valid profiles */
+			num_profiles = entry->integer.value;
 		}
 		goto read_table;
 	}
@@ -3348,7 +3348,7 @@ void iwl_fw_dbg_stop_restart_recording(struct iwl_fw_runtime *fwrt,
 {
 	int ret __maybe_unused = 0;
 
-	if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status))
+	if (!iwl_trans_fw_running(fwrt->trans))
 		return;
 
 	if (fw_has_capa(&fwrt->fw->ucode_capa,
@@ -85,6 +85,10 @@ struct iwl_cfg;
  *	May sleep
  * @wimax_active: invoked when WiMax becomes active. May sleep
  * @time_point: called when transport layer wants to collect debug data
+ * @device_powered_off: called upon resume from hibernation but not only.
+ *	Op_mode needs to reset its internal state because the device did not
+ *	survive the system state transition. The firmware is no longer running,
+ *	etc...
  */
 struct iwl_op_mode_ops {
 	struct iwl_op_mode *(*start)(struct iwl_trans *trans,

@@ -107,6 +111,7 @@ struct iwl_op_mode_ops {
 	void (*time_point)(struct iwl_op_mode *op_mode,
 			   enum iwl_fw_ini_time_point tp_id,
 			   union iwl_dbg_tlv_tp_data *tp_data);
+	void (*device_powered_off)(struct iwl_op_mode *op_mode);
 };
 
 int iwl_opmode_register(const char *name, const struct iwl_op_mode_ops *ops);

@@ -204,4 +209,11 @@ static inline void iwl_op_mode_time_point(struct iwl_op_mode *op_mode,
 	op_mode->ops->time_point(op_mode, tp_id, tp_data);
 }
 
+static inline void iwl_op_mode_device_powered_off(struct iwl_op_mode *op_mode)
+{
+	if (!op_mode || !op_mode->ops || !op_mode->ops->device_powered_off)
+		return;
+	op_mode->ops->device_powered_off(op_mode);
+}
+
 #endif /* __iwl_op_mode_h__ */