Merge branch 'i2c/for-current-fixed' into i2c/for-4.18

Wolfram Sang 2018-05-15 10:41:01 +02:00
commit e6218bf390
338 changed files with 2218 additions and 1252 deletions


@@ -557,6 +557,14 @@ A: Although LLVM IR generation and optimization try to stay architecture
    pulls in some header files containing file scope host assembly codes.
  - You can add "-fno-jump-tables" to work around the switch table issue.
-   Otherwise, you can use bpf target.
+   Otherwise, you can use bpf target. Additionally, you _must_ use bpf target
+   when:
+
+   - Your program uses data structures with pointer or long / unsigned long
+     types that interface with BPF helpers or context data structures. Access
+     into these structures is verified by the BPF verifier and may result
+     in verification failures if the native architecture is not aligned with
+     the BPF architecture, e.g. 64-bit. An example of this is
+     BPF_PROG_TYPE_SK_MSG require '-target bpf'
 
 Happy BPF hacking!
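
For reference, the build flow the added text describes looks like the minimal sketch below; prog.c and msg_prog are hypothetical names, and only the '-target bpf' flag plus the pointer-typed context members are the point:

/* Compile with the bpf target, not the native one:
 *
 *     clang -O2 -target bpf -c prog.c -o prog.o
 */
#include <linux/bpf.h>

int msg_prog(struct sk_msg_md *msg)
{
        /* struct sk_msg_md carries pointer members; a native 32-bit target
         * would lay them out differently than the 64-bit BPF architecture,
         * and the verifier would reject the resulting accesses. */
        void *data = msg->data;
        void *data_end = msg->data_end;

        if (data + 4 > data_end)
                return SK_DROP;
        return SK_PASS;
}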


@@ -4,6 +4,13 @@ Required properties:
 - compatible:
     atmel,maxtouch
+
+    The following compatibles have been used in various products but are
+    deprecated:
+      atmel,qt602240_ts
+      atmel,atmel_mxt_ts
+      atmel,atmel_mxt_tp
+      atmel,mXT224
+
 - reg: The I2C address of the device
 - interrupts: The sink for the touchpad's IRQ output


@@ -177,14 +177,14 @@ BUGS
 ****
 
-Report bugs to Mauro Carvalho Chehab <mchehab@s-opensource.com>
+Report bugs to Mauro Carvalho Chehab <mchehab@kernel.org>
 
 COPYRIGHT
 *********
 
-Copyright (c) 2016 by Mauro Carvalho Chehab <mchehab@s-opensource.com>.
+Copyright (c) 2016 by Mauro Carvalho Chehab <mchehab+samsung@kernel.org>.
 
 License GPLv2: GNU GPL version 2 <http://gnu.org/licenses/gpl.html>.


@@ -7,7 +7,7 @@ file: uapi/v4l/keytable.c
 /* keytable.c - This program allows checking/replacing keys at IR
 
-   Copyright (C) 2006-2009 Mauro Carvalho Chehab <mchehab@infradead.org>
+   Copyright (C) 2006-2009 Mauro Carvalho Chehab <mchehab@kernel.org>
 
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by


@@ -6,7 +6,7 @@ file: media/v4l/v4l2grab.c
 .. code-block:: c
 
     /* V4L2 video picture grabber
-       Copyright (C) 2009 Mauro Carvalho Chehab <mchehab@infradead.org>
+       Copyright (C) 2009 Mauro Carvalho Chehab <mchehab@kernel.org>
 
        This program is free software; you can redistribute it and/or modify
        it under the terms of the GNU General Public License as published by


@@ -387,11 +387,11 @@ tree for more details.
 =head1 BUGS
 
-Report bugs to Mauro Carvalho Chehab <mchehab@s-opensource.com>
+Report bugs to Mauro Carvalho Chehab <mchehab@kernel.org>
 
 =head1 COPYRIGHT
 
-Copyright (c) 2016 by Mauro Carvalho Chehab <mchehab@s-opensource.com>.
+Copyright (c) 2016 by Mauro Carvalho Chehab <mchehab+samsung@kernel.org>.
 
 License GPLv2: GNU GPL version 2 <http://gnu.org/licenses/gpl.html>.


@@ -6,7 +6,7 @@ communicating in English you can also ask the Chinese maintainer for
 help.  Contact the Chinese maintainer if this translation is outdated
 or if there is a problem with the translation.
 
-Maintainer: Mauro Carvalho Chehab <mchehab@infradead.org>
+Maintainer: Mauro Carvalho Chehab <mchehab@kernel.org>
 Chinese maintainer: Fu Wei <tekkamanninja@gmail.com>
 ---------------------------------------------------------------------
 Documentation/video4linux/v4l2-framework.txt 的中文翻译
@@ -14,7 +14,7 @@ Documentation/video4linux/v4l2-framework.txt 的中文翻译
 如果想评论或更新本文的内容,请直接联系原文档的维护者。如果你使用英文
 交流有困难的话,也可以向中文版维护者求助。如果本翻译更新不及时或者翻
 译存在问题,请联系中文版维护者。
-英文版维护者: Mauro Carvalho Chehab <mchehab@infradead.org>
+英文版维护者: Mauro Carvalho Chehab <mchehab@kernel.org>
 中文版维护者: 傅炜 Fu Wei <tekkamanninja@gmail.com>
 中文版翻译者: 傅炜 Fu Wei <tekkamanninja@gmail.com>
 中文版校译者: 傅炜 Fu Wei <tekkamanninja@gmail.com>


@@ -2554,7 +2554,6 @@ F: Documentation/devicetree/bindings/sound/axentia,*
 F: sound/soc/atmel/tse850-pcm5142.c
 
 AZ6007 DVB DRIVER
-M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
 M: Mauro Carvalho Chehab <mchehab@kernel.org>
 L: linux-media@vger.kernel.org
 W: https://linuxtv.org
@@ -3083,7 +3082,6 @@ F: include/linux/btrfs*
 F: include/uapi/linux/btrfs*
 
 BTTV VIDEO4LINUX DRIVER
-M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
 M: Mauro Carvalho Chehab <mchehab@kernel.org>
 L: linux-media@vger.kernel.org
 W: https://linuxtv.org
@@ -3812,7 +3810,6 @@ S: Maintained
 F: drivers/media/dvb-frontends/cx24120*
 
 CX88 VIDEO4LINUX DRIVER
-M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
 M: Mauro Carvalho Chehab <mchehab@kernel.org>
 L: linux-media@vger.kernel.org
 W: https://linuxtv.org
@@ -5053,7 +5050,6 @@ F: drivers/edac/thunderx_edac*
 
 EDAC-CORE
 M: Borislav Petkov <bp@alien8.de>
-M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
 M: Mauro Carvalho Chehab <mchehab@kernel.org>
 L: linux-edac@vger.kernel.org
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/bp/bp.git for-next
@@ -5082,7 +5078,6 @@ S: Maintained
 F: drivers/edac/fsl_ddr_edac.*
 
 EDAC-GHES
-M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
 M: Mauro Carvalho Chehab <mchehab@kernel.org>
 L: linux-edac@vger.kernel.org
 S: Maintained
@@ -5099,21 +5094,18 @@ S: Maintained
 F: drivers/edac/i5000_edac.c
 
 EDAC-I5400
-M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
 M: Mauro Carvalho Chehab <mchehab@kernel.org>
 L: linux-edac@vger.kernel.org
 S: Maintained
 F: drivers/edac/i5400_edac.c
 
 EDAC-I7300
-M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
 M: Mauro Carvalho Chehab <mchehab@kernel.org>
 L: linux-edac@vger.kernel.org
 S: Maintained
 F: drivers/edac/i7300_edac.c
 
 EDAC-I7CORE
-M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
 M: Mauro Carvalho Chehab <mchehab@kernel.org>
 L: linux-edac@vger.kernel.org
 S: Maintained
@@ -5163,7 +5155,6 @@ S: Maintained
 F: drivers/edac/r82600_edac.c
 
 EDAC-SBRIDGE
-M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
 M: Mauro Carvalho Chehab <mchehab@kernel.org>
 L: linux-edac@vger.kernel.org
 S: Maintained
@@ -5222,7 +5213,6 @@ S: Maintained
 F: drivers/net/ethernet/ibm/ehea/
 
 EM28XX VIDEO4LINUX DRIVER
-M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
 M: Mauro Carvalho Chehab <mchehab@kernel.org>
 L: linux-media@vger.kernel.org
 W: https://linuxtv.org
@@ -7677,9 +7667,11 @@ L: linux-kbuild@vger.kernel.org
 S: Maintained
 F: Documentation/kbuild/
 F: Makefile
-F: scripts/Makefile.*
+F: scripts/Kbuild*
+F: scripts/Makefile*
 F: scripts/basic/
 F: scripts/mk*
+F: scripts/mod/
 F: scripts/package/
 
 KERNEL JANITORS
@@ -8871,7 +8863,6 @@ F: Documentation/devicetree/bindings/media/nvidia,tegra-vde.txt
 F: drivers/staging/media/tegra-vde/
 
 MEDIA INPUT INFRASTRUCTURE (V4L/DVB)
-M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
 M: Mauro Carvalho Chehab <mchehab@kernel.org>
 P: LinuxTV.org Project
 L: linux-media@vger.kernel.org
@@ -9725,6 +9716,7 @@ W: https://fedorahosted.org/dropwatch/
 F: net/core/drop_monitor.c
 
 NETWORKING DRIVERS
+M: "David S. Miller" <davem@davemloft.net>
 L: netdev@vger.kernel.org
 W: http://www.linuxfoundation.org/en/Net
 Q: http://patchwork.ozlabs.org/project/netdev/list/
@@ -12259,7 +12251,6 @@ S: Odd Fixes
 F: drivers/media/i2c/saa6588*
 
 SAA7134 VIDEO4LINUX DRIVER
-M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
 M: Mauro Carvalho Chehab <mchehab@kernel.org>
 L: linux-media@vger.kernel.org
 W: https://linuxtv.org
@@ -12498,6 +12489,7 @@ F: drivers/scsi/st_*.h
 SCTP PROTOCOL
 M: Vlad Yasevich <vyasevich@gmail.com>
 M: Neil Horman <nhorman@tuxdriver.com>
+M: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
 L: linux-sctp@vger.kernel.org
 W: http://lksctp.sourceforge.net
 S: Maintained
@@ -12763,7 +12755,6 @@ S: Maintained
 F: drivers/media/radio/si4713/radio-usb-si4713.c
 
 SIANO DVB DRIVER
-M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
 M: Mauro Carvalho Chehab <mchehab@kernel.org>
 L: linux-media@vger.kernel.org
 W: https://linuxtv.org
@@ -13754,7 +13745,6 @@ S: Maintained
 F: drivers/media/i2c/tda9840*
 
 TEA5761 TUNER DRIVER
-M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
 M: Mauro Carvalho Chehab <mchehab@kernel.org>
 L: linux-media@vger.kernel.org
 W: https://linuxtv.org
@@ -13763,7 +13753,6 @@ S: Odd fixes
 F: drivers/media/tuners/tea5761.*
 
 TEA5767 TUNER DRIVER
-M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
 M: Mauro Carvalho Chehab <mchehab@kernel.org>
 L: linux-media@vger.kernel.org
 W: https://linuxtv.org
@@ -13853,7 +13842,6 @@ S: Supported
 F: drivers/iommu/tegra*
 
 TEGRA KBC DRIVER
-M: Rakesh Iyer <riyer@nvidia.com>
 M: Laxman Dewangan <ldewangan@nvidia.com>
 S: Supported
 F: drivers/input/keyboard/tegra-kbc.c
@@ -14180,7 +14168,6 @@ F: Documentation/networking/tlan.txt
 F: drivers/net/ethernet/ti/tlan.*
 
 TM6000 VIDEO4LINUX DRIVER
-M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
 M: Mauro Carvalho Chehab <mchehab@kernel.org>
 L: linux-media@vger.kernel.org
 W: https://linuxtv.org
@@ -15407,7 +15394,6 @@ S: Maintained
 F: arch/x86/entry/vdso/
 
 XC2028/3028 TUNER DRIVER
-M: Mauro Carvalho Chehab <mchehab@s-opensource.com>
 M: Mauro Carvalho Chehab <mchehab@kernel.org>
 L: linux-media@vger.kernel.org
 W: https://linuxtv.org


@@ -2,8 +2,8 @@
 VERSION = 4
 PATCHLEVEL = 17
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
-NAME = Fearless Coyote
+EXTRAVERSION = -rc4
+NAME = Merciless Moray
 
 # *DOCUMENTATION*
 # To see a list of typical targets execute "make help"


@@ -333,7 +333,7 @@ static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
         } else {
                 u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
                 sctlr |= (1 << 25);
-                vcpu_write_sys_reg(vcpu, SCTLR_EL1, sctlr);
+                vcpu_write_sys_reg(vcpu, sctlr, SCTLR_EL1);
         }
 }


@@ -18,11 +18,20 @@
 #include <linux/compiler.h>
 #include <linux/irqchip/arm-gic.h>
 #include <linux/kvm_host.h>
+#include <linux/swab.h>
 
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_hyp.h>
 #include <asm/kvm_mmu.h>
 
+static bool __hyp_text __is_be(struct kvm_vcpu *vcpu)
+{
+        if (vcpu_mode_is_32bit(vcpu))
+                return !!(read_sysreg_el2(spsr) & COMPAT_PSR_E_BIT);
+
+        return !!(read_sysreg(SCTLR_EL1) & SCTLR_ELx_EE);
+}
+
 /*
  * __vgic_v2_perform_cpuif_access -- perform a GICV access on behalf of the
  *                                   guest.
@@ -64,14 +73,19 @@ int __hyp_text __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu)
         addr += fault_ipa - vgic->vgic_cpu_base;
 
         if (kvm_vcpu_dabt_iswrite(vcpu)) {
-                u32 data = vcpu_data_guest_to_host(vcpu,
-                                                   vcpu_get_reg(vcpu, rd),
-                                                   sizeof(u32));
+                u32 data = vcpu_get_reg(vcpu, rd);
+                if (__is_be(vcpu)) {
+                        /* guest pre-swabbed data, undo this for writel() */
+                        data = swab32(data);
+                }
                 writel_relaxed(data, addr);
         } else {
                 u32 data = readl_relaxed(addr);
-                vcpu_set_reg(vcpu, rd, vcpu_data_host_to_guest(vcpu, data,
-                                                               sizeof(u32)));
+                if (__is_be(vcpu)) {
+                        /* guest expects swabbed data */
+                        data = swab32(data);
+                }
+                vcpu_set_reg(vcpu, rd, data);
         }
 
         return 1;


@@ -216,6 +216,12 @@ static inline void memcpy_toio(volatile void __iomem *dst, const void *src,
         memcpy((void *) dst, src, count);
 }
 
+static inline void memset_io(volatile void __iomem *addr, int value,
+                             size_t size)
+{
+        memset((void __force *)addr, value, size);
+}
+
 #define PCI_IO_ADDR (volatile void __iomem *)
 
 /*
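
A typical caller of the helper added above would look like this sketch (hypothetical function and mapping; assumes linux/io.h is available):

static void example_clear_window(unsigned long phys_addr)
{
        void __iomem *regs = ioremap(phys_addr, 4096);

        if (!regs)
                return;
        memset_io(regs, 0, 4096);       /* zero the mapped MMIO window */
        iounmap(regs);
}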


@@ -199,3 +199,4 @@ csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
         memcpy(dst, src, len);
         return csum_partial(dst, len, sum);
 }
+EXPORT_SYMBOL(csum_partial_copy_nocheck);


@@ -123,6 +123,9 @@ INSTALL_TARGETS = zinstall install
 
 PHONY += bzImage $(BOOT_TARGETS) $(INSTALL_TARGETS)
 
+# Default kernel to build
+all: bzImage
+
 zImage: vmlinuz
 Image: vmlinux


@@ -448,7 +448,8 @@ static int match_by_id(struct device * dev, void * data)
  * Checks all the children of @parent for a matching @id.  If none
  * found, it allocates a new device and returns it.
  */
-static struct parisc_device * alloc_tree_node(struct device *parent, char id)
+static struct parisc_device * __init alloc_tree_node(
+                        struct device *parent, char id)
 {
         struct match_id_data d = {
                 .id = id,
@@ -825,8 +826,8 @@ static void walk_lower_bus(struct parisc_device *dev)
  * devices which are not physically connected (such as extra serial &
  * keyboard ports).  This problem is not yet solved.
  */
-static void walk_native_bus(unsigned long io_io_low, unsigned long io_io_high,
-                            struct device *parent)
+static void __init walk_native_bus(unsigned long io_io_low,
+                        unsigned long io_io_high, struct device *parent)
 {
         int i, devices_found = 0;
         unsigned long hpa = io_io_low;


@@ -174,7 +174,7 @@ void pcibios_set_master(struct pci_dev *dev)
  * pcibios_init_bridge() initializes cache line and default latency
  * for pci controllers and pci-pci bridges
  */
-void __init pcibios_init_bridge(struct pci_dev *dev)
+void __ref pcibios_init_bridge(struct pci_dev *dev)
 {
         unsigned short bridge_ctl, bridge_ctl_new;


@@ -205,7 +205,7 @@ static int __init rtc_init(void)
 device_initcall(rtc_init);
 #endif
 
-void read_persistent_clock(struct timespec *ts)
+void read_persistent_clock64(struct timespec64 *ts)
 {
         static struct pdc_tod tod_data;
         if (pdc_tod_read(&tod_data) == 0) {


@@ -837,6 +837,17 @@ void __init initialize_ivt(const void *iva)
         if (pdc_instr(&instr) == PDC_OK)
                 ivap[0] = instr;
 
+        /*
+         * Rules for the checksum of the HPMC handler:
+         * 1. The IVA does not point to PDC/PDH space (ie: the OS has installed
+         *    its own IVA).
+         * 2. The word at IVA + 32 is nonzero.
+         * 3. If Length (IVA + 60) is not zero, then Length (IVA + 60) and
+         *    Address (IVA + 56) are word-aligned.
+         * 4. The checksum of the 8 words starting at IVA + 32 plus the sum of
+         *    the Length/4 words starting at Address is zero.
+         */
+
         /* Compute Checksum for HPMC handler */
         length = os_hpmc_size;
         ivap[7] = length;
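
Rule 4 in the new comment is the invariant the checksum code maintains; as an illustrative sketch (hpmc_checksum, ivap and handler are stand-ins for the real kernel variables, and the checksum slot is assumed zero while the sums are taken):

static u32 hpmc_checksum(u32 *ivap, u32 *handler, u32 length)
{
        u32 check = 0;
        u32 i;

        for (i = 0; i < 8; i++)                  /* 8 words starting at IVA + 32 */
                check += ivap[i];
        for (i = 0; i < length / 4; i++)         /* Length/4 words starting at Address */
                check += handler[i];

        return -check;  /* stored so that the grand total sums to zero */
}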


@@ -516,7 +516,7 @@ static void __init map_pages(unsigned long start_vaddr,
         }
 }
 
-void free_initmem(void)
+void __ref free_initmem(void)
 {
         unsigned long init_begin = (unsigned long)__init_begin;
         unsigned long init_end = (unsigned long)__init_end;


@@ -3,7 +3,7 @@
  *
  * This program is free software: you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
+ * the Free Software Foundation, either version 2 of the License, or
  * (at your option) any later version.
  *
  * This program is distributed in the hope that it will be useful,


@@ -403,7 +403,7 @@ static struct vio_dev *vio_create_one(struct mdesc_handle *hp, u64 mp,
         if (err) {
                 printk(KERN_ERR "VIO: Could not register device %s, err=%d\n",
                        dev_name(&vdev->dev), err);
-                kfree(vdev);
+                put_device(&vdev->dev);
                 return NULL;
         }
         if (vdev->dp)


@@ -848,6 +848,11 @@ void get_cpu_cap(struct cpuinfo_x86 *c)
                 c->x86_power = edx;
         }
 
+        if (c->extended_cpuid_level >= 0x80000008) {
+                cpuid(0x80000008, &eax, &ebx, &ecx, &edx);
+                c->x86_capability[CPUID_8000_0008_EBX] = ebx;
+        }
+
         if (c->extended_cpuid_level >= 0x8000000a)
                 c->x86_capability[CPUID_8000_000A_EDX] = cpuid_edx(0x8000000a);
 
@@ -871,7 +876,6 @@ static void get_cpu_address_sizes(struct cpuinfo_x86 *c)
 
                 c->x86_virt_bits = (eax >> 8) & 0xff;
                 c->x86_phys_bits = eax & 0xff;
-                c->x86_capability[CPUID_8000_0008_EBX] = ebx;
         }
 #ifdef CONFIG_X86_32
         else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))


@@ -1067,6 +1067,7 @@ static struct clocksource clocksource_tsc_early = {
         .resume = tsc_resume,
         .mark_unstable = tsc_cs_mark_unstable,
         .tick_stable = tsc_cs_tick_stable,
+        .list = LIST_HEAD_INIT(clocksource_tsc_early.list),
 };
 
 /*
@@ -1086,6 +1087,7 @@ static struct clocksource clocksource_tsc = {
         .resume = tsc_resume,
         .mark_unstable = tsc_cs_mark_unstable,
         .tick_stable = tsc_cs_tick_stable,
+        .list = LIST_HEAD_INIT(clocksource_tsc.list),
 };
 
 void mark_tsc_unstable(char *reason)
@@ -1098,13 +1100,9 @@ void mark_tsc_unstable(char *reason)
         clear_sched_clock_stable();
         disable_sched_clock_irqtime();
         pr_info("Marking TSC unstable due to %s\n", reason);
-        /* Change only the rating, when not registered */
-        if (clocksource_tsc.mult) {
-                clocksource_mark_unstable(&clocksource_tsc);
-        } else {
-                clocksource_tsc.flags |= CLOCK_SOURCE_UNSTABLE;
-                clocksource_tsc.rating = 0;
-        }
+        clocksource_mark_unstable(&clocksource_tsc_early);
+        clocksource_mark_unstable(&clocksource_tsc);
 }
 
 EXPORT_SYMBOL_GPL(mark_tsc_unstable);
@@ -1244,7 +1242,7 @@ static void tsc_refine_calibration_work(struct work_struct *work)
 
         /* Don't bother refining TSC on unstable systems */
         if (tsc_unstable)
-                return;
+                goto unreg;
 
         /*
          * Since the work is started early in boot, we may be
@@ -1297,11 +1295,12 @@ static void tsc_refine_calibration_work(struct work_struct *work)
 
 out:
         if (tsc_unstable)
-                return;
+                goto unreg;
 
         if (boot_cpu_has(X86_FEATURE_ART))
                 art_related_clocksource = &clocksource_tsc;
         clocksource_register_khz(&clocksource_tsc, tsc_khz);
+unreg:
         clocksource_unregister(&clocksource_tsc_early);
 }
 
@@ -1311,8 +1310,8 @@ static int __init init_tsc_clocksource(void)
         if (!boot_cpu_has(X86_FEATURE_TSC) || tsc_disabled > 0 || !tsc_khz)
                 return 0;
 
-        if (check_tsc_unstable())
-                return 0;
+        if (tsc_unstable)
+                goto unreg;
 
         if (tsc_clocksource_reliable)
                 clocksource_tsc.flags &= ~CLOCK_SOURCE_MUST_VERIFY;
@@ -1328,6 +1327,7 @@ static int __init init_tsc_clocksource(void)
         if (boot_cpu_has(X86_FEATURE_ART))
                 art_related_clocksource = &clocksource_tsc;
         clocksource_register_khz(&clocksource_tsc, tsc_khz);
+unreg:
         clocksource_unregister(&clocksource_tsc_early);
         return 0;
 }


@@ -1463,23 +1463,6 @@ static void start_sw_tscdeadline(struct kvm_lapic *apic)
         local_irq_restore(flags);
 }
 
-static void start_sw_period(struct kvm_lapic *apic)
-{
-        if (!apic->lapic_timer.period)
-                return;
-
-        if (apic_lvtt_oneshot(apic) &&
-            ktime_after(ktime_get(),
-                        apic->lapic_timer.target_expiration)) {
-                apic_timer_expired(apic);
-                return;
-        }
-
-        hrtimer_start(&apic->lapic_timer.timer,
-                apic->lapic_timer.target_expiration,
-                HRTIMER_MODE_ABS_PINNED);
-}
-
 static void update_target_expiration(struct kvm_lapic *apic, uint32_t old_divisor)
 {
         ktime_t now, remaining;
@@ -1546,6 +1529,26 @@ static void advance_periodic_target_expiration(struct kvm_lapic *apic)
                 apic->lapic_timer.period);
 }
 
+static void start_sw_period(struct kvm_lapic *apic)
+{
+        if (!apic->lapic_timer.period)
+                return;
+
+        if (ktime_after(ktime_get(),
+                        apic->lapic_timer.target_expiration)) {
+                apic_timer_expired(apic);
+
+                if (apic_lvtt_oneshot(apic))
+                        return;
+
+                advance_periodic_target_expiration(apic);
+        }
+
+        hrtimer_start(&apic->lapic_timer.timer,
+                apic->lapic_timer.target_expiration,
+                HRTIMER_MODE_ABS_PINNED);
+}
+
 bool kvm_lapic_hv_timer_in_use(struct kvm_vcpu *vcpu)
 {
         if (!lapic_in_kernel(vcpu))


@@ -1027,7 +1027,17 @@ emit_cond_jmp: /* convert BPF opcode to x86 */
                         break;
 
                 case BPF_JMP | BPF_JA:
-                        jmp_offset = addrs[i + insn->off] - addrs[i];
+                        if (insn->off == -1)
+                                /* -1 jmp instructions will always jump
+                                 * backwards two bytes. Explicitly handling
+                                 * this case avoids wasting too many passes
+                                 * when there are long sequences of replaced
+                                 * dead code.
+                                 */
+                                jmp_offset = -2;
+                        else
+                                jmp_offset = addrs[i + insn->off] - addrs[i];
                         if (!jmp_offset)
                                 /* optimize out nop jumps */
                                 break;
@@ -1226,6 +1236,7 @@ skip_init_addrs:
         for (pass = 0; pass < 20 || image; pass++) {
                 proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
                 if (proglen <= 0) {
+out_image:
                         image = NULL;
                         if (header)
                                 bpf_jit_binary_free(header);
@@ -1236,8 +1247,7 @@ skip_init_addrs:
                         if (proglen != oldproglen) {
                                 pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
                                        proglen, oldproglen);
-                                prog = orig_prog;
-                                goto out_addrs;
+                                goto out_image;
                         }
                         break;
                 }
@@ -1273,7 +1283,7 @@ skip_init_addrs:
                 prog = orig_prog;
         }
 
-        if (!prog->is_func || extra_pass) {
+        if (!image || !prog->is_func || extra_pass) {
 out_addrs:
                 kfree(addrs);
                 kfree(jit_data);


@@ -421,24 +421,15 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
 {
         unsigned long va = dtr->address;
         unsigned int size = dtr->size + 1;
-        unsigned pages = DIV_ROUND_UP(size, PAGE_SIZE);
-        unsigned long frames[pages];
-        int f;
-
-        /*
-         * A GDT can be up to 64k in size, which corresponds to 8192
-         * 8-byte entries, or 16 4k pages..
-         */
-        BUG_ON(size > 65536);
-        BUG_ON(va & ~PAGE_MASK);
-
-        for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
+        unsigned long pfn, mfn;
         int level;
         pte_t *ptep;
-        unsigned long pfn, mfn;
         void *virt;
 
+        /* @size should be at most GDT_SIZE which is smaller than PAGE_SIZE. */
+        BUG_ON(size > PAGE_SIZE);
+        BUG_ON(va & ~PAGE_MASK);
+
         /*
          * The GDT is per-cpu and is in the percpu data area.
          * That can be virtually mapped, so we need to do a
@@ -453,13 +444,10 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
         mfn = pfn_to_mfn(pfn);
         virt = __va(PFN_PHYS(pfn));
 
-        frames[f] = mfn;
-
         make_lowmem_page_readonly((void *)va);
         make_lowmem_page_readonly(virt);
-        }
 
-        if (HYPERVISOR_set_gdt(frames, size / sizeof(struct desc_struct)))
+        if (HYPERVISOR_set_gdt(&mfn, size / sizeof(struct desc_struct)))
                 BUG();
 }
 
@@ -470,21 +458,12 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
 {
         unsigned long va = dtr->address;
         unsigned int size = dtr->size + 1;
-        unsigned pages = DIV_ROUND_UP(size, PAGE_SIZE);
-        unsigned long frames[pages];
-        int f;
-
-        /*
-         * A GDT can be up to 64k in size, which corresponds to 8192
-         * 8-byte entries, or 16 4k pages..
-         */
-        BUG_ON(size > 65536);
-        BUG_ON(va & ~PAGE_MASK);
-
-        for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
-                pte_t pte;
         unsigned long pfn, mfn;
+        pte_t pte;
+
+        /* @size should be at most GDT_SIZE which is smaller than PAGE_SIZE. */
+        BUG_ON(size > PAGE_SIZE);
+        BUG_ON(va & ~PAGE_MASK);
 
         pfn = virt_to_pfn(va);
         mfn = pfn_to_mfn(pfn);
@@ -494,10 +473,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
         if (HYPERVISOR_update_va_mapping((unsigned long)va, pte, 0))
                 BUG();
 
-        frames[f] = mfn;
-        }
-
-        if (HYPERVISOR_set_gdt(frames, size / sizeof(struct desc_struct)))
+        if (HYPERVISOR_set_gdt(&mfn, size / sizeof(struct desc_struct)))
                 BUG();
 }


@@ -95,18 +95,15 @@ static void blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx,
 {
         struct mq_inflight *mi = priv;
 
-        if (blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT) {
-                /*
-                 * index[0] counts the specific partition that was asked
-                 * for. index[1] counts the ones that are active on the
-                 * whole device, so increment that if mi->part is indeed
-                 * a partition, and not a whole device.
-                 */
-                if (rq->part == mi->part)
-                        mi->inflight[0]++;
-                if (mi->part->partno)
-                        mi->inflight[1]++;
-        }
+        /*
+         * index[0] counts the specific partition that was asked for. index[1]
+         * counts the ones that are active on the whole device, so increment
+         * that if mi->part is indeed a partition, and not a whole device.
+         */
+        if (rq->part == mi->part)
+                mi->inflight[0]++;
+        if (mi->part->partno)
+                mi->inflight[1]++;
 }
 
 void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
@@ -118,6 +115,25 @@ void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
         blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight, &mi);
 }
 
+static void blk_mq_check_inflight_rw(struct blk_mq_hw_ctx *hctx,
+                                     struct request *rq, void *priv,
+                                     bool reserved)
+{
+        struct mq_inflight *mi = priv;
+
+        if (rq->part == mi->part)
+                mi->inflight[rq_data_dir(rq)]++;
+}
+
+void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
+                         unsigned int inflight[2])
+{
+        struct mq_inflight mi = { .part = part, .inflight = inflight, };
+
+        inflight[0] = inflight[1] = 0;
+        blk_mq_queue_tag_busy_iter(q, blk_mq_check_inflight_rw, &mi);
+}
+
 void blk_freeze_queue_start(struct request_queue *q)
 {
         int freeze_depth;


@@ -189,6 +189,8 @@ static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
 
 void blk_mq_in_flight(struct request_queue *q, struct hd_struct *part,
                       unsigned int inflight[2]);
+void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
+                         unsigned int inflight[2]);
 
 static inline void blk_mq_put_dispatch_budget(struct blk_mq_hw_ctx *hctx)
 {


@@ -82,6 +82,18 @@ void part_in_flight(struct request_queue *q, struct hd_struct *part,
         }
 }
 
+void part_in_flight_rw(struct request_queue *q, struct hd_struct *part,
+                       unsigned int inflight[2])
+{
+        if (q->mq_ops) {
+                blk_mq_in_flight_rw(q, part, inflight);
+                return;
+        }
+
+        inflight[0] = atomic_read(&part->in_flight[0]);
+        inflight[1] = atomic_read(&part->in_flight[1]);
+}
+
 struct hd_struct *__disk_get_part(struct gendisk *disk, int partno)
 {
         struct disk_part_tbl *ptbl = rcu_dereference(disk->part_tbl);


@@ -145,13 +145,15 @@ ssize_t part_stat_show(struct device *dev,
                 jiffies_to_msecs(part_stat_read(p, time_in_queue)));
 }
 
-ssize_t part_inflight_show(struct device *dev,
-                        struct device_attribute *attr, char *buf)
+ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr,
+                           char *buf)
 {
         struct hd_struct *p = dev_to_part(dev);
+        struct request_queue *q = part_to_disk(p)->queue;
+        unsigned int inflight[2];
 
-        return sprintf(buf, "%8u %8u\n", atomic_read(&p->in_flight[0]),
-                atomic_read(&p->in_flight[1]));
+        part_in_flight_rw(q, p, inflight);
+        return sprintf(buf, "%8u %8u\n", inflight[0], inflight[1]);
 }
 
 #ifdef CONFIG_FAIL_MAKE_REQUEST


@@ -541,7 +541,7 @@ probe_err:
         return ret;
 }
 
-static int cs2000_resume(struct device *dev)
+static int __maybe_unused cs2000_resume(struct device *dev)
 {
         struct cs2000_priv *priv = dev_get_drvdata(dev);


@@ -112,10 +112,18 @@ static int clk_mux_set_parent(struct clk_hw *hw, u8 index)
         return 0;
 }
 
+static int clk_mux_determine_rate(struct clk_hw *hw,
+                                  struct clk_rate_request *req)
+{
+        struct clk_mux *mux = to_clk_mux(hw);
+
+        return clk_mux_determine_rate_flags(hw, req, mux->flags);
+}
+
 const struct clk_ops clk_mux_ops = {
         .get_parent = clk_mux_get_parent,
         .set_parent = clk_mux_set_parent,
-        .determine_rate = __clk_mux_determine_rate,
+        .determine_rate = clk_mux_determine_rate,
 };
 EXPORT_SYMBOL_GPL(clk_mux_ops);


@@ -216,7 +216,7 @@ static const char * const usart1_src[] = {
         "pclk5", "pll3_q", "ck_hsi", "ck_csi", "pll4_q", "ck_hse"
 };
 
-const char * const usart234578_src[] = {
+static const char * const usart234578_src[] = {
         "pclk1", "pll4_q", "ck_hsi", "ck_csi", "ck_hse"
 };
 
@@ -224,10 +224,6 @@ static const char * const usart6_src[] = {
         "pclk2", "pll4_q", "ck_hsi", "ck_csi", "ck_hse"
 };
 
-static const char * const dfsdm_src[] = {
-        "pclk2", "ck_mcu"
-};
-
 static const char * const fdcan_src[] = {
         "ck_hse", "pll3_q", "pll4_q"
 };
@@ -316,10 +312,8 @@ struct stm32_clk_mgate {
 struct clock_config {
         u32 id;
         const char *name;
-        union {
         const char *parent_name;
         const char * const *parent_names;
-        };
         int num_parents;
         unsigned long flags;
         void *cfg;
@@ -469,7 +463,7 @@ static void mp1_gate_clk_disable(struct clk_hw *hw)
         }
 }
 
-const struct clk_ops mp1_gate_clk_ops = {
+static const struct clk_ops mp1_gate_clk_ops = {
         .enable = mp1_gate_clk_enable,
         .disable = mp1_gate_clk_disable,
         .is_enabled = clk_gate_is_enabled,
@@ -698,7 +692,7 @@ static void mp1_mgate_clk_disable(struct clk_hw *hw)
         mp1_gate_clk_disable(hw);
 }
 
-const struct clk_ops mp1_mgate_clk_ops = {
+static const struct clk_ops mp1_mgate_clk_ops = {
         .enable = mp1_mgate_clk_enable,
         .disable = mp1_mgate_clk_disable,
         .is_enabled = clk_gate_is_enabled,
@@ -732,7 +726,7 @@ static int clk_mmux_set_parent(struct clk_hw *hw, u8 index)
         return 0;
 }
 
-const struct clk_ops clk_mmux_ops = {
+static const struct clk_ops clk_mmux_ops = {
         .get_parent = clk_mmux_get_parent,
         .set_parent = clk_mmux_set_parent,
         .determine_rate = __clk_mux_determine_rate,
@@ -1048,7 +1042,7 @@ struct stm32_pll_cfg {
         u32 offset;
 };
 
-struct clk_hw *_clk_register_pll(struct device *dev,
+static struct clk_hw *_clk_register_pll(struct device *dev,
                                  struct clk_hw_onecell_data *clk_data,
                                  void __iomem *base, spinlock_t *lock,
                                  const struct clock_config *cfg)
@@ -1405,7 +1399,8 @@ enum {
         G_USBH,
         G_ETHSTP,
         G_RTCAPB,
-        G_TZC,
+        G_TZC1,
+        G_TZC2,
         G_TZPC,
         G_IWDG1,
         G_BSEC,
@@ -1417,7 +1412,7 @@ enum {
         G_LAST
 };
 
-struct stm32_mgate mp1_mgate[G_LAST];
+static struct stm32_mgate mp1_mgate[G_LAST];
 
 #define _K_GATE(_id, _gate_offset, _gate_bit_idx, _gate_flags,\
                 _mgate, _ops)\
@@ -1440,7 +1435,7 @@ struct stm32_mgate mp1_mgate[G_LAST];
                &mp1_mgate[_id], &mp1_mgate_clk_ops)
 
 /* Peripheral gates */
-struct stm32_gate_cfg per_gate_cfg[G_LAST] = {
+static struct stm32_gate_cfg per_gate_cfg[G_LAST] = {
         /* Multi gates */
         K_GATE(G_MDIO, RCC_APB1ENSETR, 31, 0),
         K_MGATE(G_DAC12, RCC_APB1ENSETR, 29, 0),
@@ -1506,7 +1501,8 @@ struct stm32_gate_cfg per_gate_cfg[G_LAST] = {
         K_GATE(G_BSEC, RCC_APB5ENSETR, 16, 0),
         K_GATE(G_IWDG1, RCC_APB5ENSETR, 15, 0),
         K_GATE(G_TZPC, RCC_APB5ENSETR, 13, 0),
-        K_GATE(G_TZC, RCC_APB5ENSETR, 12, 0),
+        K_GATE(G_TZC2, RCC_APB5ENSETR, 12, 0),
+        K_GATE(G_TZC1, RCC_APB5ENSETR, 11, 0),
         K_GATE(G_RTCAPB, RCC_APB5ENSETR, 8, 0),
         K_MGATE(G_USART1, RCC_APB5ENSETR, 4, 0),
         K_MGATE(G_I2C6, RCC_APB5ENSETR, 3, 0),
@@ -1600,7 +1596,7 @@ enum {
         M_LAST
 };
 
-struct stm32_mmux ker_mux[M_LAST];
+static struct stm32_mmux ker_mux[M_LAST];
 
 #define _K_MUX(_id, _offset, _shift, _width, _mux_flags, _mmux, _ops)\
         [_id] = {\
@@ -1623,7 +1619,7 @@ struct stm32_mmux ker_mux[M_LAST];
         _K_MUX(_id, _offset, _shift, _width, _mux_flags,\
                         &ker_mux[_id], &clk_mmux_ops)
 
-const struct stm32_mux_cfg ker_mux_cfg[M_LAST] = {
+static const struct stm32_mux_cfg ker_mux_cfg[M_LAST] = {
         /* Kernel multi mux */
         K_MMUX(M_SDMMC12, RCC_SDMMC12CKSELR, 0, 3, 0),
         K_MMUX(M_SPI23, RCC_SPI2S23CKSELR, 0, 3, 0),
@@ -1860,7 +1856,8 @@ static const struct clock_config stm32mp1_clock_cfg[] = {
         PCLK(USART1, "usart1", "pclk5", 0, G_USART1),
         PCLK(RTCAPB, "rtcapb", "pclk5", CLK_IGNORE_UNUSED |
              CLK_IS_CRITICAL, G_RTCAPB),
-        PCLK(TZC, "tzc", "pclk5", CLK_IGNORE_UNUSED, G_TZC),
+        PCLK(TZC1, "tzc1", "ck_axi", CLK_IGNORE_UNUSED, G_TZC1),
+        PCLK(TZC2, "tzc2", "ck_axi", CLK_IGNORE_UNUSED, G_TZC2),
         PCLK(TZPC, "tzpc", "pclk5", CLK_IGNORE_UNUSED, G_TZPC),
         PCLK(IWDG1, "iwdg1", "pclk5", 0, G_IWDG1),
         PCLK(BSEC, "bsec", "pclk5", CLK_IGNORE_UNUSED, G_BSEC),
@@ -1916,8 +1913,7 @@ static const struct clock_config stm32mp1_clock_cfg[] = {
         KCLK(RNG1_K, "rng1_k", rng_src, 0, G_RNG1, M_RNG1),
         KCLK(RNG2_K, "rng2_k", rng_src, 0, G_RNG2, M_RNG2),
         KCLK(USBPHY_K, "usbphy_k", usbphy_src, 0, G_USBPHY, M_USBPHY),
-        KCLK(STGEN_K, "stgen_k", stgen_src, CLK_IGNORE_UNUSED,
-             G_STGEN, M_STGEN),
+        KCLK(STGEN_K, "stgen_k", stgen_src, CLK_IS_CRITICAL, G_STGEN, M_STGEN),
         KCLK(SPDIF_K, "spdif_k", spdif_src, 0, G_SPDIF, M_SPDIF),
         KCLK(SPI1_K, "spi1_k", spi123_src, 0, G_SPI1, M_SPI1),
         KCLK(SPI2_K, "spi2_k", spi123_src, 0, G_SPI2, M_SPI23),
@@ -1948,8 +1944,8 @@ static const struct clock_config stm32mp1_clock_cfg[] = {
         KCLK(FDCAN_K, "fdcan_k", fdcan_src, 0, G_FDCAN, M_FDCAN),
         KCLK(SAI1_K, "sai1_k", sai_src, 0, G_SAI1, M_SAI1),
         KCLK(SAI2_K, "sai2_k", sai2_src, 0, G_SAI2, M_SAI2),
-        KCLK(SAI3_K, "sai3_k", sai_src, 0, G_SAI2, M_SAI3),
-        KCLK(SAI4_K, "sai4_k", sai_src, 0, G_SAI2, M_SAI4),
+        KCLK(SAI3_K, "sai3_k", sai_src, 0, G_SAI3, M_SAI3),
+        KCLK(SAI4_K, "sai4_k", sai_src, 0, G_SAI4, M_SAI4),
         KCLK(ADC12_K, "adc12_k", adc12_src, 0, G_ADC12, M_ADC12),
         KCLK(DSI_K, "dsi_k", dsi_src, 0, G_DSI, M_DSI),
         KCLK(ADFSDM_K, "adfsdm_k", sai_src, 0, G_ADFSDM, M_SAI1),
@@ -1992,10 +1988,6 @@ static const struct clock_config stm32mp1_clock_cfg[] = {
              _DIV(RCC_MCO2CFGR, 4, 4, 0, NULL)),
 
         /* Debug clocks */
-        FIXED_FACTOR(NO_ID, "ck_axi_div2", "ck_axi", 0, 1, 2),
-
-        GATE(DBG, "ck_apb_dbg", "ck_axi_div2", 0, RCC_DBGCFGR, 8, 0),
-
         GATE(CK_DBG, "ck_sys_dbg", "ck_axi", 0, RCC_DBGCFGR, 8, 0),
 
         COMPOSITE(CK_TRACE, "ck_trace", ck_trace_src, CLK_OPS_PARENT_ENABLE,


@@ -426,8 +426,8 @@ static bool mux_is_better_rate(unsigned long rate, unsigned long now,
         return now <= rate && now > best;
 }
 
-static int
-clk_mux_determine_rate_flags(struct clk_hw *hw, struct clk_rate_request *req,
+int clk_mux_determine_rate_flags(struct clk_hw *hw,
+                                 struct clk_rate_request *req,
                              unsigned long flags)
 {
         struct clk_core *core = hw->core, *parent, *best_parent = NULL;
@@ -488,6 +488,7 @@ out:
 
         return 0;
 }
+EXPORT_SYMBOL_GPL(clk_mux_determine_rate_flags);
 
 struct clk *__clk_lookup(const char *name)
 {


@@ -153,10 +153,19 @@ static int clk_regmap_mux_set_parent(struct clk_hw *hw, u8 index)
                                   val << mux->shift);
 }
 
+static int clk_regmap_mux_determine_rate(struct clk_hw *hw,
+                                         struct clk_rate_request *req)
+{
+        struct clk_regmap *clk = to_clk_regmap(hw);
+        struct clk_regmap_mux_data *mux = clk_get_regmap_mux_data(clk);
+
+        return clk_mux_determine_rate_flags(hw, req, mux->flags);
+}
+
 const struct clk_ops clk_regmap_mux_ops = {
         .get_parent = clk_regmap_mux_get_parent,
         .set_parent = clk_regmap_mux_set_parent,
-        .determine_rate = __clk_mux_determine_rate,
+        .determine_rate = clk_regmap_mux_determine_rate,
 };
 EXPORT_SYMBOL_GPL(clk_regmap_mux_ops);


@@ -17,8 +17,6 @@
 #define AO_RTC_ALT_CLK_CNTL0 0x94
 #define AO_RTC_ALT_CLK_CNTL1 0x98
 
-extern const struct clk_ops meson_aoclk_gate_regmap_ops;
-
 struct aoclk_cec_32k {
         struct clk_hw hw;
         struct regmap *regmap;


@@ -253,7 +253,7 @@ static struct clk_fixed_factor meson8b_fclk_div3_div = {
         .mult = 1,
         .div = 3,
         .hw.init = &(struct clk_init_data){
-                .name = "fclk_div_div3",
+                .name = "fclk_div3_div",
                 .ops = &clk_fixed_factor_ops,
                 .parent_names = (const char *[]){ "fixed_pll" },
                 .num_parents = 1,
@@ -632,7 +632,8 @@ static struct clk_regmap meson8b_cpu_clk = {
         .hw.init = &(struct clk_init_data){
                 .name = "cpu_clk",
                 .ops = &clk_regmap_mux_ro_ops,
-                .parent_names = (const char *[]){ "xtal", "cpu_out_sel" },
+                .parent_names = (const char *[]){ "xtal",
+                                                  "cpu_scale_out_sel" },
                 .num_parents = 2,
                 .flags = (CLK_SET_RATE_PARENT |
                           CLK_SET_RATE_NO_REPARENT),


@@ -126,6 +126,49 @@ static void cppc_cpufreq_stop_cpu(struct cpufreq_policy *policy)
                                 cpu->perf_caps.lowest_perf, cpu_num, ret);
 }
 
+/*
+ * The PCC subspace describes the rate at which platform can accept commands
+ * on the shared PCC channel (including READs which do not count towards freq
+ * trasition requests), so ideally we need to use the PCC values as a fallback
+ * if we don't have a platform specific transition_delay_us
+ */
+#ifdef CONFIG_ARM64
+#include <asm/cputype.h>
+
+static unsigned int cppc_cpufreq_get_transition_delay_us(int cpu)
+{
+        unsigned long implementor = read_cpuid_implementor();
+        unsigned long part_num = read_cpuid_part_number();
+        unsigned int delay_us = 0;
+
+        switch (implementor) {
+        case ARM_CPU_IMP_QCOM:
+                switch (part_num) {
+                case QCOM_CPU_PART_FALKOR_V1:
+                case QCOM_CPU_PART_FALKOR:
+                        delay_us = 10000;
+                        break;
+                default:
+                        delay_us = cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
+                        break;
+                }
+                break;
+        default:
+                delay_us = cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
+                break;
+        }
+
+        return delay_us;
+}
+
+#else
+
+static unsigned int cppc_cpufreq_get_transition_delay_us(int cpu)
+{
+        return cppc_get_transition_latency(cpu) / NSEC_PER_USEC;
+}
+#endif
+
 static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
 {
         struct cppc_cpudata *cpu;
@@ -162,8 +205,7 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
                 cpu->perf_caps.highest_perf;
         policy->cpuinfo.max_freq = cppc_dmi_max_khz;
 
-        policy->transition_delay_us = cppc_get_transition_latency(cpu_num) /
-                NSEC_PER_USEC;
+        policy->transition_delay_us = cppc_cpufreq_get_transition_delay_us(cpu_num);
         policy->shared_type = cpu->shared_type;
 
         if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {


@@ -56,7 +56,9 @@ static int dumb_vga_get_modes(struct drm_connector *connector)
         }
 
         drm_mode_connector_update_edid_property(connector, edid);
-        return drm_add_edid_modes(connector, edid);
+        ret = drm_add_edid_modes(connector, edid);
+        kfree(edid);
+        return ret;
 
 fallback:
         /*


@@ -35,6 +35,7 @@
  */
 
 #define I915_CSR_GLK "i915/glk_dmc_ver1_04.bin"
+MODULE_FIRMWARE(I915_CSR_GLK);
 #define GLK_CSR_VERSION_REQUIRED CSR_VERSION(1, 4)
 
 #define I915_CSR_CNL "i915/cnl_dmc_ver1_07.bin"


@@ -760,6 +760,7 @@ static irqreturn_t vc4_crtc_irq_handler(int irq, void *data)
 struct vc4_async_flip_state {
         struct drm_crtc *crtc;
         struct drm_framebuffer *fb;
+        struct drm_framebuffer *old_fb;
         struct drm_pending_vblank_event *event;
 
         struct vc4_seqno_cb cb;
@@ -789,6 +790,23 @@ vc4_async_page_flip_complete(struct vc4_seqno_cb *cb)
 
         drm_crtc_vblank_put(crtc);
         drm_framebuffer_put(flip_state->fb);
+
+        /* Decrement the BO usecnt in order to keep the inc/dec calls balanced
+         * when the planes are updated through the async update path.
+         * FIXME: we should move to generic async-page-flip when it's
+         * available, so that we can get rid of this hand-made cleanup_fb()
+         * logic.
+         */
+        if (flip_state->old_fb) {
+                struct drm_gem_cma_object *cma_bo;
+                struct vc4_bo *bo;
+
+                cma_bo = drm_fb_cma_get_gem_obj(flip_state->old_fb, 0);
+                bo = to_vc4_bo(&cma_bo->base);
+                vc4_bo_dec_usecnt(bo);
+                drm_framebuffer_put(flip_state->old_fb);
+        }
+
         kfree(flip_state);
 
         up(&vc4->async_modeset);
@@ -813,9 +831,22 @@ static int vc4_async_page_flip(struct drm_crtc *crtc,
         struct drm_gem_cma_object *cma_bo = drm_fb_cma_get_gem_obj(fb, 0);
         struct vc4_bo *bo = to_vc4_bo(&cma_bo->base);
 
+        /* Increment the BO usecnt here, so that we never end up with an
+         * unbalanced number of vc4_bo_{dec,inc}_usecnt() calls when the
+         * plane is later updated through the non-async path.
+         * FIXME: we should move to generic async-page-flip when it's
+         * available, so that we can get rid of this hand-made prepare_fb()
+         * logic.
+         */
+        ret = vc4_bo_inc_usecnt(bo);
+        if (ret)
+                return ret;
+
         flip_state = kzalloc(sizeof(*flip_state), GFP_KERNEL);
-        if (!flip_state)
+        if (!flip_state) {
+                vc4_bo_dec_usecnt(bo);
                 return -ENOMEM;
+        }
 
         drm_framebuffer_get(fb);
         flip_state->fb = fb;
@@ -826,10 +857,23 @@ static int vc4_async_page_flip(struct drm_crtc *crtc,
         ret = down_interruptible(&vc4->async_modeset);
         if (ret) {
                 drm_framebuffer_put(fb);
+                vc4_bo_dec_usecnt(bo);
                 kfree(flip_state);
                 return ret;
         }
 
+        /* Save the current FB before it's replaced by the new one in
+         * drm_atomic_set_fb_for_plane(). We'll need the old FB in
+         * vc4_async_page_flip_complete() to decrement the BO usecnt and keep
+         * it consistent.
+         * FIXME: we should move to generic async-page-flip when it's
+         * available, so that we can get rid of this hand-made cleanup_fb()
+         * logic.
+         */
+        flip_state->old_fb = plane->state->fb;
+        if (flip_state->old_fb)
+                drm_framebuffer_get(flip_state->old_fb);
+
         WARN_ON(drm_crtc_vblank_get(crtc) != 0);
 
         /* Immediately update the plane's legacy fb pointer, so that later


@@ -441,11 +441,11 @@ static int vmwgfx_set_config_internal(struct drm_mode_set *set)
         struct drm_crtc *crtc = set->crtc;
         struct drm_framebuffer *fb;
         struct drm_crtc *tmp;
-        struct drm_modeset_acquire_ctx *ctx;
         struct drm_device *dev = set->crtc->dev;
+        struct drm_modeset_acquire_ctx ctx;
         int ret;
 
-        ctx = dev->mode_config.acquire_ctx;
+        drm_modeset_acquire_init(&ctx, 0);
 
 restart:
         /*
@@ -458,7 +458,7 @@ restart:
 
         fb = set->fb;
 
-        ret = crtc->funcs->set_config(set, ctx);
+        ret = crtc->funcs->set_config(set, &ctx);
         if (ret == 0) {
                 crtc->primary->crtc = crtc;
                 crtc->primary->fb = fb;
@@ -473,20 +473,13 @@ restart:
         }
 
         if (ret == -EDEADLK) {
-                dev->mode_config.acquire_ctx = NULL;
-
-retry_locking:
-                drm_modeset_backoff(ctx);
-
-                ret = drm_modeset_lock_all_ctx(dev, ctx);
-                if (ret)
-                        goto retry_locking;
-
-                dev->mode_config.acquire_ctx = ctx;
-
+                drm_modeset_backoff(&ctx);
                 goto restart;
         }
 
+        drm_modeset_drop_locks(&ctx);
+        drm_modeset_acquire_fini(&ctx);
+
         return ret;
 }
 
@@ -624,7 +617,6 @@ static int vmw_fb_set_par(struct fb_info *info)
         }
 
         mutex_lock(&par->bo_mutex);
-        drm_modeset_lock_all(vmw_priv->dev);
         ret = vmw_fb_kms_framebuffer(info);
         if (ret)
                 goto out_unlock;
@@ -657,7 +649,6 @@ out_unlock:
                 drm_mode_destroy(vmw_priv->dev, old_mode);
         par->set_mode = mode;
 
-        drm_modeset_unlock_all(vmw_priv->dev);
         mutex_unlock(&par->bo_mutex);
 
         return ret;
@@ -713,18 +704,14 @@ int vmw_fb_init(struct vmw_private *vmw_priv)
         par->max_width = fb_width;
         par->max_height = fb_height;
 
-        drm_modeset_lock_all(vmw_priv->dev);
         ret = vmw_kms_fbdev_init_data(vmw_priv, 0, par->max_width,
                                       par->max_height, &par->con,
                                       &par->crtc, &init_mode);
-        if (ret) {
-                drm_modeset_unlock_all(vmw_priv->dev);
+        if (ret)
                 goto err_kms;
-        }
 
         info->var.xres = init_mode->hdisplay;
         info->var.yres = init_mode->vdisplay;
-        drm_modeset_unlock_all(vmw_priv->dev);
 
         /*
          * Create buffers and alloc memory
@@ -832,7 +819,9 @@ int vmw_fb_close(struct vmw_private *vmw_priv)
         cancel_delayed_work_sync(&par->local_work);
         unregister_framebuffer(info);
 
+        mutex_lock(&par->bo_mutex);
         (void) vmw_fb_kms_detach(par, true, true);
+        mutex_unlock(&par->bo_mutex);
 
         vfree(par->vmalloc);
         framebuffer_release(info);


@@ -2595,6 +2595,7 @@ void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx,
                 vmw_kms_helper_buffer_finish(res->dev_priv, NULL, ctx->buf,
                                              out_fence, NULL);
 
+        vmw_dmabuf_unreference(&ctx->buf);
         vmw_resource_unreserve(res, false, NULL, 0);
         mutex_unlock(&res->dev_priv->cmdbuf_mutex);
 }
@@ -2680,7 +2681,9 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
         struct vmw_display_unit *du;
         struct drm_display_mode *mode;
         int i = 0;
+        int ret = 0;
 
+        mutex_lock(&dev_priv->dev->mode_config.mutex);
         list_for_each_entry(con, &dev_priv->dev->mode_config.connector_list,
                             head) {
                 if (i == unit)
@@ -2691,7 +2694,8 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
 
         if (i != unit) {
                 DRM_ERROR("Could not find initial display unit.\n");
-                return -EINVAL;
+                ret = -EINVAL;
+                goto out_unlock;
         }
 
         if (list_empty(&con->modes))
@@ -2699,7 +2703,8 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
 
         if (list_empty(&con->modes)) {
                 DRM_ERROR("Could not find initial display mode.\n");
-                return -EINVAL;
+                ret = -EINVAL;
+                goto out_unlock;
         }
 
         du = vmw_connector_to_du(con);
@@ -2720,7 +2725,10 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
                                  head);
         }
 
-        return 0;
+out_unlock:
+        mutex_unlock(&dev_priv->dev->mode_config.mutex);
+
+        return ret;
 }
 
 /**
@@ -209,7 +209,10 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
 	i2c_dw_disable_int(dev);
 
 	/* Enable the adapter */
-	__i2c_dw_enable_and_wait(dev, true);
+	__i2c_dw_enable(dev, true);
+
+	/* Dummy read to avoid the register getting stuck on Bay Trail */
+	dw_readl(dev, DW_IC_ENABLE_STATUS);
 
 	/* Clear and enable interrupts */
 	dw_readl(dev, DW_IC_CLR_INTR);
@@ -564,10 +564,10 @@ static int pmcmsptwi_master_xfer(struct i2c_adapter *adap,
 		 * TODO: We could potentially loop and retry in the case
 		 * of MSP_TWI_XFER_TIMEOUT.
 		 */
-		return -1;
+		return -EIO;
 	}
 
-	return 0;
+	return num;
 }
 
 static u32 pmcmsptwi_i2c_func(struct i2c_adapter *adapter)
@@ -337,7 +337,7 @@ static int vprbrd_i2c_xfer(struct i2c_adapter *i2c, struct i2c_msg *msgs,
 		}
 		mutex_unlock(&vb->lock);
 	}
-	return 0;
+	return num;
 error:
 	mutex_unlock(&vb->lock);
 	return error;
@@ -445,10 +445,17 @@ static int acpi_gsb_i2c_read_bytes(struct i2c_client *client,
 	msgs[1].buf = buffer;
 
 	ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
-	if (ret < 0)
-		dev_err(&client->adapter->dev, "i2c read failed\n");
-	else
+	if (ret < 0) {
+		/* Getting a NACK is unfortunately normal with some DSTDs */
+		if (ret == -EREMOTEIO)
+			dev_dbg(&client->adapter->dev, "i2c read %d bytes from client@%#x starting at reg %#x failed, error: %d\n",
+				data_len, client->addr, cmd, ret);
+		else
+			dev_err(&client->adapter->dev, "i2c read %d bytes from client@%#x starting at reg %#x failed, error: %d\n",
+				data_len, client->addr, cmd, ret);
+	} else {
 		memcpy(data, buffer, data_len);
+	}
 
 	kfree(buffer);
 	return ret;
@@ -61,9 +61,12 @@ config INFINIBAND_ON_DEMAND_PAGING
 	  pages on demand instead.
 
 config INFINIBAND_ADDR_TRANS
-	bool
+	bool "RDMA/CM"
 	depends on INFINIBAND
 	default y
+	---help---
+	  Support for RDMA communication manager (CM).
+	  This allows for a generic connection abstraction over RDMA.
 
 config INFINIBAND_ADDR_TRANS_CONFIGFS
 	bool
@@ -291,14 +291,18 @@ static int find_gid(struct ib_gid_table *table, const union ib_gid *gid,
 		 * so lookup free slot only if requested.
 		 */
 		if (pempty && empty < 0) {
-			if (data->props & GID_TABLE_ENTRY_INVALID) {
-				/* Found an invalid (free) entry; allocate it */
-				if (data->props & GID_TABLE_ENTRY_DEFAULT) {
-					if (default_gid)
-						empty = curr_index;
-				} else {
-					empty = curr_index;
-				}
+			if (data->props & GID_TABLE_ENTRY_INVALID &&
+			    (default_gid ==
+			     !!(data->props & GID_TABLE_ENTRY_DEFAULT))) {
+				/*
+				 * Found an invalid (free) entry; allocate it.
+				 * If default GID is requested, then our
+				 * found slot must be one of the DEFAULT
+				 * reserved slots or we fail.
+				 * This ensures that only DEFAULT reserved
+				 * slots are used for default property GIDs.
+				 */
+				empty = curr_index;
 			}
 		}
@@ -420,8 +424,10 @@ int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
 	return ret;
 }
 
-int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
-		     union ib_gid *gid, struct ib_gid_attr *attr)
+static int
+_ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
+		  union ib_gid *gid, struct ib_gid_attr *attr,
+		  unsigned long mask, bool default_gid)
 {
 	struct ib_gid_table *table;
 	int ret = 0;
@@ -431,11 +437,7 @@ int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
 	mutex_lock(&table->lock);
 
-	ix = find_gid(table, gid, attr, false,
-		      GID_ATTR_FIND_MASK_GID |
-		      GID_ATTR_FIND_MASK_GID_TYPE |
-		      GID_ATTR_FIND_MASK_NETDEV,
-		      NULL);
+	ix = find_gid(table, gid, attr, default_gid, mask, NULL);
 	if (ix < 0) {
 		ret = -EINVAL;
 		goto out_unlock;
@@ -452,6 +454,17 @@ out_unlock:
 	return ret;
 }
 
+int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
+		     union ib_gid *gid, struct ib_gid_attr *attr)
+{
+	unsigned long mask = GID_ATTR_FIND_MASK_GID |
+			     GID_ATTR_FIND_MASK_GID_TYPE |
+			     GID_ATTR_FIND_MASK_DEFAULT |
+			     GID_ATTR_FIND_MASK_NETDEV;
+
+	return _ib_cache_gid_del(ib_dev, port, gid, attr, mask, false);
+}
+
 int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
 				     struct net_device *ndev)
 {
@@ -728,7 +741,7 @@ void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
 				  unsigned long gid_type_mask,
 				  enum ib_cache_gid_default_mode mode)
 {
-	union ib_gid gid;
+	union ib_gid gid = { };
 	struct ib_gid_attr gid_attr;
 	struct ib_gid_table *table;
 	unsigned int gid_type;
@@ -736,7 +749,9 @@ void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
 
 	table = ib_dev->cache.ports[port - rdma_start_port(ib_dev)].gid;
 
-	make_default_gid(ndev, &gid);
+	mask = GID_ATTR_FIND_MASK_GID_TYPE |
+	       GID_ATTR_FIND_MASK_DEFAULT |
+	       GID_ATTR_FIND_MASK_NETDEV;
 	memset(&gid_attr, 0, sizeof(gid_attr));
 	gid_attr.ndev = ndev;
@@ -747,12 +762,12 @@ void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
 		gid_attr.gid_type = gid_type;
 
 		if (mode == IB_CACHE_GID_DEFAULT_MODE_SET) {
-			mask = GID_ATTR_FIND_MASK_GID_TYPE |
-			       GID_ATTR_FIND_MASK_DEFAULT;
+			make_default_gid(ndev, &gid);
 			__ib_cache_gid_add(ib_dev, port, &gid,
 					   &gid_attr, mask, true);
 		} else if (mode == IB_CACHE_GID_DEFAULT_MODE_DELETE) {
-			ib_cache_gid_del(ib_dev, port, &gid, &gid_attr);
+			_ib_cache_gid_del(ib_dev, port, &gid,
+					  &gid_attr, mask, true);
 		}
 	}
 }
@@ -382,6 +382,8 @@ struct cma_hdr {
 #define CMA_VERSION 0x00
 
 struct cma_req_info {
+	struct sockaddr_storage listen_addr_storage;
+	struct sockaddr_storage src_addr_storage;
 	struct ib_device *device;
 	int port;
 	union ib_gid local_gid;
@@ -866,7 +868,6 @@ static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
 {
 	struct ib_qp_attr qp_attr;
 	int qp_attr_mask, ret;
-	union ib_gid sgid;
 
 	mutex_lock(&id_priv->qp_mutex);
 	if (!id_priv->id.qp) {
@@ -889,12 +890,6 @@ static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
 	if (ret)
 		goto out;
 
-	ret = ib_query_gid(id_priv->id.device, id_priv->id.port_num,
-			   rdma_ah_read_grh(&qp_attr.ah_attr)->sgid_index,
-			   &sgid, NULL);
-	if (ret)
-		goto out;
-
 	BUG_ON(id_priv->cma_dev->device != id_priv->id.device);
 
 	if (conn_param)
@@ -1340,11 +1335,11 @@ static bool validate_net_dev(struct net_device *net_dev,
 }
 
 static struct net_device *cma_get_net_dev(struct ib_cm_event *ib_event,
-					  const struct cma_req_info *req)
+					  struct cma_req_info *req)
 {
-	struct sockaddr_storage listen_addr_storage, src_addr_storage;
-	struct sockaddr *listen_addr = (struct sockaddr *)&listen_addr_storage,
-			*src_addr = (struct sockaddr *)&src_addr_storage;
+	struct sockaddr *listen_addr =
+			(struct sockaddr *)&req->listen_addr_storage;
+	struct sockaddr *src_addr = (struct sockaddr *)&req->src_addr_storage;
 	struct net_device *net_dev;
 	const union ib_gid *gid = req->has_gid ? &req->local_gid : NULL;
 	int err;
@@ -1359,11 +1354,6 @@ static struct net_device *cma_get_net_dev(struct ib_cm_event *ib_event,
 	if (!net_dev)
 		return ERR_PTR(-ENODEV);
 
-	if (!validate_net_dev(net_dev, listen_addr, src_addr)) {
-		dev_put(net_dev);
-		return ERR_PTR(-EHOSTUNREACH);
-	}
-
 	return net_dev;
 }
@@ -1490,15 +1480,51 @@ static struct rdma_id_private *cma_id_from_event(struct ib_cm_id *cm_id,
 		}
 	}
 
+	/*
+	 * Net namespace might be getting deleted while route lookup,
+	 * cm_id lookup is in progress. Therefore, perform netdevice
+	 * validation, cm_id lookup under rcu lock.
+	 * RCU lock along with netdevice state check, synchronizes with
+	 * netdevice migrating to different net namespace and also avoids
+	 * case where net namespace doesn't get deleted while lookup is in
+	 * progress.
+	 * If the device state is not IFF_UP, its properties such as ifindex
+	 * and nd_net cannot be trusted to remain valid without rcu lock.
+	 * net/core/dev.c change_net_namespace() ensures to synchronize with
+	 * ongoing operations on net device after device is closed using
+	 * synchronize_net().
+	 */
+	rcu_read_lock();
+	if (*net_dev) {
+		/*
+		 * If netdevice is down, it is likely that it is administratively
+		 * down or it might be migrating to different namespace.
+		 * In that case avoid further processing, as the net namespace
+		 * or ifindex may change.
+		 */
+		if (((*net_dev)->flags & IFF_UP) == 0) {
+			id_priv = ERR_PTR(-EHOSTUNREACH);
+			goto err;
+		}
+
+		if (!validate_net_dev(*net_dev,
+				 (struct sockaddr *)&req.listen_addr_storage,
+				 (struct sockaddr *)&req.src_addr_storage)) {
+			id_priv = ERR_PTR(-EHOSTUNREACH);
+			goto err;
+		}
+	}
+
 	bind_list = cma_ps_find(*net_dev ? dev_net(*net_dev) : &init_net,
 				rdma_ps_from_service_id(req.service_id),
 				cma_port_from_service_id(req.service_id));
 	id_priv = cma_find_listener(bind_list, cm_id, ib_event, &req, *net_dev);
+err:
+	rcu_read_unlock();
 	if (IS_ERR(id_priv) && *net_dev) {
 		dev_put(*net_dev);
 		*net_dev = NULL;
 	}
+
 	return id_priv;
 }
@@ -114,7 +114,7 @@ int iwpm_create_mapinfo(struct sockaddr_storage *local_sockaddr,
 			struct sockaddr_storage *mapped_sockaddr,
 			u8 nl_client)
 {
-	struct hlist_head *hash_bucket_head;
+	struct hlist_head *hash_bucket_head = NULL;
 	struct iwpm_mapping_info *map_info;
 	unsigned long flags;
 	int ret = -EINVAL;
@@ -142,6 +142,9 @@ int iwpm_create_mapinfo(struct sockaddr_storage *local_sockaddr,
 		}
 	}
 	spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags);
+
+	if (!hash_bucket_head)
+		kfree(map_info);
 	return ret;
 }
@@ -59,7 +59,7 @@ module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
 MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");
 
 static struct list_head ib_mad_port_list;
-static u32 ib_mad_client_id = 0;
+static atomic_t ib_mad_client_id = ATOMIC_INIT(0);
 
 /* Port list lock */
 static DEFINE_SPINLOCK(ib_mad_port_list_lock);
@@ -377,7 +377,7 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
 	}
 
 	spin_lock_irqsave(&port_priv->reg_lock, flags);
-	mad_agent_priv->agent.hi_tid = ++ib_mad_client_id;
+	mad_agent_priv->agent.hi_tid = atomic_inc_return(&ib_mad_client_id);
 
 	/*
 	 * Make sure MAD registration (if supplied)
@@ -255,6 +255,7 @@ static void bond_delete_netdev_default_gids(struct ib_device *ib_dev,
 					    struct net_device *rdma_ndev)
 {
 	struct net_device *real_dev = rdma_vlan_dev_real_dev(event_ndev);
+	unsigned long gid_type_mask;
 
 	if (!rdma_ndev)
 		return;
@@ -264,10 +265,14 @@ static void bond_delete_netdev_default_gids(struct ib_device *ib_dev,
 
 	rcu_read_lock();
-	if (rdma_is_upper_dev_rcu(rdma_ndev, event_ndev) &&
-	    is_eth_active_slave_of_bonding_rcu(rdma_ndev, real_dev) ==
-	    BONDING_SLAVE_STATE_INACTIVE) {
-		unsigned long gid_type_mask;
+	if (((rdma_ndev != event_ndev &&
+	      !rdma_is_upper_dev_rcu(rdma_ndev, event_ndev)) ||
+	     is_eth_active_slave_of_bonding_rcu(rdma_ndev, real_dev)
+	     ==
+	     BONDING_SLAVE_STATE_INACTIVE)) {
+		rcu_read_unlock();
+		return;
+	}
 
-		rcu_read_unlock();
+	rcu_read_unlock();
@@ -276,9 +281,6 @@ static void bond_delete_netdev_default_gids(struct ib_device *ib_dev,
 	ib_cache_gid_set_default_gid(ib_dev, port, rdma_ndev,
 				     gid_type_mask,
 				     IB_CACHE_GID_DEFAULT_MODE_DELETE);
-	} else {
-		rcu_read_unlock();
-	}
 }
 
 static void enum_netdev_ipv4_ips(struct ib_device *ib_dev,
@@ -159,6 +159,23 @@ static void ucma_put_ctx(struct ucma_context *ctx)
 		complete(&ctx->comp);
 }
 
+/*
+ * Same as ucm_get_ctx but requires that ->cm_id->device is valid, eg that the
+ * CM_ID is bound.
+ */
+static struct ucma_context *ucma_get_ctx_dev(struct ucma_file *file, int id)
+{
+	struct ucma_context *ctx = ucma_get_ctx(file, id);
+
+	if (IS_ERR(ctx))
+		return ctx;
+	if (!ctx->cm_id->device) {
+		ucma_put_ctx(ctx);
+		return ERR_PTR(-EINVAL);
+	}
+	return ctx;
+}
+
 static void ucma_close_event_id(struct work_struct *work)
 {
 	struct ucma_event *uevent_close = container_of(work, struct ucma_event, close_work);
@@ -683,7 +700,7 @@ static ssize_t ucma_resolve_ip(struct ucma_file *file,
 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
 		return -EFAULT;
 
-	if (!rdma_addr_size_in6(&cmd.src_addr) ||
+	if ((cmd.src_addr.sin6_family && !rdma_addr_size_in6(&cmd.src_addr)) ||
 	    !rdma_addr_size_in6(&cmd.dst_addr))
 		return -EINVAL;
@@ -734,7 +751,7 @@ static ssize_t ucma_resolve_route(struct ucma_file *file,
 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
 		return -EFAULT;
 
-	ctx = ucma_get_ctx(file, cmd.id);
+	ctx = ucma_get_ctx_dev(file, cmd.id);
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
@@ -1050,7 +1067,7 @@ static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
 	if (!cmd.conn_param.valid)
 		return -EINVAL;
 
-	ctx = ucma_get_ctx(file, cmd.id);
+	ctx = ucma_get_ctx_dev(file, cmd.id);
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
@@ -1092,7 +1109,7 @@ static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
 		return -EFAULT;
 
-	ctx = ucma_get_ctx(file, cmd.id);
+	ctx = ucma_get_ctx_dev(file, cmd.id);
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
@@ -1120,7 +1137,7 @@ static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf,
 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
 		return -EFAULT;
 
-	ctx = ucma_get_ctx(file, cmd.id);
+	ctx = ucma_get_ctx_dev(file, cmd.id);
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
@@ -1139,7 +1156,7 @@ static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf,
 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
 		return -EFAULT;
 
-	ctx = ucma_get_ctx(file, cmd.id);
+	ctx = ucma_get_ctx_dev(file, cmd.id);
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
@@ -1167,15 +1184,10 @@ static ssize_t ucma_init_qp_attr(struct ucma_file *file,
 	if (cmd.qp_state > IB_QPS_ERR)
 		return -EINVAL;
 
-	ctx = ucma_get_ctx(file, cmd.id);
+	ctx = ucma_get_ctx_dev(file, cmd.id);
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
 
-	if (!ctx->cm_id->device) {
-		ret = -EINVAL;
-		goto out;
-	}
-
 	resp.qp_attr_mask = 0;
 	memset(&qp_attr, 0, sizeof qp_attr);
 	qp_attr.qp_state = cmd.qp_state;
@@ -1316,13 +1328,13 @@ static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
 	if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
 		return -EFAULT;
 
+	if (unlikely(cmd.optlen > KMALLOC_MAX_SIZE))
+		return -EINVAL;
+
 	ctx = ucma_get_ctx(file, cmd.id);
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
 
-	if (unlikely(cmd.optlen > KMALLOC_MAX_SIZE))
-		return -EINVAL;
-
 	optval = memdup_user(u64_to_user_ptr(cmd.optval),
 			     cmd.optlen);
 	if (IS_ERR(optval)) {
@@ -1384,7 +1396,7 @@ static ssize_t ucma_process_join(struct ucma_file *file,
 	else
 		return -EINVAL;
 
-	ctx = ucma_get_ctx(file, cmd->id);
+	ctx = ucma_get_ctx_dev(file, cmd->id);
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
@@ -691,6 +691,7 @@ ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
 
 	mr->device  = pd->device;
 	mr->pd      = pd;
+	mr->dm      = NULL;
 	mr->uobject = uobj;
 	atomic_inc(&pd->usecnt);
 	mr->res.type = RDMA_RESTRACK_MR;
@@ -765,6 +766,11 @@ ssize_t ib_uverbs_rereg_mr(struct ib_uverbs_file *file,
 
 	mr = uobj->object;
 
+	if (mr->dm) {
+		ret = -EINVAL;
+		goto put_uobjs;
+	}
+
 	if (cmd.flags & IB_MR_REREG_ACCESS) {
 		ret = ib_check_mr_access(cmd.access_flags);
 		if (ret)
@@ -234,6 +234,15 @@ static int uverbs_validate_kernel_mandatory(const struct uverbs_method_spec *method_spec,
 			return -EINVAL;
 	}
 
+	for (; i < method_spec->num_buckets; i++) {
+		struct uverbs_attr_spec_hash *attr_spec_bucket =
+			method_spec->attr_buckets[i];
+
+		if (!bitmap_empty(attr_spec_bucket->mandatory_attrs_bitmask,
+				  attr_spec_bucket->num_attrs))
+			return -EINVAL;
+	}
+
 	return 0;
 }
@@ -363,28 +363,28 @@ static int UVERBS_HANDLER(UVERBS_METHOD_FLOW_ACTION_ESP_MODIFY)(struct ib_device *ib_dev,
 static const struct uverbs_attr_spec uverbs_flow_action_esp_keymat[] = {
 	[IB_UVERBS_FLOW_ACTION_ESP_KEYMAT_AES_GCM] = {
-		.ptr = {
+		{ .ptr = {
 			.type = UVERBS_ATTR_TYPE_PTR_IN,
 			UVERBS_ATTR_TYPE(struct ib_uverbs_flow_action_esp_keymat_aes_gcm),
 			.flags = UVERBS_ATTR_SPEC_F_MIN_SZ_OR_ZERO,
-		},
+		} },
 	},
 };
 
 static const struct uverbs_attr_spec uverbs_flow_action_esp_replay[] = {
 	[IB_UVERBS_FLOW_ACTION_ESP_REPLAY_NONE] = {
-		.ptr = {
+		{ .ptr = {
 			.type = UVERBS_ATTR_TYPE_PTR_IN,
 			/* No need to specify any data */
 			.len = 0,
-		}
+		} }
 	},
 	[IB_UVERBS_FLOW_ACTION_ESP_REPLAY_BMP] = {
-		.ptr = {
+		{ .ptr = {
 			.type = UVERBS_ATTR_TYPE_PTR_IN,
 			UVERBS_ATTR_STRUCT(struct ib_uverbs_flow_action_esp_replay_bmp, size),
 			.flags = UVERBS_ATTR_SPEC_F_MIN_SZ_OR_ZERO,
-		}
+		} }
 	},
 };
@@ -1656,6 +1656,7 @@ struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
 	if (!IS_ERR(mr)) {
 		mr->device  = pd->device;
 		mr->pd      = pd;
+		mr->dm      = NULL;
 		mr->uobject = NULL;
 		atomic_inc(&pd->usecnt);
 		mr->need_inval = false;
@@ -315,7 +315,7 @@ static void advance_oldest_read(struct t4_wq *wq)
  * Deal with out-of-order and/or completions that complete
  * prior unsignalled WRs.
  */
-void c4iw_flush_hw_cq(struct c4iw_cq *chp)
+void c4iw_flush_hw_cq(struct c4iw_cq *chp, struct c4iw_qp *flush_qhp)
 {
 	struct t4_cqe *hw_cqe, *swcqe, read_cqe;
 	struct c4iw_qp *qhp;
@@ -339,6 +339,13 @@ void c4iw_flush_hw_cq(struct c4iw_cq *chp)
 		if (qhp == NULL)
 			goto next_cqe;
 
+		if (flush_qhp != qhp) {
+			spin_lock(&qhp->lock);
+
+			if (qhp->wq.flushed == 1)
+				goto next_cqe;
+		}
+
 		if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE)
 			goto next_cqe;
@@ -390,6 +397,8 @@ void c4iw_flush_hw_cq(struct c4iw_cq *chp)
 next_cqe:
 		t4_hwcq_consume(&chp->cq);
 		ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);
+		if (qhp && flush_qhp != qhp)
+			spin_unlock(&qhp->lock);
 	}
 }
@@ -875,6 +875,11 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
 	rdev->status_page->db_off = 0;
 
+	init_completion(&rdev->rqt_compl);
+	init_completion(&rdev->pbl_compl);
+	kref_init(&rdev->rqt_kref);
+	kref_init(&rdev->pbl_kref);
+
 	return 0;
 err_free_status_page_and_wr_log:
 	if (c4iw_wr_log && rdev->wr_log)
@@ -893,13 +898,15 @@ destroy_resource:
 
 static void c4iw_rdev_close(struct c4iw_rdev *rdev)
 {
-	destroy_workqueue(rdev->free_workq);
 	kfree(rdev->wr_log);
 	c4iw_release_dev_ucontext(rdev, &rdev->uctx);
 	free_page((unsigned long)rdev->status_page);
 	c4iw_pblpool_destroy(rdev);
 	c4iw_rqtpool_destroy(rdev);
+	wait_for_completion(&rdev->pbl_compl);
+	wait_for_completion(&rdev->rqt_compl);
 	c4iw_ocqp_pool_destroy(rdev);
+	destroy_workqueue(rdev->free_workq);
 	c4iw_destroy_resource(&rdev->resource);
 }
@@ -185,6 +185,10 @@ struct c4iw_rdev {
 	struct wr_log_entry *wr_log;
 	int wr_log_size;
 	struct workqueue_struct *free_workq;
+	struct completion rqt_compl;
+	struct completion pbl_compl;
+	struct kref rqt_kref;
+	struct kref pbl_kref;
 };
 
 static inline int c4iw_fatal_error(struct c4iw_rdev *rdev)
@@ -1049,7 +1053,7 @@ u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size);
 void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size);
 u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size);
 void c4iw_ocqp_pool_free(struct c4iw_rdev *rdev, u32 addr, int size);
-void c4iw_flush_hw_cq(struct c4iw_cq *chp);
+void c4iw_flush_hw_cq(struct c4iw_cq *chp, struct c4iw_qp *flush_qhp);
 void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count);
 int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp);
 int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count);
@@ -1343,12 +1343,12 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
 	qhp->wq.flushed = 1;
 	t4_set_wq_in_error(&qhp->wq);
 
-	c4iw_flush_hw_cq(rchp);
+	c4iw_flush_hw_cq(rchp, qhp);
 	c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
 	rq_flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
 
 	if (schp != rchp)
-		c4iw_flush_hw_cq(schp);
+		c4iw_flush_hw_cq(schp, qhp);
 	sq_flushed = c4iw_flush_sq(qhp);
 
 	spin_unlock(&qhp->lock);
@@ -260,12 +260,22 @@ u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size)
 		rdev->stats.pbl.cur += roundup(size, 1 << MIN_PBL_SHIFT);
 		if (rdev->stats.pbl.cur > rdev->stats.pbl.max)
 			rdev->stats.pbl.max = rdev->stats.pbl.cur;
+		kref_get(&rdev->pbl_kref);
 	} else
 		rdev->stats.pbl.fail++;
 	mutex_unlock(&rdev->stats.lock);
 	return (u32)addr;
 }
 
+static void destroy_pblpool(struct kref *kref)
+{
+	struct c4iw_rdev *rdev;
+
+	rdev = container_of(kref, struct c4iw_rdev, pbl_kref);
+	gen_pool_destroy(rdev->pbl_pool);
+	complete(&rdev->pbl_compl);
+}
+
 void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
 {
 	pr_debug("addr 0x%x size %d\n", addr, size);
@@ -273,6 +283,7 @@ void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
 	rdev->stats.pbl.cur -= roundup(size, 1 << MIN_PBL_SHIFT);
 	mutex_unlock(&rdev->stats.lock);
 	gen_pool_free(rdev->pbl_pool, (unsigned long)addr, size);
+	kref_put(&rdev->pbl_kref, destroy_pblpool);
 }
 
 int c4iw_pblpool_create(struct c4iw_rdev *rdev)
@@ -310,7 +321,7 @@ int c4iw_pblpool_create(struct c4iw_rdev *rdev)
 
 void c4iw_pblpool_destroy(struct c4iw_rdev *rdev)
 {
-	gen_pool_destroy(rdev->pbl_pool);
+	kref_put(&rdev->pbl_kref, destroy_pblpool);
 }
 
 /*
@@ -331,12 +342,22 @@ u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size)
 		rdev->stats.rqt.cur += roundup(size << 6, 1 << MIN_RQT_SHIFT);
 		if (rdev->stats.rqt.cur > rdev->stats.rqt.max)
 			rdev->stats.rqt.max = rdev->stats.rqt.cur;
+		kref_get(&rdev->rqt_kref);
 	} else
 		rdev->stats.rqt.fail++;
 	mutex_unlock(&rdev->stats.lock);
 	return (u32)addr;
 }
 
+static void destroy_rqtpool(struct kref *kref)
+{
+	struct c4iw_rdev *rdev;
+
+	rdev = container_of(kref, struct c4iw_rdev, rqt_kref);
+	gen_pool_destroy(rdev->rqt_pool);
+	complete(&rdev->rqt_compl);
+}
+
 void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
 {
 	pr_debug("addr 0x%x size %d\n", addr, size << 6);
@@ -344,6 +365,7 @@ void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
 	rdev->stats.rqt.cur -= roundup(size << 6, 1 << MIN_RQT_SHIFT);
 	mutex_unlock(&rdev->stats.lock);
 	gen_pool_free(rdev->rqt_pool, (unsigned long)addr, size << 6);
+	kref_put(&rdev->rqt_kref, destroy_rqtpool);
 }
 
 int c4iw_rqtpool_create(struct c4iw_rdev *rdev)
@@ -380,7 +402,7 @@ int c4iw_rqtpool_create(struct c4iw_rdev *rdev)
 
 void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev)
 {
-	gen_pool_destroy(rdev->rqt_pool);
+	kref_put(&rdev->rqt_kref, destroy_rqtpool);
 }
 
 /*
@@ -412,7 +412,6 @@ static void hfi1_cleanup_sdma_notifier(struct hfi1_msix_entry *msix)
 static int get_irq_affinity(struct hfi1_devdata *dd,
 			    struct hfi1_msix_entry *msix)
 {
-	int ret;
 	cpumask_var_t diff;
 	struct hfi1_affinity_node *entry;
 	struct cpu_mask_set *set = NULL;
@@ -424,10 +423,6 @@ static int get_irq_affinity(struct hfi1_devdata *dd,
 	extra[0] = '\0';
 	cpumask_clear(&msix->mask);
 
-	ret = zalloc_cpumask_var(&diff, GFP_KERNEL);
-	if (!ret)
-		return -ENOMEM;
-
 	entry = node_affinity_lookup(dd->node);
 
 	switch (msix->type) {
@@ -458,6 +453,9 @@ static int get_irq_affinity(struct hfi1_devdata *dd,
 		 * finds its CPU here.
 		 */
 		if (cpu == -1 && set) {
+			if (!zalloc_cpumask_var(&diff, GFP_KERNEL))
+				return -ENOMEM;
+
 			if (cpumask_equal(&set->mask, &set->used)) {
 				/*
 				 * We've used up all the CPUs, bump up the generation
@@ -469,6 +467,8 @@ static int get_irq_affinity(struct hfi1_devdata *dd,
 			cpumask_andnot(diff, &set->mask, &set->used);
 			cpu = cpumask_first(diff);
 			cpumask_set_cpu(cpu, &set->used);
+
+			free_cpumask_var(diff);
 		}
 
 		cpumask_set_cpu(cpu, &msix->mask);
@@ -482,7 +482,6 @@ static int get_irq_affinity(struct hfi1_devdata *dd,
 		hfi1_setup_sdma_notifier(msix);
 	}
 
-	free_cpumask_var(diff);
 	return 0;
 }
@@ -433,31 +433,43 @@ void hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt,
 			       bool do_cnp)
 {
 	struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
+	struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
 	struct ib_other_headers *ohdr = pkt->ohdr;
 	struct ib_grh *grh = pkt->grh;
 	u32 rqpn = 0, bth1;
-	u16 pkey, rlid, dlid = ib_get_dlid(pkt->hdr);
+	u16 pkey;
+	u32 rlid, slid, dlid = 0;
 	u8 hdr_type, sc, svc_type;
 	bool is_mcast = false;
 
+	/* can be called from prescan */
 	if (pkt->etype == RHF_RCV_TYPE_BYPASS) {
 		is_mcast = hfi1_is_16B_mcast(dlid);
 		pkey = hfi1_16B_get_pkey(pkt->hdr);
 		sc = hfi1_16B_get_sc(pkt->hdr);
+		dlid = hfi1_16B_get_dlid(pkt->hdr);
+		slid = hfi1_16B_get_slid(pkt->hdr);
 		hdr_type = HFI1_PKT_TYPE_16B;
 	} else {
 		is_mcast = (dlid > be16_to_cpu(IB_MULTICAST_LID_BASE)) &&
 			   (dlid != be16_to_cpu(IB_LID_PERMISSIVE));
 		pkey = ib_bth_get_pkey(ohdr);
 		sc = hfi1_9B_get_sc5(pkt->hdr, pkt->rhf);
+		dlid = ib_get_dlid(pkt->hdr);
+		slid = ib_get_slid(pkt->hdr);
 		hdr_type = HFI1_PKT_TYPE_9B;
 	}
 
 	switch (qp->ibqp.qp_type) {
+	case IB_QPT_UD:
+		dlid = ppd->lid;
+		rlid = slid;
+		rqpn = ib_get_sqpn(pkt->ohdr);
+		svc_type = IB_CC_SVCTYPE_UD;
+		break;
 	case IB_QPT_SMI:
 	case IB_QPT_GSI:
-	case IB_QPT_UD:
-		rlid = ib_get_slid(pkt->hdr);
+		rlid = slid;
 		rqpn = ib_get_sqpn(pkt->ohdr);
 		svc_type = IB_CC_SVCTYPE_UD;
 		break;
@@ -482,7 +494,6 @@ void hfi1_process_ecn_slowpath(struct rvt_qp *qp, struct hfi1_packet *pkt,
 			  dlid, rlid, sc, grh);
 
 	if (!is_mcast && (bth1 & IB_BECN_SMASK)) {
-		struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
 		u32 lqpn = bth1 & RVT_QPN_MASK;
 		u8 sl = ibp->sc_to_sl[sc];
@@ -1537,13 +1537,13 @@ void set_link_ipg(struct hfi1_pportdata *ppd);
 void process_becn(struct hfi1_pportdata *ppd, u8 sl, u32 rlid, u32 lqpn,
 		  u32 rqpn, u8 svc_type);
 void return_cnp(struct hfi1_ibport *ibp, struct rvt_qp *qp, u32 remote_qpn,
-		u32 pkey, u32 slid, u32 dlid, u8 sc5,
+		u16 pkey, u32 slid, u32 dlid, u8 sc5,
 		const struct ib_grh *old_grh);
 void return_cnp_16B(struct hfi1_ibport *ibp, struct rvt_qp *qp,
-		    u32 remote_qpn, u32 pkey, u32 slid, u32 dlid,
+		    u32 remote_qpn, u16 pkey, u32 slid, u32 dlid,
 		    u8 sc5, const struct ib_grh *old_grh);
 typedef void (*hfi1_handle_cnp)(struct hfi1_ibport *ibp, struct rvt_qp *qp,
-				u32 remote_qpn, u32 pkey, u32 slid, u32 dlid,
+				u32 remote_qpn, u16 pkey, u32 slid, u32 dlid,
 				u8 sc5, const struct ib_grh *old_grh);
 
 #define PKEY_CHECK_INVALID -1
@@ -2437,7 +2437,7 @@ static inline void hfi1_make_16b_hdr(struct hfi1_16b_header *hdr,
 		((slid >> OPA_16B_SLID_SHIFT) << OPA_16B_SLID_HIGH_SHIFT);
 	lrh2 = (lrh2 & ~OPA_16B_DLID_MASK) |
 		((dlid >> OPA_16B_DLID_SHIFT) << OPA_16B_DLID_HIGH_SHIFT);
-	lrh2 = (lrh2 & ~OPA_16B_PKEY_MASK) | (pkey << OPA_16B_PKEY_SHIFT);
+	lrh2 = (lrh2 & ~OPA_16B_PKEY_MASK) | ((u32)pkey << OPA_16B_PKEY_SHIFT);
 	lrh2 = (lrh2 & ~OPA_16B_L4_MASK) | l4;
 
 	hdr->lrh[0] = lrh0;
@@ -88,9 +88,9 @@
  * pio buffers per ctxt, etc.)  Zero means use one user context per CPU.
  */
 int num_user_contexts = -1;
-module_param_named(num_user_contexts, num_user_contexts, uint, S_IRUGO);
+module_param_named(num_user_contexts, num_user_contexts, int, 0444);
 MODULE_PARM_DESC(
-	num_user_contexts, "Set max number of user contexts to use");
+	num_user_contexts, "Set max number of user contexts to use (default: -1 will use the real (non-HT) CPU count)");
 
 uint krcvqs[RXE_NUM_DATA_VL];
 int krcvqsset;
@@ -1209,19 +1209,26 @@ static void finalize_asic_data(struct hfi1_devdata *dd,
 	kfree(ad);
 }
 
-static void __hfi1_free_devdata(struct kobject *kobj)
+/**
+ * hfi1_clean_devdata - cleans up per-unit data structure
+ * @dd: pointer to a valid devdata structure
+ *
+ * It cleans up all data structures set up by
+ * by hfi1_alloc_devdata().
+ */
+static void hfi1_clean_devdata(struct hfi1_devdata *dd)
 {
-	struct hfi1_devdata *dd =
-		container_of(kobj, struct hfi1_devdata, kobj);
 	struct hfi1_asic_data *ad;
 	unsigned long flags;
 
 	spin_lock_irqsave(&hfi1_devs_lock, flags);
+	if (!list_empty(&dd->list)) {
 		idr_remove(&hfi1_unit_table, dd->unit);
-	list_del(&dd->list);
+		list_del_init(&dd->list);
+	}
 	ad = release_asic_data(dd);
 	spin_unlock_irqrestore(&hfi1_devs_lock, flags);
-	finalize_asic_data(dd, ad);
+	if (ad)
+		finalize_asic_data(dd, ad);
 	free_platform_config(dd);
 	rcu_barrier(); /* wait for rcu callbacks to complete */
@@ -1229,10 +1236,22 @@ static void __hfi1_free_devdata(struct kobject *kobj)
 	free_percpu(dd->rcv_limit);
 	free_percpu(dd->send_schedule);
 	free_percpu(dd->tx_opstats);
+	dd->int_counter = NULL;
+	dd->rcv_limit = NULL;
+	dd->send_schedule = NULL;
+	dd->tx_opstats = NULL;
 	sdma_clean(dd, dd->num_sdma);
 	rvt_dealloc_device(&dd->verbs_dev.rdi);
 }
 
+static void __hfi1_free_devdata(struct kobject *kobj)
+{
+	struct hfi1_devdata *dd =
+		container_of(kobj, struct hfi1_devdata, kobj);
+
+	hfi1_clean_devdata(dd);
+}
+
 static struct kobj_type hfi1_devdata_type = {
 	.release = __hfi1_free_devdata,
 };
@@ -1265,6 +1284,8 @@ struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra)
 		return ERR_PTR(-ENOMEM);
 	dd->num_pports = nports;
 	dd->pport = (struct hfi1_pportdata *)(dd + 1);
+	dd->pcidev = pdev;
+	pci_set_drvdata(pdev, dd);
 
 	INIT_LIST_HEAD(&dd->list);
 	idr_preload(GFP_KERNEL);
@@ -1331,9 +1352,7 @@ struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, size_t extra)
 	return dd;
 
 bail:
-	if (!list_empty(&dd->list))
-		list_del_init(&dd->list);
-	rvt_dealloc_device(&dd->verbs_dev.rdi);
+	hfi1_clean_devdata(dd);
 	return ERR_PTR(ret);
 }
@@ -163,9 +163,6 @@ int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev)
 	resource_size_t addr;
 	int ret = 0;
 
-	dd->pcidev = pdev;
-	pci_set_drvdata(pdev, dd);
-
 	addr = pci_resource_start(pdev, 0);
 	len = pci_resource_len(pdev, 0);
@@ -199,6 +199,7 @@ void free_platform_config(struct hfi1_devdata *dd)
 {
 	/* Release memory allocated for eprom or fallback file read. */
 	kfree(dd->platform_config.data);
+	dd->platform_config.data = NULL;
 }
 
 void get_port_type(struct hfi1_pportdata *ppd)
@@ -204,6 +204,8 @@ static void clean_i2c_bus(struct hfi1_i2c_bus *bus)
 
 void clean_up_i2c(struct hfi1_devdata *dd, struct hfi1_asic_data *ad)
 {
+	if (!ad)
+		return;
 	clean_i2c_bus(ad->i2c_bus0);
 	ad->i2c_bus0 = NULL;
 	clean_i2c_bus(ad->i2c_bus1);
@@ -733,6 +733,20 @@ static inline void hfi1_make_ruc_bth(struct rvt_qp *qp,
 	ohdr->bth[2] = cpu_to_be32(bth2);
 }
 
+/**
+ * hfi1_make_ruc_header_16B - build a 16B header
+ * @qp: the queue pair
+ * @ohdr: a pointer to the destination header memory
+ * @bth0: bth0 passed in from the RC/UC builder
+ * @bth2: bth2 passed in from the RC/UC builder
+ * @middle: non zero implies indicates ahg "could" be used
+ * @ps: the current packet state
+ *
+ * This routine may disarm ahg under these situations:
+ * - packet needs a GRH
+ * - BECN needed
+ * - migration state not IB_MIG_MIGRATED
+ */
 static inline void hfi1_make_ruc_header_16B(struct rvt_qp *qp,
 					    struct ib_other_headers *ohdr,
 					    u32 bth0, u32 bth2, int middle,
@@ -777,6 +791,12 @@ static inline void hfi1_make_ruc_header_16B(struct rvt_qp *qp,
 	else
 		middle = 0;
 
+	if (qp->s_flags & RVT_S_ECN) {
+		qp->s_flags &= ~RVT_S_ECN;
+		/* we recently received a FECN, so return a BECN */
+		becn = true;
+		middle = 0;
+	}
 	if (middle)
 		build_ahg(qp, bth2);
 	else
@@ -784,11 +804,6 @@ static inline void hfi1_make_ruc_header_16B(struct rvt_qp *qp,
 
 	bth0 |= pkey;
 	bth0 |= extra_bytes << 20;
-	if (qp->s_flags & RVT_S_ECN) {
-		qp->s_flags &= ~RVT_S_ECN;
-		/* we recently received a FECN, so return a BECN */
-		becn = true;
-	}
 	hfi1_make_ruc_bth(qp, ohdr, bth0, bth1, bth2);
 
 	if (!ppd->lid)
@@ -806,6 +821,20 @@ static inline void hfi1_make_ruc_header_16B(struct rvt_qp *qp,
 			  pkey, becn, 0, l4, priv->s_sc);
 }
 
+/**
+ * hfi1_make_ruc_header_9B - build a 9B header
+ * @qp: the queue pair
+ * @ohdr: a pointer to the destination header memory
+ * @bth0: bth0 passed in from the RC/UC builder
+ * @bth2: bth2 passed in from the RC/UC builder
+ * @middle: non zero implies indicates ahg "could" be used
+ * @ps: the current packet state
+ *
+ * This routine may disarm ahg under these situations:
+ * - packet needs a GRH
+ * - BECN needed
+ * - migration state not IB_MIG_MIGRATED
+ */
 static inline void hfi1_make_ruc_header_9B(struct rvt_qp *qp,
 					   struct ib_other_headers *ohdr,
 					   u32 bth0, u32 bth2, int middle,
@@ -839,6 +868,12 @@ static inline void hfi1_make_ruc_header_9B(struct rvt_qp *qp,
 	else
 		middle = 0;
 
+	if (qp->s_flags & RVT_S_ECN) {
+		qp->s_flags &= ~RVT_S_ECN;
+		/* we recently received a FECN, so return a BECN */
+		bth1 |= (IB_BECN_MASK << IB_BECN_SHIFT);
+		middle = 0;
+	}
 	if (middle)
 		build_ahg(qp, bth2);
 	else
@@ -846,11 +881,6 @@ static inline void hfi1_make_ruc_header_9B(struct rvt_qp *qp,
 
 	bth0 |= pkey;
 	bth0 |= extra_bytes << 20;
-	if (qp->s_flags & RVT_S_ECN) {
-		qp->s_flags &= ~RVT_S_ECN;
-		/* we recently received a FECN, so return a BECN */
-		bth1 |= (IB_BECN_MASK << IB_BECN_SHIFT);
-	}
 	hfi1_make_ruc_bth(qp, ohdr, bth0, bth1, bth2);
 	hfi1_make_ib_hdr(&ps->s_txreq->phdr.hdr.ibh,
 			 lrh0,
@@ -628,7 +628,7 @@ int hfi1_lookup_pkey_idx(struct hfi1_ibport *ibp, u16 pkey)
 }
 
 void return_cnp_16B(struct hfi1_ibport *ibp, struct rvt_qp *qp,
-		    u32 remote_qpn, u32 pkey, u32 slid, u32 dlid,
+		    u32 remote_qpn, u16 pkey, u32 slid, u32 dlid,
 		    u8 sc5, const struct ib_grh *old_grh)
 {
 	u64 pbc, pbc_flags = 0;
@@ -687,7 +687,7 @@ void return_cnp_16B(struct hfi1_ibport *ibp, struct rvt_qp *qp,
 }
 
 void return_cnp(struct hfi1_ibport *ibp, struct rvt_qp *qp, u32 remote_qpn,
-		u32 pkey, u32 slid, u32 dlid, u8 sc5,
+		u16 pkey, u32 slid, u32 dlid, u8 sc5,
 		const struct ib_grh *old_grh)
 {
 	u64 pbc, pbc_flags = 0;
@@ -912,7 +912,7 @@ int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
 		obj_per_chunk = buf_chunk_size / obj_size;
 		num_hem = (nobj + obj_per_chunk - 1) / obj_per_chunk;
 		bt_chunk_num = bt_chunk_size / 8;
-		if (table->type >= HEM_TYPE_MTT)
+		if (type >= HEM_TYPE_MTT)
 			num_bt_l0 = bt_chunk_num;
 
 		table->hem = kcalloc(num_hem, sizeof(*table->hem),
@@ -920,7 +920,7 @@ int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
 		if (!table->hem)
 			goto err_kcalloc_hem_buf;
 
-		if (check_whether_bt_num_3(table->type, hop_num)) {
+		if (check_whether_bt_num_3(type, hop_num)) {
 			unsigned long num_bt_l1;
 
 			num_bt_l1 = (num_hem + bt_chunk_num - 1) /
@@ -939,8 +939,8 @@ int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
 				goto err_kcalloc_l1_dma;
 		}
 
-		if (check_whether_bt_num_2(table->type, hop_num) ||
-		    check_whether_bt_num_3(table->type, hop_num)) {
+		if (check_whether_bt_num_2(type, hop_num) ||
+		    check_whether_bt_num_3(type, hop_num)) {
 			table->bt_l0 = kcalloc(num_bt_l0, sizeof(*table->bt_l0),
 					       GFP_KERNEL);
 			if (!table->bt_l0)
@@ -1039,14 +1039,14 @@ void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev,
 void hns_roce_cleanup_hem(struct hns_roce_dev *hr_dev)
 {
 	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cq_table.table);
-	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.irrl_table);
 	if (hr_dev->caps.trrl_entry_sz)
 		hns_roce_cleanup_hem_table(hr_dev,
 					   &hr_dev->qp_table.trrl_table);
+	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.irrl_table);
 	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.qp_table);
 	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table);
-	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtt_table);
 	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
 		hns_roce_cleanup_hem_table(hr_dev,
 					   &hr_dev->mr_table.mtt_cqe_table);
+	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtt_table);
 }
@@ -71,6 +71,11 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			return -EINVAL;
 		}
 
+		if (wr->opcode == IB_WR_RDMA_READ) {
+			dev_err(hr_dev->dev, "Not support inline data!\n");
+			return -EINVAL;
+		}
+
 		for (i = 0; i < wr->num_sge; i++) {
 			memcpy(wqe, ((void *)wr->sg_list[i].addr),
 			       wr->sg_list[i].length);
@@ -148,7 +153,7 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		     ibqp->qp_type != IB_QPT_GSI &&
 		     ibqp->qp_type != IB_QPT_UD)) {
 		dev_err(dev, "Not supported QP(0x%x)type!\n", ibqp->qp_type);
-		*bad_wr = NULL;
+		*bad_wr = wr;
 		return -EOPNOTSUPP;
 	}
@@ -182,7 +187,8 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] =
 			wr->wr_id;
 
-		owner_bit = ~(qp->sq.head >> ilog2(qp->sq.wqe_cnt)) & 0x1;
+		owner_bit =
+			~(((qp->sq.head + nreq) >> ilog2(qp->sq.wqe_cnt)) & 0x1);
 
 		/* Corresponding to the QP type, wqe process separately */
 		if (ibqp->qp_type == IB_QPT_GSI) {
@@ -456,6 +462,7 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		} else {
 			dev_err(dev, "Illegal qp_type(0x%x)\n", ibqp->qp_type);
 			spin_unlock_irqrestore(&qp->sq.lock, flags);
+			*bad_wr = wr;
 			return -EOPNOTSUPP;
 		}
 	}
@@ -2592,10 +2599,12 @@ static void modify_qp_init_to_init(struct ib_qp *ibqp,
 	roce_set_field(qpc_mask->byte_4_sqpn_tst, V2_QPC_BYTE_4_SQPN_M,
 		       V2_QPC_BYTE_4_SQPN_S, 0);
 
+	if (attr_mask & IB_QP_DEST_QPN) {
 		roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
 			       V2_QPC_BYTE_56_DQPN_S, hr_qp->qpn);
-	roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
-		       V2_QPC_BYTE_56_DQPN_S, 0);
+		roce_set_field(qpc_mask->byte_56_dqpn_err,
+			       V2_QPC_BYTE_56_DQPN_M, V2_QPC_BYTE_56_DQPN_S, 0);
+	}
 	roce_set_field(context->byte_168_irrl_idx,
 		       V2_QPC_BYTE_168_SQ_SHIFT_BAK_M,
 		       V2_QPC_BYTE_168_SQ_SHIFT_BAK_S,
@@ -2650,8 +2659,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
 		return -EINVAL;
 	}
 
-	if ((attr_mask & IB_QP_ALT_PATH) || (attr_mask & IB_QP_ACCESS_FLAGS) ||
-	    (attr_mask & IB_QP_PKEY_INDEX) || (attr_mask & IB_QP_QKEY)) {
+	if (attr_mask & IB_QP_ALT_PATH) {
 		dev_err(dev, "INIT2RTR attr_mask (0x%x) error\n", attr_mask);
 		return -EINVAL;
 	}
@@ -2800,10 +2808,12 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
 			       V2_QPC_BYTE_140_RR_MAX_S, 0);
 	}
 
+	if (attr_mask & IB_QP_DEST_QPN) {
 		roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
 			       V2_QPC_BYTE_56_DQPN_S, attr->dest_qp_num);
-	roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_DQPN_M,
-		       V2_QPC_BYTE_56_DQPN_S, 0);
+		roce_set_field(qpc_mask->byte_56_dqpn_err,
+			       V2_QPC_BYTE_56_DQPN_M, V2_QPC_BYTE_56_DQPN_S, 0);
+	}
 
 	/* Configure GID index */
 	port_num = rdma_ah_get_port_num(&attr->ah_attr);
@@ -2845,7 +2855,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
 	if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_UD)
 		roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
 			       V2_QPC_BYTE_24_MTU_S, IB_MTU_4096);
-	else
+	else if (attr_mask & IB_QP_PATH_MTU)
 		roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
 			       V2_QPC_BYTE_24_MTU_S, attr->path_mtu);
@@ -2922,11 +2932,9 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
 		return -EINVAL;
 	}
 
-	/* If exist optional param, return error */
-	if ((attr_mask & IB_QP_ALT_PATH) || (attr_mask & IB_QP_ACCESS_FLAGS) ||
-	    (attr_mask & IB_QP_QKEY) || (attr_mask & IB_QP_PATH_MIG_STATE) ||
-	    (attr_mask & IB_QP_CUR_STATE) ||
-	    (attr_mask & IB_QP_MIN_RNR_TIMER)) {
+	/* Not support alternate path and path migration */
+	if ((attr_mask & IB_QP_ALT_PATH) ||
+	    (attr_mask & IB_QP_PATH_MIG_STATE)) {
 		dev_err(dev, "RTR2RTS attr_mask (0x%x)error\n", attr_mask);
 		return -EINVAL;
 	}
@@ -3161,7 +3169,8 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
 	    (cur_state == IB_QPS_RTR && new_state == IB_QPS_ERR) ||
 	    (cur_state == IB_QPS_RTS && new_state == IB_QPS_ERR) ||
 	    (cur_state == IB_QPS_SQD && new_state == IB_QPS_ERR) ||
-	    (cur_state == IB_QPS_SQE && new_state == IB_QPS_ERR)) {
+	    (cur_state == IB_QPS_SQE && new_state == IB_QPS_ERR) ||
+	    (cur_state == IB_QPS_ERR && new_state == IB_QPS_ERR)) {
 		/* Nothing */
 		;
 	} else {
@@ -4478,7 +4487,7 @@ static int hns_roce_v2_create_eq(struct hns_roce_dev *hr_dev,
 	ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, eq->eqn, 0,
 				eq_cmd, HNS_ROCE_CMD_TIMEOUT_MSECS);
 	if (ret) {
-		dev_err(dev, "[mailbox cmd] creat eqc failed.\n");
+		dev_err(dev, "[mailbox cmd] create eqc failed.\n");
 		goto err_cmd_mbox;
 	}
@@ -620,7 +620,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
 					    to_hr_ucontext(ib_pd->uobject->context),
 					    ucmd.db_addr, &hr_qp->rdb);
 			if (ret) {
-				dev_err(dev, "rp record doorbell map failed!\n");
+				dev_err(dev, "rq record doorbell map failed!\n");
 				goto err_mtt;
 			}
 		}
@@ -346,7 +346,7 @@ int mlx4_ib_umem_calc_optimal_mtt_size(struct ib_umem *umem, u64 start_va,
 	/* Add to the first block the misalignment that it suffers from. */
 	total_len += (first_block_start & ((1ULL << block_shift) - 1ULL));
 	last_block_end = current_block_start + current_block_len;
-	last_block_aligned_end = round_up(last_block_end, 1 << block_shift);
+	last_block_aligned_end = round_up(last_block_end, 1ULL << block_shift);
 	total_len += (last_block_aligned_end - last_block_end);
 
 	if (total_len & ((1ULL << block_shift) - 1ULL))
@@ -673,7 +673,8 @@ static int set_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_rss *rss_ctx,
 			  MLX4_IB_RX_HASH_SRC_PORT_TCP	|
 			  MLX4_IB_RX_HASH_DST_PORT_TCP	|
 			  MLX4_IB_RX_HASH_SRC_PORT_UDP	|
-			  MLX4_IB_RX_HASH_DST_PORT_UDP)) {
+			  MLX4_IB_RX_HASH_DST_PORT_UDP  |
+			  MLX4_IB_RX_HASH_INNER)) {
 		pr_debug("RX Hash fields_mask has unsupported mask (0x%llx)\n",
 			 ucmd->rx_hash_fields_mask);
 		return (-EOPNOTSUPP);
@@ -1,6 +1,7 @@
 config MLX5_INFINIBAND
 	tristate "Mellanox Connect-IB HCA support"
 	depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE
+	depends on INFINIBAND_USER_ACCESS || INFINIBAND_USER_ACCESS=n
 	---help---
 	  This driver provides low-level InfiniBand support for
 	  Mellanox Connect-IB PCI Express host channel adapters (HCAs).
@@ -52,7 +52,6 @@
 #include <linux/mlx5/port.h>
 #include <linux/mlx5/vport.h>
 #include <linux/mlx5/fs.h>
-#include <linux/mlx5/fs_helpers.h>
 #include <linux/list.h>
 #include <rdma/ib_smi.h>
 #include <rdma/ib_umem.h>
@@ -180,7 +179,7 @@ static int mlx5_netdev_event(struct notifier_block *this,
 		if (rep_ndev == ndev)
 			roce->netdev = (event == NETDEV_UNREGISTER) ?
 				NULL : ndev;
-	} else if (ndev->dev.parent == &ibdev->mdev->pdev->dev) {
+	} else if (ndev->dev.parent == &mdev->pdev->dev) {
 		roce->netdev = (event == NETDEV_UNREGISTER) ?
 			NULL : ndev;
 	}
@@ -4757,7 +4756,7 @@ mlx5_ib_get_vector_affinity(struct ib_device *ibdev, int comp_vector)
 {
 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
 
-	return mlx5_get_vector_affinity(dev->mdev, comp_vector);
+	return mlx5_get_vector_affinity_hint(dev->mdev, comp_vector);
 }
 
 /* The mlx5_ib_multiport_mutex should be held when calling this function */
@@ -5427,9 +5426,7 @@ static void mlx5_ib_stage_cong_debugfs_cleanup(struct mlx5_ib_dev *dev)
 static int mlx5_ib_stage_uar_init(struct mlx5_ib_dev *dev)
 {
 	dev->mdev->priv.uar = mlx5_get_uars_page(dev->mdev);
-	if (!dev->mdev->priv.uar)
-		return -ENOMEM;
-	return 0;
+	return PTR_ERR_OR_ZERO(dev->mdev->priv.uar);
 }
 
 static void mlx5_ib_stage_uar_cleanup(struct mlx5_ib_dev *dev)


@@ -866,25 +866,28 @@ static int mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
 		       int *order)
 {
 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
+	struct ib_umem *u;
 	int err;

-	*umem = ib_umem_get(pd->uobject->context, start, length,
-			    access_flags, 0);
-	err = PTR_ERR_OR_ZERO(*umem);
-	if (err) {
-		*umem = NULL;
-		mlx5_ib_err(dev, "umem get failed (%d)\n", err);
+	*umem = NULL;
+
+	u = ib_umem_get(pd->uobject->context, start, length, access_flags, 0);
+	err = PTR_ERR_OR_ZERO(u);
+	if (err) {
+		mlx5_ib_dbg(dev, "umem get failed (%d)\n", err);
 		return err;
 	}

-	mlx5_ib_cont_pages(*umem, start, MLX5_MKEY_PAGE_SHIFT_MASK, npages,
+	mlx5_ib_cont_pages(u, start, MLX5_MKEY_PAGE_SHIFT_MASK, npages,
 			   page_shift, ncont, order);
 	if (!*npages) {
 		mlx5_ib_warn(dev, "avoid zero region\n");
-		ib_umem_release(*umem);
+		ib_umem_release(u);
 		return -EINVAL;
 	}

+	*umem = u;
+
 	mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n",
 		    *npages, *ncont, *order, *page_shift);
@@ -1458,13 +1461,12 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
 	int access_flags = flags & IB_MR_REREG_ACCESS ?
 			    new_access_flags :
 			    mr->access_flags;
-	u64 addr = (flags & IB_MR_REREG_TRANS) ? virt_addr : mr->umem->address;
-	u64 len = (flags & IB_MR_REREG_TRANS) ? length : mr->umem->length;
 	int page_shift = 0;
 	int upd_flags = 0;
 	int npages = 0;
 	int ncont = 0;
 	int order = 0;
+	u64 addr, len;
 	int err;

 	mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
@@ -1472,6 +1474,17 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
 	atomic_sub(mr->npages, &dev->mdev->priv.reg_pages);

+	if (!mr->umem)
+		return -EINVAL;
+
+	if (flags & IB_MR_REREG_TRANS) {
+		addr = virt_addr;
+		len = length;
+	} else {
+		addr = mr->umem->address;
+		len = mr->umem->length;
+	}
+
 	if (flags != IB_MR_REREG_PD) {
 		/*
 		 * Replace umem. This needs to be done whether or not UMR is
@@ -1479,6 +1492,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
 		 */
 		flags |= IB_MR_REREG_TRANS;
 		ib_umem_release(mr->umem);
+		mr->umem = NULL;
 		err = mr_umem_get(pd, addr, len, access_flags, &mr->umem,
 				  &npages, &page_shift, &ncont, &order);
 		if (err)


@@ -259,7 +259,11 @@ static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
 	} else {
 		if (ucmd) {
 			qp->rq.wqe_cnt = ucmd->rq_wqe_count;
+			if (ucmd->rq_wqe_shift > BITS_PER_BYTE * sizeof(ucmd->rq_wqe_shift))
+				return -EINVAL;
 			qp->rq.wqe_shift = ucmd->rq_wqe_shift;
+			if ((1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) < qp->wq_sig)
+				return -EINVAL;
 			qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
 			qp->rq.max_post = qp->rq.wqe_cnt;
 		} else {
@@ -2451,18 +2455,18 @@ enum {
 static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
 {
-	if (rate == IB_RATE_PORT_CURRENT) {
+	if (rate == IB_RATE_PORT_CURRENT)
 		return 0;
-	} else if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_300_GBPS) {
+
+	if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_300_GBPS)
 		return -EINVAL;
-	} else {
-		while (rate != IB_RATE_2_5_GBPS &&
-		       !(1 << (rate + MLX5_STAT_RATE_OFFSET) &
-			 MLX5_CAP_GEN(dev->mdev, stat_rate_support)))
-			--rate;
-	}
+
+	while (rate != IB_RATE_PORT_CURRENT &&
+	       !(1 << (rate + MLX5_STAT_RATE_OFFSET) &
+		 MLX5_CAP_GEN(dev->mdev, stat_rate_support)))
+		--rate;

-	return rate + MLX5_STAT_RATE_OFFSET;
+	return rate ? rate + MLX5_STAT_RATE_OFFSET : rate;
 }

 static int modify_raw_packet_eth_prio(struct mlx5_core_dev *dev,


@@ -461,7 +461,7 @@ static bool nes_nic_send(struct sk_buff *skb, struct net_device *netdev)
 /**
  * nes_netdev_start_xmit
  */
-static int nes_netdev_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+static netdev_tx_t nes_netdev_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 {
 	struct nes_vnic *nesvnic = netdev_priv(netdev);
 	struct nes_device *nesdev = nesvnic->nesdev;


@@ -390,7 +390,7 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = {
 		.name	= "IB_OPCODE_RC_SEND_ONLY_INV",
 		.mask	= RXE_IETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK
 				| RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK
-				| RXE_END_MASK,
+				| RXE_END_MASK | RXE_START_MASK,
 		.length = RXE_BTH_BYTES + RXE_IETH_BYTES,
 		.offset = {
 			[RXE_BTH]	= 0,


@@ -728,7 +728,6 @@ next_wqe:
 		rollback_state(wqe, qp, &rollback_wqe, rollback_psn);

 		if (ret == -EAGAIN) {
-			kfree_skb(skb);
 			rxe_run_task(&qp->req.task, 1);
 			goto exit;
 		}


@@ -742,7 +742,6 @@ static enum resp_states read_reply(struct rxe_qp *qp,
 	err = rxe_xmit_packet(rxe, qp, &ack_pkt, skb);
 	if (err) {
 		pr_err("Failed sending RDMA reply.\n");
-		kfree_skb(skb);
 		return RESPST_ERR_RNR;
 	}
@@ -954,10 +953,8 @@ static int send_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt,
 	}

 	err = rxe_xmit_packet(rxe, qp, &ack_pkt, skb);
-	if (err) {
+	if (err)
 		pr_err_ratelimited("Failed sending ack\n");
-		kfree_skb(skb);
-	}

 err1:
 	return err;
@@ -1141,7 +1138,6 @@ static enum resp_states duplicate_request(struct rxe_qp *qp,
 		if (rc) {
 			pr_err("Failed resending result. This flow is not handled - skb ignored\n");
 			rxe_drop_ref(qp);
-			kfree_skb(skb_copy);
 			rc = RESPST_CLEANUP;
 			goto out;
 		}


@@ -1094,7 +1094,7 @@ drop_and_unlock:
 	spin_unlock_irqrestore(&priv->lock, flags);
 }

-static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct ipoib_dev_priv *priv = ipoib_priv(dev);
 	struct rdma_netdev *rn = netdev_priv(dev);


@@ -1,6 +1,6 @@
 config INFINIBAND_SRP
 	tristate "InfiniBand SCSI RDMA Protocol"
-	depends on SCSI
+	depends on SCSI && INFINIBAND_ADDR_TRANS
 	select SCSI_SRP_ATTRS
 	---help---
 	  Support for the SCSI RDMA Protocol over InfiniBand. This


@@ -1,6 +1,6 @@
 config INFINIBAND_SRPT
 	tristate "InfiniBand SCSI RDMA Protocol target support"
-	depends on INFINIBAND && TARGET_CORE
+	depends on INFINIBAND && INFINIBAND_ADDR_TRANS && TARGET_CORE
 	---help---
 	  Support for the SCSI RDMA Protocol (SRP) Target driver. The

@@ -88,6 +88,7 @@ static int input_leds_connect(struct input_handler *handler,
 			      const struct input_device_id *id)
 {
 	struct input_leds *leds;
+	struct input_led *led;
 	unsigned int num_leds;
 	unsigned int led_code;
 	int led_no;
@@ -119,14 +120,13 @@ static int input_leds_connect(struct input_handler *handler,
 	led_no = 0;
 	for_each_set_bit(led_code, dev->ledbit, LED_CNT) {
-		struct input_led *led = &leds->leds[led_no];
-
-		led->handle = &leds->handle;
-		led->code = led_code;
-
 		if (!input_led_info[led_code].name)
 			continue;

+		led = &leds->leds[led_no];
+		led->handle = &leds->handle;
+		led->code = led_code;
 		led->cdev.name = kasprintf(GFP_KERNEL, "%s::%s",
 					   dev_name(&dev->dev),
 					   input_led_info[led_code].name);


@@ -583,7 +583,7 @@ static void alps_process_trackstick_packet_v3(struct psmouse *psmouse)
 	x = (s8)(((packet[0] & 0x20) << 2) | (packet[1] & 0x7f));
 	y = (s8)(((packet[0] & 0x10) << 3) | (packet[2] & 0x7f));
-	z = packet[4] & 0x7c;
+	z = packet[4] & 0x7f;

 	/*
 	 * The x and y values tend to be quite large, and when used


@@ -147,8 +147,11 @@ static int rmi_spi_xfer(struct rmi_spi_xport *rmi_spi,
 	if (len > RMI_SPI_XFER_SIZE_LIMIT)
 		return -EINVAL;

-	if (rmi_spi->xfer_buf_size < len)
-		rmi_spi_manage_pools(rmi_spi, len);
+	if (rmi_spi->xfer_buf_size < len) {
+		ret = rmi_spi_manage_pools(rmi_spi, len);
+		if (ret < 0)
+			return ret;
+	}

 	if (addr == 0)
 		/*


@@ -362,7 +362,7 @@ config TOUCHSCREEN_HIDEEP
 	  If unsure, say N.

-	  To compile this driver as a moudle, choose M here : the
+	  To compile this driver as a module, choose M here : the
 	  module will be called hideep_ts.

 config TOUCHSCREEN_ILI210X


@@ -280,7 +280,8 @@ struct mxt_data {
 	struct input_dev *input_dev;
 	char phys[64];		/* device physical location */
 	struct mxt_object *object_table;
-	struct mxt_info info;
+	struct mxt_info *info;
+	void *raw_info_block;
 	unsigned int irq;
 	unsigned int max_x;
 	unsigned int max_y;
@@ -460,12 +461,13 @@ static int mxt_lookup_bootloader_address(struct mxt_data *data, bool retry)
 {
 	u8 appmode = data->client->addr;
 	u8 bootloader;
+	u8 family_id = data->info ? data->info->family_id : 0;

 	switch (appmode) {
 	case 0x4a:
 	case 0x4b:
 		/* Chips after 1664S use different scheme */
-		if (retry || data->info.family_id >= 0xa2) {
+		if (retry || family_id >= 0xa2) {
 			bootloader = appmode - 0x24;
 			break;
 		}
@@ -692,7 +694,7 @@ mxt_get_object(struct mxt_data *data, u8 type)
 	struct mxt_object *object;
 	int i;

-	for (i = 0; i < data->info.object_num; i++) {
+	for (i = 0; i < data->info->object_num; i++) {
 		object = data->object_table + i;
 		if (object->type == type)
 			return object;
@@ -1462,12 +1464,12 @@ static int mxt_update_cfg(struct mxt_data *data, const struct firmware *cfg)
 		data_pos += offset;
 	}

-	if (cfg_info.family_id != data->info.family_id) {
+	if (cfg_info.family_id != data->info->family_id) {
 		dev_err(dev, "Family ID mismatch!\n");
 		return -EINVAL;
 	}

-	if (cfg_info.variant_id != data->info.variant_id) {
+	if (cfg_info.variant_id != data->info->variant_id) {
 		dev_err(dev, "Variant ID mismatch!\n");
 		return -EINVAL;
 	}
@@ -1512,7 +1514,7 @@ static int mxt_update_cfg(struct mxt_data *data, const struct firmware *cfg)
 	/* Malloc memory to store configuration */
 	cfg_start_ofs = MXT_OBJECT_START +
-			data->info.object_num * sizeof(struct mxt_object) +
+			data->info->object_num * sizeof(struct mxt_object) +
 			MXT_INFO_CHECKSUM_SIZE;
 	config_mem_size = data->mem_size - cfg_start_ofs;
 	config_mem = kzalloc(config_mem_size, GFP_KERNEL);
@@ -1563,20 +1565,6 @@ release_mem:
 	return ret;
 }

-static int mxt_get_info(struct mxt_data *data)
-{
-	struct i2c_client *client = data->client;
-	struct mxt_info *info = &data->info;
-	int error;
-
-	/* Read 7-byte info block starting at address 0 */
-	error = __mxt_read_reg(client, 0, sizeof(*info), info);
-	if (error)
-		return error;
-
-	return 0;
-}
-
 static void mxt_free_input_device(struct mxt_data *data)
 {
 	if (data->input_dev) {
@@ -1591,9 +1579,10 @@ static void mxt_free_object_table(struct mxt_data *data)
 	video_unregister_device(&data->dbg.vdev);
 	v4l2_device_unregister(&data->dbg.v4l2);
 #endif
-	kfree(data->object_table);
 	data->object_table = NULL;
+	data->info = NULL;
+	kfree(data->raw_info_block);
+	data->raw_info_block = NULL;
 	kfree(data->msg_buf);
 	data->msg_buf = NULL;
 	data->T5_address = 0;
@@ -1609,34 +1598,18 @@
 	data->max_reportid = 0;
 }

-static int mxt_get_object_table(struct mxt_data *data)
+static int mxt_parse_object_table(struct mxt_data *data,
+				  struct mxt_object *object_table)
 {
 	struct i2c_client *client = data->client;
-	size_t table_size;
-	struct mxt_object *object_table;
-	int error;
 	int i;
 	u8 reportid;
 	u16 end_address;

-	table_size = data->info.object_num * sizeof(struct mxt_object);
-	object_table = kzalloc(table_size, GFP_KERNEL);
-	if (!object_table) {
-		dev_err(&data->client->dev, "Failed to allocate memory\n");
-		return -ENOMEM;
-	}
-
-	error = __mxt_read_reg(client, MXT_OBJECT_START, table_size,
-			       object_table);
-	if (error) {
-		kfree(object_table);
-		return error;
-	}
-
 	/* Valid Report IDs start counting from 1 */
 	reportid = 1;
 	data->mem_size = 0;
-	for (i = 0; i < data->info.object_num; i++) {
+	for (i = 0; i < data->info->object_num; i++) {
 		struct mxt_object *object = object_table + i;
 		u8 min_id, max_id;
@@ -1660,8 +1633,8 @@
 		switch (object->type) {
 		case MXT_GEN_MESSAGE_T5:
-			if (data->info.family_id == 0x80 &&
-			    data->info.version < 0x20) {
+			if (data->info->family_id == 0x80 &&
+			    data->info->version < 0x20) {
 				/*
 				 * On mXT224 firmware versions prior to V2.0
 				 * read and discard unused CRC byte otherwise
@@ -1716,24 +1689,102 @@
 	/* If T44 exists, T5 position has to be directly after */
 	if (data->T44_address && (data->T5_address != data->T44_address + 1)) {
 		dev_err(&client->dev, "Invalid T44 position\n");
-		error = -EINVAL;
-		goto free_object_table;
+		return -EINVAL;
 	}

 	data->msg_buf = kcalloc(data->max_reportid,
 				data->T5_msg_size, GFP_KERNEL);
-	if (!data->msg_buf) {
-		dev_err(&client->dev, "Failed to allocate message buffer\n");
-		error = -ENOMEM;
-		goto free_object_table;
-	}
-
-	data->object_table = object_table;
-
-	return 0;
-
-free_object_table:
-	mxt_free_object_table(data);
-	return error;
-}
+	if (!data->msg_buf)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static int mxt_read_info_block(struct mxt_data *data)
+{
+	struct i2c_client *client = data->client;
+	int error;
+	size_t size;
+	void *id_buf, *buf;
+	uint8_t num_objects;
+	u32 calculated_crc;
+	u8 *crc_ptr;
+
+	/* If info block already allocated, free it */
+	if (data->raw_info_block)
+		mxt_free_object_table(data);
+
+	/* Read 7-byte ID information block starting at address 0 */
+	size = sizeof(struct mxt_info);
+	id_buf = kzalloc(size, GFP_KERNEL);
+	if (!id_buf)
+		return -ENOMEM;
+
+	error = __mxt_read_reg(client, 0, size, id_buf);
+	if (error)
+		goto err_free_mem;
+
+	/* Resize buffer to give space for rest of info block */
+	num_objects = ((struct mxt_info *)id_buf)->object_num;
+	size += (num_objects * sizeof(struct mxt_object))
+		+ MXT_INFO_CHECKSUM_SIZE;
+
+	buf = krealloc(id_buf, size, GFP_KERNEL);
+	if (!buf) {
+		error = -ENOMEM;
+		goto err_free_mem;
+	}
+	id_buf = buf;
+
+	/* Read rest of info block */
+	error = __mxt_read_reg(client, MXT_OBJECT_START,
+			       size - MXT_OBJECT_START,
+			       id_buf + MXT_OBJECT_START);
+	if (error)
+		goto err_free_mem;
+
+	/* Extract & calculate checksum */
+	crc_ptr = id_buf + size - MXT_INFO_CHECKSUM_SIZE;
+	data->info_crc = crc_ptr[0] | (crc_ptr[1] << 8) | (crc_ptr[2] << 16);
+
+	calculated_crc = mxt_calculate_crc(id_buf, 0,
+					   size - MXT_INFO_CHECKSUM_SIZE);
+
+	/*
+	 * CRC mismatch can be caused by data corruption due to I2C comms
+	 * issue or else device is not using Object Based Protocol (eg i2c-hid)
+	 */
+	if ((data->info_crc == 0) || (data->info_crc != calculated_crc)) {
+		dev_err(&client->dev,
+			"Info Block CRC error calculated=0x%06X read=0x%06X\n",
+			calculated_crc, data->info_crc);
+		error = -EIO;
+		goto err_free_mem;
+	}
+
+	data->raw_info_block = id_buf;
+	data->info = (struct mxt_info *)id_buf;
+
+	dev_info(&client->dev,
+		 "Family: %u Variant: %u Firmware V%u.%u.%02X Objects: %u\n",
+		 data->info->family_id, data->info->variant_id,
+		 data->info->version >> 4, data->info->version & 0xf,
+		 data->info->build, data->info->object_num);
+
+	/* Parse object table information */
+	error = mxt_parse_object_table(data, id_buf + MXT_OBJECT_START);
+	if (error) {
+		dev_err(&client->dev, "Error %d parsing object table\n", error);
+		mxt_free_object_table(data);
+		goto err_free_mem;
+	}
+
+	data->object_table = (struct mxt_object *)(id_buf + MXT_OBJECT_START);
+
+	return 0;
+
+err_free_mem:
+	kfree(id_buf);
+	return error;
+}
@@ -2046,7 +2097,7 @@ static int mxt_initialize(struct mxt_data *data)
 	int error;

 	while (1) {
-		error = mxt_get_info(data);
+		error = mxt_read_info_block(data);
 		if (!error)
 			break;
@@ -2077,16 +2128,9 @@ static int mxt_initialize(struct mxt_data *data)
 		msleep(MXT_FW_RESET_TIME);
 	}

-	/* Get object table information */
-	error = mxt_get_object_table(data);
-	if (error) {
-		dev_err(&client->dev, "Error %d reading object table\n", error);
-		return error;
-	}
-
 	error = mxt_acquire_irq(data);
 	if (error)
-		goto err_free_object_table;
+		return error;

 	error = request_firmware_nowait(THIS_MODULE, true, MXT_CFG_NAME,
 					&client->dev, GFP_KERNEL, data,
@@ -2094,14 +2138,10 @@ static int mxt_initialize(struct mxt_data *data)
 	if (error) {
 		dev_err(&client->dev, "Failed to invoke firmware loader: %d\n",
 			error);
-		goto err_free_object_table;
+		return error;
 	}

 	return 0;
-
-err_free_object_table:
-	mxt_free_object_table(data);
-	return error;
 }

 static int mxt_set_t7_power_cfg(struct mxt_data *data, u8 sleep)
@@ -2162,7 +2202,7 @@ recheck:
 static u16 mxt_get_debug_value(struct mxt_data *data, unsigned int x,
 			       unsigned int y)
 {
-	struct mxt_info *info = &data->info;
+	struct mxt_info *info = data->info;
 	struct mxt_dbg *dbg = &data->dbg;
 	unsigned int ofs, page;
 	unsigned int col = 0;
@@ -2490,7 +2530,7 @@ static const struct video_device mxt_video_device = {
 static void mxt_debug_init(struct mxt_data *data)
 {
-	struct mxt_info *info = &data->info;
+	struct mxt_info *info = data->info;
 	struct mxt_dbg *dbg = &data->dbg;
 	struct mxt_object *object;
 	int error;
@@ -2576,7 +2616,6 @@ static int mxt_configure_objects(struct mxt_data *data,
 				 const struct firmware *cfg)
 {
 	struct device *dev = &data->client->dev;
-	struct mxt_info *info = &data->info;
 	int error;

 	error = mxt_init_t7_power_cfg(data);
@@ -2601,11 +2640,6 @@
 	mxt_debug_init(data);

-	dev_info(dev,
-		 "Family: %u Variant: %u Firmware V%u.%u.%02X Objects: %u\n",
-		 info->family_id, info->variant_id, info->version >> 4,
-		 info->version & 0xf, info->build, info->object_num);
-
 	return 0;
 }

@@ -2614,7 +2648,7 @@ static ssize_t mxt_fw_version_show(struct device *dev,
 				   struct device_attribute *attr, char *buf)
 {
 	struct mxt_data *data = dev_get_drvdata(dev);
-	struct mxt_info *info = &data->info;
+	struct mxt_info *info = data->info;
 	return scnprintf(buf, PAGE_SIZE, "%u.%u.%02X\n",
 			 info->version >> 4, info->version & 0xf, info->build);
 }
@@ -2624,7 +2658,7 @@ static ssize_t mxt_hw_version_show(struct device *dev,
 				   struct device_attribute *attr, char *buf)
 {
 	struct mxt_data *data = dev_get_drvdata(dev);
-	struct mxt_info *info = &data->info;
+	struct mxt_info *info = data->info;
 	return scnprintf(buf, PAGE_SIZE, "%u.%u\n",
 			 info->family_id, info->variant_id);
 }
@@ -2663,7 +2697,7 @@ static ssize_t mxt_object_show(struct device *dev,
 		return -ENOMEM;

 	error = 0;
-	for (i = 0; i < data->info.object_num; i++) {
+	for (i = 0; i < data->info->object_num; i++) {
 		object = data->object_table + i;

 		if (!mxt_object_readable(object->type))
@@ -3034,6 +3068,15 @@ static const struct dmi_system_id mxt_dmi_table[] = {
 		},
 		.driver_data = samus_platform_data,
 	},
+	{
+		/* Samsung Chromebook Pro */
+		.ident = "Samsung Chromebook Pro",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Google"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Caroline"),
+		},
+		.driver_data = samus_platform_data,
+	},
 	{
 		/* Other Google Chromebooks */
 		.ident = "Chromebook",
@@ -3254,6 +3297,11 @@ static SIMPLE_DEV_PM_OPS(mxt_pm_ops, mxt_suspend, mxt_resume);
 static const struct of_device_id mxt_of_match[] = {
 	{ .compatible = "atmel,maxtouch", },
+	/* Compatibles listed below are deprecated */
+	{ .compatible = "atmel,qt602240_ts", },
+	{ .compatible = "atmel,atmel_mxt_ts", },
+	{ .compatible = "atmel,atmel_mxt_tp", },
+	{ .compatible = "atmel,mXT224", },
 	{},
 };
 MODULE_DEVICE_TABLE(of, mxt_of_match);


@@ -83,7 +83,6 @@

 static DEFINE_SPINLOCK(amd_iommu_devtable_lock);
 static DEFINE_SPINLOCK(pd_bitmap_lock);
-static DEFINE_SPINLOCK(iommu_table_lock);

 /* List of all available dev_data structures */
 static LLIST_HEAD(dev_data_list);
@@ -3562,6 +3561,7 @@ EXPORT_SYMBOL(amd_iommu_device_info);
 *****************************************************************************/

 static struct irq_chip amd_ir_chip;
+static DEFINE_SPINLOCK(iommu_table_lock);

 static void set_dte_irq_entry(u16 devid, struct irq_remap_table *table)
 {


@@ -167,40 +167,16 @@ EXPORT_SYMBOL(iommu_put_dma_cookie);
  * @list: Reserved region list from iommu_get_resv_regions()
  *
  * IOMMU drivers can use this to implement their .get_resv_regions callback
- * for general non-IOMMU-specific reservations. Currently, this covers host
- * bridge windows for PCI devices and GICv3 ITS region reservation on ACPI
- * based ARM platforms that may require HW MSI reservation.
+ * for general non-IOMMU-specific reservations. Currently, this covers GICv3
+ * ITS region reservation on ACPI based ARM platforms that may require HW MSI
+ * reservation.
  */
 void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
 {
-	struct pci_host_bridge *bridge;
-	struct resource_entry *window;
-
-	if (!is_of_node(dev->iommu_fwspec->iommu_fwnode) &&
-	    iort_iommu_msi_get_resv_regions(dev, list) < 0)
-		return;
-
-	if (!dev_is_pci(dev))
-		return;
-
-	bridge = pci_find_host_bridge(to_pci_dev(dev)->bus);
-	resource_list_for_each_entry(window, &bridge->windows) {
-		struct iommu_resv_region *region;
-		phys_addr_t start;
-		size_t length;
-
-		if (resource_type(window->res) != IORESOURCE_MEM)
-			continue;
-
-		start = window->res->start - window->offset;
-		length = window->res->end - window->res->start + 1;
-		region = iommu_alloc_resv_region(start, length, 0,
-				IOMMU_RESV_RESERVED);
-		if (!region)
-			return;
-
-		list_add_tail(&region->list, list);
-	}
+	if (!is_of_node(dev->iommu_fwspec->iommu_fwnode))
+		iort_iommu_msi_get_resv_regions(dev, list);
 }
 EXPORT_SYMBOL(iommu_dma_get_resv_regions);
@@ -229,6 +205,23 @@ static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
 	return 0;
 }

+static void iova_reserve_pci_windows(struct pci_dev *dev,
+		struct iova_domain *iovad)
+{
+	struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
+	struct resource_entry *window;
+	unsigned long lo, hi;
+
+	resource_list_for_each_entry(window, &bridge->windows) {
+		if (resource_type(window->res) != IORESOURCE_MEM)
+			continue;
+
+		lo = iova_pfn(iovad, window->res->start - window->offset);
+		hi = iova_pfn(iovad, window->res->end - window->offset);
+		reserve_iova(iovad, lo, hi);
+	}
+}
+
 static int iova_reserve_iommu_regions(struct device *dev,
 		struct iommu_domain *domain)
 {
@@ -238,6 +231,9 @@ static int iova_reserve_iommu_regions(struct device *dev,
 	LIST_HEAD(resv_regions);
 	int ret = 0;

+	if (dev_is_pci(dev))
+		iova_reserve_pci_windows(to_pci_dev(dev), iovad);
+
 	iommu_get_resv_regions(dev, &resv_regions);
 	list_for_each_entry(region, &resv_regions, list) {
 		unsigned long lo, hi;


@@ -1345,7 +1345,7 @@ void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
 	struct qi_desc desc;

 	if (mask) {
-		BUG_ON(addr & ((1 << (VTD_PAGE_SHIFT + mask)) - 1));
+		WARN_ON_ONCE(addr & ((1ULL << (VTD_PAGE_SHIFT + mask)) - 1));
 		addr |= (1ULL << (VTD_PAGE_SHIFT + mask - 1)) - 1;
 		desc.high = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
 	} else


@@ -1136,7 +1136,7 @@ static void intel_ir_reconfigure_irte(struct irq_data *irqd, bool force)
 	irte->dest_id = IRTE_DEST(cfg->dest_apicid);

 	/* Update the hardware only if the interrupt is in remapped mode. */
-	if (!force || ir_data->irq_2_iommu.mode == IRQ_REMAPPING)
+	if (force || ir_data->irq_2_iommu.mode == IRQ_REMAPPING)
 		modify_irte(&ir_data->irq_2_iommu, irte);
 }


@@ -1098,7 +1098,7 @@ static int rk_iommu_of_xlate(struct device *dev,
 	data->iommu = platform_get_drvdata(iommu_dev);
 	dev->archdata.iommu = data;

-	of_dev_put(iommu_dev);
+	platform_device_put(iommu_dev);

 	return 0;
 }
@@ -1175,8 +1175,15 @@ static int rk_iommu_probe(struct platform_device *pdev)
 	for (i = 0; i < iommu->num_clocks; ++i)
 		iommu->clocks[i].id = rk_iommu_clocks[i];

+	/*
+	 * iommu clocks should be present for all new devices and devicetrees
+	 * but there are older devicetrees without clocks out in the wild.
+	 * So clocks as optional for the time being.
+	 */
 	err = devm_clk_bulk_get(iommu->dev, iommu->num_clocks, iommu->clocks);
-	if (err)
+	if (err == -ENOENT)
+		iommu->num_clocks = 0;
+	else if (err)
 		return err;

 	err = clk_bulk_prepare(iommu->num_clocks, iommu->clocks);


@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -68,7 +68,7 @@ static void combiner_handle_irq(struct irq_desc *desc)
 		bit = readl_relaxed(combiner->regs[reg].addr);
 		status = bit & combiner->regs[reg].enabled;
-		if (!status)
+		if (bit && !status)
 			pr_warn_ratelimited("Unexpected IRQ on CPU%d: (%08x %08lx %p)\n",
 					    smp_processor_id(), bit,
 					    combiner->regs[reg].enabled,


@@ -290,7 +290,7 @@ do {								\
 		if (kthread_should_stop() ||			\
 		    test_bit(CACHE_SET_IO_DISABLE, &ca->set->flags)) {	\
 			set_current_state(TASK_RUNNING);	\
-			return 0;				\
+			goto out;				\
 		}						\
 								\
 		schedule();					\
@@ -378,6 +378,9 @@ retry_invalidate:
 			bch_prio_write(ca);
 		}
 	}
+out:
+	wait_for_kthread_stop();
+	return 0;
 }

 /* Allocation */
/* Allocation */ /* Allocation */

Some files were not shown because too many files have changed in this diff Show More