Merge branch 'linus' into x86/mm

Make sure to get the latest fixes before applying the ptdump enhancements.

commit 5b1ad68f9b
@@ -15,6 +15,9 @@ Properties:

Second cell specifies the irq distribution mode to cores
0=Round Robin; 1=cpu0, 2=cpu1, 4=cpu2, 8=cpu3

The second cell in interrupts property is deprecated and may be ignored by
the kernel.

intc accessed via the special ARC AUX register interface, hence "reg" property
is not specified.
@@ -33,11 +33,6 @@ Arguments

Description
===========

.. note::

   This documents the proposed CEC API. This API is not yet finalized
   and is currently only available as a staging kernel module.

Closes the cec device. Resources associated with the file descriptor are
freed. The device configuration remains unchanged.
@@ -39,11 +39,6 @@ Arguments

Description
===========

.. note::

   This documents the proposed CEC API. This API is not yet finalized
   and is currently only available as a staging kernel module.

The :c:func:`ioctl()` function manipulates cec device parameters. The
argument ``fd`` must be an open file descriptor.
@@ -46,11 +46,6 @@ Arguments

Description
===========

.. note::

   This documents the proposed CEC API. This API is not yet finalized
   and is currently only available as a staging kernel module.

To open a cec device applications call :c:func:`open()` with the
desired device name. The function has no side effects; the device
configuration remains unchanged.
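A minimal sketch of that call sequence, assuming a hypothetical adapter
node named ``/dev/cec0`` (drivers create ``/dev/cecX``)::

    #include <fcntl.h>      /* open()  */
    #include <unistd.h>     /* close() */

    int main(void)
    {
            /* Hypothetical adapter node; drivers create /dev/cecX. */
            int fd = open("/dev/cec0", O_RDWR);

            if (fd < 0)
                    return 1;       /* no device or no permission */

            /* ... issue CEC ioctls on fd here ... */

            close(fd);              /* frees fd resources only */
            return 0;
    }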
@@ -39,11 +39,6 @@ Arguments

Description
===========

.. note::

   This documents the proposed CEC API. This API is not yet finalized
   and is currently only available as a staging kernel module.

With the :c:func:`poll()` function applications can wait for CEC
events.
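A small sketch of such a wait, assuming the usual poll(2) semantics
for this API (readable when messages arrive, ``POLLPRI`` when an event
is pending)::

    #include <poll.h>

    /* Wait up to five seconds for CEC activity on an open adapter fd:
     * POLLIN/POLLRDNORM for received messages, POLLPRI for events. */
    static int wait_for_cec(int fd)
    {
            struct pollfd pfd = {
                    .fd     = fd,
                    .events = POLLIN | POLLRDNORM | POLLPRI,
            };

            return poll(&pfd, 1, 5000); /* >0 ready, 0 timeout, <0 error */
    }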
@@ -3,11 +3,6 @@

Introduction
============

.. note::

   This documents the proposed CEC API. This API is not yet finalized
   and is currently only available as a staging kernel module.

HDMI connectors provide a single pin for use by the Consumer Electronics
Control protocol. This protocol allows different devices connected by an
HDMI cable to communicate. The protocol for CEC version 1.4 is defined

@@ -31,3 +26,15 @@ control just the CEC pin.

Drivers that support CEC will create a CEC device node (/dev/cecX) to
give userspace access to the CEC adapter. The
:ref:`CEC_ADAP_G_CAPS` ioctl will tell userspace what it is allowed to do.

In order to check the support and test it, it is suggested to download
the `v4l-utils <https://git.linuxtv.org/v4l-utils.git/>`_ package. It
provides three tools to handle CEC:

- cec-ctl: the Swiss army knife of CEC. Allows you to configure, transmit
  and monitor CEC messages.

- cec-compliance: does a CEC compliance test of a remote CEC device to
  determine how compliant the CEC implementation is.

- cec-follower: emulates a CEC follower.
@@ -29,11 +29,6 @@ Arguments

Description
===========

.. note::

   This documents the proposed CEC API. This API is not yet finalized
   and is currently only available as a staging kernel module.

All cec devices must support :ref:`ioctl CEC_ADAP_G_CAPS <CEC_ADAP_G_CAPS>`. To query
device information, applications call the ioctl with a pointer to a
struct :c:type:`cec_caps`. The driver fills the structure and
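A hedged example of that query, assuming the uapi struct
:c:type:`cec_caps` from ``<linux/cec.h>``; ``print_caps()`` is a
hypothetical helper name::

    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/cec.h>

    /* Query and print the adapter capabilities. */
    static int print_caps(int fd)
    {
            struct cec_caps caps;

            memset(&caps, 0, sizeof(caps));
            if (ioctl(fd, CEC_ADAP_G_CAPS, &caps) < 0)
                    return -1;

            printf("driver: %s, capabilities: 0x%x\n",
                   caps.driver, caps.capabilities);
            return 0;
    }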
@@ -35,11 +35,6 @@ Arguments

Description
===========

.. note::

   This documents the proposed CEC API. This API is not yet finalized
   and is currently only available as a staging kernel module.

To query the current CEC logical addresses, applications call
:ref:`ioctl CEC_ADAP_G_LOG_ADDRS <CEC_ADAP_G_LOG_ADDRS>` with a pointer to a
struct :c:type:`cec_log_addrs` where the driver stores the logical addresses.
@@ -35,11 +35,6 @@ Arguments

Description
===========

.. note::

   This documents the proposed CEC API. This API is not yet finalized
   and is currently only available as a staging kernel module.

To query the current physical address applications call
:ref:`ioctl CEC_ADAP_G_PHYS_ADDR <CEC_ADAP_G_PHYS_ADDR>` with a pointer to a __u16 where the
driver stores the physical address.
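A short sketch of the same call, assuming the conventional a.b.c.d
nibble rendering of CEC physical addresses; ``print_phys_addr()`` is a
hypothetical helper::

    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/cec.h>

    /* Read back the current physical address (0xffff = invalid). */
    static int print_phys_addr(int fd)
    {
            __u16 pa;

            if (ioctl(fd, CEC_ADAP_G_PHYS_ADDR, &pa) < 0)
                    return -1;

            printf("physical address: %x.%x.%x.%x\n",
                   pa >> 12, (pa >> 8) & 0xf, (pa >> 4) & 0xf, pa & 0xf);
            return 0;
    }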
@@ -30,11 +30,6 @@ Arguments

Description
===========

.. note::

   This documents the proposed CEC API. This API is not yet finalized
   and is currently only available as a staging kernel module.

CEC devices can send asynchronous events. These can be retrieved by
calling :c:func:`CEC_DQEVENT`. If the file descriptor is in
non-blocking mode and no event is pending, then it will return -1 and
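A sketch of dequeuing one event under those semantics;
``next_event()`` is a hypothetical helper, and the ``EAGAIN`` branch is
the non-blocking case described above::

    #include <errno.h>
    #include <sys/ioctl.h>
    #include <linux/cec.h>

    /* Dequeue one pending event; in non-blocking mode the ioctl fails
     * with EAGAIN when nothing is queued. */
    static int next_event(int fd, struct cec_event *ev)
    {
            if (ioctl(fd, CEC_DQEVENT, ev) < 0)
                    return (errno == EAGAIN) ? 0 : -1;

            return 1; /* ev->event identifies the event type */
    }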
@@ -31,11 +31,6 @@ Arguments

Description
===========

.. note::

   This documents the proposed CEC API. This API is not yet finalized
   and is currently only available as a staging kernel module.

By default any filehandle can use :ref:`CEC_TRANSMIT`, but in order to prevent
applications from stepping on each other's toes it must be possible to
obtain exclusive access to the CEC adapter. This ioctl sets the
@@ -34,11 +34,6 @@ Arguments

Description
===========

.. note::

   This documents the proposed CEC API. This API is not yet finalized
   and is currently only available as a staging kernel module.

To receive a CEC message the application has to fill in the
``timeout`` field of struct :c:type:`cec_msg` and pass it to
:ref:`ioctl CEC_RECEIVE <CEC_RECEIVE>`.
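A minimal sketch of that receive path; ``receive_msg()`` is a
hypothetical helper, and the ``timeout`` field is assumed to be in
milliseconds per this API::

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/cec.h>

    /* Receive one message, waiting at most timeout_ms milliseconds. */
    static int receive_msg(int fd, struct cec_msg *msg, __u32 timeout_ms)
    {
            memset(msg, 0, sizeof(*msg));
            msg->timeout = timeout_ms;

            return ioctl(fd, CEC_RECEIVE, msg); /* 0 on success */
    }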
MAINTAINERS
@@ -1091,7 +1091,7 @@ F: arch/arm/boot/dts/aspeed-*

F: drivers/*/*aspeed*

ARM/ATMEL AT91RM9200, AT91SAM9 AND SAMA5 SOC SUPPORT
M: Nicolas Ferre <nicolas.ferre@atmel.com>
M: Nicolas Ferre <nicolas.ferre@microchip.com>
M: Alexandre Belloni <alexandre.belloni@free-electrons.com>
M: Jean-Christophe Plagniol-Villard <plagnioj@jcrosoft.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)

@@ -1773,7 +1773,7 @@ F: drivers/soc/renesas/

F: include/linux/soc/renesas/

ARM/SOCFPGA ARCHITECTURE
M: Dinh Nguyen <dinguyen@opensource.altera.com>
M: Dinh Nguyen <dinguyen@kernel.org>
S: Maintained
F: arch/arm/mach-socfpga/
F: arch/arm/boot/dts/socfpga*

@@ -1783,7 +1783,7 @@ W: http://www.rocketboards.org

T: git git://git.kernel.org/pub/scm/linux/kernel/git/dinguyen/linux.git

ARM/SOCFPGA CLOCK FRAMEWORK SUPPORT
M: Dinh Nguyen <dinguyen@opensource.altera.com>
M: Dinh Nguyen <dinguyen@kernel.org>
S: Maintained
F: drivers/clk/socfpga/

@@ -2175,56 +2175,56 @@ F: include/linux/atm*

F: include/uapi/linux/atm*

ATMEL AT91 / AT32 MCI DRIVER
M: Ludovic Desroches <ludovic.desroches@atmel.com>
M: Ludovic Desroches <ludovic.desroches@microchip.com>
S: Maintained
F: drivers/mmc/host/atmel-mci.c

ATMEL AT91 SAMA5D2-Compatible Shutdown Controller
M: Nicolas Ferre <nicolas.ferre@atmel.com>
M: Nicolas Ferre <nicolas.ferre@microchip.com>
S: Supported
F: drivers/power/reset/at91-sama5d2_shdwc.c

ATMEL SAMA5D2 ADC DRIVER
M: Ludovic Desroches <ludovic.desroches@atmel.com>
M: Ludovic Desroches <ludovic.desroches@microchip.com>
L: linux-iio@vger.kernel.org
S: Supported
F: drivers/iio/adc/at91-sama5d2_adc.c

ATMEL Audio ALSA driver
M: Nicolas Ferre <nicolas.ferre@atmel.com>
M: Nicolas Ferre <nicolas.ferre@microchip.com>
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
S: Supported
F: sound/soc/atmel

ATMEL XDMA DRIVER
M: Ludovic Desroches <ludovic.desroches@atmel.com>
M: Ludovic Desroches <ludovic.desroches@microchip.com>
L: linux-arm-kernel@lists.infradead.org
L: dmaengine@vger.kernel.org
S: Supported
F: drivers/dma/at_xdmac.c

ATMEL I2C DRIVER
M: Ludovic Desroches <ludovic.desroches@atmel.com>
M: Ludovic Desroches <ludovic.desroches@microchip.com>
L: linux-i2c@vger.kernel.org
S: Supported
F: drivers/i2c/busses/i2c-at91.c

ATMEL ISI DRIVER
M: Ludovic Desroches <ludovic.desroches@atmel.com>
M: Ludovic Desroches <ludovic.desroches@microchip.com>
L: linux-media@vger.kernel.org
S: Supported
F: drivers/media/platform/soc_camera/atmel-isi.c
F: include/media/atmel-isi.h

ATMEL LCDFB DRIVER
M: Nicolas Ferre <nicolas.ferre@atmel.com>
M: Nicolas Ferre <nicolas.ferre@microchip.com>
L: linux-fbdev@vger.kernel.org
S: Maintained
F: drivers/video/fbdev/atmel_lcdfb.c
F: include/video/atmel_lcdc.h

ATMEL MACB ETHERNET DRIVER
M: Nicolas Ferre <nicolas.ferre@atmel.com>
M: Nicolas Ferre <nicolas.ferre@microchip.com>
S: Supported
F: drivers/net/ethernet/cadence/

@@ -2236,32 +2236,32 @@ S: Supported

F: drivers/mtd/nand/atmel_nand*

ATMEL SDMMC DRIVER
M: Ludovic Desroches <ludovic.desroches@atmel.com>
M: Ludovic Desroches <ludovic.desroches@microchip.com>
L: linux-mmc@vger.kernel.org
S: Supported
F: drivers/mmc/host/sdhci-of-at91.c

ATMEL SPI DRIVER
M: Nicolas Ferre <nicolas.ferre@atmel.com>
M: Nicolas Ferre <nicolas.ferre@microchip.com>
S: Supported
F: drivers/spi/spi-atmel.*

ATMEL SSC DRIVER
M: Nicolas Ferre <nicolas.ferre@atmel.com>
M: Nicolas Ferre <nicolas.ferre@microchip.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Supported
F: drivers/misc/atmel-ssc.c
F: include/linux/atmel-ssc.h

ATMEL Timer Counter (TC) AND CLOCKSOURCE DRIVERS
M: Nicolas Ferre <nicolas.ferre@atmel.com>
M: Nicolas Ferre <nicolas.ferre@microchip.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Supported
F: drivers/misc/atmel_tclib.c
F: drivers/clocksource/tcb_clksrc.c

ATMEL USBA UDC DRIVER
M: Nicolas Ferre <nicolas.ferre@atmel.com>
M: Nicolas Ferre <nicolas.ferre@microchip.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Supported
F: drivers/usb/gadget/udc/atmel_usba_udc.*

@@ -9736,7 +9736,7 @@ S: Maintained

F: drivers/pinctrl/pinctrl-at91.*

PIN CONTROLLER - ATMEL AT91 PIO4
M: Ludovic Desroches <ludovic.desroches@atmel.com>
M: Ludovic Desroches <ludovic.desroches@microchip.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-gpio@vger.kernel.org
S: Supported

@@ -10195,7 +10195,6 @@ F: drivers/media/tuners/qt1010*

QUALCOMM ATHEROS ATH9K WIRELESS DRIVER
M: QCA ath9k Development <ath9k-devel@qca.qualcomm.com>
L: linux-wireless@vger.kernel.org
L: ath9k-devel@lists.ath9k.org
W: http://wireless.kernel.org/en/users/Drivers/ath9k
S: Supported
F: drivers/net/wireless/ath/ath9k/

@@ -13066,7 +13065,7 @@ F: drivers/input/serio/userio.c

F: include/uapi/linux/userio.h

VIRTIO CONSOLE DRIVER
M: Amit Shah <amit.shah@redhat.com>
M: Amit Shah <amit@kernel.org>
L: virtualization@lists.linux-foundation.org
S: Maintained
F: drivers/char/virtio_console.c
Makefile
@@ -1,8 +1,8 @@

VERSION = 4
PATCHLEVEL = 10
SUBLEVEL = 0
EXTRAVERSION = -rc5
NAME = Anniversary Edition
EXTRAVERSION = -rc8
NAME = Fearless Coyote

# *DOCUMENTATION*
# To see a list of typical targets execute "make help"

@@ -797,7 +797,7 @@ KBUILD_CFLAGS += $(call cc-option,-Werror=incompatible-pointer-types)

KBUILD_ARFLAGS := $(call ar-option,D)

# check for 'asm goto'
ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC)), y)
ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y)
KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO
KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO
endif
@@ -26,7 +26,9 @@ static inline void __delay(unsigned long loops)

"	lp  1f	\n"
"	nop	\n"
"1:		\n"
: : "r"(loops));
:
: "r"(loops)
: "lp_count");
}

extern void __bad_udelay(void);
@@ -71,14 +71,14 @@ ENTRY(stext)

GET_CPU_ID r5
cmp r5, 0
mov.nz r0, r5
#ifdef CONFIG_ARC_SMP_HALT_ON_RESET
; Non-Master can proceed as system would be booted sufficiently
jnz first_lines_of_secondary
#else
bz .Lmaster_proceed

; Non-Masters wait for Master to boot enough and bring them up
jnz arc_platform_smp_wait_to_boot
#endif
; Master falls thru
; when they resume, tail-call to entry point
mov blink, @first_lines_of_secondary
j arc_platform_smp_wait_to_boot

.Lmaster_proceed:
#endif

; Clear BSS before updating any globals
@@ -93,11 +93,10 @@ static void mcip_probe_n_setup(void)

READ_BCR(ARC_REG_MCIP_BCR, mp);

sprintf(smp_cpuinfo_buf,
"Extn [SMP]\t: ARConnect (v%d): %d cores with %s%s%s%s%s\n",
"Extn [SMP]\t: ARConnect (v%d): %d cores with %s%s%s%s\n",
mp.ver, mp.num_cores,
IS_AVAIL1(mp.ipi, "IPI "),
IS_AVAIL1(mp.idu, "IDU "),
IS_AVAIL1(mp.llm, "LLM "),
IS_AVAIL1(mp.dbg, "DEBUG "),
IS_AVAIL1(mp.gfrc, "GFRC"));

@@ -175,7 +174,6 @@ static void idu_irq_unmask(struct irq_data *data)

raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

#ifdef CONFIG_SMP
static int
idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
bool force)

@@ -205,12 +203,27 @@ idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,

return IRQ_SET_MASK_OK;
}
#endif

static void idu_irq_enable(struct irq_data *data)
{
/*
 * By default send all common interrupts to all available online CPUs.
 * The affinity of common interrupts in IDU must be set manually since
 * in some cases the kernel will not call irq_set_affinity() by itself:
 *   1. When the kernel is not configured with support of SMP.
 *   2. When the kernel is configured with support of SMP but upper
 *      interrupt controllers does not support setting of the affinity
 *      and cannot propagate it to IDU.
 */
idu_irq_set_affinity(data, cpu_online_mask, false);
idu_irq_unmask(data);
}

static struct irq_chip idu_irq_chip = {
.name = "MCIP IDU Intc",
.irq_mask = idu_irq_mask,
.irq_unmask = idu_irq_unmask,
.irq_enable = idu_irq_enable,
#ifdef CONFIG_SMP
.irq_set_affinity = idu_irq_set_affinity,
#endif

@@ -243,36 +256,14 @@ static int idu_irq_xlate(struct irq_domain *d, struct device_node *n,

const u32 *intspec, unsigned int intsize,
irq_hw_number_t *out_hwirq, unsigned int *out_type)
{
irq_hw_number_t hwirq = *out_hwirq = intspec[0];
int distri = intspec[1];
unsigned long flags;

/*
 * Ignore value of interrupt distribution mode for common interrupts in
 * IDU which resides in intspec[1] since setting an affinity using value
 * from Device Tree is deprecated in ARC.
 */
*out_hwirq = intspec[0];
*out_type = IRQ_TYPE_NONE;

/* XXX: validate distribution scheme again online cpu mask */
if (distri == 0) {
/* 0 - Round Robin to all cpus, otherwise 1 bit per core */
raw_spin_lock_irqsave(&mcip_lock, flags);
idu_set_dest(hwirq, BIT(num_online_cpus()) - 1);
idu_set_mode(hwirq, IDU_M_TRIG_LEVEL, IDU_M_DISTRI_RR);
raw_spin_unlock_irqrestore(&mcip_lock, flags);
} else {
/*
 * DEST based distribution for Level Triggered intr can only
 * have 1 CPU, so generalize it to always contain 1 cpu
 */
int cpu = ffs(distri);

if (cpu != fls(distri))
pr_warn("IDU irq %lx distri mode set to cpu %x\n",
hwirq, cpu);

raw_spin_lock_irqsave(&mcip_lock, flags);
idu_set_dest(hwirq, cpu);
idu_set_mode(hwirq, IDU_M_TRIG_LEVEL, IDU_M_DISTRI_DEST);
raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

return 0;
}
|
@ -90,22 +90,37 @@ void __init smp_cpus_done(unsigned int max_cpus)
|
||||
*/
|
||||
static volatile int wake_flag;
|
||||
|
||||
#ifdef CONFIG_ISA_ARCOMPACT
|
||||
|
||||
#define __boot_read(f) f
|
||||
#define __boot_write(f, v) f = v
|
||||
|
||||
#else
|
||||
|
||||
#define __boot_read(f) arc_read_uncached_32(&f)
|
||||
#define __boot_write(f, v) arc_write_uncached_32(&f, v)
|
||||
|
||||
#endif
|
||||
|
||||
static void arc_default_smp_cpu_kick(int cpu, unsigned long pc)
|
||||
{
|
||||
BUG_ON(cpu == 0);
|
||||
wake_flag = cpu;
|
||||
|
||||
__boot_write(wake_flag, cpu);
|
||||
}
|
||||
|
||||
void arc_platform_smp_wait_to_boot(int cpu)
|
||||
{
|
||||
while (wake_flag != cpu)
|
||||
/* for halt-on-reset, we've waited already */
|
||||
if (IS_ENABLED(CONFIG_ARC_SMP_HALT_ON_RESET))
|
||||
return;
|
||||
|
||||
while (__boot_read(wake_flag) != cpu)
|
||||
;
|
||||
|
||||
wake_flag = 0;
|
||||
__asm__ __volatile__("j @first_lines_of_secondary \n");
|
||||
__boot_write(wake_flag, 0);
|
||||
}
|
||||
|
||||
|
||||
const char *arc_platform_smp_cpuinfo(void)
|
||||
{
|
||||
return plat_smp_ops.info ? : "";
|
||||
|
@@ -241,8 +241,9 @@ int misaligned_fixup(unsigned long address, struct pt_regs *regs,

if (state.fault)
goto fault;

/* clear any remanants of delay slot */
if (delay_mode(regs)) {
regs->ret = regs->bta;
regs->ret = regs->bta & ~1U;
regs->status32 &= ~STATUS_DE_MASK;
} else {
regs->ret += state.instr_len;
@@ -617,7 +617,7 @@ dtb-$(CONFIG_ARCH_ORION5X) += \

orion5x-lacie-ethernet-disk-mini-v2.dtb \
orion5x-linkstation-lsgl.dtb \
orion5x-linkstation-lswtgl.dtb \
orion5x-lschl.dtb \
orion5x-linkstation-lschl.dtb \
orion5x-lswsgl.dtb \
orion5x-maxtor-shared-storage-2.dtb \
orion5x-netgear-wnr854t.dtb \
@@ -18,6 +18,14 @@

/ {
#address-cells = <1>;
#size-cells = <1>;
/*
 * The decompressor and also some bootloaders rely on a
 * pre-existing /chosen node to be available to insert the
 * command line and merge other ATAGS info.
 * Also for U-Boot there must be a pre-existing /memory node.
 */
chosen {};
memory { device_type = "memory"; reg = <0 0>; };

aliases {
gpio0 = &gpio1;

@@ -16,6 +16,14 @@

#size-cells = <1>;

interrupt-parent = <&icoll>;
/*
 * The decompressor and also some bootloaders rely on a
 * pre-existing /chosen node to be available to insert the
 * command line and merge other ATAGS info.
 * Also for U-Boot there must be a pre-existing /memory node.
 */
chosen {};
memory { device_type = "memory"; reg = <0 0>; };

aliases {
gpio0 = &gpio0;

@@ -14,6 +14,14 @@

/ {
#address-cells = <1>;
#size-cells = <1>;
/*
 * The decompressor and also some bootloaders rely on a
 * pre-existing /chosen node to be available to insert the
 * command line and merge other ATAGS info.
 * Also for U-Boot there must be a pre-existing /memory node.
 */
chosen {};
memory { device_type = "memory"; reg = <0 0>; };

aliases {
ethernet0 = &fec;

@@ -19,6 +19,14 @@

/ {
#address-cells = <1>;
#size-cells = <1>;
/*
 * The decompressor and also some bootloaders rely on a
 * pre-existing /chosen node to be available to insert the
 * command line and merge other ATAGS info.
 * Also for U-Boot there must be a pre-existing /memory node.
 */
chosen {};
memory { device_type = "memory"; reg = <0 0>; };

aliases {
ethernet0 = &fec;

@@ -17,6 +17,14 @@

#size-cells = <1>;

interrupt-parent = <&icoll>;
/*
 * The decompressor and also some bootloaders rely on a
 * pre-existing /chosen node to be available to insert the
 * command line and merge other ATAGS info.
 * Also for U-Boot there must be a pre-existing /memory node.
 */
chosen {};
memory { device_type = "memory"; reg = <0 0>; };

aliases {
ethernet0 = &mac0;

@@ -12,6 +12,14 @@

/ {
#address-cells = <1>;
#size-cells = <1>;
/*
 * The decompressor and also some bootloaders rely on a
 * pre-existing /chosen node to be available to insert the
 * command line and merge other ATAGS info.
 * Also for U-Boot there must be a pre-existing /memory node.
 */
chosen {};
memory { device_type = "memory"; reg = <0 0>; };

aliases {
serial0 = &uart1;

@@ -13,6 +13,14 @@

/ {
#address-cells = <1>;
#size-cells = <1>;
/*
 * The decompressor and also some bootloaders rely on a
 * pre-existing /chosen node to be available to insert the
 * command line and merge other ATAGS info.
 * Also for U-Boot there must be a pre-existing /memory node.
 */
chosen {};
memory { device_type = "memory"; reg = <0 0>; };

aliases {
ethernet0 = &fec;

@@ -17,6 +17,14 @@

/ {
#address-cells = <1>;
#size-cells = <1>;
/*
 * The decompressor and also some bootloaders rely on a
 * pre-existing /chosen node to be available to insert the
 * command line and merge other ATAGS info.
 * Also for U-Boot there must be a pre-existing /memory node.
 */
chosen {};
memory { device_type = "memory"; reg = <0 0>; };

aliases {
ethernet0 = &fec;

@@ -19,6 +19,14 @@

/ {
#address-cells = <1>;
#size-cells = <1>;
/*
 * The decompressor and also some bootloaders rely on a
 * pre-existing /chosen node to be available to insert the
 * command line and merge other ATAGS info.
 * Also for U-Boot there must be a pre-existing /memory node.
 */
chosen {};
memory { device_type = "memory"; reg = <0 0>; };

aliases {
ethernet0 = &fec;

@@ -19,6 +19,14 @@

/ {
#address-cells = <1>;
#size-cells = <1>;
/*
 * The decompressor and also some bootloaders rely on a
 * pre-existing /chosen node to be available to insert the
 * command line and merge other ATAGS info.
 * Also for U-Boot there must be a pre-existing /memory node.
 */
chosen {};
memory { device_type = "memory"; reg = <0 0>; };

aliases {
ethernet0 = &fec;
@@ -137,7 +137,7 @@

&gpio4 {
gpio-ranges = <&iomuxc  5 136 1>, <&iomuxc  6 145 1>, <&iomuxc  7 150 1>,
<&iomuxc  8 146 1>, <&iomuxc  9 151 1>, <&iomuxc 10 147 1>,
<&iomuxc 11 151 1>, <&iomuxc 12 148 1>, <&iomuxc 13 153 1>,
<&iomuxc 11 152 1>, <&iomuxc 12 148 1>, <&iomuxc 13 153 1>,
<&iomuxc 14 149 1>, <&iomuxc 15 154 1>, <&iomuxc 16  39 7>,
<&iomuxc 23  56 1>, <&iomuxc 24  61 7>, <&iomuxc 31  46 1>;
};
@@ -16,6 +16,14 @@

/ {
#address-cells = <1>;
#size-cells = <1>;
/*
 * The decompressor and also some bootloaders rely on a
 * pre-existing /chosen node to be available to insert the
 * command line and merge other ATAGS info.
 * Also for U-Boot there must be a pre-existing /memory node.
 */
chosen {};
memory { device_type = "memory"; reg = <0 0>; };

aliases {
ethernet0 = &fec;

@@ -14,6 +14,14 @@

/ {
#address-cells = <1>;
#size-cells = <1>;
/*
 * The decompressor and also some bootloaders rely on a
 * pre-existing /chosen node to be available to insert the
 * command line and merge other ATAGS info.
 * Also for U-Boot there must be a pre-existing /memory node.
 */
chosen {};
memory { device_type = "memory"; reg = <0 0>; };

aliases {
ethernet0 = &fec;

@@ -15,6 +15,14 @@

/ {
#address-cells = <1>;
#size-cells = <1>;
/*
 * The decompressor and also some bootloaders rely on a
 * pre-existing /chosen node to be available to insert the
 * command line and merge other ATAGS info.
 * Also for U-Boot there must be a pre-existing /memory node.
 */
chosen {};
memory { device_type = "memory"; reg = <0 0>; };

aliases {
can0 = &flexcan1;

@@ -15,6 +15,14 @@

/ {
#address-cells = <1>;
#size-cells = <1>;
/*
 * The decompressor and also some bootloaders rely on a
 * pre-existing /chosen node to be available to insert the
 * command line and merge other ATAGS info.
 * Also for U-Boot there must be a pre-existing /memory node.
 */
chosen {};
memory { device_type = "memory"; reg = <0 0>; };

aliases {
ethernet0 = &fec1;

@@ -50,6 +50,14 @@

/ {
#address-cells = <1>;
#size-cells = <1>;
/*
 * The decompressor and also some bootloaders rely on a
 * pre-existing /chosen node to be available to insert the
 * command line and merge other ATAGS info.
 * Also for U-Boot there must be a pre-existing /memory node.
 */
chosen {};
memory { device_type = "memory"; reg = <0 0>; };

aliases {
gpio0 = &gpio1;
@@ -2,7 +2,7 @@

* Device Tree file for Buffalo Linkstation LS-CHLv3
*
* Copyright (C) 2016 Ash Hughes <ashley.hughes@blueyonder.co.uk>
* Copyright (C) 2015, 2016
* Copyright (C) 2015-2017
* Roger Shimizu <rogershimizu@gmail.com>
*
* This file is dual-licensed: you can use it either under the terms

@@ -52,7 +52,7 @@

#include <dt-bindings/gpio/gpio.h>

/ {
model = "Buffalo Linkstation Live v3 (LS-CHL)";
model = "Buffalo Linkstation LiveV3 (LS-CHL)";
compatible = "buffalo,lschl", "marvell,orion5x-88f5182", "marvell,orion5x";

memory { /* 128 MB */
@@ -680,6 +680,7 @@

phy-names = "usb2-phy", "usb3-phy";
phys = <&usb2_picophy0>,
<&phy_port2 PHY_TYPE_USB3>;
snps,dis_u3_susphy_quirk;
};
};
@@ -64,8 +64,8 @@ CONFIG_NETFILTER=y

CONFIG_NETFILTER_NETLINK_QUEUE=m
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_EVENTS=y
CONFIG_NF_CT_PROTO_SCTP=m
CONFIG_NF_CT_PROTO_UDPLITE=m
CONFIG_NF_CT_PROTO_SCTP=y
CONFIG_NF_CT_PROTO_UDPLITE=y
CONFIG_NF_CONNTRACK_AMANDA=m
CONFIG_NF_CONNTRACK_FTP=m
CONFIG_NF_CONNTRACK_H323=m

@@ -56,8 +56,8 @@ CONFIG_NETFILTER=y

CONFIG_NETFILTER_NETLINK_QUEUE=m
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_EVENTS=y
CONFIG_NF_CT_PROTO_SCTP=m
CONFIG_NF_CT_PROTO_UDPLITE=m
CONFIG_NF_CT_PROTO_SCTP=y
CONFIG_NF_CT_PROTO_UDPLITE=y
CONFIG_NF_CONNTRACK_AMANDA=m
CONFIG_NF_CONNTRACK_FTP=m
CONFIG_NF_CONNTRACK_H323=m
@@ -600,7 +600,7 @@ static int gpr_set(struct task_struct *target,

const void *kbuf, const void __user *ubuf)
{
int ret;
struct pt_regs newregs;
struct pt_regs newregs = *task_pt_regs(target);

ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&newregs,
@@ -60,7 +60,6 @@

#define to_mmdc_pmu(p) container_of(p, struct mmdc_pmu, pmu)

static enum cpuhp_state cpuhp_mmdc_state;
static int ddr_type;

struct fsl_mmdc_devtype_data {

@@ -82,6 +81,7 @@ static const struct of_device_id imx_mmdc_dt_ids[] = {

#ifdef CONFIG_PERF_EVENTS

static enum cpuhp_state cpuhp_mmdc_state;
static DEFINE_IDA(mmdc_ida);

PMU_EVENT_ATTR_STRING(total-cycles, mmdc_pmu_total_cycles, "event=0x00")
@@ -610,9 +610,9 @@ static int __init early_abort_handler(unsigned long addr, unsigned int fsr,

void __init early_abt_enable(void)
{
fsr_info[22].fn = early_abort_handler;
fsr_info[FSR_FS_AEA].fn = early_abort_handler;
local_abt_enable();
fsr_info[22].fn = do_bad;
fsr_info[FSR_FS_AEA].fn = do_bad;
}

#ifndef CONFIG_ARM_LPAE
@@ -11,11 +11,15 @@

#define FSR_FS5_0 (0x3f)

#ifdef CONFIG_ARM_LPAE
#define FSR_FS_AEA 17

static inline int fsr_fs(unsigned int fsr)
{
return fsr & FSR_FS5_0;
}
#else
#define FSR_FS_AEA 22

static inline int fsr_fs(unsigned int fsr)
{
return (fsr & FSR_FS3_0) | (fsr & FSR_FS4) >> 6;
@@ -55,6 +55,24 @@

#address-cells = <2>;
#size-cells = <2>;

reserved-memory {
#address-cells = <2>;
#size-cells = <2>;
ranges;

/* 16 MiB reserved for Hardware ROM Firmware */
hwrom_reserved: hwrom@0 {
reg = <0x0 0x0 0x0 0x1000000>;
no-map;
};

/* 2 MiB reserved for ARM Trusted Firmware (BL31) */
secmon_reserved: secmon@10000000 {
reg = <0x0 0x10000000 0x0 0x200000>;
no-map;
};
};

cpus {
#address-cells = <0x2>;
#size-cells = <0x0>;
@@ -151,6 +151,18 @@

status = "okay";
pinctrl-0 = <&eth_rgmii_pins>;
pinctrl-names = "default";
phy-handle = <&eth_phy0>;

mdio {
compatible = "snps,dwmac-mdio";
#address-cells = <1>;
#size-cells = <0>;

eth_phy0: ethernet-phy@0 {
reg = <0>;
eee-broken-1000t;
};
};
};

&ir {
@@ -193,15 +193,16 @@ AES_ENTRY(aes_cbc_encrypt)

cbz w6, .Lcbcencloop

ld1 {v0.16b}, [x5]			/* get iv */
enc_prepare w3, x2, x5
enc_prepare w3, x2, x6

.Lcbcencloop:
ld1 {v1.16b}, [x1], #16		/* get next pt block */
eor v0.16b, v0.16b, v1.16b	/* ..and xor with iv */
encrypt_block v0, w3, x2, x5, w6
encrypt_block v0, w3, x2, x6, w7
st1 {v0.16b}, [x0], #16
subs w4, w4, #1
bne .Lcbcencloop
st1 {v0.16b}, [x5]			/* return iv */
ret
AES_ENDPROC(aes_cbc_encrypt)

@@ -211,7 +212,7 @@ AES_ENTRY(aes_cbc_decrypt)

cbz w6, .LcbcdecloopNx

ld1 {v7.16b}, [x5]			/* get iv */
dec_prepare w3, x2, x5
dec_prepare w3, x2, x6

.LcbcdecloopNx:
#if INTERLEAVE >= 2

@@ -248,7 +249,7 @@ AES_ENTRY(aes_cbc_decrypt)

.Lcbcdecloop:
ld1 {v1.16b}, [x1], #16		/* get next ct block */
mov v0.16b, v1.16b			/* ...and copy to v0 */
decrypt_block v0, w3, x2, x5, w6
decrypt_block v0, w3, x2, x6, w7
eor v0.16b, v0.16b, v7.16b	/* xor with iv => pt */
mov v7.16b, v1.16b			/* ct is next iv */
st1 {v0.16b}, [x0], #16

@@ -256,6 +257,7 @@ AES_ENTRY(aes_cbc_decrypt)

bne .Lcbcdecloop
.Lcbcdecout:
FRAME_POP
st1 {v7.16b}, [x5]			/* return iv */
ret
AES_ENDPROC(aes_cbc_decrypt)

@@ -267,24 +269,15 @@ AES_ENDPROC(aes_cbc_decrypt)

AES_ENTRY(aes_ctr_encrypt)
FRAME_PUSH
cbnz w6, .Lctrfirst		/* 1st time around? */
umov x5, v4.d[1]		/* keep swabbed ctr in reg */
rev x5, x5
#if INTERLEAVE >= 2
cmn w5, w4			/* 32 bit overflow? */
bcs .Lctrinc
add x5, x5, #1			/* increment BE ctr */
b .LctrincNx
#else
b .Lctrinc
#endif
.Lctrfirst:
cbz w6, .Lctrnotfirst		/* 1st time around? */
enc_prepare w3, x2, x6
ld1 {v4.16b}, [x5]
umov x5, v4.d[1]		/* keep swabbed ctr in reg */
rev x5, x5

.Lctrnotfirst:
umov x8, v4.d[1]		/* keep swabbed ctr in reg */
rev x8, x8
#if INTERLEAVE >= 2
cmn w5, w4			/* 32 bit overflow? */
cmn w8, w4			/* 32 bit overflow? */
bcs .Lctrloop
.LctrloopNx:
subs w4, w4, #INTERLEAVE

@@ -292,11 +285,11 @@ AES_ENTRY(aes_ctr_encrypt)

#if INTERLEAVE == 2
mov v0.8b, v4.8b
mov v1.8b, v4.8b
rev x7, x5
add x5, x5, #1
rev x7, x8
add x8, x8, #1
ins v0.d[1], x7
rev x7, x5
add x5, x5, #1
rev x7, x8
add x8, x8, #1
ins v1.d[1], x7
ld1 {v2.16b-v3.16b}, [x1], #32	/* get 2 input blocks */
do_encrypt_block2x

@@ -305,7 +298,7 @@ AES_ENTRY(aes_ctr_encrypt)

st1 {v0.16b-v1.16b}, [x0], #32
#else
ldr q8, =0x30000000200000001	/* addends 1,2,3[,0] */
dup v7.4s, w5
dup v7.4s, w8
mov v0.16b, v4.16b
add v7.4s, v7.4s, v8.4s
mov v1.16b, v4.16b

@@ -323,18 +316,12 @@ AES_ENTRY(aes_ctr_encrypt)

eor v2.16b, v7.16b, v2.16b
eor v3.16b, v5.16b, v3.16b
st1 {v0.16b-v3.16b}, [x0], #64
add x5, x5, #INTERLEAVE
add x8, x8, #INTERLEAVE
#endif
cbz w4, .LctroutNx
.LctrincNx:
rev x7, x5
rev x7, x8
ins v4.d[1], x7
cbz w4, .Lctrout
b .LctrloopNx
.LctroutNx:
sub x5, x5, #1
rev x7, x5
ins v4.d[1], x7
b .Lctrout
.Lctr1x:
adds w4, w4, #INTERLEAVE
beq .Lctrout

@@ -342,30 +329,39 @@ AES_ENTRY(aes_ctr_encrypt)

.Lctrloop:
mov v0.16b, v4.16b
encrypt_block v0, w3, x2, x6, w7

adds x8, x8, #1			/* increment BE ctr */
rev x7, x8
ins v4.d[1], x7
bcs .Lctrcarry			/* overflow? */

.Lctrcarrydone:
subs w4, w4, #1
bmi .Lctrhalfblock		/* blocks < 0 means 1/2 block */
ld1 {v3.16b}, [x1], #16
eor v3.16b, v0.16b, v3.16b
st1 {v3.16b}, [x0], #16
beq .Lctrout
.Lctrinc:
adds x5, x5, #1			/* increment BE ctr */
rev x7, x5
ins v4.d[1], x7
bcc .Lctrloop			/* no overflow? */
bne .Lctrloop

.Lctrout:
st1 {v4.16b}, [x5]		/* return next CTR value */
FRAME_POP
ret

.Lctrhalfblock:
ld1 {v3.8b}, [x1]
eor v3.8b, v0.8b, v3.8b
st1 {v3.8b}, [x0]
FRAME_POP
ret

.Lctrcarry:
umov x7, v4.d[0]		/* load upper word of ctr  */
rev x7, x7			/* ... to handle the carry */
add x7, x7, #1
rev x7, x7
ins v4.d[0], x7
b .Lctrloop
.Lctrhalfblock:
ld1 {v3.8b}, [x1]
eor v3.8b, v0.8b, v3.8b
st1 {v3.8b}, [x0]
.Lctrout:
FRAME_POP
ret
b .Lctrcarrydone
AES_ENDPROC(aes_ctr_encrypt)
.ltorg
@@ -11,6 +11,7 @@

* for more details.
*/

#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/init.h>

@@ -209,7 +210,12 @@ static struct notifier_block init_cpu_capacity_notifier = {

static int __init register_cpufreq_notifier(void)
{
if (cap_parsing_failed)
/*
 * on ACPI-based systems we need to use the default cpu capacity
 * until we have the necessary code to parse the cpu capacity, so
 * skip registering cpufreq notifier.
 */
if (!acpi_disabled || cap_parsing_failed)
return -EINVAL;

if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL)) {
@@ -6,7 +6,7 @@

#endif

#include <linux/compiler.h>
#include <asm/types.h>		/* for BITS_PER_LONG/SHIFT_PER_LONG */
#include <asm/types.h>
#include <asm/byteorder.h>
#include <asm/barrier.h>
#include <linux/atomic.h>

@@ -17,6 +17,12 @@

* to include/asm-i386/bitops.h or kerneldoc
*/

#if __BITS_PER_LONG == 64
#define SHIFT_PER_LONG 6
#else
#define SHIFT_PER_LONG 5
#endif

#define CHOP_SHIFTCOUNT(x) (((unsigned long) (x)) & (BITS_PER_LONG - 1))

@@ -3,10 +3,8 @@

#if defined(__LP64__)
#define __BITS_PER_LONG 64
#define SHIFT_PER_LONG 6
#else
#define __BITS_PER_LONG 32
#define SHIFT_PER_LONG 5
#endif

#include <asm-generic/bitsperlong.h>
@@ -1,6 +1,7 @@

#ifndef _PARISC_SWAB_H
#define _PARISC_SWAB_H

#include <asm/bitsperlong.h>
#include <linux/types.h>
#include <linux/compiler.h>

@@ -38,7 +39,7 @@ static inline __attribute_const__ __u32 __arch_swab32(__u32 x)

}
#define __arch_swab32 __arch_swab32

#if BITS_PER_LONG > 32
#if __BITS_PER_LONG > 32
/*
** From "PA-RISC 2.0 Architecture", HP Professional Books.
** See Appendix I page 8 , "Endian Byte Swapping".

@@ -61,6 +62,6 @@ static inline __attribute_const__ __u64 __arch_swab64(__u64 x)

return x;
}
#define __arch_swab64 __arch_swab64
#endif /* BITS_PER_LONG > 32 */
#endif /* __BITS_PER_LONG > 32 */

#endif /* _PARISC_SWAB_H */
@@ -164,7 +164,6 @@ config PPC

select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE
select HAVE_ARCH_HARDENED_USERCOPY
select HAVE_KERNEL_GZIP
select HAVE_CC_STACKPROTECTOR

config GENERIC_CSUM
def_bool CPU_LITTLE_ENDIAN

@@ -484,6 +483,7 @@ config RELOCATABLE

bool "Build a relocatable kernel"
depends on (PPC64 && !COMPILE_TEST) || (FLATMEM && (44x || FSL_BOOKE))
select NONSTATIC_KERNEL
select MODULE_REL_CRCS if MODVERSIONS
help
This builds a kernel image that is capable of running at the
location the kernel is loaded at. For ppc32, there is no any
@@ -23,7 +23,9 @@ static __always_inline bool cpu_has_feature(unsigned long feature)

{
int i;

#ifndef __clang__ /* clang can't cope with this */
BUILD_BUG_ON(!__builtin_constant_p(feature));
#endif

#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG
if (!static_key_initialized) {

@@ -160,7 +160,9 @@ static __always_inline bool mmu_has_feature(unsigned long feature)

{
int i;

#ifndef __clang__ /* clang can't cope with this */
BUILD_BUG_ON(!__builtin_constant_p(feature));
#endif

#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG
if (!static_key_initialized) {
@@ -90,9 +90,5 @@ static inline int module_finalize_ftrace(struct module *mod, const Elf_Shdr *sec

}
#endif

#if defined(CONFIG_MODVERSIONS) && defined(CONFIG_PPC64)
#define ARCH_RELOCATES_KCRCTAB
#define reloc_start PHYSICAL_START
#endif
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_MODULE_H */
@@ -649,9 +649,10 @@

#define SRR1_ISI_N_OR_G		0x10000000 /* ISI: Access is no-exec or G */
#define SRR1_ISI_PROT		0x08000000 /* ISI: Other protection fault */
#define SRR1_WAKEMASK		0x00380000 /* reason for wakeup */
#define SRR1_WAKEMASK_P8	0x003c0000 /* reason for wakeup on POWER8 */
#define SRR1_WAKEMASK_P8	0x003c0000 /* reason for wakeup on POWER8 and 9 */
#define SRR1_WAKESYSERR		0x00300000 /* System error */
#define SRR1_WAKEEE		0x00200000 /* External interrupt */
#define SRR1_WAKEHVI		0x00240000 /* Hypervisor Virtualization Interrupt (P9) */
#define SRR1_WAKEMT		0x00280000 /* mtctrl */
#define SRR1_WAKEHMI		0x00280000 /* Hypervisor maintenance */
#define SRR1_WAKEDEC		0x00180000 /* Decrementer interrupt */
@@ -1,40 +0,0 @@

/*
 * GCC stack protector support.
 *
 * Stack protector works by putting predefined pattern at the start of
 * the stack frame and verifying that it hasn't been overwritten when
 * returning from the function. The pattern is called stack canary
 * and gcc expects it to be defined by a global variable called
 * "__stack_chk_guard" on PPC. This unfortunately means that on SMP
 * we cannot have a different canary value per task.
 */

#ifndef _ASM_STACKPROTECTOR_H
#define _ASM_STACKPROTECTOR_H

#include <linux/random.h>
#include <linux/version.h>
#include <asm/reg.h>

extern unsigned long __stack_chk_guard;

/*
 * Initialize the stackprotector canary value.
 *
 * NOTE: this must only be called from functions that never return,
 * and it must always be inlined.
 */
static __always_inline void boot_init_stack_canary(void)
{
unsigned long canary;

/* Try to get a semi random initial value. */
get_random_bytes(&canary, sizeof(canary));
canary ^= mftb();
canary ^= LINUX_VERSION_CODE;

current->stack_canary = canary;
__stack_chk_guard = current->stack_canary;
}

#endif	/* _ASM_STACKPROTECTOR_H */
@@ -44,6 +44,7 @@ static inline int icp_hv_init(void) { return -ENODEV; }

#ifdef CONFIG_PPC_POWERNV
extern int icp_opal_init(void);
extern void icp_opal_flush_interrupt(void);
#else
static inline int icp_opal_init(void) { return -ENODEV; }
#endif
@@ -19,10 +19,6 @@ CFLAGS_init.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)

CFLAGS_btext.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
CFLAGS_prom.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)

# -fstack-protector triggers protection checks in this code,
# but it is being used too early to link to meaningful stack_chk logic.
CFLAGS_prom_init.o += $(call cc-option, -fno-stack-protector)

ifdef CONFIG_FUNCTION_TRACER
# Do not trace early boot code
CFLAGS_REMOVE_cputable.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)
@@ -91,9 +91,6 @@ int main(void)

DEFINE(TI_livepatch_sp, offsetof(struct thread_info, livepatch_sp));
#endif

#ifdef CONFIG_CC_STACKPROTECTOR
DEFINE(TSK_STACK_CANARY, offsetof(struct task_struct, stack_canary));
#endif
DEFINE(KSP, offsetof(struct thread_struct, ksp));
DEFINE(PT_REGS, offsetof(struct thread_struct, regs));
#ifdef CONFIG_BOOKE
@@ -545,7 +545,7 @@ static void *eeh_pe_detach_dev(void *data, void *userdata)

static void *__eeh_clear_pe_frozen_state(void *data, void *flag)
{
struct eeh_pe *pe = (struct eeh_pe *)data;
bool *clear_sw_state = flag;
bool clear_sw_state = *(bool *)flag;
int i, rc = 1;

for (i = 0; rc && i < 3; i++)
@@ -674,11 +674,7 @@ BEGIN_FTR_SECTION

mtspr SPRN_SPEFSCR,r0		/* restore SPEFSCR reg */
END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SPE */
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
lwz r0,TSK_STACK_CANARY(r2)
lis r4,__stack_chk_guard@ha
stw r0,__stack_chk_guard@l(r4)
#endif

lwz r0,_CCR(r1)
mtcrf 0xFF,r0
/* r3-r12 are destroyed -- Cort */
@@ -286,14 +286,6 @@ static void dedotify_versions(struct modversion_info *vers,

for (end = (void *)vers + size; vers < end; vers++)
if (vers->name[0] == '.') {
memmove(vers->name, vers->name+1, strlen(vers->name));
#ifdef ARCH_RELOCATES_KCRCTAB
/* The TOC symbol has no CRC computed. To avoid CRC
 * check failing, we must force it to the expected
 * value (see CRC check in module.c).
 */
if (!strcmp(vers->name, "TOC."))
vers->crc = -(unsigned long)reloc_start;
#endif
}
}
@@ -64,12 +64,6 @@

#include <linux/kprobes.h>
#include <linux/kdebug.h>

#ifdef CONFIG_CC_STACKPROTECTOR
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif

/* Transactional Memory debug */
#ifdef TM_DEBUG_SW
#define TM_DEBUG(x...) printk(KERN_INFO x)
@@ -2834,6 +2834,9 @@ static void __init prom_find_boot_cpu(void)

cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu);

if (!PHANDLE_VALID(cpu_pkg))
return;

prom_getprop(cpu_pkg, "reg", &rval, sizeof(rval));
prom.cpu = be32_to_cpu(rval);
@@ -253,8 +253,11 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,

if (unlikely(debugger_fault_handler(regs)))
goto bail;

/* On a kernel SLB miss we can only check for a valid exception entry */
if (!user_mode(regs) && (address >= TASK_SIZE)) {
/*
 * The kernel should never take an execute fault nor should it
 * take a page fault to a kernel address.
 */
if (!user_mode(regs) && (is_exec || (address >= TASK_SIZE))) {
rc = SIGSEGV;
goto bail;
}

@@ -390,20 +393,6 @@ good_area:

#endif /* CONFIG_8xx */

if (is_exec) {
/*
 * An execution fault + no execute ?
 *
 * On CPUs that don't have CPU_FTR_COHERENT_ICACHE we
 * deliberately create NX mappings, and use the fault to do the
 * cache flush. This is usually handled in hash_page_do_lazy_icache()
 * but we could end up here if that races with a concurrent PTE
 * update. In that case we need to fall through here to the VMA
 * check below.
 */
if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE) &&
(regs->msr & SRR1_ISI_N_OR_G))
goto bad_area;

/*
 * Allow execution from readable areas if the MMU does not
 * provide separate controls over reading and executing.
@@ -65,7 +65,7 @@ int radix__map_kernel_page(unsigned long ea, unsigned long pa,

if (!pmdp)
return -ENOMEM;
if (map_page_size == PMD_SIZE) {
ptep = (pte_t *)pudp;
ptep = pmdp_ptep(pmdp);
goto set_the_pte;
}
ptep = pte_alloc_kernel(pmdp, ea);

@@ -90,7 +90,7 @@ int radix__map_kernel_page(unsigned long ea, unsigned long pa,

}
pmdp = pmd_offset(pudp, ea);
if (map_page_size == PMD_SIZE) {
ptep = (pte_t *)pudp;
ptep = pmdp_ptep(pmdp);
goto set_the_pte;
}
if (!pmd_present(*pmdp)) {
@@ -50,9 +50,7 @@ static inline void _tlbiel_pid(unsigned long pid, unsigned long ric)

for (set = 0; set < POWER9_TLB_SETS_RADIX ; set++) {
__tlbiel_pid(pid, set, ric);
}
if (cpu_has_feature(CPU_FTR_POWER9_DD1))
asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
return;
asm volatile(PPC_INVALIDATE_ERAT "; isync" : : :"memory");
}

static inline void _tlbie_pid(unsigned long pid, unsigned long ric)

@@ -85,8 +83,6 @@ static inline void _tlbiel_va(unsigned long va, unsigned long pid,

asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
: : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory");
asm volatile("ptesync": : :"memory");
if (cpu_has_feature(CPU_FTR_POWER9_DD1))
asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
}

static inline void _tlbie_va(unsigned long va, unsigned long pid,
@@ -155,8 +155,10 @@ static void pnv_smp_cpu_kill_self(void)

wmask = SRR1_WAKEMASK_P8;

idle_states = pnv_get_supported_cpuidle_states();

/* We don't want to take decrementer interrupts while we are offline,
 * so clear LPCR:PECE1. We keep PECE2 enabled.
 * so clear LPCR:PECE1. We keep PECE2 (and LPCR_PECE_HVEE on P9)
 * enabled as to let IPIs in.
 */
mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1);

@@ -206,8 +208,12 @@ static void pnv_smp_cpu_kill_self(void)

* contains 0.
*/
if (((srr1 & wmask) == SRR1_WAKEEE) ||
((srr1 & wmask) == SRR1_WAKEHVI) ||
(local_paca->irq_happened & PACA_IRQ_EE)) {
icp_native_flush_interrupt();
if (cpu_has_feature(CPU_FTR_ARCH_300))
icp_opal_flush_interrupt();
else
icp_native_flush_interrupt();
} else if ((srr1 & wmask) == SRR1_WAKEHDBELL) {
unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
asm volatile(PPC_MSGCLR(%0) : : "r" (msg));

@@ -221,6 +227,8 @@ static void pnv_smp_cpu_kill_self(void)

if (srr1 && !generic_check_cpu_restart(cpu))
DBG("CPU%d Unexpected exit while offline !\n", cpu);
}

/* Re-enable decrementer interrupts */
mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_PECE1);
DBG("CPU%d coming online...\n", cpu);
}
@@ -120,18 +120,49 @@ static void icp_opal_cause_ipi(int cpu, unsigned long data)

{
int hw_cpu = get_hard_smp_processor_id(cpu);

kvmppc_set_host_ipi(cpu, 1);
opal_int_set_mfrr(hw_cpu, IPI_PRIORITY);
}

static irqreturn_t icp_opal_ipi_action(int irq, void *dev_id)
{
int hw_cpu = hard_smp_processor_id();
int cpu = smp_processor_id();

opal_int_set_mfrr(hw_cpu, 0xff);
kvmppc_set_host_ipi(cpu, 0);
opal_int_set_mfrr(get_hard_smp_processor_id(cpu), 0xff);

return smp_ipi_demux();
}

/*
 * Called when an interrupt is received on an off-line CPU to
 * clear the interrupt, so that the CPU can go back to nap mode.
 */
void icp_opal_flush_interrupt(void)
{
unsigned int xirr;
unsigned int vec;

do {
xirr = icp_opal_get_xirr();
vec = xirr & 0x00ffffff;
if (vec == XICS_IRQ_SPURIOUS)
break;
if (vec == XICS_IPI) {
/* Clear pending IPI */
int cpu = smp_processor_id();
kvmppc_set_host_ipi(cpu, 0);
opal_int_set_mfrr(get_hard_smp_processor_id(cpu), 0xff);
} else {
pr_err("XICS: hw interrupt 0x%x to offline cpu, "
"disabling\n", vec);
xics_mask_unknown_vec(vec);
}

/* EOI the interrupt */
} while (opal_int_eoi(xirr) > 0);
}

#endif /* CONFIG_SMP */

static const struct icp_ops icp_opal_ops = {
@@ -35,15 +35,15 @@ void __tsb_context_switch(unsigned long pgd_pa,

static inline void tsb_context_switch(struct mm_struct *mm)
{
__tsb_context_switch(__pa(mm->pgd),
&mm->context.tsb_block[0],
&mm->context.tsb_block[MM_TSB_BASE],
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
(mm->context.tsb_block[1].tsb ?
&mm->context.tsb_block[1] :
(mm->context.tsb_block[MM_TSB_HUGE].tsb ?
&mm->context.tsb_block[MM_TSB_HUGE] :
NULL)
#else
NULL
#endif
, __pa(&mm->context.tsb_descr[0]));
, __pa(&mm->context.tsb_descr[MM_TSB_BASE]));
}

void tsb_grow(struct mm_struct *mm,
@@ -1021,7 +1021,7 @@ static void __init alloc_one_queue(unsigned long *pa_ptr, unsigned long qmask)

unsigned long order = get_order(size);
unsigned long p;

p = __get_free_pages(GFP_KERNEL, order);
p = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
if (!p) {
prom_printf("SUN4V: Error, cannot allocate queue.\n");
prom_halt();
@@ -43,8 +43,8 @@ static const char poweroff_msg[32] __attribute__((aligned(32))) =

"Linux powering off";
static const char rebooting_msg[32] __attribute__((aligned(32))) =
"Linux rebooting";
static const char panicing_msg[32] __attribute__((aligned(32))) =
"Linux panicing";
static const char panicking_msg[32] __attribute__((aligned(32))) =
"Linux panicking";

static int sstate_reboot_call(struct notifier_block *np, unsigned long type, void *_unused)
{

@@ -76,7 +76,7 @@ static struct notifier_block sstate_reboot_notifier = {

static int sstate_panic_event(struct notifier_block *n, unsigned long event, void *ptr)
{
do_set_sstate(HV_SOFT_STATE_TRANSITION, panicing_msg);
do_set_sstate(HV_SOFT_STATE_TRANSITION, panicking_msg);

return NOTIFY_DONE;
}
@@ -2051,6 +2051,73 @@ void sun4v_resum_overflow(struct pt_regs *regs)

atomic_inc(&sun4v_resum_oflow_cnt);
}

/* Given a set of registers, get the virtual address that was being accessed
 * by the faulting instructions at tpc.
 */
static unsigned long sun4v_get_vaddr(struct pt_regs *regs)
{
unsigned int insn;

if (!copy_from_user(&insn, (void __user *)regs->tpc, 4)) {
return compute_effective_address(regs, insn,
(insn >> 25) & 0x1f);
}
return 0;
}

/* Attempt to handle non-resumable errors generated from userspace.
 * Returns true if the signal was handled, false otherwise.
 */
bool sun4v_nonresum_error_user_handled(struct pt_regs *regs,
struct sun4v_error_entry *ent) {

unsigned int attrs = ent->err_attrs;

if (attrs & SUN4V_ERR_ATTRS_MEMORY) {
unsigned long addr = ent->err_raddr;
siginfo_t info;

if (addr == ~(u64)0) {
/* This seems highly unlikely to ever occur */
pr_emerg("SUN4V NON-RECOVERABLE ERROR: Memory error detected in unknown location!\n");
} else {
unsigned long page_cnt = DIV_ROUND_UP(ent->err_size,
PAGE_SIZE);

/* Break the unfortunate news. */
pr_emerg("SUN4V NON-RECOVERABLE ERROR: Memory failed at %016lX\n",
addr);
pr_emerg("SUN4V NON-RECOVERABLE ERROR: Claiming %lu ages.\n",
page_cnt);

while (page_cnt-- > 0) {
if (pfn_valid(addr >> PAGE_SHIFT))
get_page(pfn_to_page(addr >> PAGE_SHIFT));
addr += PAGE_SIZE;
}
}
info.si_signo = SIGKILL;
info.si_errno = 0;
info.si_trapno = 0;
force_sig_info(info.si_signo, &info, current);

return true;
}
if (attrs & SUN4V_ERR_ATTRS_PIO) {
siginfo_t info;

info.si_signo = SIGBUS;
info.si_code = BUS_ADRERR;
info.si_addr = (void __user *)sun4v_get_vaddr(regs);
force_sig_info(info.si_signo, &info, current);

return true;
}

/* Default to doing nothing */
return false;
}

/* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
 * Log the event, clear the first word of the entry, and die.
 */

@@ -2075,6 +2142,12 @@ void sun4v_nonresum_error(struct pt_regs *regs, unsigned long offset)

put_cpu();

if (!(regs->tstate & TSTATE_PRIV) &&
sun4v_nonresum_error_user_handled(regs, &local_copy)) {
/* DON'T PANIC: This userspace error was handled. */
return;
}

#ifdef CONFIG_PCI
/* Check for the special PCI poke sequence. */
if (pci_poke_in_progress && pci_poke_cpu == cpu) {
@ -1085,9 +1085,9 @@ static void aesni_free_simds(void)
                aesni_simd_skciphers[i]; i++)
        simd_skcipher_free(aesni_simd_skciphers[i]);

    for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers2) &&
                aesni_simd_skciphers2[i].simd; i++)
        simd_skcipher_free(aesni_simd_skciphers2[i].simd);
    for (i = 0; i < ARRAY_SIZE(aesni_simd_skciphers2); i++)
        if (aesni_simd_skciphers2[i].simd)
            simd_skcipher_free(aesni_simd_skciphers2[i].simd);
}

static int __init aesni_init(void)

@ -1168,7 +1168,7 @@ static int __init aesni_init(void)
        simd = simd_skcipher_create_compat(algname, drvname, basename);
        err = PTR_ERR(simd);
        if (IS_ERR(simd))
            goto unregister_simds;
            continue;

        aesni_simd_skciphers2[i].simd = simd;
    }
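The teardown fix above turns an early-terminating loop into a full scan with a per-slot NULL check, so entries after a hole are no longer leaked. A standalone sketch of the pattern, with illustrative names:

    /* Userspace sketch of scanning a sparse table instead of stopping
     * at the first empty slot. */
    #include <stdlib.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    struct entry { void *simd; };

    static struct entry table[4];

    static void free_all(void)
    {
        size_t i;

        /* A partially filled table may contain holes; check each slot. */
        for (i = 0; i < ARRAY_SIZE(table); i++)
            if (table[i].simd)
                free(table[i].simd);
    }

    int main(void)
    {
        table[0].simd = malloc(16);
        table[2].simd = malloc(16); /* hole at index 1 */
        free_all();
        return 0;
    }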
@ -161,7 +161,13 @@ static u64 rapl_timer_ms;

static inline struct rapl_pmu *cpu_to_rapl_pmu(unsigned int cpu)
{
    return rapl_pmus->pmus[topology_logical_package_id(cpu)];
    unsigned int pkgid = topology_logical_package_id(cpu);

    /*
     * The unsigned check also catches the '-1' return value for non
     * existent mappings in the topology map.
     */
    return pkgid < rapl_pmus->maxpkg ? rapl_pmus->pmus[pkgid] : NULL;
}

static inline u64 rapl_read_counter(struct perf_event *event)

@ -402,6 +408,8 @@ static int rapl_pmu_event_init(struct perf_event *event)

    /* must be done before validate_group */
    pmu = cpu_to_rapl_pmu(event->cpu);
    if (!pmu)
        return -EINVAL;
    event->cpu = pmu->cpu;
    event->pmu_private = pmu;
    event->hw.event_base = msr;

@ -585,6 +593,20 @@ static int rapl_cpu_online(unsigned int cpu)
    struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);
    int target;

    if (!pmu) {
        pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
        if (!pmu)
            return -ENOMEM;

        raw_spin_lock_init(&pmu->lock);
        INIT_LIST_HEAD(&pmu->active_list);
        pmu->pmu = &rapl_pmus->pmu;
        pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
        rapl_hrtimer_init(pmu);

        rapl_pmus->pmus[topology_logical_package_id(cpu)] = pmu;
    }

    /*
     * Check if there is an online cpu in the package which collects rapl
     * events already.

@ -598,27 +620,6 @@ static int rapl_cpu_online(unsigned int cpu)
    return 0;
}

static int rapl_cpu_prepare(unsigned int cpu)
{
    struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu);

    if (pmu)
        return 0;

    pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
    if (!pmu)
        return -ENOMEM;

    raw_spin_lock_init(&pmu->lock);
    INIT_LIST_HEAD(&pmu->active_list);
    pmu->pmu = &rapl_pmus->pmu;
    pmu->timer_interval = ms_to_ktime(rapl_timer_ms);
    pmu->cpu = -1;
    rapl_hrtimer_init(pmu);
    rapl_pmus->pmus[topology_logical_package_id(cpu)] = pmu;
    return 0;
}

static int rapl_check_hw_unit(bool apply_quirk)
{
    u64 msr_rapl_power_unit_bits;

@ -803,29 +804,21 @@ static int __init rapl_pmu_init(void)
    /*
     * Install callbacks. Core will call them for each online cpu.
     */

    ret = cpuhp_setup_state(CPUHP_PERF_X86_RAPL_PREP, "perf/x86/rapl:prepare",
                            rapl_cpu_prepare, NULL);
    if (ret)
        goto out;

    ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_RAPL_ONLINE,
                            "perf/x86/rapl:online",
                            rapl_cpu_online, rapl_cpu_offline);
    if (ret)
        goto out1;
        goto out;

    ret = perf_pmu_register(&rapl_pmus->pmu, "power", -1);
    if (ret)
        goto out2;
        goto out1;

    rapl_advertise();
    return 0;

out2:
    cpuhp_remove_state(CPUHP_AP_PERF_X86_RAPL_ONLINE);
out1:
    cpuhp_remove_state(CPUHP_PERF_X86_RAPL_PREP);
    cpuhp_remove_state(CPUHP_AP_PERF_X86_RAPL_ONLINE);
out:
    pr_warn("Initialization failed (%d), disabled\n", ret);
    cleanup_rapl_pmus();

@ -836,7 +829,6 @@ module_init(rapl_pmu_init);

static void __exit intel_rapl_exit(void)
{
    cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_RAPL_ONLINE);
    cpuhp_remove_state_nocalls(CPUHP_PERF_X86_RAPL_PREP);
    perf_pmu_unregister(&rapl_pmus->pmu);
    cleanup_rapl_pmus();
}
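Both this change and the uncore one below lean on the same trick: topology_logical_package_id() returns -1 when a CPU has no mapping, and storing that in an unsigned variable turns it into a huge value that a single bounds check rejects. A small sketch, with a made-up table and size:

    /* Userspace sketch of the unsigned bounds check. */
    #include <stdio.h>

    static const char *slot[4] = { "pkg0", "pkg1", NULL, NULL };

    static const char *lookup(int id)
    {
        unsigned int pkgid = (unsigned int)id;

        /* id == -1 wraps to UINT_MAX and fails the check too. */
        return pkgid < 4 ? slot[pkgid] : NULL;
    }

    int main(void)
    {
        printf("%s\n", lookup(0)  ? lookup(0)  : "(null)");
        printf("%s\n", lookup(-1) ? lookup(-1) : "(null)");
        return 0;
    }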
@ -100,7 +100,13 @@ ssize_t uncore_event_show(struct kobject *kobj,

struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
{
    return pmu->boxes[topology_logical_package_id(cpu)];
    unsigned int pkgid = topology_logical_package_id(cpu);

    /*
     * The unsigned check also catches the '-1' return value for non
     * existent mappings in the topology map.
     */
    return pkgid < max_packages ? pmu->boxes[pkgid] : NULL;
}

u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)

@ -764,30 +770,6 @@ static void uncore_pmu_unregister(struct intel_uncore_pmu *pmu)
    pmu->registered = false;
}

static void __uncore_exit_boxes(struct intel_uncore_type *type, int cpu)
{
    struct intel_uncore_pmu *pmu = type->pmus;
    struct intel_uncore_box *box;
    int i, pkg;

    if (pmu) {
        pkg = topology_physical_package_id(cpu);
        for (i = 0; i < type->num_boxes; i++, pmu++) {
            box = pmu->boxes[pkg];
            if (box)
                uncore_box_exit(box);
        }
    }
}

static void uncore_exit_boxes(void *dummy)
{
    struct intel_uncore_type **types;

    for (types = uncore_msr_uncores; *types; types++)
        __uncore_exit_boxes(*types++, smp_processor_id());
}

static void uncore_free_boxes(struct intel_uncore_pmu *pmu)
{
    int pkg;

@ -1058,86 +1040,6 @@ static void uncore_pci_exit(void)
    }
}

static int uncore_cpu_dying(unsigned int cpu)
{
    struct intel_uncore_type *type, **types = uncore_msr_uncores;
    struct intel_uncore_pmu *pmu;
    struct intel_uncore_box *box;
    int i, pkg;

    pkg = topology_logical_package_id(cpu);
    for (; *types; types++) {
        type = *types;
        pmu = type->pmus;
        for (i = 0; i < type->num_boxes; i++, pmu++) {
            box = pmu->boxes[pkg];
            if (box && atomic_dec_return(&box->refcnt) == 0)
                uncore_box_exit(box);
        }
    }
    return 0;
}

static int first_init;

static int uncore_cpu_starting(unsigned int cpu)
{
    struct intel_uncore_type *type, **types = uncore_msr_uncores;
    struct intel_uncore_pmu *pmu;
    struct intel_uncore_box *box;
    int i, pkg, ncpus = 1;

    if (first_init) {
        /*
         * On init we get the number of online cpus in the package
         * and set refcount for all of them.
         */
        ncpus = cpumask_weight(topology_core_cpumask(cpu));
    }

    pkg = topology_logical_package_id(cpu);
    for (; *types; types++) {
        type = *types;
        pmu = type->pmus;
        for (i = 0; i < type->num_boxes; i++, pmu++) {
            box = pmu->boxes[pkg];
            if (!box)
                continue;
            /* The first cpu on a package activates the box */
            if (atomic_add_return(ncpus, &box->refcnt) == ncpus)
                uncore_box_init(box);
        }
    }

    return 0;
}

static int uncore_cpu_prepare(unsigned int cpu)
{
    struct intel_uncore_type *type, **types = uncore_msr_uncores;
    struct intel_uncore_pmu *pmu;
    struct intel_uncore_box *box;
    int i, pkg;

    pkg = topology_logical_package_id(cpu);
    for (; *types; types++) {
        type = *types;
        pmu = type->pmus;
        for (i = 0; i < type->num_boxes; i++, pmu++) {
            if (pmu->boxes[pkg])
                continue;
            /* First cpu of a package allocates the box */
            box = uncore_alloc_box(type, cpu_to_node(cpu));
            if (!box)
                return -ENOMEM;
            box->pmu = pmu;
            box->pkgid = pkg;
            pmu->boxes[pkg] = box;
        }
    }
    return 0;
}

static void uncore_change_type_ctx(struct intel_uncore_type *type, int old_cpu,
                                   int new_cpu)
{

@ -1177,12 +1079,14 @@ static void uncore_change_context(struct intel_uncore_type **uncores,

static int uncore_event_cpu_offline(unsigned int cpu)
{
    int target;
    struct intel_uncore_type *type, **types = uncore_msr_uncores;
    struct intel_uncore_pmu *pmu;
    struct intel_uncore_box *box;
    int i, pkg, target;

    /* Check if exiting cpu is used for collecting uncore events */
    if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
        return 0;
        goto unref;
    /* Find a new cpu to collect uncore events */
    target = cpumask_any_but(topology_core_cpumask(cpu), cpu);

@ -1194,12 +1098,82 @@ static int uncore_event_cpu_offline(unsigned int cpu)

    uncore_change_context(uncore_msr_uncores, cpu, target);
    uncore_change_context(uncore_pci_uncores, cpu, target);

unref:
    /* Clear the references */
    pkg = topology_logical_package_id(cpu);
    for (; *types; types++) {
        type = *types;
        pmu = type->pmus;
        for (i = 0; i < type->num_boxes; i++, pmu++) {
            box = pmu->boxes[pkg];
            if (box && atomic_dec_return(&box->refcnt) == 0)
                uncore_box_exit(box);
        }
    }
    return 0;
}

static int allocate_boxes(struct intel_uncore_type **types,
                          unsigned int pkg, unsigned int cpu)
{
    struct intel_uncore_box *box, *tmp;
    struct intel_uncore_type *type;
    struct intel_uncore_pmu *pmu;
    LIST_HEAD(allocated);
    int i;

    /* Try to allocate all required boxes */
    for (; *types; types++) {
        type = *types;
        pmu = type->pmus;
        for (i = 0; i < type->num_boxes; i++, pmu++) {
            if (pmu->boxes[pkg])
                continue;
            box = uncore_alloc_box(type, cpu_to_node(cpu));
            if (!box)
                goto cleanup;
            box->pmu = pmu;
            box->pkgid = pkg;
            list_add(&box->active_list, &allocated);
        }
    }
    /* Install them in the pmus */
    list_for_each_entry_safe(box, tmp, &allocated, active_list) {
        list_del_init(&box->active_list);
        box->pmu->boxes[pkg] = box;
    }
    return 0;

cleanup:
    list_for_each_entry_safe(box, tmp, &allocated, active_list) {
        list_del_init(&box->active_list);
        kfree(box);
    }
    return -ENOMEM;
}

static int uncore_event_cpu_online(unsigned int cpu)
{
    int target;
    struct intel_uncore_type *type, **types = uncore_msr_uncores;
    struct intel_uncore_pmu *pmu;
    struct intel_uncore_box *box;
    int i, ret, pkg, target;

    pkg = topology_logical_package_id(cpu);
    ret = allocate_boxes(types, pkg, cpu);
    if (ret)
        return ret;

    for (; *types; types++) {
        type = *types;
        pmu = type->pmus;
        for (i = 0; i < type->num_boxes; i++, pmu++) {
            box = pmu->boxes[pkg];
            if (box && atomic_inc_return(&box->refcnt) == 1)
                uncore_box_init(box);
        }
    }

    /*
     * Check if there is an online cpu in the package

@ -1389,38 +1363,16 @@ static int __init intel_uncore_init(void)
    if (cret && pret)
        return -ENODEV;

    /*
     * Install callbacks. Core will call them for each online cpu.
     *
     * The first online cpu of each package allocates and takes
     * the refcounts for all other online cpus in that package.
     * If msrs are not enabled no allocation is required and
     * uncore_cpu_prepare() is not called for each online cpu.
     */
    if (!cret) {
        ret = cpuhp_setup_state(CPUHP_PERF_X86_UNCORE_PREP,
                                "perf/x86/intel/uncore:prepare",
                                uncore_cpu_prepare, NULL);
        if (ret)
            goto err;
    } else {
        cpuhp_setup_state_nocalls(CPUHP_PERF_X86_UNCORE_PREP,
                                  "perf/x86/intel/uncore:prepare",
                                  uncore_cpu_prepare, NULL);
    }
    first_init = 1;
    cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_STARTING,
                      "perf/x86/uncore:starting",
                      uncore_cpu_starting, uncore_cpu_dying);
    first_init = 0;
    cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
                      "perf/x86/uncore:online",
                      uncore_event_cpu_online, uncore_event_cpu_offline);
    /* Install hotplug callbacks to setup the targets for each package */
    ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
                            "perf/x86/intel/uncore:online",
                            uncore_event_cpu_online,
                            uncore_event_cpu_offline);
    if (ret)
        goto err;
    return 0;

err:
    /* Undo box->init_box() */
    on_each_cpu_mask(&uncore_cpu_mask, uncore_exit_boxes, NULL, 1);
    uncore_types_exit(uncore_msr_uncores);
    uncore_pci_exit();
    return ret;

@ -1429,9 +1381,7 @@ module_init(intel_uncore_init);

static void __exit intel_uncore_exit(void)
{
    cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_UNCORE_ONLINE);
    cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_UNCORE_STARTING);
    cpuhp_remove_state_nocalls(CPUHP_PERF_X86_UNCORE_PREP);
    cpuhp_remove_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE);
    uncore_types_exit(uncore_msr_uncores);
    uncore_pci_exit();
}
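allocate_boxes() above is an all-or-nothing allocator: everything is staged on a local list and published only after every allocation has succeeded, so a mid-way failure rolls back without ever touching shared state. A userspace sketch of the same two-phase pattern (names and counts are illustrative):

    /* Standalone sketch: stage, then publish or roll back. */
    #include <stdlib.h>

    struct box { struct box *next; };

    static struct box *installed[3];

    static int allocate_all(void)
    {
        struct box *staged = NULL, *b;
        int i;

        /* Phase 1: allocate everything onto a private list. */
        for (i = 0; i < 3; i++) {
            b = malloc(sizeof(*b));
            if (!b)
                goto cleanup;
            b->next = staged;
            staged = b;
        }
        /* Phase 2: all allocations succeeded, publish them. */
        for (i = 0; staged; i++) {
            b = staged;
            staged = b->next;
            installed[i] = b;
        }
        return 0;

    cleanup:
        /* Nothing was published, so only the staging list needs freeing. */
        while (staged) {
            b = staged;
            staged = b->next;
            free(b);
        }
        return -1;
    }

    int main(void)
    {
        return allocate_all();
    }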
@ -140,6 +140,7 @@ extern void __init load_ucode_bsp(void);
extern void load_ucode_ap(void);
void reload_early_microcode(void);
extern bool get_builtin_firmware(struct cpio_data *cd, const char *name);
extern bool initrd_gone;
#else
static inline int __init microcode_init(void) { return 0; };
static inline void __init load_ucode_bsp(void) { }
@ -104,6 +104,7 @@ struct cpuinfo_x86 {
    __u8 x86_phys_bits;
    /* CPUID returned core id bits: */
    __u8 x86_coreid_bits;
    __u8 cu_id;
    /* Max extended CPUID function supported: */
    __u32 extended_cpuid_level;
    /* Maximum supported CPUID level, -1=no CPUID: */
@ -1875,7 +1875,6 @@ static struct irq_chip ioapic_chip __read_mostly = {
    .irq_ack = irq_chip_ack_parent,
    .irq_eoi = ioapic_ack_level,
    .irq_set_affinity = ioapic_set_affinity,
    .irq_retrigger = irq_chip_retrigger_hierarchy,
    .flags = IRQCHIP_SKIP_SET_WAKE,
};

@ -1887,7 +1886,6 @@ static struct irq_chip ioapic_ir_chip __read_mostly = {
    .irq_ack = irq_chip_ack_parent,
    .irq_eoi = ioapic_ir_ack_level,
    .irq_set_affinity = ioapic_set_affinity,
    .irq_retrigger = irq_chip_retrigger_hierarchy,
    .flags = IRQCHIP_SKIP_SET_WAKE,
};

@ -2117,6 +2115,7 @@ static inline void __init check_timer(void)
        if (idx != -1 && irq_trigger(idx))
            unmask_ioapic_irq(irq_get_chip_data(0));
    }
    irq_domain_deactivate_irq(irq_data);
    irq_domain_activate_irq(irq_data);
    if (timer_irq_works()) {
        if (disable_timer_pin_1 > 0)

@ -2138,6 +2137,7 @@ static inline void __init check_timer(void)
         * legacy devices should be connected to IO APIC #0
         */
        replace_pin_at_irq_node(data, node, apic1, pin1, apic2, pin2);
        irq_domain_deactivate_irq(irq_data);
        irq_domain_activate_irq(irq_data);
        legacy_pic->unmask(0);
        if (timer_irq_works()) {
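The check_timer() change reprograms the interrupt by deactivating it before activating it again, since an activation is a no-op while the interrupt is already active. A toy sketch of why the pair is needed (plain C stand-ins, not the irqdomain API):

    /* Illustrative only: activate() models an idempotent activation. */
    #include <stdbool.h>
    #include <stdio.h>

    static bool active;

    static void deactivate(void) { active = false; }

    static void activate(void)
    {
        if (active)
            return; /* already active: nothing is reprogrammed */
        active = true;
        printf("programmed hardware\n");
    }

    int main(void)
    {
        activate();
        /* Forcing a reprogram requires the deactivate/activate pair. */
        deactivate();
        activate();
        return 0;
    }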
@ -309,8 +309,22 @@ static void amd_get_topology(struct cpuinfo_x86 *c)

    /* get information required for multi-node processors */
    if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
        u32 eax, ebx, ecx, edx;

        node_id = cpuid_ecx(0x8000001e) & 7;
        cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);

        node_id = ecx & 0xff;
        smp_num_siblings = ((ebx >> 8) & 0xff) + 1;

        if (c->x86 == 0x15)
            c->cu_id = ebx & 0xff;

        if (c->x86 >= 0x17) {
            c->cpu_core_id = ebx & 0xff;

            if (smp_num_siblings > 1)
                c->x86_max_cores /= smp_num_siblings;
        }

        /*
         * We may have multiple LLCs if L3 caches exist, so check if we
|
||||
c->x86_model_id[0] = '\0'; /* Unset */
|
||||
c->x86_max_cores = 1;
|
||||
c->x86_coreid_bits = 0;
|
||||
c->cu_id = 0xff;
|
||||
#ifdef CONFIG_X86_64
|
||||
c->x86_clflush_size = 64;
|
||||
c->x86_phys_bits = 36;
|
||||
|
@ -1373,20 +1373,15 @@ static unsigned long mce_adjust_timer_default(unsigned long interval)

static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default;

static void __restart_timer(struct timer_list *t, unsigned long interval)
static void __start_timer(struct timer_list *t, unsigned long interval)
{
    unsigned long when = jiffies + interval;
    unsigned long flags;

    local_irq_save(flags);

    if (timer_pending(t)) {
        if (time_before(when, t->expires))
            mod_timer(t, when);
    } else {
        t->expires = round_jiffies(when);
        add_timer_on(t, smp_processor_id());
    }
    if (!timer_pending(t) || time_before(when, t->expires))
        mod_timer(t, round_jiffies(when));

    local_irq_restore(flags);
}

@ -1421,7 +1416,7 @@ static void mce_timer_fn(unsigned long data)

done:
    __this_cpu_write(mce_next_interval, iv);
    __restart_timer(t, iv);
    __start_timer(t, iv);
}

/*

@ -1432,7 +1427,7 @@ void mce_timer_kick(unsigned long interval)
    struct timer_list *t = this_cpu_ptr(&mce_timer);
    unsigned long iv = __this_cpu_read(mce_next_interval);

    __restart_timer(t, interval);
    __start_timer(t, interval);

    if (interval < iv)
        __this_cpu_write(mce_next_interval, interval);

@ -1779,17 +1774,15 @@ static void __mcheck_cpu_clear_vendor(struct cpuinfo_x86 *c)
    }
}

static void mce_start_timer(unsigned int cpu, struct timer_list *t)
static void mce_start_timer(struct timer_list *t)
{
    unsigned long iv = check_interval * HZ;

    if (mca_cfg.ignore_ce || !iv)
        return;

    per_cpu(mce_next_interval, cpu) = iv;

    t->expires = round_jiffies(jiffies + iv);
    add_timer_on(t, cpu);
    this_cpu_write(mce_next_interval, iv);
    __start_timer(t, iv);
}

static void __mcheck_cpu_setup_timer(void)

@ -1806,7 +1799,7 @@ static void __mcheck_cpu_init_timer(void)
    unsigned int cpu = smp_processor_id();

    setup_pinned_timer(t, mce_timer_fn, cpu);
    mce_start_timer(cpu, t);
    mce_start_timer(t);
}

/* Handle unconfigured int18 (should never happen) */

@ -2566,7 +2559,7 @@ static int mce_cpu_dead(unsigned int cpu)

static int mce_cpu_online(unsigned int cpu)
{
    struct timer_list *t = &per_cpu(mce_timer, cpu);
    struct timer_list *t = this_cpu_ptr(&mce_timer);
    int ret;

    mce_device_create(cpu);

@ -2577,13 +2570,13 @@ static int mce_cpu_online(unsigned int cpu)
        return ret;
    }
    mce_reenable_cpu();
    mce_start_timer(cpu, t);
    mce_start_timer(t);
    return 0;
}

static int mce_cpu_pre_down(unsigned int cpu)
{
    struct timer_list *t = &per_cpu(mce_timer, cpu);
    struct timer_list *t = this_cpu_ptr(&mce_timer);

    mce_disable_cpu();
    del_timer_sync(t);
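__start_timer() above collapses the old pending/not-pending branches into one rule: arm the timer if it is idle, otherwise only pull the deadline in, never push it out. A standalone sketch of that rule, with plain variables standing in for timer_pending()/mod_timer():

    /* Illustrative only: models the "only move earlier" coalescing rule. */
    #include <stdbool.h>
    #include <stdio.h>

    static bool pending;
    static unsigned long expires;

    static void start_timer(unsigned long now, unsigned long interval)
    {
        unsigned long when = now + interval;

        /* Arm if idle, or re-arm only when the new deadline is earlier. */
        if (!pending || when < expires) {
            expires = when;
            pending = true;
        }
    }

    int main(void)
    {
        start_timer(0, 100); /* arms for t=100 */
        start_timer(0, 300); /* ignored: later than t=100 */
        start_timer(0, 50);  /* pulled in to t=50 */
        printf("expires at t=%lu\n", expires);
        return 0;
    }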
@ -384,8 +384,9 @@ void load_ucode_amd_ap(unsigned int family)
reget:
    if (!get_builtin_microcode(&cp, family)) {
#ifdef CONFIG_BLK_DEV_INITRD
        cp = find_cpio_data(ucode_path, (void *)initrd_start,
                            initrd_end - initrd_start, NULL);
        if (!initrd_gone)
            cp = find_cpio_data(ucode_path, (void *)initrd_start,
                                initrd_end - initrd_start, NULL);
#endif
        if (!(cp.data && cp.size)) {
            /*
@ -46,6 +46,8 @@
static struct microcode_ops *microcode_ops;
static bool dis_ucode_ldr = true;

bool initrd_gone;

LIST_HEAD(microcode_cache);

/*

@ -190,21 +192,24 @@ void load_ucode_ap(void)

static int __init save_microcode_in_initrd(void)
{
    struct cpuinfo_x86 *c = &boot_cpu_data;
    int ret = -EINVAL;

    switch (c->x86_vendor) {
    case X86_VENDOR_INTEL:
        if (c->x86 >= 6)
            return save_microcode_in_initrd_intel();
            ret = save_microcode_in_initrd_intel();
        break;
    case X86_VENDOR_AMD:
        if (c->x86 >= 0x10)
            return save_microcode_in_initrd_amd(c->x86);
            ret = save_microcode_in_initrd_amd(c->x86);
        break;
    default:
        break;
    }

    return -EINVAL;
    initrd_gone = true;

    return ret;
}

struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa)

@ -247,9 +252,16 @@ struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa)
     * has the virtual address of the beginning of the initrd. It also
     * possibly relocates the ramdisk. In either case, initrd_start contains
     * the updated address so use that instead.
     *
     * initrd_gone is for the hotplug case where we've thrown out initrd
     * already.
     */
    if (!use_pa && initrd_start)
        start = initrd_start;
    if (!use_pa) {
        if (initrd_gone)
            return (struct cpio_data){ NULL, 0, "" };
        if (initrd_start)
            start = initrd_start;
    }

    return find_cpio_data(path, (void *)start, size, NULL);
#else /* !CONFIG_BLK_DEV_INITRD */
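initrd_gone is a freed-region guard: the last boot-time consumer of the initrd sets it, and every later lookup bails out before touching memory that has been handed back to the allocator. A userspace sketch of the idea, with illustrative names:

    /* Standalone sketch of guarding access to a released region. */
    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    static char *region;
    static bool region_gone;

    static const char *find_in_region(void)
    {
        if (region_gone)
            return NULL; /* backing memory was already released */
        return region;
    }

    int main(void)
    {
        region = malloc(64);
        free(region);       /* e.g. initrd handed back to the allocator */
        region_gone = true;

        if (!find_in_region())
            puts("lookup safely refused after free");
        return 0;
    }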
@ -41,7 +41,7 @@

static const char ucode_path[] = "kernel/x86/microcode/GenuineIntel.bin";

/* Current microcode patch used in early patching */
/* Current microcode patch used in early patching on the APs. */
struct microcode_intel *intel_ucode_patch;

static inline bool cpu_signatures_match(unsigned int s1, unsigned int p1,

@ -607,12 +607,6 @@ int __init save_microcode_in_initrd_intel(void)
    struct ucode_cpu_info uci;
    struct cpio_data cp;

    /*
     * AP loading didn't find any microcode patch, no need to save anything.
     */
    if (!intel_ucode_patch || IS_ERR(intel_ucode_patch))
        return 0;

    if (!load_builtin_intel_microcode(&cp))
        cp = find_microcode_in_initrd(ucode_path, false);

@ -628,7 +622,6 @@ int __init save_microcode_in_initrd_intel(void)
    return 0;
}

/*
 * @res_patch, output: a pointer to the patch we found.
 */
@ -9,6 +9,7 @@
#include <asm/fpu/regset.h>
#include <asm/fpu/signal.h>
#include <asm/fpu/types.h>
#include <asm/fpu/xstate.h>
#include <asm/traps.h>

#include <linux/hardirq.h>

@ -183,7 +184,8 @@ void fpstate_init(union fpregs_state *state)
     * it will #GP. Make sure it is replaced after the memset().
     */
    if (static_cpu_has(X86_FEATURE_XSAVES))
        state->xsave.header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT;
        state->xsave.header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT |
                                       xfeatures_mask;

    if (static_cpu_has(X86_FEATURE_FXSR))
        fpstate_init_fxstate(&state->fxsave);
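In the compacted XSAVES layout, bit 63 of xcomp_bv marks the format itself and the low bits must cover the enabled feature set; the fix ORs xfeatures_mask in instead of storing the format bit alone. A small sketch of the bit composition (the feature mask value here is made up):

    #include <stdio.h>
    #include <stdint.h>

    #define XCOMP_BV_COMPACTED_FORMAT (1ULL << 63)

    int main(void)
    {
        uint64_t xfeatures_mask = 0x7; /* e.g. x87 | SSE | AVX enabled */

        uint64_t old_bv = XCOMP_BV_COMPACTED_FORMAT;                  /* before */
        uint64_t new_bv = XCOMP_BV_COMPACTED_FORMAT | xfeatures_mask; /* after  */

        printf("old xcomp_bv: %#018llx\n", (unsigned long long)old_bv);
        printf("new xcomp_bv: %#018llx\n", (unsigned long long)new_bv);
        return 0;
    }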
@ -352,6 +352,7 @@ static int hpet_resume(struct clock_event_device *evt, int timer)
    } else {
        struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);

        irq_domain_deactivate_irq(irq_get_irq_data(hdev->irq));
        irq_domain_activate_irq(irq_get_irq_data(hdev->irq));
        disable_irq(hdev->irq);
        irq_set_affinity(hdev->irq, cpumask_of(hdev->cpu));
@ -433,9 +433,15 @@ static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
        int cpu1 = c->cpu_index, cpu2 = o->cpu_index;

        if (c->phys_proc_id == o->phys_proc_id &&
            per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2) &&
            c->cpu_core_id == o->cpu_core_id)
            return topology_sane(c, o, "smt");
            per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2)) {
            if (c->cpu_core_id == o->cpu_core_id)
                return topology_sane(c, o, "smt");

            if ((c->cu_id != 0xff) &&
                (o->cu_id != 0xff) &&
                (c->cu_id == o->cu_id))
                return topology_sane(c, o, "smt");
        }

    } else if (c->phys_proc_id == o->phys_proc_id &&
               c->cpu_core_id == o->cpu_core_id) {
@ -1356,6 +1356,9 @@ void __init tsc_init(void)
        (unsigned long)cpu_khz / 1000,
        (unsigned long)cpu_khz % 1000);

    /* Sanitize TSC ADJUST before cyc2ns gets initialized */
    tsc_store_and_check_tsc_adjust(true);

    /*
     * Secondary CPUs do not run through tsc_init(), so set up
     * all the scale factors for all CPUs, assuming the same

@ -1386,8 +1389,6 @@ void __init tsc_init(void)

    if (unsynchronized_tsc())
        mark_tsc_unstable("TSCs unsynchronized");
    else
        tsc_store_and_check_tsc_adjust(true);

    check_system_tsc_reliable();
@ -286,13 +286,6 @@ void check_tsc_sync_source(int cpu)
    if (unsynchronized_tsc())
        return;

    if (tsc_clocksource_reliable) {
        if (cpu == (nr_cpu_ids-1) || system_state != SYSTEM_BOOTING)
            pr_info(
            "Skipped synchronization checks as TSC is reliable.\n");
        return;
    }

    /*
     * Set the maximum number of test runs to
     *  1 if the CPU does not provide the TSC_ADJUST MSR

@ -380,14 +373,19 @@ void check_tsc_sync_target(void)
    int cpus = 2;

    /* Also aborts if there is no TSC. */
    if (unsynchronized_tsc() || tsc_clocksource_reliable)
    if (unsynchronized_tsc())
        return;

    /*
     * Store, verify and sanitize the TSC adjust register. If
     * successful skip the test.
     *
     * The test is also skipped when the TSC is marked reliable. This
     * is true for SoCs which have no fallback clocksource. On these
     * SoCs the TSC is frequency synchronized, but still the TSC ADJUST
     * register might have been wrecked by the BIOS.
     */
    if (tsc_store_and_check_tsc_adjust(false)) {
    if (tsc_store_and_check_tsc_adjust(false) || tsc_clocksource_reliable) {
        atomic_inc(&skip_test);
        return;
    }
@ -3182,6 +3182,7 @@ static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
    memcpy(dest, xsave, XSAVE_HDR_OFFSET);

    /* Set XSTATE_BV */
    xstate_bv &= vcpu->arch.guest_supported_xcr0 | XFEATURE_MASK_FPSSE;
    *(u64 *)(dest + XSAVE_HDR_OFFSET) = xstate_bv;

    /*
@ -15,6 +15,7 @@
#include <linux/debugfs.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/seq_file.h>

#include <asm/pgtable.h>

@ -406,6 +407,7 @@ static void ptdump_walk_pgd_level_core(struct seq_file *m, pgd_t *pgd,
        } else
            note_page(m, &st, __pgprot(0), 1);

        cond_resched();
        start++;
    }
@ -268,6 +268,22 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)

    efi_scratch.use_pgd = true;

    /*
     * Certain firmware versions are way too sentimental and still believe
     * they are exclusive and unquestionable owners of the first physical page,
     * even though they explicitly mark it as EFI_CONVENTIONAL_MEMORY
     * (but then write-access it later during SetVirtualAddressMap()).
     *
     * Create a 1:1 mapping for this page, to avoid triple faults during early
     * boot with such firmware. We are free to hand this page to the BIOS,
     * as trim_bios_range() will reserve the first page and isolate it away
     * from memory allocators anyway.
     */
    if (kernel_map_pages_in_pgd(pgd, 0x0, 0x0, 1, _PAGE_RW)) {
        pr_err("Failed to create 1:1 mapping for the first page!\n");
        return 1;
    }

    /*
     * When making calls to the firmware everything needs to be 1:1
     * mapped and addressable with 32-bit pointers. Map the kernel
@ -419,7 +419,7 @@ subsys_initcall(topology_init);

void cpu_reset(void)
{
#if XCHAL_HAVE_PTP_MMU
#if XCHAL_HAVE_PTP_MMU && IS_ENABLED(CONFIG_MMU)
    local_irq_disable();
    /*
     * We have full MMU: all autoload ways, ways 7, 8 and 9 of DTLB must
@ -306,11 +306,6 @@ int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
    if (ret == 0 || (ret && ret != -EOPNOTSUPP))
        goto out;

    ret = __blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask,
                                    ZERO_PAGE(0), biop);
    if (ret == 0 || (ret && ret != -EOPNOTSUPP))
        goto out;

    ret = 0;
    while (nr_sects != 0) {
        bio = next_bio(bio, min(nr_sects, (sector_t)BIO_MAX_PAGES),

@ -369,6 +364,10 @@ int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
        return 0;
    }

    if (!blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask,
                                 ZERO_PAGE(0)))
        return 0;

    blk_start_plug(&plug);
    ret = __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask,
                                 &bio, discard);
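blkdev_issue_zeroout() keeps a fallback chain: try the cheap mechanism first and move on only when the device reports "not supported", so a real I/O error still terminates the chain. A sketch of that ordering, with stub functions standing in for discard, write-same, and explicit zero writes:

    /* Illustrative only: models the EOPNOTSUPP fall-through logic. */
    #include <errno.h>
    #include <stdio.h>

    static int try_discard(void)    { return -EOPNOTSUPP; }
    static int try_write_same(void) { return -EOPNOTSUPP; }
    static int write_zeroes(void)   { return 0; } /* always possible */

    static int zeroout(void)
    {
        int ret;

        ret = try_discard();
        if (ret != -EOPNOTSUPP)
            return ret; /* success or a real error: stop here */

        ret = try_write_same();
        if (ret != -EOPNOTSUPP)
            return ret;

        return write_zeroes(); /* last resort: plain zero-filled writes */
    }

    int main(void)
    {
        printf("zeroout -> %d\n", zeroout());
        return 0;
    }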
@ -356,6 +356,7 @@ int crypto_register_alg(struct crypto_alg *alg)
    struct crypto_larval *larval;
    int err;

    alg->cra_flags &= ~CRYPTO_ALG_DEAD;
    err = crypto_check_alg(alg);
    if (err)
        return err;
@ -661,9 +661,9 @@ static int aead_recvmsg_sync(struct socket *sock, struct msghdr *msg, int flags)
unlock:
    list_for_each_entry_safe(rsgl, tmp, &ctx->list, list) {
        af_alg_free_sg(&rsgl->sgl);
        list_del(&rsgl->list);
        if (rsgl != &ctx->first_rsgl)
            sock_kfree_s(sk, rsgl, sizeof(*rsgl));
        list_del(&rsgl->list);
    }
    INIT_LIST_HEAD(&ctx->list);
    aead_wmem_wakeup(sk);
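The algif_aead fix is the classic unlink-before-free rule: take an element off the list before releasing it, so the list never points at freed memory. A minimal sketch with a hand-rolled singly linked list (not the kernel list API):

    #include <stdlib.h>

    struct node { struct node *next; };

    static struct node *head;

    static void destroy_all(void)
    {
        struct node *n = head, *next;

        while (n) {
            next = n->next; /* save the link first (the _safe part) */
            head = next;    /* unlink before free: no dangling pointer */
            free(n);
            n = next;
        }
    }

    int main(void)
    {
        for (int i = 0; i < 3; i++) {
            struct node *n = malloc(sizeof(*n));
            n->next = head;
            head = n;
        }
        destroy_all();
        return 0;
    }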
@ -2704,6 +2704,7 @@ static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
    struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
    struct device *dev = acpi_desc->dev;
    struct acpi_nfit_flush_work flush;
    int rc;

    /* bounce the device lock to flush acpi_nfit_add / acpi_nfit_notify */
    device_lock(dev);

@ -2716,7 +2717,10 @@ static int acpi_nfit_flush_probe(struct nvdimm_bus_descriptor *nd_desc)
    INIT_WORK_ONSTACK(&flush.work, flush_probe);
    COMPLETION_INITIALIZER_ONSTACK(flush.cmp);
    queue_work(nfit_wq, &flush.work);
    return wait_for_completion_interruptible(&flush.cmp);

    rc = wait_for_completion_interruptible(&flush.cmp);
    cancel_work_sync(&flush.work);
    return rc;
}

static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
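The nfit fix pairs an interruptible wait with cancel_work_sync() so an on-stack work item cannot outlive its stack frame. A userspace sketch of the same lifetime rule, using a pthread in place of the kernel workqueue (compile with -lpthread):

    #include <pthread.h>
    #include <stdio.h>

    struct flush_work {
        pthread_t thread;
        int done;
    };

    static void *worker(void *arg)
    {
        struct flush_work *w = arg; /* points into the waiter's stack */
        w->done = 1;
        return NULL;
    }

    int main(void)
    {
        struct flush_work w = { .done = 0 }; /* on-stack, like the kernel code */

        pthread_create(&w.thread, NULL, worker, &w);
        /*
         * Even if the wait were abandoned early, joining before return
         * guarantees the worker no longer touches &w; that is the role
         * cancel_work_sync() plays in the fix above.
         */
        pthread_join(w.thread, NULL);
        printf("done = %d\n", w.done);
        return 0;
    }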
@ -1702,6 +1702,8 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,

        if (qc->err_mask & ~AC_ERR_OTHER)
            qc->err_mask &= ~AC_ERR_OTHER;
    } else if (qc->tf.command == ATA_CMD_REQ_SENSE_DATA) {
        qc->result_tf.command |= ATA_SENSE;
    }

    /* finish up */

@ -4356,10 +4358,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
    { "ST380013AS", "3.20", ATA_HORKAGE_MAX_SEC_1024 },

    /*
     * Device times out with higher max sects.
     * These devices time out with higher max sects.
     * https://bugzilla.kernel.org/show_bug.cgi?id=121671
     */
    { "LITEON CX1-JB256-HP", NULL, ATA_HORKAGE_MAX_SEC_1024 },
    { "LITEON CX1-JB*-HP", NULL, ATA_HORKAGE_MAX_SEC_1024 },

    /* Devices we expect to fail diagnostics */