irqchip updates for 5.18

Merge tag 'irqchip-5.18' of git://git.kernel.org/pub/scm/linux/kernel/git/maz/arm-platforms into irq/core

Pull irqchip updates from Marc Zyngier:

 - Add support for the STM32MP13 variant
 - Move parent device away from struct irq_chip
 - Remove all instances of non-const strings assigned to struct
   irq_chip::name, enabling a nice cleanup for VIC and GIC
 - Simplify the Qualcomm PDC driver
 - A bunch of SiFive PLIC cleanups
 - Add support for a new variant of the Meson GPIO block
 - Add support for the irqchip side of the Apple M1 PMU
 - Add support for the Apple M1 Pro/Max AICv2 irqchip
 - Add support for the Qualcomm MPM wakeup gadget
 - Move the Xilinx driver over to the generic irqdomain handling
 - Tiny speedup for IPIs on GICv3 systems
 - The usual odd cleanups

Link: https://lore.kernel.org/all/20220313105142.704579-1-maz@kernel.org

commit f0fae8a0ed
@ -20,6 +20,8 @@ properties:
|
||||
items:
|
||||
- enum:
|
||||
- apm,potenza-pmu
|
||||
- apple,firestorm-pmu
|
||||
- apple,icestorm-pmu
|
||||
- arm,armv8-pmuv3 # Only for s/w models
|
||||
- arm,arm1136-pmu
|
||||
- arm,arm1176-pmu
|
||||
|
@ -18,6 +18,7 @@ Required properties:
|
||||
"amlogic,meson-g12a-gpio-intc" for G12A SoCs (S905D2, S905X2, S905Y2)
|
||||
"amlogic,meson-sm1-gpio-intc" for SM1 SoCs (S905D3, S905X3, S905Y3)
|
||||
"amlogic,meson-a1-gpio-intc" for A1 SoCs (A113L)
|
||||
"amlogic,meson-s4-gpio-intc" for S4 SoCs (S802X2, S905Y4, S805X2G, S905W2)
|
||||
- reg : Specifies base physical address and size of the registers.
|
||||
- interrupt-controller : Identifies the node as an interrupt controller.
|
||||
- #interrupt-cells : Specifies the number of cells needed to encode an
|
||||
|
@ -56,6 +56,8 @@ properties:
|
||||
- 1: virtual HV timer
|
||||
- 2: physical guest timer
|
||||
- 3: virtual guest timer
|
||||
- 4: 'efficient' CPU PMU
|
||||
- 5: 'performance' CPU PMU
|
||||
|
||||
The 3rd cell contains the interrupt flags. This is normally
|
||||
IRQ_TYPE_LEVEL_HIGH (4).
|
||||
@ -68,6 +70,35 @@ properties:
|
||||
power-domains:
|
||||
maxItems: 1
|
||||
|
||||
affinities:
|
||||
type: object
|
||||
additionalProperties: false
|
||||
description:
|
||||
FIQ affinity can be expressed as a single "affinities" node,
|
||||
containing a set of sub-nodes, one per FIQ with a non-default
|
||||
affinity.
|
||||
patternProperties:
|
||||
"^.+-affinity$":
|
||||
type: object
|
||||
additionalProperties: false
|
||||
properties:
|
||||
apple,fiq-index:
|
||||
description:
|
||||
The interrupt number specified as a FIQ, and for which
|
||||
the affinity is not the default.
|
||||
$ref: /schemas/types.yaml#/definitions/uint32
|
||||
maximum: 5
|
||||
|
||||
cpus:
|
||||
$ref: /schemas/types.yaml#/definitions/phandle-array
|
||||
description:
|
||||
Should be a list of phandles to CPU nodes (as described in
|
||||
Documentation/devicetree/bindings/arm/cpus.yaml).
|
||||
|
||||
required:
|
||||
- fiq-index
|
||||
- cpus
|
||||
|
||||
required:
|
||||
- compatible
|
||||
- '#interrupt-cells'
|
||||
|
@ -0,0 +1,98 @@
|
||||
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
|
||||
%YAML 1.2
|
||||
---
|
||||
$id: http://devicetree.org/schemas/interrupt-controller/apple,aic2.yaml#
|
||||
$schema: http://devicetree.org/meta-schemas/core.yaml#
|
||||
|
||||
title: Apple Interrupt Controller 2
|
||||
|
||||
maintainers:
|
||||
- Hector Martin <marcan@marcan.st>
|
||||
|
||||
description: |
|
||||
The Apple Interrupt Controller 2 is a simple interrupt controller present on
|
||||
Apple ARM SoC platforms starting with t600x (M1 Pro and Max).
|
||||
|
||||
It provides the following features:
|
||||
|
||||
- Level-triggered hardware IRQs wired to SoC blocks
|
||||
- Single mask bit per IRQ
|
||||
- Automatic masking on event delivery (auto-ack)
|
||||
- Software triggering (ORed with hw line)
|
||||
- Automatic prioritization (single event/ack register per CPU, lower IRQs =
|
||||
higher priority)
|
||||
- Automatic masking on ack
|
||||
- Support for multiple dies
|
||||
|
||||
This device also represents the FIQ interrupt sources on platforms using AIC,
|
||||
which do not go through a discrete interrupt controller. It also handles
|
||||
FIQ-based Fast IPIs.
|
||||
|
||||
properties:
|
||||
compatible:
|
||||
items:
|
||||
- const: apple,t6000-aic
|
||||
- const: apple,aic2
|
||||
|
||||
interrupt-controller: true
|
||||
|
||||
'#interrupt-cells':
|
||||
const: 4
|
||||
description: |
|
||||
The 1st cell contains the interrupt type:
|
||||
- 0: Hardware IRQ
|
||||
- 1: FIQ
|
||||
|
||||
The 2nd cell contains the die ID.
|
||||
|
||||
The next cell contains the interrupt number.
|
||||
- HW IRQs: interrupt number
|
||||
- FIQs:
|
||||
- 0: physical HV timer
|
||||
- 1: virtual HV timer
|
||||
- 2: physical guest timer
|
||||
- 3: virtual guest timer
|
||||
|
||||
The last cell contains the interrupt flags. This is normally
|
||||
IRQ_TYPE_LEVEL_HIGH (4).
|
||||
|
||||
reg:
|
||||
items:
|
||||
- description: Address and size of the main AIC2 registers.
|
||||
- description: Address and size of the AIC2 Event register.
|
||||
|
||||
reg-names:
|
||||
items:
|
||||
- const: core
|
||||
- const: event
|
||||
|
||||
power-domains:
|
||||
maxItems: 1
|
||||
|
||||
required:
|
||||
- compatible
|
||||
- '#interrupt-cells'
|
||||
- interrupt-controller
|
||||
- reg
|
||||
- reg-names
|
||||
|
||||
additionalProperties: false
|
||||
|
||||
allOf:
|
||||
- $ref: /schemas/interrupt-controller.yaml#
|
||||
|
||||
examples:
|
||||
- |
|
||||
soc {
|
||||
#address-cells = <2>;
|
||||
#size-cells = <2>;
|
||||
|
||||
aic: interrupt-controller@28e100000 {
|
||||
compatible = "apple,t6000-aic", "apple,aic2";
|
||||
#interrupt-cells = <4>;
|
||||
interrupt-controller;
|
||||
reg = <0x2 0x8e100000 0x0 0xc000>,
|
||||
<0x2 0x8e10c000 0x0 0x4>;
|
||||
reg-names = "core", "event";
|
||||
};
|
||||
};
|
@ -0,0 +1,96 @@
|
||||
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
|
||||
%YAML 1.2
|
||||
---
|
||||
$id: http://devicetree.org/schemas/interrupt-controller/qcom,mpm.yaml#
|
||||
$schema: http://devicetree.org/meta-schemas/core.yaml#
|
||||
|
||||
title: Qualcomm MPM Interrupt Controller
|
||||
|
||||
maintainers:
|
||||
- Shawn Guo <shawn.guo@linaro.org>
|
||||
|
||||
description:
|
||||
Qualcomm Technologies Inc. SoCs based on the RPM architecture have an
MSM Power Manager (MPM) in the always-on domain. In addition to managing
resources during sleep, the hardware also has an interrupt controller that
monitors the interrupts when the system is asleep, wakes up the APSS when
one of these interrupts occurs, and replays it to the GIC interrupt
controller after the GIC becomes operational.
|
||||
|
||||
allOf:
|
||||
- $ref: /schemas/interrupt-controller.yaml#
|
||||
|
||||
properties:
|
||||
compatible:
|
||||
items:
|
||||
- const: qcom,mpm
|
||||
|
||||
reg:
|
||||
maxItems: 1
|
||||
description:
|
||||
Specifies the base address and size of vMPM registers in RPM MSG RAM.
|
||||
|
||||
interrupts:
|
||||
maxItems: 1
|
||||
description:
|
||||
Specify the IRQ used by RPM to wakeup APSS.
|
||||
|
||||
mboxes:
|
||||
maxItems: 1
|
||||
description:
|
||||
Specify the mailbox used to notify RPM for writing vMPM registers.
|
||||
|
||||
interrupt-controller: true
|
||||
|
||||
'#interrupt-cells':
|
||||
const: 2
|
||||
description:
|
||||
The first cell is the MPM pin number for the interrupt, and the second
|
||||
is the trigger type.
|
||||
|
||||
qcom,mpm-pin-count:
|
||||
description:
|
||||
Specify the total MPM pin count that a SoC supports.
|
||||
$ref: /schemas/types.yaml#/definitions/uint32
|
||||
|
||||
qcom,mpm-pin-map:
|
||||
description:
|
||||
A set of MPM pin numbers and the corresponding GIC SPIs.
|
||||
$ref: /schemas/types.yaml#/definitions/uint32-matrix
|
||||
items:
|
||||
items:
|
||||
- description: MPM pin number
|
||||
- description: GIC SPI number for the MPM pin
|
||||
|
||||
required:
|
||||
- compatible
|
||||
- reg
|
||||
- interrupts
|
||||
- mboxes
|
||||
- interrupt-controller
|
||||
- '#interrupt-cells'
|
||||
- qcom,mpm-pin-count
|
||||
- qcom,mpm-pin-map
|
||||
|
||||
additionalProperties: false
|
||||
|
||||
examples:
|
||||
- |
|
||||
#include <dt-bindings/interrupt-controller/arm-gic.h>
|
||||
mpm: interrupt-controller@45f01b8 {
|
||||
compatible = "qcom,mpm";
|
||||
interrupts = <GIC_SPI 197 IRQ_TYPE_EDGE_RISING>;
|
||||
reg = <0x45f01b8 0x1000>;
|
||||
mboxes = <&apcs_glb 1>;
|
||||
interrupt-controller;
|
||||
#interrupt-cells = <2>;
|
||||
interrupt-parent = <&intc>;
|
||||
qcom,mpm-pin-count = <96>;
|
||||
qcom,mpm-pin-map = <2 275>,
|
||||
<5 296>,
|
||||
<12 422>,
|
||||
<24 79>,
|
||||
<86 183>,
|
||||
<90 260>,
|
||||
<91 260>;
|
||||
};
|
@ -20,6 +20,7 @@ properties:
|
||||
- items:
|
||||
- enum:
|
||||
- st,stm32mp1-exti
|
||||
- st,stm32mp13-exti
|
||||
- const: syscon
|
||||
|
||||
"#interrupt-cells":
|
||||
|
@ -1769,7 +1769,7 @@ T: git https://github.com/AsahiLinux/linux.git
|
||||
F: Documentation/devicetree/bindings/arm/apple.yaml
|
||||
F: Documentation/devicetree/bindings/arm/apple/*
|
||||
F: Documentation/devicetree/bindings/i2c/apple,i2c.yaml
|
||||
F: Documentation/devicetree/bindings/interrupt-controller/apple,aic.yaml
|
||||
F: Documentation/devicetree/bindings/interrupt-controller/apple,*
|
||||
F: Documentation/devicetree/bindings/mailbox/apple,mailbox.yaml
|
||||
F: Documentation/devicetree/bindings/pci/apple,pcie.yaml
|
||||
F: Documentation/devicetree/bindings/pinctrl/apple,pinctrl.yaml
|
||||
|
@ -97,6 +97,18 @@
|
||||
<AIC_FIQ AIC_TMR_HV_VIRT IRQ_TYPE_LEVEL_HIGH>;
|
||||
};
|
||||
|
||||
pmu-e {
|
||||
compatible = "apple,icestorm-pmu";
|
||||
interrupt-parent = <&aic>;
|
||||
interrupts = <AIC_FIQ AIC_CPU_PMU_E IRQ_TYPE_LEVEL_HIGH>;
|
||||
};
|
||||
|
||||
pmu-p {
|
||||
compatible = "apple,firestorm-pmu";
|
||||
interrupt-parent = <&aic>;
|
||||
interrupts = <AIC_FIQ AIC_CPU_PMU_P IRQ_TYPE_LEVEL_HIGH>;
|
||||
};
|
||||
|
||||
clkref: clock-ref {
|
||||
compatible = "fixed-clock";
|
||||
#clock-cells = <0>;
|
||||
@ -213,6 +225,18 @@
|
||||
interrupt-controller;
|
||||
reg = <0x2 0x3b100000 0x0 0x8000>;
|
||||
power-domains = <&ps_aic>;
|
||||
|
||||
affinities {
|
||||
e-core-pmu-affinity {
|
||||
apple,fiq-index = <AIC_CPU_PMU_E>;
|
||||
cpus = <&cpu0 &cpu1 &cpu2 &cpu3>;
|
||||
};
|
||||
|
||||
p-core-pmu-affinity {
|
||||
apple,fiq-index = <AIC_CPU_PMU_P>;
|
||||
cpus = <&cpu4 &cpu5 &cpu6 &cpu7>;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
pmgr: power-management@23b700000 {
|
||||
|
arch/arm64/include/asm/apple_m1_pmu.h (new file, 19 lines)
@ -0,0 +1,19 @@
|
||||
// SPDX-License-Identifier: GPL-2.0
|
||||
|
||||
#ifndef __ASM_APPLE_M1_PMU_h
|
||||
#define __ASM_APPLE_M1_PMU_h
|
||||
|
||||
#include <linux/bits.h>
|
||||
#include <asm/sysreg.h>
|
||||
|
||||
/* Core PMC control register */
|
||||
#define SYS_IMP_APL_PMCR0_EL1 sys_reg(3, 1, 15, 0, 0)
|
||||
#define PMCR0_IMODE GENMASK(10, 8)
|
||||
#define PMCR0_IMODE_OFF 0
|
||||
#define PMCR0_IMODE_PMI 1
|
||||
#define PMCR0_IMODE_AIC 2
|
||||
#define PMCR0_IMODE_HALT 3
|
||||
#define PMCR0_IMODE_FIQ 4
|
||||
#define PMCR0_IACT BIT(11)
|
||||
|
||||
#endif /* __ASM_APPLE_M1_PMU_h */
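/*
 * Illustrative sketch, not part of the original patch: roughly how a FIQ
 * handler can tell whether the core PMU raised the FIQ, using the
 * definitions above. The helper name is hypothetical, and FIELD_GET()
 * additionally needs <linux/bitfield.h>.
 */
static inline bool apple_pmu_fiq_pending(void)
{
	u64 pmcr0 = read_sysreg_s(SYS_IMP_APL_PMCR0_EL1);

	/* PMI delivery is set to FIQ mode and an interrupt is active */
	return FIELD_GET(PMCR0_IMODE, pmcr0) == PMCR0_IMODE_FIQ &&
	       (pmcr0 & PMCR0_IACT);
}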
|
@ -45,6 +45,8 @@ config MICROBLAZE
|
||||
select SET_FS
|
||||
select ZONE_DMA
|
||||
select TRACE_IRQFLAGS_SUPPORT
|
||||
select GENERIC_IRQ_MULTI_HANDLER
|
||||
select HANDLE_DOMAIN_IRQ
|
||||
|
||||
# Endianness selection
|
||||
choice
|
||||
|
@ -11,7 +11,4 @@
|
||||
struct pt_regs;
|
||||
extern void do_IRQ(struct pt_regs *regs);
|
||||
|
||||
/* should be defined in each interrupt controller driver */
|
||||
extern unsigned int xintc_get_irq(void);
|
||||
|
||||
#endif /* _ASM_MICROBLAZE_IRQ_H */
|
||||
|
@ -20,27 +20,13 @@
|
||||
#include <linux/irqchip.h>
|
||||
#include <linux/of_irq.h>
|
||||
|
||||
static u32 concurrent_irq;
|
||||
|
||||
void __irq_entry do_IRQ(struct pt_regs *regs)
|
||||
{
|
||||
unsigned int irq;
|
||||
struct pt_regs *old_regs = set_irq_regs(regs);
|
||||
trace_hardirqs_off();
|
||||
|
||||
irq_enter();
|
||||
irq = xintc_get_irq();
|
||||
next_irq:
|
||||
BUG_ON(!irq);
|
||||
generic_handle_irq(irq);
|
||||
|
||||
irq = xintc_get_irq();
|
||||
if (irq != -1U) {
|
||||
pr_debug("next irq: %d\n", irq);
|
||||
++concurrent_irq;
|
||||
goto next_irq;
|
||||
}
|
||||
|
||||
handle_arch_irq(regs);
|
||||
irq_exit();
|
||||
set_irq_regs(old_regs);
|
||||
trace_hardirqs_on();
|
||||
|
@ -239,7 +239,6 @@ mediatek_gpio_bank_probe(struct device *dev, int bank)
|
||||
|
||||
rg->chip.offset = bank * MTK_BANK_WIDTH;
|
||||
rg->irq_chip.name = dev_name(dev);
|
||||
rg->irq_chip.parent_device = dev;
|
||||
rg->irq_chip.irq_unmask = mediatek_gpio_irq_unmask;
|
||||
rg->irq_chip.irq_mask = mediatek_gpio_irq_mask;
|
||||
rg->irq_chip.irq_mask_ack = mediatek_gpio_irq_mask;
|
||||
|
@ -986,7 +986,8 @@ static void omap_gpio_mod_init(struct gpio_bank *bank)
|
||||
writel_relaxed(0, base + bank->regs->ctrl);
|
||||
}
|
||||
|
||||
static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
|
||||
static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc,
|
||||
struct device *pm_dev)
|
||||
{
|
||||
struct gpio_irq_chip *irq;
|
||||
static int gpio;
|
||||
@ -1052,6 +1053,7 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
|
||||
if (ret)
|
||||
return dev_err_probe(bank->chip.parent, ret, "Could not register gpio chip\n");
|
||||
|
||||
irq_domain_set_pm_device(bank->chip.irq.domain, pm_dev);
|
||||
ret = devm_request_irq(bank->chip.parent, bank->irq,
|
||||
omap_gpio_irq_handler,
|
||||
0, dev_name(bank->chip.parent), bank);
|
||||
@ -1402,7 +1404,6 @@ static int omap_gpio_probe(struct platform_device *pdev)
|
||||
irqc->irq_bus_sync_unlock = gpio_irq_bus_sync_unlock,
|
||||
irqc->name = dev_name(&pdev->dev);
|
||||
irqc->flags = IRQCHIP_MASK_ON_SUSPEND;
|
||||
irqc->parent_device = dev;
|
||||
|
||||
bank->irq = platform_get_irq(pdev, 0);
|
||||
if (bank->irq <= 0) {
|
||||
@ -1466,7 +1467,7 @@ static int omap_gpio_probe(struct platform_device *pdev)
|
||||
|
||||
omap_gpio_mod_init(bank);
|
||||
|
||||
ret = omap_gpio_chip_init(bank, irqc);
|
||||
ret = omap_gpio_chip_init(bank, irqc, dev);
|
||||
if (ret) {
|
||||
pm_runtime_put_sync(dev);
|
||||
pm_runtime_disable(dev);
|
||||
|
@ -530,7 +530,6 @@ static int gpio_rcar_probe(struct platform_device *pdev)
|
||||
|
||||
irq_chip = &p->irq_chip;
|
||||
irq_chip->name = "gpio-rcar";
|
||||
irq_chip->parent_device = dev;
|
||||
irq_chip->irq_mask = gpio_rcar_irq_disable;
|
||||
irq_chip->irq_unmask = gpio_rcar_irq_enable;
|
||||
irq_chip->irq_set_type = gpio_rcar_irq_set_type;
|
||||
@ -552,6 +551,7 @@ static int gpio_rcar_probe(struct platform_device *pdev)
|
||||
goto err0;
|
||||
}
|
||||
|
||||
irq_domain_set_pm_device(gpio_chip->irq.domain, dev);
|
||||
ret = devm_request_irq(dev, p->irq_parent, gpio_rcar_irq_handler,
|
||||
IRQF_SHARED, name, p);
|
||||
if (ret) {
|
||||
|
@ -281,7 +281,6 @@ static int tqmx86_gpio_probe(struct platform_device *pdev)
|
||||
u8 irq_status;
|
||||
|
||||
irq_chip->name = chip->label;
|
||||
irq_chip->parent_device = &pdev->dev;
|
||||
irq_chip->irq_mask = tqmx86_gpio_irq_mask;
|
||||
irq_chip->irq_unmask = tqmx86_gpio_irq_unmask;
|
||||
irq_chip->irq_set_type = tqmx86_gpio_irq_set_type;
|
||||
@ -316,6 +315,8 @@ static int tqmx86_gpio_probe(struct platform_device *pdev)
|
||||
goto out_pm_dis;
|
||||
}
|
||||
|
||||
irq_domain_set_pm_device(girq->domain, dev);
|
||||
|
||||
dev_info(dev, "GPIO functionality initialized with %d pins\n",
|
||||
chip->ngpio);
|
||||
|
||||
|
@ -430,6 +430,14 @@ config QCOM_PDC
|
||||
Power Domain Controller driver to manage and configure wakeup
|
||||
IRQs for Qualcomm Technologies Inc (QTI) mobile chips.
|
||||
|
||||
config QCOM_MPM
|
||||
tristate "QCOM MPM"
|
||||
depends on ARCH_QCOM
|
||||
select IRQ_DOMAIN_HIERARCHY
|
||||
help
|
||||
MSM Power Manager driver to manage and configure wakeup
|
||||
IRQs for Qualcomm Technologies Inc (QTI) mobile chips.
|
||||
|
||||
config CSKY_MPINTC
|
||||
bool
|
||||
depends on CSKY
|
||||
|
@ -94,6 +94,7 @@ obj-$(CONFIG_MESON_IRQ_GPIO) += irq-meson-gpio.o
|
||||
obj-$(CONFIG_GOLDFISH_PIC) += irq-goldfish-pic.o
|
||||
obj-$(CONFIG_NDS32) += irq-ativic32.o
|
||||
obj-$(CONFIG_QCOM_PDC) += qcom-pdc.o
|
||||
obj-$(CONFIG_QCOM_MPM) += irq-qcom-mpm.o
|
||||
obj-$(CONFIG_CSKY_MPINTC) += irq-csky-mpintc.o
|
||||
obj-$(CONFIG_CSKY_APB_INTC) += irq-csky-apb-intc.o
|
||||
obj-$(CONFIG_RISCV_INTC) += irq-riscv-intc.o
|
||||
|
@ -24,7 +24,7 @@
|
||||
* - Default "this CPU" register view and explicit per-CPU views
|
||||
*
|
||||
* In addition, this driver also handles FIQs, as these are routed to the same
|
||||
* IRQ vector. These are used for Fast IPIs (TODO), the ARMv8 timer IRQs, and
|
||||
* IRQ vector. These are used for Fast IPIs, the ARMv8 timer IRQs, and
|
||||
* performance counters (TODO).
|
||||
*
|
||||
* Implementation notes:
|
||||
@ -52,9 +52,12 @@
|
||||
#include <linux/irqchip.h>
|
||||
#include <linux/irqchip/arm-vgic-info.h>
|
||||
#include <linux/irqdomain.h>
|
||||
#include <linux/jump_label.h>
|
||||
#include <linux/limits.h>
|
||||
#include <linux/of_address.h>
|
||||
#include <linux/slab.h>
|
||||
#include <asm/apple_m1_pmu.h>
|
||||
#include <asm/cputype.h>
|
||||
#include <asm/exception.h>
|
||||
#include <asm/sysreg.h>
|
||||
#include <asm/virt.h>
|
||||
@ -62,20 +65,22 @@
|
||||
#include <dt-bindings/interrupt-controller/apple-aic.h>
|
||||
|
||||
/*
|
||||
* AIC registers (MMIO)
|
||||
* AIC v1 registers (MMIO)
|
||||
*/
|
||||
|
||||
#define AIC_INFO 0x0004
|
||||
#define AIC_INFO_NR_HW GENMASK(15, 0)
|
||||
#define AIC_INFO_NR_IRQ GENMASK(15, 0)
|
||||
|
||||
#define AIC_CONFIG 0x0010
|
||||
|
||||
#define AIC_WHOAMI 0x2000
|
||||
#define AIC_EVENT 0x2004
|
||||
#define AIC_EVENT_TYPE GENMASK(31, 16)
|
||||
#define AIC_EVENT_DIE GENMASK(31, 24)
|
||||
#define AIC_EVENT_TYPE GENMASK(23, 16)
|
||||
#define AIC_EVENT_NUM GENMASK(15, 0)
|
||||
|
||||
#define AIC_EVENT_TYPE_HW 1
|
||||
#define AIC_EVENT_TYPE_FIQ 0 /* Software use */
|
||||
#define AIC_EVENT_TYPE_IRQ 1
|
||||
#define AIC_EVENT_TYPE_IPI 4
|
||||
#define AIC_EVENT_IPI_OTHER 1
|
||||
#define AIC_EVENT_IPI_SELF 2
|
||||
@ -91,34 +96,73 @@
|
||||
#define AIC_IPI_SELF BIT(31)
|
||||
|
||||
#define AIC_TARGET_CPU 0x3000
|
||||
#define AIC_SW_SET 0x4000
|
||||
#define AIC_SW_CLR 0x4080
|
||||
#define AIC_MASK_SET 0x4100
|
||||
#define AIC_MASK_CLR 0x4180
|
||||
|
||||
#define AIC_CPU_IPI_SET(cpu) (0x5008 + ((cpu) << 7))
|
||||
#define AIC_CPU_IPI_CLR(cpu) (0x500c + ((cpu) << 7))
|
||||
#define AIC_CPU_IPI_MASK_SET(cpu) (0x5024 + ((cpu) << 7))
|
||||
#define AIC_CPU_IPI_MASK_CLR(cpu) (0x5028 + ((cpu) << 7))
|
||||
|
||||
#define AIC_MAX_IRQ 0x400
|
||||
|
||||
/*
|
||||
* AIC v2 registers (MMIO)
|
||||
*/
|
||||
|
||||
#define AIC2_VERSION 0x0000
|
||||
#define AIC2_VERSION_VER GENMASK(7, 0)
|
||||
|
||||
#define AIC2_INFO1 0x0004
|
||||
#define AIC2_INFO1_NR_IRQ GENMASK(15, 0)
|
||||
#define AIC2_INFO1_LAST_DIE GENMASK(27, 24)
|
||||
|
||||
#define AIC2_INFO2 0x0008
|
||||
|
||||
#define AIC2_INFO3 0x000c
|
||||
#define AIC2_INFO3_MAX_IRQ GENMASK(15, 0)
|
||||
#define AIC2_INFO3_MAX_DIE GENMASK(27, 24)
|
||||
|
||||
#define AIC2_RESET 0x0010
|
||||
#define AIC2_RESET_RESET BIT(0)
|
||||
|
||||
#define AIC2_CONFIG 0x0014
|
||||
#define AIC2_CONFIG_ENABLE BIT(0)
|
||||
#define AIC2_CONFIG_PREFER_PCPU BIT(28)
|
||||
|
||||
#define AIC2_TIMEOUT 0x0028
|
||||
#define AIC2_CLUSTER_PRIO 0x0030
|
||||
#define AIC2_DELAY_GROUPS 0x0100
|
||||
|
||||
#define AIC2_IRQ_CFG 0x2000
|
||||
|
||||
/*
|
||||
* AIC2 registers are laid out like this, starting at AIC2_IRQ_CFG:
|
||||
*
|
||||
* Repeat for each die:
|
||||
* IRQ_CFG: u32 * MAX_IRQS
|
||||
* SW_SET: u32 * (MAX_IRQS / 32)
|
||||
* SW_CLR: u32 * (MAX_IRQS / 32)
|
||||
* MASK_SET: u32 * (MAX_IRQS / 32)
|
||||
* MASK_CLR: u32 * (MAX_IRQS / 32)
|
||||
* HW_STATE: u32 * (MAX_IRQS / 32)
|
||||
*
|
||||
* This is followed by a set of event registers, each 16K page aligned.
|
||||
* The first one is the AP event register we will use. Unfortunately,
|
||||
* the actual implemented die count is not specified anywhere in the
|
||||
* capability registers, so we have to explicitly specify the event
|
||||
* register as a second reg entry in the device tree to remain
|
||||
* forward-compatible.
|
||||
*/
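/*
 * Illustrative sketch, not part of the original patch: with the layout
 * described above, the per-die offset of e.g. the MASK_SET block can be
 * derived like this. The helper names are hypothetical; the driver folds
 * the same arithmetic into aic_of_ic_init() via the aic_info offsets.
 */
static inline u32 aic2_die_off(u32 irq_cfg, u32 die_stride, u32 die)
{
	/* Dies repeat the IRQ_CFG..HW_STATE block back to back */
	return irq_cfg + die * die_stride;
}

static inline u32 aic2_mask_set_off(u32 irq_cfg, u32 max_irq,
				    u32 die_stride, u32 die)
{
	/* Skip IRQ_CFG (u32 * MAX_IRQS), then the SW_SET and SW_CLR bitmaps */
	return aic2_die_off(irq_cfg, die_stride, die) +
	       sizeof(u32) * max_irq +
	       2 * sizeof(u32) * (max_irq >> 5);
}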
|
||||
|
||||
#define AIC2_IRQ_CFG_TARGET GENMASK(3, 0)
|
||||
#define AIC2_IRQ_CFG_DELAY_IDX GENMASK(7, 5)
|
||||
|
||||
#define MASK_REG(x) (4 * ((x) >> 5))
|
||||
#define MASK_BIT(x) BIT((x) & GENMASK(4, 0))
|
||||
|
||||
/*
|
||||
* IMP-DEF sysregs that control FIQ sources
|
||||
* Note: sysreg-based IPIs are not supported yet.
|
||||
*/
|
||||
|
||||
/* Core PMC control register */
|
||||
#define SYS_IMP_APL_PMCR0_EL1 sys_reg(3, 1, 15, 0, 0)
|
||||
#define PMCR0_IMODE GENMASK(10, 8)
|
||||
#define PMCR0_IMODE_OFF 0
|
||||
#define PMCR0_IMODE_PMI 1
|
||||
#define PMCR0_IMODE_AIC 2
|
||||
#define PMCR0_IMODE_HALT 3
|
||||
#define PMCR0_IMODE_FIQ 4
|
||||
#define PMCR0_IACT BIT(11)
|
||||
|
||||
/* IPI request registers */
|
||||
#define SYS_IMP_APL_IPI_RR_LOCAL_EL1 sys_reg(3, 5, 15, 0, 0)
|
||||
#define SYS_IMP_APL_IPI_RR_GLOBAL_EL1 sys_reg(3, 5, 15, 0, 1)
|
||||
@ -155,7 +199,18 @@
|
||||
#define SYS_IMP_APL_UPMSR_EL1 sys_reg(3, 7, 15, 6, 4)
|
||||
#define UPMSR_IACT BIT(0)
|
||||
|
||||
#define AIC_NR_FIQ 4
|
||||
/* MPIDR fields */
|
||||
#define MPIDR_CPU(x) MPIDR_AFFINITY_LEVEL(x, 0)
|
||||
#define MPIDR_CLUSTER(x) MPIDR_AFFINITY_LEVEL(x, 1)
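/*
 * Note (added for clarity): the macros below make Linux hwirq numbers reuse
 * the AIC event register layout (die | type | number), so a raw event word
 * read from the controller can be passed straight to the irq domain.
 */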
|
||||
|
||||
#define AIC_IRQ_HWIRQ(die, irq) (FIELD_PREP(AIC_EVENT_DIE, die) | \
|
||||
FIELD_PREP(AIC_EVENT_TYPE, AIC_EVENT_TYPE_IRQ) | \
|
||||
FIELD_PREP(AIC_EVENT_NUM, irq))
|
||||
#define AIC_FIQ_HWIRQ(x) (FIELD_PREP(AIC_EVENT_TYPE, AIC_EVENT_TYPE_FIQ) | \
|
||||
FIELD_PREP(AIC_EVENT_NUM, x))
|
||||
#define AIC_HWIRQ_IRQ(x) FIELD_GET(AIC_EVENT_NUM, x)
|
||||
#define AIC_HWIRQ_DIE(x) FIELD_GET(AIC_EVENT_DIE, x)
|
||||
#define AIC_NR_FIQ 6
|
||||
#define AIC_NR_SWIPI 32
|
||||
|
||||
/*
|
||||
@ -173,11 +228,81 @@
|
||||
#define AIC_TMR_EL02_PHYS AIC_TMR_GUEST_PHYS
|
||||
#define AIC_TMR_EL02_VIRT AIC_TMR_GUEST_VIRT
|
||||
|
||||
DEFINE_STATIC_KEY_TRUE(use_fast_ipi);
|
||||
|
||||
struct aic_info {
|
||||
int version;
|
||||
|
||||
/* Register offsets */
|
||||
u32 event;
|
||||
u32 target_cpu;
|
||||
u32 irq_cfg;
|
||||
u32 sw_set;
|
||||
u32 sw_clr;
|
||||
u32 mask_set;
|
||||
u32 mask_clr;
|
||||
|
||||
u32 die_stride;
|
||||
|
||||
/* Features */
|
||||
bool fast_ipi;
|
||||
};
|
||||
|
||||
static const struct aic_info aic1_info = {
|
||||
.version = 1,
|
||||
|
||||
.event = AIC_EVENT,
|
||||
.target_cpu = AIC_TARGET_CPU,
|
||||
};
|
||||
|
||||
static const struct aic_info aic1_fipi_info = {
|
||||
.version = 1,
|
||||
|
||||
.event = AIC_EVENT,
|
||||
.target_cpu = AIC_TARGET_CPU,
|
||||
|
||||
.fast_ipi = true,
|
||||
};
|
||||
|
||||
static const struct aic_info aic2_info = {
|
||||
.version = 2,
|
||||
|
||||
.irq_cfg = AIC2_IRQ_CFG,
|
||||
|
||||
.fast_ipi = true,
|
||||
};
|
||||
|
||||
static const struct of_device_id aic_info_match[] = {
|
||||
{
|
||||
.compatible = "apple,t8103-aic",
|
||||
.data = &aic1_fipi_info,
|
||||
},
|
||||
{
|
||||
.compatible = "apple,aic",
|
||||
.data = &aic1_info,
|
||||
},
|
||||
{
|
||||
.compatible = "apple,aic2",
|
||||
.data = &aic2_info,
|
||||
},
|
||||
{}
|
||||
};
|
||||
|
||||
struct aic_irq_chip {
|
||||
void __iomem *base;
|
||||
void __iomem *event;
|
||||
struct irq_domain *hw_domain;
|
||||
struct irq_domain *ipi_domain;
|
||||
int nr_hw;
|
||||
struct {
|
||||
cpumask_t aff;
|
||||
} *fiq_aff[AIC_NR_FIQ];
|
||||
|
||||
int nr_irq;
|
||||
int max_irq;
|
||||
int nr_die;
|
||||
int max_die;
|
||||
|
||||
struct aic_info info;
|
||||
};
|
||||
|
||||
static DEFINE_PER_CPU(uint32_t, aic_fiq_unmasked);
|
||||
@ -205,18 +330,24 @@ static void aic_ic_write(struct aic_irq_chip *ic, u32 reg, u32 val)
|
||||
|
||||
static void aic_irq_mask(struct irq_data *d)
|
||||
{
|
||||
irq_hw_number_t hwirq = irqd_to_hwirq(d);
|
||||
struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d);
|
||||
|
||||
aic_ic_write(ic, AIC_MASK_SET + MASK_REG(irqd_to_hwirq(d)),
|
||||
MASK_BIT(irqd_to_hwirq(d)));
|
||||
u32 off = AIC_HWIRQ_DIE(hwirq) * ic->info.die_stride;
|
||||
u32 irq = AIC_HWIRQ_IRQ(hwirq);
|
||||
|
||||
aic_ic_write(ic, ic->info.mask_set + off + MASK_REG(irq), MASK_BIT(irq));
|
||||
}
|
||||
|
||||
static void aic_irq_unmask(struct irq_data *d)
|
||||
{
|
||||
irq_hw_number_t hwirq = irqd_to_hwirq(d);
|
||||
struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d);
|
||||
|
||||
aic_ic_write(ic, AIC_MASK_CLR + MASK_REG(d->hwirq),
|
||||
MASK_BIT(irqd_to_hwirq(d)));
|
||||
u32 off = AIC_HWIRQ_DIE(hwirq) * ic->info.die_stride;
|
||||
u32 irq = AIC_HWIRQ_IRQ(hwirq);
|
||||
|
||||
aic_ic_write(ic, ic->info.mask_clr + off + MASK_REG(irq), MASK_BIT(irq));
|
||||
}
|
||||
|
||||
static void aic_irq_eoi(struct irq_data *d)
|
||||
@ -239,12 +370,12 @@ static void __exception_irq_entry aic_handle_irq(struct pt_regs *regs)
|
||||
* We cannot use a relaxed read here, as reads from DMA buffers
|
||||
* need to be ordered after the IRQ fires.
|
||||
*/
|
||||
event = readl(ic->base + AIC_EVENT);
|
||||
event = readl(ic->event + ic->info.event);
|
||||
type = FIELD_GET(AIC_EVENT_TYPE, event);
|
||||
irq = FIELD_GET(AIC_EVENT_NUM, event);
|
||||
|
||||
if (type == AIC_EVENT_TYPE_HW)
|
||||
generic_handle_domain_irq(aic_irqc->hw_domain, irq);
|
||||
if (type == AIC_EVENT_TYPE_IRQ)
|
||||
generic_handle_domain_irq(aic_irqc->hw_domain, event);
|
||||
else if (type == AIC_EVENT_TYPE_IPI && irq == 1)
|
||||
aic_handle_ipi(regs);
|
||||
else if (event != 0)
|
||||
@ -271,12 +402,14 @@ static int aic_irq_set_affinity(struct irq_data *d,
|
||||
struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d);
|
||||
int cpu;
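	/*
	 * Only AICv1 provides a per-IRQ target CPU register; AICv2 never
	 * installs .irq_set_affinity, so it cannot reach this path.
	 */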
|
||||
|
||||
BUG_ON(!ic->info.target_cpu);
|
||||
|
||||
if (force)
|
||||
cpu = cpumask_first(mask_val);
|
||||
else
|
||||
cpu = cpumask_any_and(mask_val, cpu_online_mask);
|
||||
|
||||
aic_ic_write(ic, AIC_TARGET_CPU + hwirq * 4, BIT(cpu));
|
||||
aic_ic_write(ic, ic->info.target_cpu + AIC_HWIRQ_IRQ(hwirq) * 4, BIT(cpu));
|
||||
irq_data_update_effective_affinity(d, cpumask_of(cpu));
|
||||
|
||||
return IRQ_SET_MASK_OK;
|
||||
@ -300,15 +433,21 @@ static struct irq_chip aic_chip = {
|
||||
.irq_set_type = aic_irq_set_type,
|
||||
};
|
||||
|
||||
static struct irq_chip aic2_chip = {
|
||||
.name = "AIC2",
|
||||
.irq_mask = aic_irq_mask,
|
||||
.irq_unmask = aic_irq_unmask,
|
||||
.irq_eoi = aic_irq_eoi,
|
||||
.irq_set_type = aic_irq_set_type,
|
||||
};
|
||||
|
||||
/*
|
||||
* FIQ irqchip
|
||||
*/
|
||||
|
||||
static unsigned long aic_fiq_get_idx(struct irq_data *d)
|
||||
{
|
||||
struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d);
|
||||
|
||||
return irqd_to_hwirq(d) - ic->nr_hw;
|
||||
return AIC_HWIRQ_IRQ(irqd_to_hwirq(d));
|
||||
}
|
||||
|
||||
static void aic_fiq_set_mask(struct irq_data *d)
|
||||
@ -386,17 +525,21 @@ static void __exception_irq_entry aic_handle_fiq(struct pt_regs *regs)
|
||||
*/
|
||||
|
||||
if (read_sysreg_s(SYS_IMP_APL_IPI_SR_EL1) & IPI_SR_PENDING) {
|
||||
pr_err_ratelimited("Fast IPI fired. Acking.\n");
|
||||
write_sysreg_s(IPI_SR_PENDING, SYS_IMP_APL_IPI_SR_EL1);
|
||||
if (static_branch_likely(&use_fast_ipi)) {
|
||||
aic_handle_ipi(regs);
|
||||
} else {
|
||||
pr_err_ratelimited("Fast IPI fired. Acking.\n");
|
||||
write_sysreg_s(IPI_SR_PENDING, SYS_IMP_APL_IPI_SR_EL1);
|
||||
}
|
||||
}
|
||||
|
||||
if (TIMER_FIRING(read_sysreg(cntp_ctl_el0)))
|
||||
generic_handle_domain_irq(aic_irqc->hw_domain,
|
||||
aic_irqc->nr_hw + AIC_TMR_EL0_PHYS);
|
||||
AIC_FIQ_HWIRQ(AIC_TMR_EL0_PHYS));
|
||||
|
||||
if (TIMER_FIRING(read_sysreg(cntv_ctl_el0)))
|
||||
generic_handle_domain_irq(aic_irqc->hw_domain,
|
||||
aic_irqc->nr_hw + AIC_TMR_EL0_VIRT);
|
||||
AIC_FIQ_HWIRQ(AIC_TMR_EL0_VIRT));
|
||||
|
||||
if (is_kernel_in_hyp_mode()) {
|
||||
uint64_t enabled = read_sysreg_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2);
|
||||
@ -404,24 +547,23 @@ static void __exception_irq_entry aic_handle_fiq(struct pt_regs *regs)
|
||||
if ((enabled & VM_TMR_FIQ_ENABLE_P) &&
|
||||
TIMER_FIRING(read_sysreg_s(SYS_CNTP_CTL_EL02)))
|
||||
generic_handle_domain_irq(aic_irqc->hw_domain,
|
||||
aic_irqc->nr_hw + AIC_TMR_EL02_PHYS);
|
||||
AIC_FIQ_HWIRQ(AIC_TMR_EL02_PHYS));
|
||||
|
||||
if ((enabled & VM_TMR_FIQ_ENABLE_V) &&
|
||||
TIMER_FIRING(read_sysreg_s(SYS_CNTV_CTL_EL02)))
|
||||
generic_handle_domain_irq(aic_irqc->hw_domain,
|
||||
aic_irqc->nr_hw + AIC_TMR_EL02_VIRT);
|
||||
AIC_FIQ_HWIRQ(AIC_TMR_EL02_VIRT));
|
||||
}
|
||||
|
||||
if ((read_sysreg_s(SYS_IMP_APL_PMCR0_EL1) & (PMCR0_IMODE | PMCR0_IACT)) ==
|
||||
(FIELD_PREP(PMCR0_IMODE, PMCR0_IMODE_FIQ) | PMCR0_IACT)) {
|
||||
/*
|
||||
* Not supported yet, let's figure out how to handle this when
|
||||
* we implement these proprietary performance counters. For now,
|
||||
* just mask it and move on.
|
||||
*/
|
||||
pr_err_ratelimited("PMC FIQ fired. Masking.\n");
|
||||
sysreg_clear_set_s(SYS_IMP_APL_PMCR0_EL1, PMCR0_IMODE | PMCR0_IACT,
|
||||
FIELD_PREP(PMCR0_IMODE, PMCR0_IMODE_OFF));
|
||||
if (read_sysreg_s(SYS_IMP_APL_PMCR0_EL1) & PMCR0_IACT) {
|
||||
int irq;
|
||||
if (cpumask_test_cpu(smp_processor_id(),
|
||||
&aic_irqc->fiq_aff[AIC_CPU_PMU_P]->aff))
|
||||
irq = AIC_CPU_PMU_P;
|
||||
else
|
||||
irq = AIC_CPU_PMU_E;
|
||||
generic_handle_domain_irq(aic_irqc->hw_domain,
|
||||
AIC_FIQ_HWIRQ(irq));
|
||||
}
|
||||
|
||||
if (FIELD_GET(UPMCR0_IMODE, read_sysreg_s(SYS_IMP_APL_UPMCR0_EL1)) == UPMCR0_IMODE_FIQ &&
|
||||
@ -455,13 +597,29 @@ static int aic_irq_domain_map(struct irq_domain *id, unsigned int irq,
|
||||
irq_hw_number_t hw)
|
||||
{
|
||||
struct aic_irq_chip *ic = id->host_data;
|
||||
u32 type = FIELD_GET(AIC_EVENT_TYPE, hw);
|
||||
struct irq_chip *chip = &aic_chip;
|
||||
|
||||
if (hw < ic->nr_hw) {
|
||||
irq_domain_set_info(id, irq, hw, &aic_chip, id->host_data,
|
||||
if (ic->info.version == 2)
|
||||
chip = &aic2_chip;
|
||||
|
||||
if (type == AIC_EVENT_TYPE_IRQ) {
|
||||
irq_domain_set_info(id, irq, hw, chip, id->host_data,
|
||||
handle_fasteoi_irq, NULL, NULL);
|
||||
irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq)));
|
||||
} else {
|
||||
irq_set_percpu_devid(irq);
|
||||
int fiq = FIELD_GET(AIC_EVENT_NUM, hw);
|
||||
|
||||
switch (fiq) {
|
||||
case AIC_CPU_PMU_P:
|
||||
case AIC_CPU_PMU_E:
|
||||
irq_set_percpu_devid_partition(irq, &ic->fiq_aff[fiq]->aff);
|
||||
break;
|
||||
default:
|
||||
irq_set_percpu_devid(irq);
|
||||
break;
|
||||
}
|
||||
|
||||
irq_domain_set_info(id, irq, hw, &fiq_chip, id->host_data,
|
||||
handle_percpu_devid_irq, NULL, NULL);
|
||||
}
|
||||
@ -475,32 +633,46 @@ static int aic_irq_domain_translate(struct irq_domain *id,
|
||||
unsigned int *type)
|
||||
{
|
||||
struct aic_irq_chip *ic = id->host_data;
|
||||
u32 *args;
|
||||
u32 die = 0;
|
||||
|
||||
if (fwspec->param_count != 3 || !is_of_node(fwspec->fwnode))
|
||||
if (fwspec->param_count < 3 || fwspec->param_count > 4 ||
|
||||
!is_of_node(fwspec->fwnode))
|
||||
return -EINVAL;
|
||||
|
||||
args = &fwspec->param[1];
|
||||
|
||||
if (fwspec->param_count == 4) {
|
||||
die = args[0];
|
||||
args++;
|
||||
}
|
||||
|
||||
switch (fwspec->param[0]) {
|
||||
case AIC_IRQ:
|
||||
if (fwspec->param[1] >= ic->nr_hw)
|
||||
if (die >= ic->nr_die)
|
||||
return -EINVAL;
|
||||
*hwirq = fwspec->param[1];
|
||||
if (args[0] >= ic->nr_irq)
|
||||
return -EINVAL;
|
||||
*hwirq = AIC_IRQ_HWIRQ(die, args[0]);
|
||||
break;
|
||||
case AIC_FIQ:
|
||||
if (fwspec->param[1] >= AIC_NR_FIQ)
|
||||
if (die != 0)
|
||||
return -EINVAL;
|
||||
*hwirq = ic->nr_hw + fwspec->param[1];
|
||||
if (args[0] >= AIC_NR_FIQ)
|
||||
return -EINVAL;
|
||||
*hwirq = AIC_FIQ_HWIRQ(args[0]);
|
||||
|
||||
/*
|
||||
* In EL1 the non-redirected registers are the guest's,
|
||||
* not EL2's, so remap the hwirqs to match.
|
||||
*/
|
||||
if (!is_kernel_in_hyp_mode()) {
|
||||
switch (fwspec->param[1]) {
|
||||
switch (args[0]) {
|
||||
case AIC_TMR_GUEST_PHYS:
|
||||
*hwirq = ic->nr_hw + AIC_TMR_EL0_PHYS;
|
||||
*hwirq = AIC_FIQ_HWIRQ(AIC_TMR_EL0_PHYS);
|
||||
break;
|
||||
case AIC_TMR_GUEST_VIRT:
|
||||
*hwirq = ic->nr_hw + AIC_TMR_EL0_VIRT;
|
||||
*hwirq = AIC_FIQ_HWIRQ(AIC_TMR_EL0_VIRT);
|
||||
break;
|
||||
case AIC_TMR_HV_PHYS:
|
||||
case AIC_TMR_HV_VIRT:
|
||||
@ -514,7 +686,7 @@ static int aic_irq_domain_translate(struct irq_domain *id,
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
|
||||
*type = args[1] & IRQ_TYPE_SENSE_MASK;
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -563,6 +735,22 @@ static const struct irq_domain_ops aic_irq_domain_ops = {
|
||||
* IPI irqchip
|
||||
*/
|
||||
|
||||
static void aic_ipi_send_fast(int cpu)
|
||||
{
|
||||
u64 mpidr = cpu_logical_map(cpu);
|
||||
u64 my_mpidr = read_cpuid_mpidr();
|
||||
u64 cluster = MPIDR_CLUSTER(mpidr);
|
||||
u64 idx = MPIDR_CPU(mpidr);
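	/*
	 * A target in the sender's own cluster can be reached through the
	 * local request register; other clusters need the global one.
	 */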
|
||||
|
||||
if (MPIDR_CLUSTER(my_mpidr) == cluster)
|
||||
write_sysreg_s(FIELD_PREP(IPI_RR_CPU, idx),
|
||||
SYS_IMP_APL_IPI_RR_LOCAL_EL1);
|
||||
else
|
||||
write_sysreg_s(FIELD_PREP(IPI_RR_CPU, idx) | FIELD_PREP(IPI_RR_CLUSTER, cluster),
|
||||
SYS_IMP_APL_IPI_RR_GLOBAL_EL1);
|
||||
isb();
|
||||
}
|
||||
|
||||
static void aic_ipi_mask(struct irq_data *d)
|
||||
{
|
||||
u32 irq_bit = BIT(irqd_to_hwirq(d));
|
||||
@ -588,8 +776,12 @@ static void aic_ipi_unmask(struct irq_data *d)
|
||||
* If a pending vIPI was unmasked, raise a HW IPI to ourselves.
|
||||
* No barriers needed here since this is a self-IPI.
|
||||
*/
|
||||
if (atomic_read(this_cpu_ptr(&aic_vipi_flag)) & irq_bit)
|
||||
aic_ic_write(ic, AIC_IPI_SEND, AIC_IPI_SEND_CPU(smp_processor_id()));
|
||||
if (atomic_read(this_cpu_ptr(&aic_vipi_flag)) & irq_bit) {
|
||||
if (static_branch_likely(&use_fast_ipi))
|
||||
aic_ipi_send_fast(smp_processor_id());
|
||||
else
|
||||
aic_ic_write(ic, AIC_IPI_SEND, AIC_IPI_SEND_CPU(smp_processor_id()));
|
||||
}
|
||||
}
|
||||
|
||||
static void aic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
|
||||
@ -617,8 +809,12 @@ static void aic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
|
||||
smp_mb__after_atomic();
|
||||
|
||||
if (!(pending & irq_bit) &&
|
||||
(atomic_read(per_cpu_ptr(&aic_vipi_enable, cpu)) & irq_bit))
|
||||
send |= AIC_IPI_SEND_CPU(cpu);
|
||||
(atomic_read(per_cpu_ptr(&aic_vipi_enable, cpu)) & irq_bit)) {
|
||||
if (static_branch_likely(&use_fast_ipi))
|
||||
aic_ipi_send_fast(cpu);
|
||||
else
|
||||
send |= AIC_IPI_SEND_CPU(cpu);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
@ -650,8 +846,16 @@ static void aic_handle_ipi(struct pt_regs *regs)
|
||||
/*
|
||||
* Ack the IPI. We need to order this after the AIC event read, but
|
||||
* that is enforced by normal MMIO ordering guarantees.
|
||||
*
|
||||
* For the Fast IPI case, this needs to be ordered before the vIPI
|
||||
* handling below, so we need to isb();
|
||||
*/
|
||||
aic_ic_write(aic_irqc, AIC_IPI_ACK, AIC_IPI_OTHER);
|
||||
if (static_branch_likely(&use_fast_ipi)) {
|
||||
write_sysreg_s(IPI_SR_PENDING, SYS_IMP_APL_IPI_SR_EL1);
|
||||
isb();
|
||||
} else {
|
||||
aic_ic_write(aic_irqc, AIC_IPI_ACK, AIC_IPI_OTHER);
|
||||
}
|
||||
|
||||
/*
|
||||
* The mask read does not need to be ordered. Only we can change
|
||||
@ -679,7 +883,8 @@ static void aic_handle_ipi(struct pt_regs *regs)
|
||||
* No ordering needed here; at worst this just changes the timing of
|
||||
* when the next IPI will be delivered.
|
||||
*/
|
||||
aic_ic_write(aic_irqc, AIC_IPI_MASK_CLR, AIC_IPI_OTHER);
|
||||
if (!static_branch_likely(&use_fast_ipi))
|
||||
aic_ic_write(aic_irqc, AIC_IPI_MASK_CLR, AIC_IPI_OTHER);
|
||||
}
|
||||
|
||||
static int aic_ipi_alloc(struct irq_domain *d, unsigned int virq,
|
||||
@ -766,20 +971,27 @@ static int aic_init_cpu(unsigned int cpu)
|
||||
/* Commit all of the above */
|
||||
isb();
|
||||
|
||||
/*
|
||||
* Make sure the kernel's idea of logical CPU order is the same as AIC's
|
||||
* If we ever end up with a mismatch here, we will have to introduce
|
||||
* a mapping table similar to what other irqchip drivers do.
|
||||
*/
|
||||
WARN_ON(aic_ic_read(aic_irqc, AIC_WHOAMI) != smp_processor_id());
|
||||
if (aic_irqc->info.version == 1) {
|
||||
/*
|
||||
* Make sure the kernel's idea of logical CPU order is the same as AIC's
|
||||
* If we ever end up with a mismatch here, we will have to introduce
|
||||
* a mapping table similar to what other irqchip drivers do.
|
||||
*/
|
||||
WARN_ON(aic_ic_read(aic_irqc, AIC_WHOAMI) != smp_processor_id());
|
||||
|
||||
/*
|
||||
* Always keep IPIs unmasked at the hardware level (except auto-masking
|
||||
* by AIC during processing). We manage masks at the vIPI level.
|
||||
*/
|
||||
aic_ic_write(aic_irqc, AIC_IPI_ACK, AIC_IPI_SELF | AIC_IPI_OTHER);
|
||||
aic_ic_write(aic_irqc, AIC_IPI_MASK_SET, AIC_IPI_SELF);
|
||||
aic_ic_write(aic_irqc, AIC_IPI_MASK_CLR, AIC_IPI_OTHER);
|
||||
/*
|
||||
* Always keep IPIs unmasked at the hardware level (except auto-masking
|
||||
* by AIC during processing). We manage masks at the vIPI level.
|
||||
* These registers only exist on AICv1, AICv2 always uses fast IPIs.
|
||||
*/
|
||||
aic_ic_write(aic_irqc, AIC_IPI_ACK, AIC_IPI_SELF | AIC_IPI_OTHER);
|
||||
if (static_branch_likely(&use_fast_ipi)) {
|
||||
aic_ic_write(aic_irqc, AIC_IPI_MASK_SET, AIC_IPI_SELF | AIC_IPI_OTHER);
|
||||
} else {
|
||||
aic_ic_write(aic_irqc, AIC_IPI_MASK_SET, AIC_IPI_SELF);
|
||||
aic_ic_write(aic_irqc, AIC_IPI_MASK_CLR, AIC_IPI_OTHER);
|
||||
}
|
||||
}
|
||||
|
||||
/* Initialize the local mask state */
|
||||
__this_cpu_write(aic_fiq_unmasked, 0);
|
||||
@ -793,68 +1005,193 @@ static struct gic_kvm_info vgic_info __initdata = {
|
||||
.no_hw_deactivation = true,
|
||||
};
|
||||
|
||||
static void build_fiq_affinity(struct aic_irq_chip *ic, struct device_node *aff)
|
||||
{
|
||||
int i, n;
|
||||
u32 fiq;
|
||||
|
||||
if (of_property_read_u32(aff, "apple,fiq-index", &fiq) ||
|
||||
WARN_ON(fiq >= AIC_NR_FIQ) || ic->fiq_aff[fiq])
|
||||
return;
|
||||
|
||||
n = of_property_count_elems_of_size(aff, "cpus", sizeof(u32));
|
||||
if (WARN_ON(n < 0))
|
||||
return;
|
||||
|
||||
ic->fiq_aff[fiq] = kzalloc(sizeof(*ic->fiq_aff[fiq]), GFP_KERNEL);
|
||||
if (!ic->fiq_aff[fiq])
|
||||
return;
|
||||
|
||||
for (i = 0; i < n; i++) {
|
||||
struct device_node *cpu_node;
|
||||
u32 cpu_phandle;
|
||||
int cpu;
|
||||
|
||||
if (of_property_read_u32_index(aff, "cpus", i, &cpu_phandle))
|
||||
continue;
|
||||
|
||||
cpu_node = of_find_node_by_phandle(cpu_phandle);
|
||||
if (WARN_ON(!cpu_node))
|
||||
continue;
|
||||
|
||||
cpu = of_cpu_node_to_id(cpu_node);
|
||||
if (WARN_ON(cpu < 0))
|
||||
continue;
|
||||
|
||||
cpumask_set_cpu(cpu, &ic->fiq_aff[fiq]->aff);
|
||||
}
|
||||
}
|
||||
|
||||
static int __init aic_of_ic_init(struct device_node *node, struct device_node *parent)
|
||||
{
|
||||
int i;
|
||||
int i, die;
|
||||
u32 off, start_off;
|
||||
void __iomem *regs;
|
||||
u32 info;
|
||||
struct aic_irq_chip *irqc;
|
||||
struct device_node *affs;
|
||||
const struct of_device_id *match;
|
||||
|
||||
regs = of_iomap(node, 0);
|
||||
if (WARN_ON(!regs))
|
||||
return -EIO;
|
||||
|
||||
irqc = kzalloc(sizeof(*irqc), GFP_KERNEL);
|
||||
if (!irqc)
|
||||
if (!irqc) {
|
||||
iounmap(regs);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
aic_irqc = irqc;
|
||||
irqc->base = regs;
|
||||
|
||||
info = aic_ic_read(irqc, AIC_INFO);
|
||||
irqc->nr_hw = FIELD_GET(AIC_INFO_NR_HW, info);
|
||||
match = of_match_node(aic_info_match, node);
|
||||
if (!match)
|
||||
goto err_unmap;
|
||||
|
||||
irqc->hw_domain = irq_domain_create_linear(of_node_to_fwnode(node),
|
||||
irqc->nr_hw + AIC_NR_FIQ,
|
||||
&aic_irq_domain_ops, irqc);
|
||||
if (WARN_ON(!irqc->hw_domain)) {
|
||||
iounmap(irqc->base);
|
||||
kfree(irqc);
|
||||
return -ENODEV;
|
||||
irqc->info = *(struct aic_info *)match->data;
|
||||
|
||||
aic_irqc = irqc;
|
||||
|
||||
switch (irqc->info.version) {
|
||||
case 1: {
|
||||
u32 info;
|
||||
|
||||
info = aic_ic_read(irqc, AIC_INFO);
|
||||
irqc->nr_irq = FIELD_GET(AIC_INFO_NR_IRQ, info);
|
||||
irqc->max_irq = AIC_MAX_IRQ;
|
||||
irqc->nr_die = irqc->max_die = 1;
|
||||
|
||||
off = start_off = irqc->info.target_cpu;
|
||||
off += sizeof(u32) * irqc->max_irq; /* TARGET_CPU */
|
||||
|
||||
irqc->event = irqc->base;
|
||||
|
||||
break;
|
||||
}
|
||||
case 2: {
|
||||
u32 info1, info3;
|
||||
|
||||
info1 = aic_ic_read(irqc, AIC2_INFO1);
|
||||
info3 = aic_ic_read(irqc, AIC2_INFO3);
|
||||
|
||||
irqc->nr_irq = FIELD_GET(AIC2_INFO1_NR_IRQ, info1);
|
||||
irqc->max_irq = FIELD_GET(AIC2_INFO3_MAX_IRQ, info3);
|
||||
irqc->nr_die = FIELD_GET(AIC2_INFO1_LAST_DIE, info1) + 1;
|
||||
irqc->max_die = FIELD_GET(AIC2_INFO3_MAX_DIE, info3);
|
||||
|
||||
off = start_off = irqc->info.irq_cfg;
|
||||
off += sizeof(u32) * irqc->max_irq; /* IRQ_CFG */
|
||||
|
||||
irqc->event = of_iomap(node, 1);
|
||||
if (WARN_ON(!irqc->event))
|
||||
goto err_unmap;
|
||||
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
irqc->info.sw_set = off;
|
||||
off += sizeof(u32) * (irqc->max_irq >> 5); /* SW_SET */
|
||||
irqc->info.sw_clr = off;
|
||||
off += sizeof(u32) * (irqc->max_irq >> 5); /* SW_CLR */
|
||||
irqc->info.mask_set = off;
|
||||
off += sizeof(u32) * (irqc->max_irq >> 5); /* MASK_SET */
|
||||
irqc->info.mask_clr = off;
|
||||
off += sizeof(u32) * (irqc->max_irq >> 5); /* MASK_CLR */
|
||||
off += sizeof(u32) * (irqc->max_irq >> 5); /* HW_STATE */
|
||||
|
||||
if (irqc->info.fast_ipi)
|
||||
static_branch_enable(&use_fast_ipi);
|
||||
else
|
||||
static_branch_disable(&use_fast_ipi);
|
||||
|
||||
irqc->info.die_stride = off - start_off;
|
||||
|
||||
irqc->hw_domain = irq_domain_create_tree(of_node_to_fwnode(node),
|
||||
&aic_irq_domain_ops, irqc);
|
||||
if (WARN_ON(!irqc->hw_domain))
|
||||
goto err_unmap;
|
||||
|
||||
irq_domain_update_bus_token(irqc->hw_domain, DOMAIN_BUS_WIRED);
|
||||
|
||||
if (aic_init_smp(irqc, node)) {
|
||||
irq_domain_remove(irqc->hw_domain);
|
||||
iounmap(irqc->base);
|
||||
kfree(irqc);
|
||||
return -ENODEV;
|
||||
if (aic_init_smp(irqc, node))
|
||||
goto err_remove_domain;
|
||||
|
||||
affs = of_get_child_by_name(node, "affinities");
|
||||
if (affs) {
|
||||
struct device_node *chld;
|
||||
|
||||
for_each_child_of_node(affs, chld)
|
||||
build_fiq_affinity(irqc, chld);
|
||||
}
|
||||
|
||||
set_handle_irq(aic_handle_irq);
|
||||
set_handle_fiq(aic_handle_fiq);
|
||||
|
||||
for (i = 0; i < BITS_TO_U32(irqc->nr_hw); i++)
|
||||
aic_ic_write(irqc, AIC_MASK_SET + i * 4, U32_MAX);
|
||||
for (i = 0; i < BITS_TO_U32(irqc->nr_hw); i++)
|
||||
aic_ic_write(irqc, AIC_SW_CLR + i * 4, U32_MAX);
|
||||
for (i = 0; i < irqc->nr_hw; i++)
|
||||
aic_ic_write(irqc, AIC_TARGET_CPU + i * 4, 1);
|
||||
off = 0;
|
||||
for (die = 0; die < irqc->nr_die; die++) {
|
||||
for (i = 0; i < BITS_TO_U32(irqc->nr_irq); i++)
|
||||
aic_ic_write(irqc, irqc->info.mask_set + off + i * 4, U32_MAX);
|
||||
for (i = 0; i < BITS_TO_U32(irqc->nr_irq); i++)
|
||||
aic_ic_write(irqc, irqc->info.sw_clr + off + i * 4, U32_MAX);
|
||||
if (irqc->info.target_cpu)
|
||||
for (i = 0; i < irqc->nr_irq; i++)
|
||||
aic_ic_write(irqc, irqc->info.target_cpu + off + i * 4, 1);
|
||||
off += irqc->info.die_stride;
|
||||
}
|
||||
|
||||
if (irqc->info.version == 2) {
|
||||
u32 config = aic_ic_read(irqc, AIC2_CONFIG);
|
||||
|
||||
config |= AIC2_CONFIG_ENABLE;
|
||||
aic_ic_write(irqc, AIC2_CONFIG, config);
|
||||
}
|
||||
|
||||
if (!is_kernel_in_hyp_mode())
|
||||
pr_info("Kernel running in EL1, mapping interrupts");
|
||||
|
||||
if (static_branch_likely(&use_fast_ipi))
|
||||
pr_info("Using Fast IPIs");
|
||||
|
||||
cpuhp_setup_state(CPUHP_AP_IRQ_APPLE_AIC_STARTING,
|
||||
"irqchip/apple-aic/ipi:starting",
|
||||
aic_init_cpu, NULL);
|
||||
|
||||
vgic_set_kvm_info(&vgic_info);
|
||||
|
||||
pr_info("Initialized with %d IRQs, %d FIQs, %d vIPIs\n",
|
||||
irqc->nr_hw, AIC_NR_FIQ, AIC_NR_SWIPI);
|
||||
pr_info("Initialized with %d/%d IRQs * %d/%d die(s), %d FIQs, %d vIPIs",
|
||||
irqc->nr_irq, irqc->max_irq, irqc->nr_die, irqc->max_die, AIC_NR_FIQ, AIC_NR_SWIPI);
|
||||
|
||||
return 0;
|
||||
|
||||
err_remove_domain:
|
||||
irq_domain_remove(irqc->hw_domain);
|
||||
err_unmap:
|
||||
if (irqc->event && irqc->event != irqc->base)
|
||||
iounmap(irqc->event);
|
||||
iounmap(irqc->base);
|
||||
kfree(irqc);
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
IRQCHIP_DECLARE(apple_m1_aic, "apple,aic", aic_of_ic_init);
|
||||
IRQCHIP_DECLARE(apple_aic, "apple,aic", aic_of_ic_init);
|
||||
IRQCHIP_DECLARE(apple_aic2, "apple,aic2", aic_of_ic_init);
|
||||
|
@ -11,7 +11,6 @@
|
||||
#include <linux/irq.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/irqchip.h>
|
||||
#include <linux/irqchip/versatile-fpga.h>
|
||||
#include <linux/irqdomain.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/of.h>
|
||||
|
@ -1211,7 +1211,7 @@ static void gic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
|
||||
* Ensure that stores to Normal memory are visible to the
|
||||
* other CPUs before issuing the IPI.
|
||||
*/
|
||||
wmb();
|
||||
dsb(ishst);
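	/*
	 * Note (added for clarity): dsb(ishst) should be enough here, as the
	 * SGI is generated by a system register write, so only stores to the
	 * Inner Shareable domain need to be ordered before it. This is the
	 * "tiny speedup for IPIs on GICv3" mentioned in the merge log.
	 */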
|
||||
|
||||
for_each_cpu(cpu, mask) {
|
||||
u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(cpu_logical_map(cpu));
|
||||
|
@ -34,6 +34,7 @@
|
||||
#include <linux/irqdomain.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/percpu.h>
|
||||
#include <linux/seq_file.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/irqchip.h>
|
||||
#include <linux/irqchip/chained_irq.h>
|
||||
@ -66,7 +67,6 @@ union gic_base {
|
||||
};
|
||||
|
||||
struct gic_chip_data {
|
||||
struct irq_chip chip;
|
||||
union gic_base dist_base;
|
||||
union gic_base cpu_base;
|
||||
void __iomem *raw_dist_base;
|
||||
@ -397,18 +397,15 @@ static void gic_handle_cascade_irq(struct irq_desc *desc)
|
||||
chained_irq_exit(chip, desc);
|
||||
}
|
||||
|
||||
static const struct irq_chip gic_chip = {
|
||||
.irq_mask = gic_mask_irq,
|
||||
.irq_unmask = gic_unmask_irq,
|
||||
.irq_eoi = gic_eoi_irq,
|
||||
.irq_set_type = gic_set_type,
|
||||
.irq_retrigger = gic_retrigger,
|
||||
.irq_get_irqchip_state = gic_irq_get_irqchip_state,
|
||||
.irq_set_irqchip_state = gic_irq_set_irqchip_state,
|
||||
.flags = IRQCHIP_SET_TYPE_MASKED |
|
||||
IRQCHIP_SKIP_SET_WAKE |
|
||||
IRQCHIP_MASK_ON_SUSPEND,
|
||||
};
|
||||
static void gic_irq_print_chip(struct irq_data *d, struct seq_file *p)
|
||||
{
|
||||
struct gic_chip_data *gic = irq_data_get_irq_chip_data(d);
|
||||
|
||||
if (gic->domain->dev)
|
||||
seq_printf(p, gic->domain->dev->of_node->name);
|
||||
else
|
||||
seq_printf(p, "GIC-%d", (int)(gic - &gic_data[0]));
|
||||
}
|
||||
|
||||
void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
|
||||
{
|
||||
@ -799,8 +796,12 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
|
||||
bool force)
|
||||
{
|
||||
void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + gic_irq(d);
|
||||
struct gic_chip_data *gic = irq_data_get_irq_chip_data(d);
|
||||
unsigned int cpu;
|
||||
|
||||
if (unlikely(gic != &gic_data[0]))
|
||||
return -EINVAL;
|
||||
|
||||
if (!force)
|
||||
cpu = cpumask_any_and(mask_val, cpu_online_mask);
|
||||
else
|
||||
@ -880,6 +881,39 @@ static __init void gic_smp_init(void)
|
||||
#define gic_ipi_send_mask NULL
|
||||
#endif
|
||||
|
||||
static const struct irq_chip gic_chip = {
|
||||
.irq_mask = gic_mask_irq,
|
||||
.irq_unmask = gic_unmask_irq,
|
||||
.irq_eoi = gic_eoi_irq,
|
||||
.irq_set_type = gic_set_type,
|
||||
.irq_retrigger = gic_retrigger,
|
||||
.irq_set_affinity = gic_set_affinity,
|
||||
.ipi_send_mask = gic_ipi_send_mask,
|
||||
.irq_get_irqchip_state = gic_irq_get_irqchip_state,
|
||||
.irq_set_irqchip_state = gic_irq_set_irqchip_state,
|
||||
.irq_print_chip = gic_irq_print_chip,
|
||||
.flags = IRQCHIP_SET_TYPE_MASKED |
|
||||
IRQCHIP_SKIP_SET_WAKE |
|
||||
IRQCHIP_MASK_ON_SUSPEND,
|
||||
};
|
||||
|
||||
static const struct irq_chip gic_chip_mode1 = {
|
||||
.name = "GICv2",
|
||||
.irq_mask = gic_eoimode1_mask_irq,
|
||||
.irq_unmask = gic_unmask_irq,
|
||||
.irq_eoi = gic_eoimode1_eoi_irq,
|
||||
.irq_set_type = gic_set_type,
|
||||
.irq_retrigger = gic_retrigger,
|
||||
.irq_set_affinity = gic_set_affinity,
|
||||
.ipi_send_mask = gic_ipi_send_mask,
|
||||
.irq_get_irqchip_state = gic_irq_get_irqchip_state,
|
||||
.irq_set_irqchip_state = gic_irq_set_irqchip_state,
|
||||
.irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity,
|
||||
.flags = IRQCHIP_SET_TYPE_MASKED |
|
||||
IRQCHIP_SKIP_SET_WAKE |
|
||||
IRQCHIP_MASK_ON_SUSPEND,
|
||||
};
|
||||
|
||||
#ifdef CONFIG_BL_SWITCHER
|
||||
/*
|
||||
* gic_send_sgi - send a SGI directly to given CPU interface number
|
||||
@ -1024,15 +1058,19 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
|
||||
{
|
||||
struct gic_chip_data *gic = d->host_data;
|
||||
struct irq_data *irqd = irq_desc_get_irq_data(irq_to_desc(irq));
|
||||
const struct irq_chip *chip;
|
||||
|
||||
chip = (static_branch_likely(&supports_deactivate_key) &&
|
||||
gic == &gic_data[0]) ? &gic_chip_mode1 : &gic_chip;
|
||||
|
||||
switch (hw) {
|
||||
case 0 ... 31:
|
||||
irq_set_percpu_devid(irq);
|
||||
irq_domain_set_info(d, irq, hw, &gic->chip, d->host_data,
|
||||
irq_domain_set_info(d, irq, hw, chip, d->host_data,
|
||||
handle_percpu_devid_irq, NULL, NULL);
|
||||
break;
|
||||
default:
|
||||
irq_domain_set_info(d, irq, hw, &gic->chip, d->host_data,
|
||||
irq_domain_set_info(d, irq, hw, chip, d->host_data,
|
||||
handle_fasteoi_irq, NULL, NULL);
|
||||
irq_set_probe(irq);
|
||||
irqd_set_single_target(irqd);
|
||||
@ -1127,26 +1165,6 @@ static const struct irq_domain_ops gic_irq_domain_ops = {
|
||||
.unmap = gic_irq_domain_unmap,
|
||||
};
|
||||
|
||||
static void gic_init_chip(struct gic_chip_data *gic, struct device *dev,
|
||||
const char *name, bool use_eoimode1)
|
||||
{
|
||||
/* Initialize irq_chip */
|
||||
gic->chip = gic_chip;
|
||||
gic->chip.name = name;
|
||||
gic->chip.parent_device = dev;
|
||||
|
||||
if (use_eoimode1) {
|
||||
gic->chip.irq_mask = gic_eoimode1_mask_irq;
|
||||
gic->chip.irq_eoi = gic_eoimode1_eoi_irq;
|
||||
gic->chip.irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity;
|
||||
}
|
||||
|
||||
if (gic == &gic_data[0]) {
|
||||
gic->chip.irq_set_affinity = gic_set_affinity;
|
||||
gic->chip.ipi_send_mask = gic_ipi_send_mask;
|
||||
}
|
||||
}
|
||||
|
||||
static int gic_init_bases(struct gic_chip_data *gic,
|
||||
struct fwnode_handle *handle)
|
||||
{
|
||||
@ -1246,7 +1264,6 @@ error:
|
||||
static int __init __gic_init_bases(struct gic_chip_data *gic,
|
||||
struct fwnode_handle *handle)
|
||||
{
|
||||
char *name;
|
||||
int i, ret;
|
||||
|
||||
if (WARN_ON(!gic || gic->domain))
|
||||
@ -1266,18 +1283,8 @@ static int __init __gic_init_bases(struct gic_chip_data *gic,
|
||||
pr_info("GIC: Using split EOI/Deactivate mode\n");
|
||||
}
|
||||
|
||||
if (static_branch_likely(&supports_deactivate_key) && gic == &gic_data[0]) {
|
||||
name = kasprintf(GFP_KERNEL, "GICv2");
|
||||
gic_init_chip(gic, NULL, name, true);
|
||||
} else {
|
||||
name = kasprintf(GFP_KERNEL, "GIC-%d", (int)(gic-&gic_data[0]));
|
||||
gic_init_chip(gic, NULL, name, false);
|
||||
}
|
||||
|
||||
ret = gic_init_bases(gic, handle);
|
||||
if (ret)
|
||||
kfree(name);
|
||||
else if (gic == &gic_data[0])
|
||||
if (gic == &gic_data[0])
|
||||
gic_smp_init();
|
||||
|
||||
return ret;
|
||||
@ -1460,8 +1467,6 @@ int gic_of_init_child(struct device *dev, struct gic_chip_data **gic, int irq)
|
||||
if (!*gic)
|
||||
return -ENOMEM;
|
||||
|
||||
gic_init_chip(*gic, dev, dev->of_node->name, false);
|
||||
|
||||
ret = gic_of_setup(*gic, dev->of_node);
|
||||
if (ret)
|
||||
return ret;
|
||||
@ -1472,6 +1477,7 @@ int gic_of_init_child(struct device *dev, struct gic_chip_data **gic, int irq)
|
||||
return ret;
|
||||
}
|
||||
|
||||
irq_domain_set_pm_device((*gic)->domain, dev);
|
||||
irq_set_chained_handler_and_data(irq, gic_handle_cascade_irq, *gic);
|
||||
|
||||
return 0;
|
||||
|
@ -61,7 +61,6 @@
|
||||
#define CHAN_MAX_NUM 0x8
|
||||
|
||||
struct intmux_irqchip_data {
|
||||
struct irq_chip chip;
|
||||
u32 saved_reg;
|
||||
int chanidx;
|
||||
int irq;
|
||||
@ -114,7 +113,7 @@ static void imx_intmux_irq_unmask(struct irq_data *d)
|
||||
raw_spin_unlock_irqrestore(&data->lock, flags);
|
||||
}
|
||||
|
||||
static struct irq_chip imx_intmux_irq_chip = {
|
||||
static struct irq_chip imx_intmux_irq_chip __ro_after_init = {
|
||||
.name = "intmux",
|
||||
.irq_mask = imx_intmux_irq_mask,
|
||||
.irq_unmask = imx_intmux_irq_unmask,
|
||||
@ -126,7 +125,7 @@ static int imx_intmux_irq_map(struct irq_domain *h, unsigned int irq,
|
||||
struct intmux_irqchip_data *data = h->host_data;
|
||||
|
||||
irq_set_chip_data(irq, data);
|
||||
irq_set_chip_and_handler(irq, &data->chip, handle_level_irq);
|
||||
irq_set_chip_and_handler(irq, &imx_intmux_irq_chip, handle_level_irq);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -241,8 +240,6 @@ static int imx_intmux_probe(struct platform_device *pdev)
|
||||
}
|
||||
|
||||
for (i = 0; i < channum; i++) {
|
||||
data->irqchip_data[i].chip = imx_intmux_irq_chip;
|
||||
data->irqchip_data[i].chip.parent_device = &pdev->dev;
|
||||
data->irqchip_data[i].chanidx = i;
|
||||
|
||||
data->irqchip_data[i].irq = irq_of_parse_and_map(np, i);
|
||||
@ -260,6 +257,7 @@ static int imx_intmux_probe(struct platform_device *pdev)
|
||||
goto out;
|
||||
}
|
||||
data->irqchip_data[i].domain = domain;
|
||||
irq_domain_set_pm_device(domain, &pdev->dev);
|
||||
|
||||
/* disable all interrupt sources of this channel firstly */
|
||||
writel_relaxed(0, data->regs + CHANIER(i));
|
||||
|
@ -11,6 +11,7 @@
|
||||
#include <linux/of_address.h>
|
||||
#include <linux/of_irq.h>
|
||||
#include <linux/of_platform.h>
|
||||
#include <linux/seq_file.h>
|
||||
#include <linux/slab.h>
|
||||
#include <asm/exception.h>
|
||||
|
||||
@ -25,8 +26,8 @@
|
||||
|
||||
struct lpc32xx_irq_chip {
|
||||
void __iomem *base;
|
||||
phys_addr_t addr;
|
||||
struct irq_domain *domain;
|
||||
struct irq_chip chip;
|
||||
};
|
||||
|
||||
static struct lpc32xx_irq_chip *lpc32xx_mic_irqc;
|
||||
@ -118,6 +119,24 @@ static int lpc32xx_irq_set_type(struct irq_data *d, unsigned int type)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void lpc32xx_irq_print_chip(struct irq_data *d, struct seq_file *p)
|
||||
{
|
||||
struct lpc32xx_irq_chip *ic = irq_data_get_irq_chip_data(d);
|
||||
|
||||
if (ic == lpc32xx_mic_irqc)
|
||||
seq_printf(p, "%08x.mic", ic->addr);
|
||||
else
|
||||
seq_printf(p, "%08x.sic", ic->addr);
|
||||
}
|
||||
|
||||
static const struct irq_chip lpc32xx_chip = {
|
||||
.irq_ack = lpc32xx_irq_ack,
|
||||
.irq_mask = lpc32xx_irq_mask,
|
||||
.irq_unmask = lpc32xx_irq_unmask,
|
||||
.irq_set_type = lpc32xx_irq_set_type,
|
||||
.irq_print_chip = lpc32xx_irq_print_chip,
|
||||
};
|
||||
|
||||
static void __exception_irq_entry lpc32xx_handle_irq(struct pt_regs *regs)
|
||||
{
|
||||
struct lpc32xx_irq_chip *ic = lpc32xx_mic_irqc;
|
||||
@ -153,7 +172,7 @@ static int lpc32xx_irq_domain_map(struct irq_domain *id, unsigned int virq,
|
||||
struct lpc32xx_irq_chip *ic = id->host_data;
|
||||
|
||||
irq_set_chip_data(virq, ic);
|
||||
irq_set_chip_and_handler(virq, &ic->chip, handle_level_irq);
|
||||
irq_set_chip_and_handler(virq, &lpc32xx_chip, handle_level_irq);
|
||||
irq_set_status_flags(virq, IRQ_LEVEL);
|
||||
irq_set_noprobe(virq);
|
||||
|
||||
@ -183,6 +202,7 @@ static int __init lpc32xx_of_ic_init(struct device_node *node,
|
||||
if (!irqc)
|
||||
return -ENOMEM;
|
||||
|
||||
irqc->addr = addr;
|
||||
irqc->base = of_iomap(node, 0);
|
||||
if (!irqc->base) {
|
||||
pr_err("%pOF: unable to map registers\n", node);
|
||||
@ -190,21 +210,11 @@ static int __init lpc32xx_of_ic_init(struct device_node *node,
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
irqc->chip.irq_ack = lpc32xx_irq_ack;
|
||||
irqc->chip.irq_mask = lpc32xx_irq_mask;
|
||||
irqc->chip.irq_unmask = lpc32xx_irq_unmask;
|
||||
irqc->chip.irq_set_type = lpc32xx_irq_set_type;
|
||||
if (is_mic)
|
||||
irqc->chip.name = kasprintf(GFP_KERNEL, "%08x.mic", addr);
|
||||
else
|
||||
irqc->chip.name = kasprintf(GFP_KERNEL, "%08x.sic", addr);
|
||||
|
||||
irqc->domain = irq_domain_add_linear(node, NR_LPC32XX_IC_IRQS,
|
||||
&lpc32xx_irq_domain_ops, irqc);
|
||||
if (!irqc->domain) {
|
||||
pr_err("unable to add irq domain\n");
|
||||
iounmap(irqc->base);
|
||||
kfree(irqc->chip.name);
|
||||
kfree(irqc);
|
||||
return -ENODEV;
|
||||
}
|
||||
|
@ -16,7 +16,7 @@
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_address.h>
|
||||
|
||||
#define NUM_CHANNEL 8
|
||||
#define MAX_NUM_CHANNEL 64
|
||||
#define MAX_INPUT_MUX 256
|
||||
|
||||
#define REG_EDGE_POL 0x00
|
||||
@ -26,6 +26,8 @@
|
||||
|
||||
/* use for A1 like chips */
|
||||
#define REG_PIN_A1_SEL 0x04
|
||||
/* Used for s4 chips */
|
||||
#define REG_EDGE_POL_S4 0x1c
|
||||
|
||||
/*
|
||||
* Note: The S905X3 datasheet reports that BOTH_EDGE is controlled by
|
||||
@ -51,15 +53,22 @@ static void meson_a1_gpio_irq_sel_pin(struct meson_gpio_irq_controller *ctl,
|
||||
unsigned int channel,
|
||||
unsigned long hwirq);
|
||||
static void meson_a1_gpio_irq_init(struct meson_gpio_irq_controller *ctl);
|
||||
static int meson8_gpio_irq_set_type(struct meson_gpio_irq_controller *ctl,
|
||||
unsigned int type, u32 *channel_hwirq);
|
||||
static int meson_s4_gpio_irq_set_type(struct meson_gpio_irq_controller *ctl,
|
||||
unsigned int type, u32 *channel_hwirq);
|
||||
|
||||
struct irq_ctl_ops {
|
||||
void (*gpio_irq_sel_pin)(struct meson_gpio_irq_controller *ctl,
|
||||
unsigned int channel, unsigned long hwirq);
|
||||
void (*gpio_irq_init)(struct meson_gpio_irq_controller *ctl);
|
||||
int (*gpio_irq_set_type)(struct meson_gpio_irq_controller *ctl,
|
||||
unsigned int type, u32 *channel_hwirq);
|
||||
};
|
||||
|
||||
struct meson_gpio_irq_params {
|
||||
unsigned int nr_hwirq;
|
||||
unsigned int nr_channels;
|
||||
bool support_edge_both;
|
||||
unsigned int edge_both_offset;
|
||||
unsigned int edge_single_offset;
|
||||
@ -68,28 +77,44 @@ struct meson_gpio_irq_params {
|
||||
struct irq_ctl_ops ops;
|
||||
};
|
||||
|
||||
#define INIT_MESON_COMMON(irqs, init, sel) \
|
||||
#define INIT_MESON_COMMON(irqs, init, sel, type) \
|
||||
.nr_hwirq = irqs, \
|
||||
.ops = { \
|
||||
.gpio_irq_init = init, \
|
||||
.gpio_irq_sel_pin = sel, \
|
||||
.gpio_irq_set_type = type, \
|
||||
},
|
||||
|
||||
#define INIT_MESON8_COMMON_DATA(irqs) \
|
||||
INIT_MESON_COMMON(irqs, meson_gpio_irq_init_dummy, \
|
||||
meson8_gpio_irq_sel_pin) \
|
||||
meson8_gpio_irq_sel_pin, \
|
||||
meson8_gpio_irq_set_type) \
|
||||
.edge_single_offset = 0, \
|
||||
.pol_low_offset = 16, \
|
||||
.pin_sel_mask = 0xff, \
|
||||
.nr_channels = 8, \
|
||||
|
||||
#define INIT_MESON_A1_COMMON_DATA(irqs) \
|
||||
INIT_MESON_COMMON(irqs, meson_a1_gpio_irq_init, \
|
||||
meson_a1_gpio_irq_sel_pin) \
|
||||
meson_a1_gpio_irq_sel_pin, \
|
||||
meson8_gpio_irq_set_type) \
|
||||
.support_edge_both = true, \
|
||||
.edge_both_offset = 16, \
|
||||
.edge_single_offset = 8, \
|
||||
.pol_low_offset = 0, \
|
||||
.pin_sel_mask = 0x7f, \
|
||||
.nr_channels = 8, \
|
||||
|
||||
#define INIT_MESON_S4_COMMON_DATA(irqs) \
|
||||
INIT_MESON_COMMON(irqs, meson_a1_gpio_irq_init, \
|
||||
meson_a1_gpio_irq_sel_pin, \
|
||||
meson_s4_gpio_irq_set_type) \
|
||||
.support_edge_both = true, \
|
||||
.edge_both_offset = 0, \
|
||||
.edge_single_offset = 12, \
|
||||
.pol_low_offset = 0, \
|
||||
.pin_sel_mask = 0xff, \
|
||||
.nr_channels = 12, \
|
||||
|
||||
static const struct meson_gpio_irq_params meson8_params = {
|
||||
INIT_MESON8_COMMON_DATA(134)
|
||||
@ -121,6 +146,10 @@ static const struct meson_gpio_irq_params a1_params = {
|
||||
INIT_MESON_A1_COMMON_DATA(62)
|
||||
};
|
||||
|
||||
static const struct meson_gpio_irq_params s4_params = {
|
||||
INIT_MESON_S4_COMMON_DATA(82)
|
||||
};
|
||||
|
||||
static const struct of_device_id meson_irq_gpio_matches[] = {
|
||||
{ .compatible = "amlogic,meson8-gpio-intc", .data = &meson8_params },
|
||||
{ .compatible = "amlogic,meson8b-gpio-intc", .data = &meson8b_params },
|
||||
@ -130,14 +159,15 @@ static const struct of_device_id meson_irq_gpio_matches[] = {
|
||||
{ .compatible = "amlogic,meson-g12a-gpio-intc", .data = &axg_params },
|
||||
{ .compatible = "amlogic,meson-sm1-gpio-intc", .data = &sm1_params },
|
||||
{ .compatible = "amlogic,meson-a1-gpio-intc", .data = &a1_params },
|
||||
{ .compatible = "amlogic,meson-s4-gpio-intc", .data = &s4_params },
|
||||
{ }
|
||||
};
|
||||
|
||||
struct meson_gpio_irq_controller {
|
||||
const struct meson_gpio_irq_params *params;
|
||||
void __iomem *base;
|
||||
u32 channel_irqs[NUM_CHANNEL];
|
||||
DECLARE_BITMAP(channel_map, NUM_CHANNEL);
|
||||
u32 channel_irqs[MAX_NUM_CHANNEL];
|
||||
DECLARE_BITMAP(channel_map, MAX_NUM_CHANNEL);
|
||||
spinlock_t lock;
|
||||
};
|
||||
|
||||
@ -207,8 +237,8 @@ meson_gpio_irq_request_channel(struct meson_gpio_irq_controller *ctl,
|
||||
spin_lock_irqsave(&ctl->lock, flags);
|
||||
|
||||
/* Find a free channel */
|
||||
idx = find_first_zero_bit(ctl->channel_map, NUM_CHANNEL);
|
||||
if (idx >= NUM_CHANNEL) {
|
||||
idx = find_first_zero_bit(ctl->channel_map, ctl->params->nr_channels);
|
||||
if (idx >= ctl->params->nr_channels) {
|
||||
spin_unlock_irqrestore(&ctl->lock, flags);
|
||||
pr_err("No channel available\n");
|
||||
return -ENOSPC;
|
||||
@ -256,9 +286,8 @@ meson_gpio_irq_release_channel(struct meson_gpio_irq_controller *ctl,
|
||||
clear_bit(idx, ctl->channel_map);
|
||||
}
|
||||
|
||||
static int meson_gpio_irq_type_setup(struct meson_gpio_irq_controller *ctl,
|
||||
unsigned int type,
|
||||
u32 *channel_hwirq)
|
||||
static int meson8_gpio_irq_set_type(struct meson_gpio_irq_controller *ctl,
|
||||
unsigned int type, u32 *channel_hwirq)
|
||||
{
|
||||
u32 val = 0;
|
||||
unsigned int idx;
|
||||
@ -299,6 +328,51 @@ static int meson_gpio_irq_type_setup(struct meson_gpio_irq_controller *ctl,
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
 * gpio irq relative registers for s4
 * -PADCTRL_GPIO_IRQ_CTRL0
 * bit[31]: enable/disable all the irq lines
 * bit[12-23]: single edge trigger
 * bit[0-11]: polarity trigger
 *
 * -PADCTRL_GPIO_IRQ_CTRL[X]
 * bit[0-16]: 7 bits to choose gpio source for irq line 2*[X] - 2
 * bit[16-22]: 7 bits to choose gpio source for irq line 2*[X] - 1
 * where X = 1-6
 *
 * -PADCTRL_GPIO_IRQ_CTRL[7]
 * bit[0-11]: both edge trigger
 */
|
||||
static int meson_s4_gpio_irq_set_type(struct meson_gpio_irq_controller *ctl,
|
||||
unsigned int type, u32 *channel_hwirq)
|
||||
{
|
||||
u32 val = 0;
|
||||
unsigned int idx;
|
||||
|
||||
idx = meson_gpio_irq_get_channel_idx(ctl, channel_hwirq);
|
||||
|
||||
type &= IRQ_TYPE_SENSE_MASK;
|
||||
|
||||
meson_gpio_irq_update_bits(ctl, REG_EDGE_POL_S4, BIT(idx), 0);
|
||||
|
||||
if (type == IRQ_TYPE_EDGE_BOTH) {
|
||||
val |= BIT(ctl->params->edge_both_offset + idx);
|
||||
meson_gpio_irq_update_bits(ctl, REG_EDGE_POL_S4,
|
||||
BIT(ctl->params->edge_both_offset + idx), val);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING))
|
||||
val |= BIT(ctl->params->pol_low_offset + idx);
|
||||
|
||||
if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
|
||||
val |= BIT(ctl->params->edge_single_offset + idx);
|
||||
|
||||
meson_gpio_irq_update_bits(ctl, REG_EDGE_POL,
|
||||
BIT(idx) | BIT(12 + idx), val);
|
||||
return 0;
|
||||
};
|
||||
|
||||
static unsigned int meson_gpio_irq_type_output(unsigned int type)
|
||||
{
|
||||
unsigned int sense = type & IRQ_TYPE_SENSE_MASK;
|
||||
@ -323,7 +397,7 @@ static int meson_gpio_irq_set_type(struct irq_data *data, unsigned int type)
|
||||
u32 *channel_hwirq = irq_data_get_irq_chip_data(data);
|
||||
int ret;
|
||||
|
||||
ret = meson_gpio_irq_type_setup(ctl, type, channel_hwirq);
|
||||
ret = ctl->params->ops.gpio_irq_set_type(ctl, type, channel_hwirq);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
@ -450,10 +524,10 @@ static int meson_gpio_irq_parse_dt(struct device_node *node, struct meson_gpio_i
|
||||
ret = of_property_read_variable_u32_array(node,
|
||||
"amlogic,channel-interrupts",
|
||||
ctl->channel_irqs,
|
||||
NUM_CHANNEL,
|
||||
NUM_CHANNEL);
|
||||
ctl->params->nr_channels,
|
||||
ctl->params->nr_channels);
|
||||
if (ret < 0) {
|
||||
pr_err("can't get %d channel interrupts\n", NUM_CHANNEL);
|
||||
pr_err("can't get %d channel interrupts\n", ctl->params->nr_channels);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -507,7 +581,7 @@ static int meson_gpio_irq_of_init(struct device_node *node, struct device_node *
|
||||
}
|
||||
|
||||
pr_info("%d to %d gpio interrupt mux initialized\n",
|
||||
ctl->params->nr_hwirq, NUM_CHANNEL);
|
||||
ctl->params->nr_hwirq, ctl->params->nr_channels);
|
||||
|
||||
return 0;
|
||||
|
||||
|
@ -18,6 +18,7 @@
|
||||
#include <linux/module.h>
|
||||
#include <linux/of_irq.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/seq_file.h>
|
||||
|
||||
#define PIC_CAUSE 0x0
|
||||
#define PIC_MASK 0x4
|
||||
@ -29,7 +30,7 @@ struct mvebu_pic {
|
||||
void __iomem *base;
|
||||
u32 parent_irq;
|
||||
struct irq_domain *domain;
|
||||
struct irq_chip irq_chip;
|
||||
struct platform_device *pdev;
|
||||
};
|
||||
|
||||
static void mvebu_pic_reset(struct mvebu_pic *pic)
|
||||
@@ -66,6 +67,20 @@ static void mvebu_pic_unmask_irq(struct irq_data *d)
writel(reg, pic->base + PIC_MASK);
}

static void mvebu_pic_print_chip(struct irq_data *d, struct seq_file *p)
{
struct mvebu_pic *pic = irq_data_get_irq_chip_data(d);

seq_printf(p, dev_name(&pic->pdev->dev));
}

static const struct irq_chip mvebu_pic_chip = {
.irq_mask = mvebu_pic_mask_irq,
.irq_unmask = mvebu_pic_unmask_irq,
.irq_eoi = mvebu_pic_eoi_irq,
.irq_print_chip = mvebu_pic_print_chip,
};
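For context on why the dynamic per-instance name can go away here and in the other drivers converted in this series: the /proc/interrupts code prefers the ->irq_print_chip callback over the static ->name. The sketch below is a rough paraphrase of that consumer side, not code taken from this patch set.

/* Rough sketch of the consumer side in kernel/irq/proc.c (paraphrased). */
static void show_chip_name(struct seq_file *p, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	if (chip->irq_print_chip)
		chip->irq_print_chip(&desc->irq_data, p);	/* e.g. "f2400000.pic" */
	else
		seq_printf(p, " %8s", chip->name ? : "-");
}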
static int mvebu_pic_irq_map(struct irq_domain *domain, unsigned int virq,
|
||||
irq_hw_number_t hwirq)
|
||||
{
|
||||
@ -73,8 +88,7 @@ static int mvebu_pic_irq_map(struct irq_domain *domain, unsigned int virq,
|
||||
|
||||
irq_set_percpu_devid(virq);
|
||||
irq_set_chip_data(virq, pic);
|
||||
irq_set_chip_and_handler(virq, &pic->irq_chip,
|
||||
handle_percpu_devid_irq);
|
||||
irq_set_chip_and_handler(virq, &mvebu_pic_chip, handle_percpu_devid_irq);
|
||||
irq_set_status_flags(virq, IRQ_LEVEL);
|
||||
irq_set_probe(virq);
|
||||
|
||||
@ -120,22 +134,16 @@ static int mvebu_pic_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct device_node *node = pdev->dev.of_node;
|
||||
struct mvebu_pic *pic;
|
||||
struct irq_chip *irq_chip;
|
||||
|
||||
pic = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_pic), GFP_KERNEL);
|
||||
if (!pic)
|
||||
return -ENOMEM;
|
||||
|
||||
pic->pdev = pdev;
|
||||
pic->base = devm_platform_ioremap_resource(pdev, 0);
|
||||
if (IS_ERR(pic->base))
|
||||
return PTR_ERR(pic->base);
|
||||
|
||||
irq_chip = &pic->irq_chip;
|
||||
irq_chip->name = dev_name(&pdev->dev);
|
||||
irq_chip->irq_mask = mvebu_pic_mask_irq;
|
||||
irq_chip->irq_unmask = mvebu_pic_unmask_irq;
|
||||
irq_chip->irq_eoi = mvebu_pic_eoi_irq;
|
||||
|
||||
pic->parent_irq = irq_of_parse_and_map(node, 0);
|
||||
if (pic->parent_irq <= 0) {
|
||||
dev_err(&pdev->dev, "Failed to parse parent interrupt\n");
|
||||
|
@@ -107,6 +107,7 @@ static int __init nvic_of_init(struct device_node *node,

if (!nvic_irq_domain) {
pr_warn("Failed to allocate irq domain\n");
iounmap(nvic_base);
return -ENOMEM;
}

@@ -116,6 +117,7 @@ static int __init nvic_of_init(struct device_node *node,
if (ret) {
pr_warn("Failed to allocate irq chips\n");
irq_domain_remove(nvic_irq_domain);
iounmap(nvic_base);
return ret;
}

461
drivers/irqchip/irq-qcom-mpm.c
Normal file
@@ -0,0 +1,461 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* Copyright (c) 2021, Linaro Limited
|
||||
* Copyright (c) 2010-2020, The Linux Foundation. All rights reserved.
|
||||
*/
|
||||
|
||||
#include <linux/delay.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/irqchip.h>
|
||||
#include <linux/irqdomain.h>
|
||||
#include <linux/mailbox_client.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_device.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/pm_domain.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/soc/qcom/irq.h>
|
||||
#include <linux/spinlock.h>
|
||||
|
||||
/*
 * This is the driver for Qualcomm MPM (MSM Power Manager) interrupt controller,
 * which is commonly found on Qualcomm SoCs built on the RPM architecture.
 * Sitting in always-on domain, MPM monitors the wakeup interrupts when SoC is
 * asleep, and wakes up the AP when one of those interrupts occurs. This driver
 * doesn't directly access physical MPM registers though. Instead, the access
 * is bridged via a piece of internal memory (SRAM) that is accessible to both
 * AP and RPM. This piece of memory is called 'vMPM' in the driver.
 *
 * When SoC is awake, the vMPM is owned by AP and the register setup by this
 * driver all happens on vMPM. When AP is about to get power collapsed, the
 * driver sends a mailbox notification to RPM, which will take over the vMPM
 * ownership and dump vMPM into physical MPM registers. On wakeup, AP is woken
 * up by a MPM pin/interrupt, and RPM will copy STATUS registers into vMPM.
 * Then AP start owning vMPM again.
 *
 * vMPM register map:
 *
 *    31                              0
 *    +--------------------------------+
 *    |            TIMER0              | 0x00
 *    +--------------------------------+
 *    |            TIMER1              | 0x04
 *    +--------------------------------+
 *    |           ENABLE0              | 0x08
 *    +--------------------------------+
 *    |              ...               | ...
 *    +--------------------------------+
 *    |           ENABLEn              |
 *    +--------------------------------+
 *    |         FALLING_EDGE0          |
 *    +--------------------------------+
 *    |              ...               |
 *    +--------------------------------+
 *    |           STATUSn              |
 *    +--------------------------------+
 *
 *    n = DIV_ROUND_UP(pin_cnt, 32)
 *
 */

#define MPM_REG_ENABLE 0
#define MPM_REG_FALLING_EDGE 1
#define MPM_REG_RISING_EDGE 2
#define MPM_REG_POLARITY 3
#define MPM_REG_STATUS 4

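The index arithmetic behind this map is easiest to see with a worked example. The sketch below mirrors the offset computation used by qcom_mpm_read()/qcom_mpm_write() further down; the 96-pin count and pin 37 are made-up illustration values, not taken from any particular SoC.

/*
 * Sketch only: same formula as qcom_mpm_read()/qcom_mpm_write() below.
 * With qcom,mpm-pin-count = 96, reg_stride = DIV_ROUND_UP(96, 32) = 3, the
 * STATUS word covering pin 37 (index = 37 / 32 = 1) sits at byte offset
 * (MPM_REG_STATUS * 3 + 1 + 2) * 4 = 0x3c; the "+ 2" skips TIMER0/TIMER1.
 */
static unsigned int vmpm_offset(unsigned int reg, unsigned int reg_stride,
				unsigned int pin)
{
	return (reg * reg_stride + (pin / 32) + 2) * 4;
}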
/* MPM pin map to GIC hwirq */
|
||||
struct mpm_gic_map {
|
||||
int pin;
|
||||
irq_hw_number_t hwirq;
|
||||
};
|
||||
|
||||
struct qcom_mpm_priv {
|
||||
void __iomem *base;
|
||||
raw_spinlock_t lock;
|
||||
struct mbox_client mbox_client;
|
||||
struct mbox_chan *mbox_chan;
|
||||
struct mpm_gic_map *maps;
|
||||
unsigned int map_cnt;
|
||||
unsigned int reg_stride;
|
||||
struct irq_domain *domain;
|
||||
struct generic_pm_domain genpd;
|
||||
};
|
||||
|
||||
static u32 qcom_mpm_read(struct qcom_mpm_priv *priv, unsigned int reg,
|
||||
unsigned int index)
|
||||
{
|
||||
unsigned int offset = (reg * priv->reg_stride + index + 2) * 4;
|
||||
|
||||
return readl_relaxed(priv->base + offset);
|
||||
}
|
||||
|
||||
static void qcom_mpm_write(struct qcom_mpm_priv *priv, unsigned int reg,
|
||||
unsigned int index, u32 val)
|
||||
{
|
||||
unsigned int offset = (reg * priv->reg_stride + index + 2) * 4;
|
||||
|
||||
writel_relaxed(val, priv->base + offset);
|
||||
|
||||
/* Ensure the write is completed */
|
||||
wmb();
|
||||
}
|
||||
|
||||
static void qcom_mpm_enable_irq(struct irq_data *d, bool en)
|
||||
{
|
||||
struct qcom_mpm_priv *priv = d->chip_data;
|
||||
int pin = d->hwirq;
|
||||
unsigned int index = pin / 32;
|
||||
unsigned int shift = pin % 32;
|
||||
unsigned long flags, val;
|
||||
|
||||
raw_spin_lock_irqsave(&priv->lock, flags);
|
||||
|
||||
val = qcom_mpm_read(priv, MPM_REG_ENABLE, index);
|
||||
__assign_bit(shift, &val, en);
|
||||
qcom_mpm_write(priv, MPM_REG_ENABLE, index, val);
|
||||
|
||||
raw_spin_unlock_irqrestore(&priv->lock, flags);
|
||||
}
|
||||
|
||||
static void qcom_mpm_mask(struct irq_data *d)
|
||||
{
|
||||
qcom_mpm_enable_irq(d, false);
|
||||
|
||||
if (d->parent_data)
|
||||
irq_chip_mask_parent(d);
|
||||
}
|
||||
|
||||
static void qcom_mpm_unmask(struct irq_data *d)
|
||||
{
|
||||
qcom_mpm_enable_irq(d, true);
|
||||
|
||||
if (d->parent_data)
|
||||
irq_chip_unmask_parent(d);
|
||||
}
|
||||
|
||||
static void mpm_set_type(struct qcom_mpm_priv *priv, bool set, unsigned int reg,
|
||||
unsigned int index, unsigned int shift)
|
||||
{
|
||||
unsigned long flags, val;
|
||||
|
||||
raw_spin_lock_irqsave(&priv->lock, flags);
|
||||
|
||||
val = qcom_mpm_read(priv, reg, index);
|
||||
__assign_bit(shift, &val, set);
|
||||
qcom_mpm_write(priv, reg, index, val);
|
||||
|
||||
raw_spin_unlock_irqrestore(&priv->lock, flags);
|
||||
}
|
||||
|
||||
static int qcom_mpm_set_type(struct irq_data *d, unsigned int type)
|
||||
{
|
||||
struct qcom_mpm_priv *priv = d->chip_data;
|
||||
int pin = d->hwirq;
|
||||
unsigned int index = pin / 32;
|
||||
unsigned int shift = pin % 32;
|
||||
|
||||
if (type & IRQ_TYPE_EDGE_RISING)
|
||||
mpm_set_type(priv, true, MPM_REG_RISING_EDGE, index, shift);
|
||||
else
|
||||
mpm_set_type(priv, false, MPM_REG_RISING_EDGE, index, shift);
|
||||
|
||||
if (type & IRQ_TYPE_EDGE_FALLING)
|
||||
mpm_set_type(priv, true, MPM_REG_FALLING_EDGE, index, shift);
|
||||
else
|
||||
mpm_set_type(priv, false, MPM_REG_FALLING_EDGE, index, shift);
|
||||
|
||||
if (type & IRQ_TYPE_LEVEL_HIGH)
|
||||
mpm_set_type(priv, true, MPM_REG_POLARITY, index, shift);
|
||||
else
|
||||
mpm_set_type(priv, false, MPM_REG_POLARITY, index, shift);
|
||||
|
||||
if (!d->parent_data)
|
||||
return 0;
|
||||
|
||||
if (type & IRQ_TYPE_EDGE_BOTH)
|
||||
type = IRQ_TYPE_EDGE_RISING;
|
||||
|
||||
if (type & IRQ_TYPE_LEVEL_MASK)
|
||||
type = IRQ_TYPE_LEVEL_HIGH;
|
||||
|
||||
return irq_chip_set_type_parent(d, type);
|
||||
}
|
||||
|
||||
static struct irq_chip qcom_mpm_chip = {
|
||||
.name = "mpm",
|
||||
.irq_eoi = irq_chip_eoi_parent,
|
||||
.irq_mask = qcom_mpm_mask,
|
||||
.irq_unmask = qcom_mpm_unmask,
|
||||
.irq_retrigger = irq_chip_retrigger_hierarchy,
|
||||
.irq_set_type = qcom_mpm_set_type,
|
||||
.irq_set_affinity = irq_chip_set_affinity_parent,
|
||||
.flags = IRQCHIP_MASK_ON_SUSPEND |
|
||||
IRQCHIP_SKIP_SET_WAKE,
|
||||
};
|
||||
|
||||
static struct mpm_gic_map *get_mpm_gic_map(struct qcom_mpm_priv *priv, int pin)
|
||||
{
|
||||
struct mpm_gic_map *maps = priv->maps;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < priv->map_cnt; i++) {
|
||||
if (maps[i].pin == pin)
|
||||
return &maps[i];
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static int qcom_mpm_alloc(struct irq_domain *domain, unsigned int virq,
|
||||
unsigned int nr_irqs, void *data)
|
||||
{
|
||||
struct qcom_mpm_priv *priv = domain->host_data;
|
||||
struct irq_fwspec *fwspec = data;
|
||||
struct irq_fwspec parent_fwspec;
|
||||
struct mpm_gic_map *map;
|
||||
irq_hw_number_t pin;
|
||||
unsigned int type;
|
||||
int ret;
|
||||
|
||||
ret = irq_domain_translate_twocell(domain, fwspec, &pin, &type);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = irq_domain_set_hwirq_and_chip(domain, virq, pin,
|
||||
&qcom_mpm_chip, priv);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
map = get_mpm_gic_map(priv, pin);
|
||||
if (map == NULL)
|
||||
return irq_domain_disconnect_hierarchy(domain->parent, virq);
|
||||
|
||||
if (type & IRQ_TYPE_EDGE_BOTH)
|
||||
type = IRQ_TYPE_EDGE_RISING;
|
||||
|
||||
if (type & IRQ_TYPE_LEVEL_MASK)
|
||||
type = IRQ_TYPE_LEVEL_HIGH;
|
||||
|
||||
parent_fwspec.fwnode = domain->parent->fwnode;
|
||||
parent_fwspec.param_count = 3;
|
||||
parent_fwspec.param[0] = 0;
|
||||
parent_fwspec.param[1] = map->hwirq;
|
||||
parent_fwspec.param[2] = type;
|
||||
|
||||
return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
|
||||
&parent_fwspec);
|
||||
}
|
||||
|
||||
static const struct irq_domain_ops qcom_mpm_ops = {
|
||||
.alloc = qcom_mpm_alloc,
|
||||
.free = irq_domain_free_irqs_common,
|
||||
.translate = irq_domain_translate_twocell,
|
||||
};
|
||||
|
||||
/* Triggered by RPM when system resumes from deep sleep */
|
||||
static irqreturn_t qcom_mpm_handler(int irq, void *dev_id)
|
||||
{
|
||||
struct qcom_mpm_priv *priv = dev_id;
|
||||
unsigned long enable, pending;
|
||||
irqreturn_t ret = IRQ_NONE;
|
||||
unsigned long flags;
|
||||
int i, j;
|
||||
|
||||
for (i = 0; i < priv->reg_stride; i++) {
|
||||
raw_spin_lock_irqsave(&priv->lock, flags);
|
||||
enable = qcom_mpm_read(priv, MPM_REG_ENABLE, i);
|
||||
pending = qcom_mpm_read(priv, MPM_REG_STATUS, i);
|
||||
pending &= enable;
|
||||
raw_spin_unlock_irqrestore(&priv->lock, flags);
|
||||
|
||||
for_each_set_bit(j, &pending, 32) {
|
||||
unsigned int pin = 32 * i + j;
|
||||
struct irq_desc *desc = irq_resolve_mapping(priv->domain, pin);
|
||||
struct irq_data *d = &desc->irq_data;
|
||||
|
||||
if (!irqd_is_level_type(d))
|
||||
irq_set_irqchip_state(d->irq,
|
||||
IRQCHIP_STATE_PENDING, true);
|
||||
ret = IRQ_HANDLED;
|
||||
}
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int mpm_pd_power_off(struct generic_pm_domain *genpd)
|
||||
{
|
||||
struct qcom_mpm_priv *priv = container_of(genpd, struct qcom_mpm_priv,
|
||||
genpd);
|
||||
int i, ret;
|
||||
|
||||
for (i = 0; i < priv->reg_stride; i++)
|
||||
qcom_mpm_write(priv, MPM_REG_STATUS, i, 0);
|
||||
|
||||
/* Notify RPM to write vMPM into HW */
|
||||
ret = mbox_send_message(priv->mbox_chan, NULL);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static bool gic_hwirq_is_mapped(struct mpm_gic_map *maps, int cnt, u32 hwirq)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < cnt; i++)
|
||||
if (maps[i].hwirq == hwirq)
|
||||
return true;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
static int qcom_mpm_init(struct device_node *np, struct device_node *parent)
|
||||
{
|
||||
struct platform_device *pdev = of_find_device_by_node(np);
|
||||
struct device *dev = &pdev->dev;
|
||||
struct irq_domain *parent_domain;
|
||||
struct generic_pm_domain *genpd;
|
||||
struct qcom_mpm_priv *priv;
|
||||
unsigned int pin_cnt;
|
||||
int i, irq;
|
||||
int ret;
|
||||
|
||||
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
|
||||
if (!priv)
|
||||
return -ENOMEM;
|
||||
|
||||
ret = of_property_read_u32(np, "qcom,mpm-pin-count", &pin_cnt);
|
||||
if (ret) {
|
||||
dev_err(dev, "failed to read qcom,mpm-pin-count: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
priv->reg_stride = DIV_ROUND_UP(pin_cnt, 32);
|
||||
|
||||
ret = of_property_count_u32_elems(np, "qcom,mpm-pin-map");
|
||||
if (ret < 0) {
|
||||
dev_err(dev, "failed to read qcom,mpm-pin-map: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (ret % 2) {
|
||||
dev_err(dev, "invalid qcom,mpm-pin-map\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
priv->map_cnt = ret / 2;
|
||||
priv->maps = devm_kcalloc(dev, priv->map_cnt, sizeof(*priv->maps),
|
||||
GFP_KERNEL);
|
||||
if (!priv->maps)
|
||||
return -ENOMEM;
|
||||
|
||||
for (i = 0; i < priv->map_cnt; i++) {
|
||||
u32 pin, hwirq;
|
||||
|
||||
of_property_read_u32_index(np, "qcom,mpm-pin-map", i * 2, &pin);
|
||||
of_property_read_u32_index(np, "qcom,mpm-pin-map", i * 2 + 1, &hwirq);
|
||||
|
||||
if (gic_hwirq_is_mapped(priv->maps, i, hwirq)) {
|
||||
dev_warn(dev, "failed to map pin %d as GIC hwirq %d is already mapped\n",
|
||||
pin, hwirq);
|
||||
continue;
|
||||
}
|
||||
|
||||
priv->maps[i].pin = pin;
|
||||
priv->maps[i].hwirq = hwirq;
|
||||
}
|
||||
|
||||
raw_spin_lock_init(&priv->lock);
|
||||
|
||||
priv->base = devm_platform_ioremap_resource(pdev, 0);
|
||||
if (!priv->base)
|
||||
return PTR_ERR(priv->base);
|
||||
|
||||
for (i = 0; i < priv->reg_stride; i++) {
|
||||
qcom_mpm_write(priv, MPM_REG_ENABLE, i, 0);
|
||||
qcom_mpm_write(priv, MPM_REG_FALLING_EDGE, i, 0);
|
||||
qcom_mpm_write(priv, MPM_REG_RISING_EDGE, i, 0);
|
||||
qcom_mpm_write(priv, MPM_REG_POLARITY, i, 0);
|
||||
qcom_mpm_write(priv, MPM_REG_STATUS, i, 0);
|
||||
}
|
||||
|
||||
irq = platform_get_irq(pdev, 0);
|
||||
if (irq < 0)
|
||||
return irq;
|
||||
|
||||
genpd = &priv->genpd;
|
||||
genpd->flags = GENPD_FLAG_IRQ_SAFE;
|
||||
genpd->power_off = mpm_pd_power_off;
|
||||
|
||||
genpd->name = devm_kasprintf(dev, GFP_KERNEL, "%s", dev_name(dev));
|
||||
if (!genpd->name)
|
||||
return -ENOMEM;
|
||||
|
||||
ret = pm_genpd_init(genpd, NULL, false);
|
||||
if (ret) {
|
||||
dev_err(dev, "failed to init genpd: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = of_genpd_add_provider_simple(np, genpd);
|
||||
if (ret) {
|
||||
dev_err(dev, "failed to add genpd provider: %d\n", ret);
|
||||
goto remove_genpd;
|
||||
}
|
||||
|
||||
priv->mbox_client.dev = dev;
|
||||
priv->mbox_chan = mbox_request_channel(&priv->mbox_client, 0);
|
||||
if (IS_ERR(priv->mbox_chan)) {
|
||||
ret = PTR_ERR(priv->mbox_chan);
|
||||
dev_err(dev, "failed to acquire IPC channel: %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
parent_domain = irq_find_host(parent);
|
||||
if (!parent_domain) {
|
||||
dev_err(dev, "failed to find MPM parent domain\n");
|
||||
ret = -ENXIO;
|
||||
goto free_mbox;
|
||||
}
|
||||
|
||||
priv->domain = irq_domain_create_hierarchy(parent_domain,
|
||||
IRQ_DOMAIN_FLAG_QCOM_MPM_WAKEUP, pin_cnt,
|
||||
of_node_to_fwnode(np), &qcom_mpm_ops, priv);
|
||||
if (!priv->domain) {
|
||||
dev_err(dev, "failed to create MPM domain\n");
|
||||
ret = -ENOMEM;
|
||||
goto free_mbox;
|
||||
}
|
||||
|
||||
irq_domain_update_bus_token(priv->domain, DOMAIN_BUS_WAKEUP);
|
||||
|
||||
ret = devm_request_irq(dev, irq, qcom_mpm_handler, IRQF_NO_SUSPEND,
|
||||
"qcom_mpm", priv);
|
||||
if (ret) {
|
||||
dev_err(dev, "failed to request irq: %d\n", ret);
|
||||
goto remove_domain;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
remove_domain:
|
||||
irq_domain_remove(priv->domain);
|
||||
free_mbox:
|
||||
mbox_free_channel(priv->mbox_chan);
|
||||
remove_genpd:
|
||||
pm_genpd_remove(genpd);
|
||||
return ret;
|
||||
}
|
||||
|
||||
IRQCHIP_PLATFORM_DRIVER_BEGIN(qcom_mpm)
|
||||
IRQCHIP_MATCH("qcom,mpm", qcom_mpm_init)
|
||||
IRQCHIP_PLATFORM_DRIVER_END(qcom_mpm)
|
||||
MODULE_DESCRIPTION("Qualcomm Technologies, Inc. MSM Power Manager");
|
||||
MODULE_LICENSE("GPL v2");
|
@ -508,7 +508,6 @@ static int intc_irqpin_probe(struct platform_device *pdev)
|
||||
|
||||
irq_chip = &p->irq_chip;
|
||||
irq_chip->name = "intc-irqpin";
|
||||
irq_chip->parent_device = dev;
|
||||
irq_chip->irq_mask = disable_fn;
|
||||
irq_chip->irq_unmask = enable_fn;
|
||||
irq_chip->irq_set_type = intc_irqpin_irq_set_type;
|
||||
@ -523,6 +522,8 @@ static int intc_irqpin_probe(struct platform_device *pdev)
|
||||
goto err0;
|
||||
}
|
||||
|
||||
irq_domain_set_pm_device(p->irq_domain, dev);
|
||||
|
||||
if (p->shared_irqs) {
|
||||
/* request one shared interrupt */
|
||||
if (devm_request_irq(dev, p->irq[0].requested_irq,
|
||||
|
@ -188,13 +188,14 @@ static int irqc_probe(struct platform_device *pdev)
|
||||
p->gc->reg_base = p->cpu_int_base;
|
||||
p->gc->chip_types[0].regs.enable = IRQC_EN_SET;
|
||||
p->gc->chip_types[0].regs.disable = IRQC_EN_STS;
|
||||
p->gc->chip_types[0].chip.parent_device = dev;
|
||||
p->gc->chip_types[0].chip.irq_mask = irq_gc_mask_disable_reg;
|
||||
p->gc->chip_types[0].chip.irq_unmask = irq_gc_unmask_enable_reg;
|
||||
p->gc->chip_types[0].chip.irq_set_type = irqc_irq_set_type;
|
||||
p->gc->chip_types[0].chip.irq_set_wake = irqc_irq_set_wake;
|
||||
p->gc->chip_types[0].chip.flags = IRQCHIP_MASK_ON_SUSPEND;
|
||||
|
||||
irq_domain_set_pm_device(p->irq_domain, dev);
|
||||
|
||||
/* request interrupts one by one */
|
||||
for (k = 0; k < p->number_of_irqs; k++) {
|
||||
if (devm_request_irq(dev, p->irq[k].requested_irq,
|
||||
|
@@ -44,8 +44,8 @@
 * Each hart context has a vector of interrupt enable bits associated with it.
 * There's one bit for each interrupt source.
 */
#define ENABLE_BASE 0x2000
#define ENABLE_PER_HART 0x80
#define CONTEXT_ENABLE_BASE 0x2000
#define CONTEXT_ENABLE_SIZE 0x80

/*
 * Each hart context has a set of control registers associated with it. Right
@@ -53,7 +53,7 @@
 * take an interrupt, and a register to claim interrupts.
 */
#define CONTEXT_BASE 0x200000
#define CONTEXT_PER_HART 0x1000
#define CONTEXT_SIZE 0x1000
#define CONTEXT_THRESHOLD 0x00
#define CONTEXT_CLAIM 0x04

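As a reading aid for the renamed constants, here is a minimal sketch (not part of the patch) of the enable-word addressing they describe; it matches what plic_init() and __plic_toggle() below compute, with ctx standing for the per-context index walked in the init loop.

/*
 * Sketch: address of the 32-bit enable word that gates "hwirq" for context
 * "ctx". Bit (hwirq % 32) inside that word is what __plic_toggle() flips.
 */
static u32 __iomem *plic_enable_word(void __iomem *regs, int ctx, int hwirq)
{
	return regs + CONTEXT_ENABLE_BASE + ctx * CONTEXT_ENABLE_SIZE +
	       (hwirq / 32) * sizeof(u32);
}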
@ -81,17 +81,21 @@ static int plic_parent_irq __ro_after_init;
|
||||
static bool plic_cpuhp_setup_done __ro_after_init;
|
||||
static DEFINE_PER_CPU(struct plic_handler, plic_handlers);
|
||||
|
||||
static inline void plic_toggle(struct plic_handler *handler,
|
||||
int hwirq, int enable)
|
||||
static void __plic_toggle(void __iomem *enable_base, int hwirq, int enable)
|
||||
{
|
||||
u32 __iomem *reg = handler->enable_base + (hwirq / 32) * sizeof(u32);
|
||||
u32 __iomem *reg = enable_base + (hwirq / 32) * sizeof(u32);
|
||||
u32 hwirq_mask = 1 << (hwirq % 32);
|
||||
|
||||
raw_spin_lock(&handler->enable_lock);
|
||||
if (enable)
|
||||
writel(readl(reg) | hwirq_mask, reg);
|
||||
else
|
||||
writel(readl(reg) & ~hwirq_mask, reg);
|
||||
}
|
||||
|
||||
static void plic_toggle(struct plic_handler *handler, int hwirq, int enable)
|
||||
{
|
||||
raw_spin_lock(&handler->enable_lock);
|
||||
__plic_toggle(handler->enable_base, hwirq, enable);
|
||||
raw_spin_unlock(&handler->enable_lock);
|
||||
}
|
||||
|
||||
@ -324,8 +328,18 @@ static int __init plic_init(struct device_node *node,
|
||||
* Skip contexts other than external interrupts for our
|
||||
* privilege level.
|
||||
*/
|
||||
if (parent.args[0] != RV_IRQ_EXT)
|
||||
if (parent.args[0] != RV_IRQ_EXT) {
|
||||
/* Disable S-mode enable bits if running in M-mode. */
|
||||
if (IS_ENABLED(CONFIG_RISCV_M_MODE)) {
|
||||
void __iomem *enable_base = priv->regs +
|
||||
CONTEXT_ENABLE_BASE +
|
||||
i * CONTEXT_ENABLE_SIZE;
|
||||
|
||||
for (hwirq = 1; hwirq <= nr_irqs; hwirq++)
|
||||
__plic_toggle(enable_base, hwirq, 0);
|
||||
}
|
||||
continue;
|
||||
}
|
||||
|
||||
hartid = riscv_of_parent_hartid(parent.np);
|
||||
if (hartid < 0) {
|
||||
@ -361,11 +375,11 @@ static int __init plic_init(struct device_node *node,
|
||||
|
||||
cpumask_set_cpu(cpu, &priv->lmask);
|
||||
handler->present = true;
|
||||
handler->hart_base =
|
||||
priv->regs + CONTEXT_BASE + i * CONTEXT_PER_HART;
|
||||
handler->hart_base = priv->regs + CONTEXT_BASE +
|
||||
i * CONTEXT_SIZE;
|
||||
raw_spin_lock_init(&handler->enable_lock);
|
||||
handler->enable_base =
|
||||
priv->regs + ENABLE_BASE + i * ENABLE_PER_HART;
|
||||
handler->enable_base = priv->regs + CONTEXT_ENABLE_BASE +
|
||||
i * CONTEXT_ENABLE_SIZE;
|
||||
handler->priv = priv;
|
||||
done:
|
||||
for (hwirq = 1; hwirq <= nr_irqs; hwirq++)
|
||||
|
@ -214,6 +214,48 @@ static const struct stm32_desc_irq stm32mp1_desc_irq[] = {
|
||||
{ .exti = 73, .irq_parent = 129, .chip = &stm32_exti_h_chip },
|
||||
};
|
||||
|
||||
static const struct stm32_desc_irq stm32mp13_desc_irq[] = {
|
||||
{ .exti = 0, .irq_parent = 6, .chip = &stm32_exti_h_chip },
|
||||
{ .exti = 1, .irq_parent = 7, .chip = &stm32_exti_h_chip },
|
||||
{ .exti = 2, .irq_parent = 8, .chip = &stm32_exti_h_chip },
|
||||
{ .exti = 3, .irq_parent = 9, .chip = &stm32_exti_h_chip },
|
||||
{ .exti = 4, .irq_parent = 10, .chip = &stm32_exti_h_chip },
|
||||
{ .exti = 5, .irq_parent = 24, .chip = &stm32_exti_h_chip },
|
||||
{ .exti = 6, .irq_parent = 65, .chip = &stm32_exti_h_chip },
|
||||
{ .exti = 7, .irq_parent = 66, .chip = &stm32_exti_h_chip },
|
||||
{ .exti = 8, .irq_parent = 67, .chip = &stm32_exti_h_chip },
|
||||
{ .exti = 9, .irq_parent = 68, .chip = &stm32_exti_h_chip },
|
||||
{ .exti = 10, .irq_parent = 41, .chip = &stm32_exti_h_chip },
|
||||
{ .exti = 11, .irq_parent = 43, .chip = &stm32_exti_h_chip },
|
||||
{ .exti = 12, .irq_parent = 77, .chip = &stm32_exti_h_chip },
|
||||
{ .exti = 13, .irq_parent = 78, .chip = &stm32_exti_h_chip },
|
||||
{ .exti = 14, .irq_parent = 106, .chip = &stm32_exti_h_chip },
|
||||
{ .exti = 15, .irq_parent = 109, .chip = &stm32_exti_h_chip },
|
||||
{ .exti = 16, .irq_parent = 1, .chip = &stm32_exti_h_chip },
|
||||
{ .exti = 19, .irq_parent = 3, .chip = &stm32_exti_h_chip_direct },
|
||||
{ .exti = 21, .irq_parent = 32, .chip = &stm32_exti_h_chip_direct },
|
||||
{ .exti = 22, .irq_parent = 34, .chip = &stm32_exti_h_chip_direct },
|
||||
{ .exti = 23, .irq_parent = 73, .chip = &stm32_exti_h_chip_direct },
|
||||
{ .exti = 24, .irq_parent = 93, .chip = &stm32_exti_h_chip_direct },
|
||||
{ .exti = 25, .irq_parent = 114, .chip = &stm32_exti_h_chip_direct },
|
||||
{ .exti = 26, .irq_parent = 38, .chip = &stm32_exti_h_chip_direct },
|
||||
{ .exti = 27, .irq_parent = 39, .chip = &stm32_exti_h_chip_direct },
|
||||
{ .exti = 28, .irq_parent = 40, .chip = &stm32_exti_h_chip_direct },
|
||||
{ .exti = 29, .irq_parent = 72, .chip = &stm32_exti_h_chip_direct },
|
||||
{ .exti = 30, .irq_parent = 53, .chip = &stm32_exti_h_chip_direct },
|
||||
{ .exti = 31, .irq_parent = 54, .chip = &stm32_exti_h_chip_direct },
|
||||
{ .exti = 32, .irq_parent = 83, .chip = &stm32_exti_h_chip_direct },
|
||||
{ .exti = 33, .irq_parent = 84, .chip = &stm32_exti_h_chip_direct },
|
||||
{ .exti = 44, .irq_parent = 96, .chip = &stm32_exti_h_chip_direct },
|
||||
{ .exti = 47, .irq_parent = 92, .chip = &stm32_exti_h_chip_direct },
|
||||
{ .exti = 48, .irq_parent = 116, .chip = &stm32_exti_h_chip_direct },
|
||||
{ .exti = 50, .irq_parent = 117, .chip = &stm32_exti_h_chip_direct },
|
||||
{ .exti = 52, .irq_parent = 118, .chip = &stm32_exti_h_chip_direct },
|
||||
{ .exti = 53, .irq_parent = 119, .chip = &stm32_exti_h_chip_direct },
|
||||
{ .exti = 68, .irq_parent = 63, .chip = &stm32_exti_h_chip_direct },
|
||||
{ .exti = 70, .irq_parent = 98, .chip = &stm32_exti_h_chip_direct },
|
||||
};
|
||||
|
||||
static const struct stm32_exti_drv_data stm32mp1_drv_data = {
|
||||
.exti_banks = stm32mp1_exti_banks,
|
||||
.bank_nr = ARRAY_SIZE(stm32mp1_exti_banks),
|
||||
@ -221,6 +263,13 @@ static const struct stm32_exti_drv_data stm32mp1_drv_data = {
|
||||
.irq_nr = ARRAY_SIZE(stm32mp1_desc_irq),
|
||||
};
|
||||
|
||||
static const struct stm32_exti_drv_data stm32mp13_drv_data = {
|
||||
.exti_banks = stm32mp1_exti_banks,
|
||||
.bank_nr = ARRAY_SIZE(stm32mp1_exti_banks),
|
||||
.desc_irqs = stm32mp13_desc_irq,
|
||||
.irq_nr = ARRAY_SIZE(stm32mp13_desc_irq),
|
||||
};
|
||||
|
||||
static const struct
|
||||
stm32_desc_irq *stm32_exti_get_desc(const struct stm32_exti_drv_data *drv_data,
|
||||
irq_hw_number_t hwirq)
|
||||
@ -922,6 +971,7 @@ static int stm32_exti_probe(struct platform_device *pdev)
|
||||
/* platform driver only for MP1 */
|
||||
static const struct of_device_id stm32_exti_ids[] = {
|
||||
{ .compatible = "st,stm32mp1-exti", .data = &stm32mp1_drv_data},
|
||||
{ .compatible = "st,stm32mp13-exti", .data = &stm32mp13_drv_data},
|
||||
{},
|
||||
};
|
||||
MODULE_DEVICE_TABLE(of, stm32_exti_ids);
|
||||
|
@ -19,14 +19,15 @@
|
||||
#include <linux/of_address.h>
|
||||
#include <linux/of_irq.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/seq_file.h>
|
||||
|
||||
#define IRQ_MASK 0x4
|
||||
#define IRQ_STATUS 0x8
|
||||
|
||||
struct ts4800_irq_data {
|
||||
void __iomem *base;
|
||||
struct platform_device *pdev;
|
||||
struct irq_domain *domain;
|
||||
struct irq_chip irq_chip;
|
||||
};
|
||||
|
||||
static void ts4800_irq_mask(struct irq_data *d)
|
||||
@ -47,12 +48,25 @@ static void ts4800_irq_unmask(struct irq_data *d)
|
||||
writew(reg & ~mask, data->base + IRQ_MASK);
|
||||
}
|
||||
|
||||
static void ts4800_irq_print_chip(struct irq_data *d, struct seq_file *p)
|
||||
{
|
||||
struct ts4800_irq_data *data = irq_data_get_irq_chip_data(d);
|
||||
|
||||
seq_printf(p, "%s", dev_name(&data->pdev->dev));
|
||||
}
|
||||
|
||||
static const struct irq_chip ts4800_chip = {
|
||||
.irq_mask = ts4800_irq_mask,
|
||||
.irq_unmask = ts4800_irq_unmask,
|
||||
.irq_print_chip = ts4800_irq_print_chip,
|
||||
};
|
||||
|
||||
static int ts4800_irqdomain_map(struct irq_domain *d, unsigned int irq,
|
||||
irq_hw_number_t hwirq)
|
||||
{
|
||||
struct ts4800_irq_data *data = d->host_data;
|
||||
|
||||
irq_set_chip_and_handler(irq, &data->irq_chip, handle_simple_irq);
|
||||
irq_set_chip_and_handler(irq, &ts4800_chip, handle_simple_irq);
|
||||
irq_set_chip_data(irq, data);
|
||||
irq_set_noprobe(irq);
|
||||
|
||||
@ -92,13 +106,13 @@ static int ts4800_ic_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct device_node *node = pdev->dev.of_node;
|
||||
struct ts4800_irq_data *data;
|
||||
struct irq_chip *irq_chip;
|
||||
int parent_irq;
|
||||
|
||||
data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
|
||||
if (!data)
|
||||
return -ENOMEM;
|
||||
|
||||
data->pdev = pdev;
|
||||
data->base = devm_platform_ioremap_resource(pdev, 0);
|
||||
if (IS_ERR(data->base))
|
||||
return PTR_ERR(data->base);
|
||||
@ -111,11 +125,6 @@ static int ts4800_ic_probe(struct platform_device *pdev)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
irq_chip = &data->irq_chip;
|
||||
irq_chip->name = dev_name(&pdev->dev);
|
||||
irq_chip->irq_mask = ts4800_irq_mask;
|
||||
irq_chip->irq_unmask = ts4800_irq_unmask;
|
||||
|
||||
data->domain = irq_domain_add_linear(node, 8, &ts4800_ic_ops, data);
|
||||
if (!data->domain) {
|
||||
dev_err(&pdev->dev, "cannot add IRQ domain\n");
|
||||
|
@ -7,12 +7,12 @@
|
||||
#include <linux/io.h>
|
||||
#include <linux/irqchip.h>
|
||||
#include <linux/irqchip/chained_irq.h>
|
||||
#include <linux/irqchip/versatile-fpga.h>
|
||||
#include <linux/irqdomain.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_address.h>
|
||||
#include <linux/of_irq.h>
|
||||
#include <linux/seq_file.h>
|
||||
|
||||
#include <asm/exception.h>
|
||||
#include <asm/mach/irq.h>
|
||||
@ -34,14 +34,12 @@
|
||||
/**
|
||||
* struct fpga_irq_data - irq data container for the FPGA IRQ controller
|
||||
* @base: memory offset in virtual memory
|
||||
* @chip: chip container for this instance
|
||||
* @domain: IRQ domain for this instance
|
||||
* @valid: mask for valid IRQs on this controller
|
||||
* @used_irqs: number of active IRQs on this controller
|
||||
*/
|
||||
struct fpga_irq_data {
|
||||
void __iomem *base;
|
||||
struct irq_chip chip;
|
||||
u32 valid;
|
||||
struct irq_domain *domain;
|
||||
u8 used_irqs;
|
||||
@ -67,6 +65,20 @@ static void fpga_irq_unmask(struct irq_data *d)
|
||||
writel(mask, f->base + IRQ_ENABLE_SET);
|
||||
}
|
||||
|
||||
static void fpga_irq_print_chip(struct irq_data *d, struct seq_file *p)
|
||||
{
|
||||
struct fpga_irq_data *f = irq_data_get_irq_chip_data(d);
|
||||
|
||||
seq_printf(p, irq_domain_get_of_node(f->domain)->name);
|
||||
}
|
||||
|
||||
static const struct irq_chip fpga_chip = {
|
||||
.irq_ack = fpga_irq_mask,
|
||||
.irq_mask = fpga_irq_mask,
|
||||
.irq_unmask = fpga_irq_unmask,
|
||||
.irq_print_chip = fpga_irq_print_chip,
|
||||
};
|
||||
|
||||
static void fpga_irq_handle(struct irq_desc *desc)
|
||||
{
|
||||
struct irq_chip *chip = irq_desc_get_chip(desc);
|
||||
@ -116,7 +128,7 @@ static int handle_one_fpga(struct fpga_irq_data *f, struct pt_regs *regs)
|
||||
* Keep iterating over all registered FPGA IRQ controllers until there are
|
||||
* no pending interrupts.
|
||||
*/
|
||||
asmlinkage void __exception_irq_entry fpga_handle_irq(struct pt_regs *regs)
|
||||
static asmlinkage void __exception_irq_entry fpga_handle_irq(struct pt_regs *regs)
|
||||
{
|
||||
int i, handled;
|
||||
|
||||
@ -135,8 +147,7 @@ static int fpga_irqdomain_map(struct irq_domain *d, unsigned int irq,
|
||||
if (!(f->valid & BIT(hwirq)))
|
||||
return -EPERM;
|
||||
irq_set_chip_data(irq, f);
|
||||
irq_set_chip_and_handler(irq, &f->chip,
|
||||
handle_level_irq);
|
||||
irq_set_chip_and_handler(irq, &fpga_chip, handle_level_irq);
|
||||
irq_set_probe(irq);
|
||||
return 0;
|
||||
}
|
||||
@ -146,8 +157,8 @@ static const struct irq_domain_ops fpga_irqdomain_ops = {
|
||||
.xlate = irq_domain_xlate_onetwocell,
|
||||
};
|
||||
|
||||
void __init fpga_irq_init(void __iomem *base, const char *name, int irq_start,
|
||||
int parent_irq, u32 valid, struct device_node *node)
|
||||
static void __init fpga_irq_init(void __iomem *base, int parent_irq,
|
||||
u32 valid, struct device_node *node)
|
||||
{
|
||||
struct fpga_irq_data *f;
|
||||
int i;
|
||||
@ -158,10 +169,6 @@ void __init fpga_irq_init(void __iomem *base, const char *name, int irq_start,
|
||||
}
|
||||
f = &fpga_irq_devices[fpga_irq_id];
|
||||
f->base = base;
|
||||
f->chip.name = name;
|
||||
f->chip.irq_ack = fpga_irq_mask;
|
||||
f->chip.irq_mask = fpga_irq_mask;
|
||||
f->chip.irq_unmask = fpga_irq_unmask;
|
||||
f->valid = valid;
|
||||
|
||||
if (parent_irq != -1) {
|
||||
@ -169,20 +176,19 @@ void __init fpga_irq_init(void __iomem *base, const char *name, int irq_start,
|
||||
f);
|
||||
}
|
||||
|
||||
/* This will also allocate irq descriptors */
|
||||
f->domain = irq_domain_add_simple(node, fls(valid), irq_start,
|
||||
f->domain = irq_domain_add_linear(node, fls(valid),
|
||||
&fpga_irqdomain_ops, f);
|
||||
|
||||
/* This will allocate all valid descriptors in the linear case */
|
||||
for (i = 0; i < fls(valid); i++)
|
||||
if (valid & BIT(i)) {
|
||||
if (!irq_start)
|
||||
irq_create_mapping(f->domain, i);
|
||||
/* Is this still required? */
|
||||
irq_create_mapping(f->domain, i);
|
||||
f->used_irqs++;
|
||||
}
|
||||
|
||||
pr_info("FPGA IRQ chip %d \"%s\" @ %p, %u irqs",
|
||||
fpga_irq_id, name, base, f->used_irqs);
|
||||
fpga_irq_id, node->name, base, f->used_irqs);
|
||||
if (parent_irq != -1)
|
||||
pr_cont(", parent IRQ: %d\n", parent_irq);
|
||||
else
|
||||
@ -192,8 +198,8 @@ void __init fpga_irq_init(void __iomem *base, const char *name, int irq_start,
|
||||
}
|
||||
|
||||
#ifdef CONFIG_OF
|
||||
int __init fpga_irq_of_init(struct device_node *node,
|
||||
struct device_node *parent)
|
||||
static int __init fpga_irq_of_init(struct device_node *node,
|
||||
struct device_node *parent)
|
||||
{
|
||||
void __iomem *base;
|
||||
u32 clear_mask;
|
||||
@ -222,7 +228,7 @@ int __init fpga_irq_of_init(struct device_node *node,
|
||||
parent_irq = -1;
|
||||
}
|
||||
|
||||
fpga_irq_init(base, node->name, 0, parent_irq, valid_mask, node);
|
||||
fpga_irq_init(base, parent_irq, valid_mask, node);
|
||||
|
||||
/*
|
||||
* On Versatile AB/PB, some secondary interrupts have a direct
|
||||
|
@ -32,6 +32,8 @@
|
||||
#define MER_ME (1<<0)
|
||||
#define MER_HIE (1<<1)
|
||||
|
||||
#define SPURIOUS_IRQ (-1U)
|
||||
|
||||
static DEFINE_STATIC_KEY_FALSE(xintc_is_be);
|
||||
|
||||
struct xintc_irq_chip {
|
||||
@ -110,20 +112,6 @@ static struct irq_chip intc_dev = {
|
||||
.irq_mask_ack = intc_mask_ack,
|
||||
};
|
||||
|
||||
unsigned int xintc_get_irq(void)
|
||||
{
|
||||
unsigned int irq = -1;
|
||||
u32 hwirq;
|
||||
|
||||
hwirq = xintc_read(primary_intc, IVR);
|
||||
if (hwirq != -1U)
|
||||
irq = irq_find_mapping(primary_intc->root_domain, hwirq);
|
||||
|
||||
pr_debug("irq-xilinx: hwirq=%d, irq=%d\n", hwirq, irq);
|
||||
|
||||
return irq;
|
||||
}
|
||||
|
||||
static int xintc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
|
||||
{
|
||||
struct xintc_irq_chip *irqc = d->host_data;
|
||||
@ -164,6 +152,19 @@ static void xil_intc_irq_handler(struct irq_desc *desc)
|
||||
chained_irq_exit(chip, desc);
|
||||
}
|
||||
|
||||
static void xil_intc_handle_irq(struct pt_regs *regs)
|
||||
{
|
||||
u32 hwirq;
|
||||
|
||||
do {
|
||||
hwirq = xintc_read(primary_intc, IVR);
|
||||
if (unlikely(hwirq == SPURIOUS_IRQ))
|
||||
break;
|
||||
|
||||
generic_handle_domain_irq(primary_intc->root_domain, hwirq);
|
||||
} while (true);
|
||||
}
|
||||
|
||||
static int __init xilinx_intc_of_init(struct device_node *intc,
|
||||
struct device_node *parent)
|
||||
{
|
||||
@ -233,6 +234,7 @@ static int __init xilinx_intc_of_init(struct device_node *intc,
|
||||
} else {
|
||||
primary_intc = irqc;
|
||||
irq_set_default_host(primary_intc->root_domain);
|
||||
set_handle_irq(xil_intc_handle_irq);
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
@@ -21,23 +21,19 @@
#include <linux/slab.h>
#include <linux/types.h>

#define PDC_MAX_IRQS 168
#define PDC_MAX_GPIO_IRQS 256

#define CLEAR_INTR(reg, intr) (reg & ~(1 << intr))
#define ENABLE_INTR(reg, intr) (reg | (1 << intr))

#define IRQ_ENABLE_BANK 0x10
#define IRQ_i_CFG 0x110

#define PDC_NO_PARENT_IRQ ~0UL

struct pdc_pin_region {
u32 pin_base;
u32 parent_base;
u32 cnt;
};

#define pin_to_hwirq(r, p) ((r)->parent_base + (p) - (r)->pin_base)

static DEFINE_RAW_SPINLOCK(pdc_lock);
static void __iomem *pdc_base;
static struct pdc_pin_region *pdc_region;
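A hypothetical example of the translation that struct pdc_pin_region and the pin_to_hwirq() macro encode (the real regions are parsed from the device tree in pdc_setup_pin_mapping(); the numbers below are made up):

/*
 * Hypothetical region for illustration: PDC pins 32..47 sit behind GIC
 * SPIs 224..239, so pin 40 resolves to parent hwirq 224 + (40 - 32) = 232.
 */
static irq_hw_number_t pdc_example_lookup(void)
{
	static const struct pdc_pin_region region = {
		.pin_base	= 32,
		.parent_base	= 224,
		.cnt		= 16,
	};

	return pin_to_hwirq(&region, 40);
}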
@ -56,17 +52,18 @@ static u32 pdc_reg_read(int reg, u32 i)
|
||||
static void pdc_enable_intr(struct irq_data *d, bool on)
|
||||
{
|
||||
int pin_out = d->hwirq;
|
||||
unsigned long enable;
|
||||
unsigned long flags;
|
||||
u32 index, mask;
|
||||
u32 enable;
|
||||
|
||||
index = pin_out / 32;
|
||||
mask = pin_out % 32;
|
||||
|
||||
raw_spin_lock(&pdc_lock);
|
||||
raw_spin_lock_irqsave(&pdc_lock, flags);
|
||||
enable = pdc_reg_read(IRQ_ENABLE_BANK, index);
|
||||
enable = on ? ENABLE_INTR(enable, mask) : CLEAR_INTR(enable, mask);
|
||||
__assign_bit(mask, &enable, on);
|
||||
pdc_reg_write(IRQ_ENABLE_BANK, index, enable);
|
||||
raw_spin_unlock(&pdc_lock);
|
||||
raw_spin_unlock_irqrestore(&pdc_lock, flags);
|
||||
}
|
||||
|
||||
static void qcom_pdc_gic_disable(struct irq_data *d)
|
||||
@ -186,34 +183,17 @@ static struct irq_chip qcom_pdc_gic_chip = {
|
||||
.irq_set_affinity = irq_chip_set_affinity_parent,
|
||||
};
|
||||
|
||||
static irq_hw_number_t get_parent_hwirq(int pin)
|
||||
static struct pdc_pin_region *get_pin_region(int pin)
|
||||
{
|
||||
int i;
|
||||
struct pdc_pin_region *region;
|
||||
|
||||
for (i = 0; i < pdc_region_cnt; i++) {
|
||||
region = &pdc_region[i];
|
||||
if (pin >= region->pin_base &&
|
||||
pin < region->pin_base + region->cnt)
|
||||
return (region->parent_base + pin - region->pin_base);
|
||||
if (pin >= pdc_region[i].pin_base &&
|
||||
pin < pdc_region[i].pin_base + pdc_region[i].cnt)
|
||||
return &pdc_region[i];
|
||||
}
|
||||
|
||||
return PDC_NO_PARENT_IRQ;
|
||||
}
|
||||
|
||||
static int qcom_pdc_translate(struct irq_domain *d, struct irq_fwspec *fwspec,
|
||||
unsigned long *hwirq, unsigned int *type)
|
||||
{
|
||||
if (is_of_node(fwspec->fwnode)) {
|
||||
if (fwspec->param_count != 2)
|
||||
return -EINVAL;
|
||||
|
||||
*hwirq = fwspec->param[0];
|
||||
*type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK;
|
||||
return 0;
|
||||
}
|
||||
|
||||
return -EINVAL;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static int qcom_pdc_alloc(struct irq_domain *domain, unsigned int virq,
|
||||
@ -221,55 +201,12 @@ static int qcom_pdc_alloc(struct irq_domain *domain, unsigned int virq,
|
||||
{
|
||||
struct irq_fwspec *fwspec = data;
|
||||
struct irq_fwspec parent_fwspec;
|
||||
irq_hw_number_t hwirq, parent_hwirq;
|
||||
struct pdc_pin_region *region;
|
||||
irq_hw_number_t hwirq;
|
||||
unsigned int type;
|
||||
int ret;
|
||||
|
||||
ret = qcom_pdc_translate(domain, fwspec, &hwirq, &type);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
|
||||
&qcom_pdc_gic_chip, NULL);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
parent_hwirq = get_parent_hwirq(hwirq);
|
||||
if (parent_hwirq == PDC_NO_PARENT_IRQ)
|
||||
return irq_domain_disconnect_hierarchy(domain->parent, virq);
|
||||
|
||||
if (type & IRQ_TYPE_EDGE_BOTH)
|
||||
type = IRQ_TYPE_EDGE_RISING;
|
||||
|
||||
if (type & IRQ_TYPE_LEVEL_MASK)
|
||||
type = IRQ_TYPE_LEVEL_HIGH;
|
||||
|
||||
parent_fwspec.fwnode = domain->parent->fwnode;
|
||||
parent_fwspec.param_count = 3;
|
||||
parent_fwspec.param[0] = 0;
|
||||
parent_fwspec.param[1] = parent_hwirq;
|
||||
parent_fwspec.param[2] = type;
|
||||
|
||||
return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
|
||||
&parent_fwspec);
|
||||
}
|
||||
|
||||
static const struct irq_domain_ops qcom_pdc_ops = {
|
||||
.translate = qcom_pdc_translate,
|
||||
.alloc = qcom_pdc_alloc,
|
||||
.free = irq_domain_free_irqs_common,
|
||||
};
|
||||
|
||||
static int qcom_pdc_gpio_alloc(struct irq_domain *domain, unsigned int virq,
|
||||
unsigned int nr_irqs, void *data)
|
||||
{
|
||||
struct irq_fwspec *fwspec = data;
|
||||
struct irq_fwspec parent_fwspec;
|
||||
irq_hw_number_t hwirq, parent_hwirq;
|
||||
unsigned int type;
|
||||
int ret;
|
||||
|
||||
ret = qcom_pdc_translate(domain, fwspec, &hwirq, &type);
|
||||
ret = irq_domain_translate_twocell(domain, fwspec, &hwirq, &type);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
@ -281,8 +218,8 @@ static int qcom_pdc_gpio_alloc(struct irq_domain *domain, unsigned int virq,
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
parent_hwirq = get_parent_hwirq(hwirq);
|
||||
if (parent_hwirq == PDC_NO_PARENT_IRQ)
|
||||
region = get_pin_region(hwirq);
|
||||
if (!region)
|
||||
return irq_domain_disconnect_hierarchy(domain->parent, virq);
|
||||
|
||||
if (type & IRQ_TYPE_EDGE_BOTH)
|
||||
@ -294,23 +231,16 @@ static int qcom_pdc_gpio_alloc(struct irq_domain *domain, unsigned int virq,
|
||||
parent_fwspec.fwnode = domain->parent->fwnode;
|
||||
parent_fwspec.param_count = 3;
|
||||
parent_fwspec.param[0] = 0;
|
||||
parent_fwspec.param[1] = parent_hwirq;
|
||||
parent_fwspec.param[1] = pin_to_hwirq(region, hwirq);
|
||||
parent_fwspec.param[2] = type;
|
||||
|
||||
return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
|
||||
&parent_fwspec);
|
||||
}
|
||||
|
||||
static int qcom_pdc_gpio_domain_select(struct irq_domain *d,
|
||||
struct irq_fwspec *fwspec,
|
||||
enum irq_domain_bus_token bus_token)
|
||||
{
|
||||
return bus_token == DOMAIN_BUS_WAKEUP;
|
||||
}
|
||||
|
||||
static const struct irq_domain_ops qcom_pdc_gpio_ops = {
|
||||
.select = qcom_pdc_gpio_domain_select,
|
||||
.alloc = qcom_pdc_gpio_alloc,
|
||||
static const struct irq_domain_ops qcom_pdc_ops = {
|
||||
.translate = irq_domain_translate_twocell,
|
||||
.alloc = qcom_pdc_alloc,
|
||||
.free = irq_domain_free_irqs_common,
|
||||
};
|
||||
|
||||
@ -361,7 +291,7 @@ static int pdc_setup_pin_mapping(struct device_node *np)
|
||||
|
||||
static int qcom_pdc_init(struct device_node *node, struct device_node *parent)
|
||||
{
|
||||
struct irq_domain *parent_domain, *pdc_domain, *pdc_gpio_domain;
|
||||
struct irq_domain *parent_domain, *pdc_domain;
|
||||
int ret;
|
||||
|
||||
pdc_base = of_iomap(node, 0);
|
||||
@ -383,32 +313,21 @@ static int qcom_pdc_init(struct device_node *node, struct device_node *parent)
|
||||
goto fail;
|
||||
}
|
||||
|
||||
pdc_domain = irq_domain_create_hierarchy(parent_domain, 0, PDC_MAX_IRQS,
|
||||
of_fwnode_handle(node),
|
||||
&qcom_pdc_ops, NULL);
|
||||
pdc_domain = irq_domain_create_hierarchy(parent_domain,
|
||||
IRQ_DOMAIN_FLAG_QCOM_PDC_WAKEUP,
|
||||
PDC_MAX_GPIO_IRQS,
|
||||
of_fwnode_handle(node),
|
||||
&qcom_pdc_ops, NULL);
|
||||
if (!pdc_domain) {
|
||||
pr_err("%pOF: GIC domain add failed\n", node);
|
||||
pr_err("%pOF: PDC domain add failed\n", node);
|
||||
ret = -ENOMEM;
|
||||
goto fail;
|
||||
}
|
||||
|
||||
pdc_gpio_domain = irq_domain_create_hierarchy(parent_domain,
|
||||
IRQ_DOMAIN_FLAG_QCOM_PDC_WAKEUP,
|
||||
PDC_MAX_GPIO_IRQS,
|
||||
of_fwnode_handle(node),
|
||||
&qcom_pdc_gpio_ops, NULL);
|
||||
if (!pdc_gpio_domain) {
|
||||
pr_err("%pOF: PDC domain add failed for GPIO domain\n", node);
|
||||
ret = -ENOMEM;
|
||||
goto remove;
|
||||
}
|
||||
|
||||
irq_domain_update_bus_token(pdc_gpio_domain, DOMAIN_BUS_WAKEUP);
|
||||
irq_domain_update_bus_token(pdc_domain, DOMAIN_BUS_WAKEUP);
|
||||
|
||||
return 0;
|
||||
|
||||
remove:
|
||||
irq_domain_remove(pdc_domain);
|
||||
fail:
|
||||
kfree(pdc_region);
|
||||
iounmap(pdc_base);
|
||||
|
@@ -219,7 +219,7 @@ static int apple_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
if (hwirq < 0)
return -ENOSPC;

fwspec.param[1] += hwirq;
fwspec.param[fwspec.param_count - 2] += hwirq;

ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &fwspec);
if (ret)
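The index change above is easier to follow with the assumption spelled out (this is a hedged reading, not text from the patch): the AIC interrupt specifier ends with the trigger-flags cell and carries the interrupt number in the cell just before it, so indexing from param_count keeps the MSI code working whether the parent uses the shorter AIC specifier or the longer AICv2 one.

/*
 * Sketch (assumed layouts): whatever the parent's cell count, the hwirq
 * number is the second-to-last cell, right before the trigger flags.
 */
static void apple_msi_adjust_hwirq(struct irq_fwspec *fwspec, int hwirq)
{
	fwspec->param[fwspec->param_count - 2] += hwirq;
}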
@@ -78,7 +78,6 @@ struct npcm7xx_gpio {
	struct gpio_chip gc;
	int irqbase;
	int irq;
	void *priv;
	struct irq_chip irq_chip;
	u32 pinctrl_id;
	int (*direction_input)(struct gpio_chip *chip, unsigned offset);
@@ -226,7 +225,7 @@ static void npcmgpio_irq_handler(struct irq_desc *desc)
	chained_irq_enter(chip, desc);
	sts = ioread32(bank->base + NPCM7XX_GP_N_EVST);
	en = ioread32(bank->base + NPCM7XX_GP_N_EVEN);
	dev_dbg(chip->parent_device, "==> got irq sts %.8x %.8x\n", sts,
	dev_dbg(bank->gc.parent, "==> got irq sts %.8x %.8x\n", sts,
		en);

	sts &= en;
@@ -241,33 +240,33 @@ static int npcmgpio_set_irq_type(struct irq_data *d, unsigned int type)
		gpiochip_get_data(irq_data_get_irq_chip_data(d));
	unsigned int gpio = BIT(d->hwirq);

	dev_dbg(d->chip->parent_device, "setirqtype: %u.%u = %u\n", gpio,
	dev_dbg(bank->gc.parent, "setirqtype: %u.%u = %u\n", gpio,
		d->irq, type);
	switch (type) {
	case IRQ_TYPE_EDGE_RISING:
		dev_dbg(d->chip->parent_device, "edge.rising\n");
		dev_dbg(bank->gc.parent, "edge.rising\n");
		npcm_gpio_clr(&bank->gc, bank->base + NPCM7XX_GP_N_EVBE, gpio);
		npcm_gpio_clr(&bank->gc, bank->base + NPCM7XX_GP_N_POL, gpio);
		break;
	case IRQ_TYPE_EDGE_FALLING:
		dev_dbg(d->chip->parent_device, "edge.falling\n");
		dev_dbg(bank->gc.parent, "edge.falling\n");
		npcm_gpio_clr(&bank->gc, bank->base + NPCM7XX_GP_N_EVBE, gpio);
		npcm_gpio_set(&bank->gc, bank->base + NPCM7XX_GP_N_POL, gpio);
		break;
	case IRQ_TYPE_EDGE_BOTH:
		dev_dbg(d->chip->parent_device, "edge.both\n");
		dev_dbg(bank->gc.parent, "edge.both\n");
		npcm_gpio_set(&bank->gc, bank->base + NPCM7XX_GP_N_EVBE, gpio);
		break;
	case IRQ_TYPE_LEVEL_LOW:
		dev_dbg(d->chip->parent_device, "level.low\n");
		dev_dbg(bank->gc.parent, "level.low\n");
		npcm_gpio_set(&bank->gc, bank->base + NPCM7XX_GP_N_POL, gpio);
		break;
	case IRQ_TYPE_LEVEL_HIGH:
		dev_dbg(d->chip->parent_device, "level.high\n");
		dev_dbg(bank->gc.parent, "level.high\n");
		npcm_gpio_clr(&bank->gc, bank->base + NPCM7XX_GP_N_POL, gpio);
		break;
	default:
		dev_dbg(d->chip->parent_device, "invalid irq type\n");
		dev_dbg(bank->gc.parent, "invalid irq type\n");
		return -EINVAL;
	}

@@ -289,7 +288,7 @@ static void npcmgpio_irq_ack(struct irq_data *d)
		gpiochip_get_data(irq_data_get_irq_chip_data(d));
	unsigned int gpio = d->hwirq;

	dev_dbg(d->chip->parent_device, "irq_ack: %u.%u\n", gpio, d->irq);
	dev_dbg(bank->gc.parent, "irq_ack: %u.%u\n", gpio, d->irq);
	iowrite32(BIT(gpio), bank->base + NPCM7XX_GP_N_EVST);
}

@@ -301,7 +300,7 @@ static void npcmgpio_irq_mask(struct irq_data *d)
	unsigned int gpio = d->hwirq;

	/* Clear events */
	dev_dbg(d->chip->parent_device, "irq_mask: %u.%u\n", gpio, d->irq);
	dev_dbg(bank->gc.parent, "irq_mask: %u.%u\n", gpio, d->irq);
	iowrite32(BIT(gpio), bank->base + NPCM7XX_GP_N_EVENC);
}

@@ -313,7 +312,7 @@ static void npcmgpio_irq_unmask(struct irq_data *d)
	unsigned int gpio = d->hwirq;

	/* Enable events */
	dev_dbg(d->chip->parent_device, "irq_unmask: %u.%u\n", gpio, d->irq);
	dev_dbg(bank->gc.parent, "irq_unmask: %u.%u\n", gpio, d->irq);
	iowrite32(BIT(gpio), bank->base + NPCM7XX_GP_N_EVENS);
}

@@ -323,7 +322,7 @@ static unsigned int npcmgpio_irq_startup(struct irq_data *d)
	unsigned int gpio = d->hwirq;

	/* active-high, input, clear interrupt, enable interrupt */
	dev_dbg(d->chip->parent_device, "startup: %u.%u\n", gpio, d->irq);
	dev_dbg(gc->parent, "startup: %u.%u\n", gpio, d->irq);
	npcmgpio_direction_input(gc, gpio);
	npcmgpio_irq_ack(d);
	npcmgpio_irq_unmask(d);
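All of the npcm7xx hunks above follow one pattern: with irq_chip::parent_device on its way out, debug prints resolve the device from the driver's own state (here, the gpiochip's parent) instead of going through d->chip->parent_device. A minimal sketch of that pattern, reusing the driver structures shown in the hunks:

	#include <linux/device.h>
	#include <linux/gpio/driver.h>
	#include <linux/irq.h>

	static void example_irq_op(struct irq_data *d)
	{
		struct npcm7xx_gpio *bank =
			gpiochip_get_data(irq_data_get_irq_chip_data(d));

		/* Log against the GPIO chip's parent device, not the irq_chip. */
		dev_dbg(bank->gc.parent, "hwirq %lu\n", d->hwirq);
	}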
@@ -1307,7 +1307,6 @@ static int starfive_probe(struct platform_device *pdev)
	sfp->gc.base = -1;
	sfp->gc.ngpio = NR_GPIOS;

	starfive_irq_chip.parent_device = dev;
	starfive_irq_chip.name = sfp->gc.label;

	sfp->gc.irq.chip = &starfive_irq_chip;
@@ -1330,6 +1329,8 @@ static int starfive_probe(struct platform_device *pdev)
	if (ret)
		return dev_err_probe(dev, ret, "could not register gpiochip\n");

	irq_domain_set_pm_device(sfp->gc.irq.domain, dev);

out_pinctrl_enable:
	return pinctrl_enable(sfp->pctl);
}
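The starfive hunks show the replacement for the dropped parent_device assignment: once the gpiochip (and therefore its IRQ domain) exists, the runtime-PM device is attached to the domain with irq_domain_set_pm_device(). A sketch of the pattern, assuming the chip is registered with devm_gpiochip_add_data() as in most gpiolib drivers:

	#include <linux/device.h>
	#include <linux/gpio/driver.h>
	#include <linux/irqdomain.h>

	static int example_register(struct device *dev, struct gpio_chip *gc, void *priv)
	{
		int ret = devm_gpiochip_add_data(dev, gc, priv);

		if (ret)
			return dev_err_probe(dev, ret, "could not register gpiochip\n");

		/* Replaces the old irq_chip::parent_device assignment. */
		irq_domain_set_pm_device(gc->irq.domain, dev);
		return 0;
	}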
@@ -11,5 +11,7 @@
#define AIC_TMR_HV_VIRT 1
#define AIC_TMR_GUEST_PHYS 2
#define AIC_TMR_GUEST_VIRT 3
#define AIC_CPU_PMU_E 4
#define AIC_CPU_PMU_P 5

#endif
@@ -456,7 +456,6 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
/**
 * struct irq_chip - hardware interrupt chip descriptor
 *
 * @parent_device: pointer to parent device for irqchip
 * @name: name for /proc/interrupts
 * @irq_startup: start up the interrupt (defaults to ->enable if NULL)
 * @irq_shutdown: shut down the interrupt (defaults to ->disable if NULL)
@@ -503,7 +502,6 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
 * @flags: chip specific flags
 */
struct irq_chip {
	struct device *parent_device;
	const char *name;
	unsigned int (*irq_startup)(struct irq_data *data);
	void (*irq_shutdown)(struct irq_data *data);
@@ -712,10 +710,11 @@ extern struct irq_chip no_irq_chip;
extern struct irq_chip dummy_irq_chip;

extern void
irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
irq_set_chip_and_handler_name(unsigned int irq, const struct irq_chip *chip,
			      irq_flow_handler_t handle, const char *name);

static inline void irq_set_chip_and_handler(unsigned int irq, struct irq_chip *chip,
static inline void irq_set_chip_and_handler(unsigned int irq,
					    const struct irq_chip *chip,
					    irq_flow_handler_t handle)
{
	irq_set_chip_and_handler_name(irq, chip, handle, NULL);
@@ -805,7 +804,7 @@ static inline void irq_set_percpu_devid_flags(unsigned int irq)
}

/* Set/get chip/data for an IRQ: */
extern int irq_set_chip(unsigned int irq, struct irq_chip *chip);
extern int irq_set_chip(unsigned int irq, const struct irq_chip *chip);
extern int irq_set_handler_data(unsigned int irq, void *data);
extern int irq_set_chip_data(unsigned int irq, void *data);
extern int irq_set_irq_type(unsigned int irq, unsigned int type);
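With the prototypes above taking const pointers, a driver's irq_chip can now live in rodata. A hypothetical example of a const chip wired up through the constified setter:

	#include <linux/irq.h>

	static void example_mask(struct irq_data *d)
	{
		/* mask the line in hardware */
	}

	static void example_unmask(struct irq_data *d)
	{
		/* unmask the line in hardware */
	}

	static const struct irq_chip example_chip = {
		.name		= "example",
		.irq_mask	= example_mask,
		.irq_unmask	= example_unmask,
	};

	static void example_map_irq(unsigned int virq)
	{
		irq_set_chip_and_handler(virq, &example_chip, handle_level_irq);
	}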
@@ -1,14 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef PLAT_FPGA_IRQ_H
#define PLAT_FPGA_IRQ_H

struct device_node;
struct pt_regs;

void fpga_handle_irq(struct pt_regs *regs);
void fpga_irq_init(void __iomem *, const char *, int, int, u32,
		   struct device_node *node);
int fpga_irq_of_init(struct device_node *node,
		     struct device_node *parent);

#endif
@@ -151,6 +151,8 @@ struct irq_domain_chip_generic;
 * @gc: Pointer to a list of generic chips. There is a helper function for
 *      setting up one or more generic chips for interrupt controllers
 *      drivers using the generic chip library which uses this pointer.
 * @dev: Pointer to a device that the domain represent, and that will be
 *       used for power management purposes.
 * @parent: Pointer to parent irq_domain to support hierarchy irq_domains
 *
 * Revmap data, used internally by irq_domain
@@ -171,6 +173,7 @@ struct irq_domain {
	struct fwnode_handle *fwnode;
	enum irq_domain_bus_token bus_token;
	struct irq_domain_chip_generic *gc;
	struct device *dev;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
	struct irq_domain *parent;
#endif
@@ -226,6 +229,13 @@ static inline struct device_node *irq_domain_get_of_node(struct irq_domain *d)
	return to_of_node(d->fwnode);
}

static inline void irq_domain_set_pm_device(struct irq_domain *d,
					    struct device *dev)
{
	if (d)
		d->dev = dev;
}

#ifdef CONFIG_IRQ_DOMAIN
struct fwnode_handle *__irq_domain_alloc_fwnode(unsigned int type, int id,
						const char *name, phys_addr_t *pa);
@@ -469,7 +479,8 @@ int irq_destroy_ipi(unsigned int irq, const struct cpumask *dest);
extern struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain,
						unsigned int virq);
extern void irq_domain_set_info(struct irq_domain *domain, unsigned int virq,
				irq_hw_number_t hwirq, struct irq_chip *chip,
				irq_hw_number_t hwirq,
				const struct irq_chip *chip,
				void *chip_data, irq_flow_handler_t handler,
				void *handler_data, const char *handler_name);
extern void irq_domain_reset_irq_data(struct irq_data *irq_data);
@@ -512,7 +523,7 @@ extern int irq_domain_alloc_irqs_hierarchy(struct irq_domain *domain,
extern int irq_domain_set_hwirq_and_chip(struct irq_domain *domain,
					 unsigned int virq,
					 irq_hw_number_t hwirq,
					 struct irq_chip *chip,
					 const struct irq_chip *chip,
					 void *chip_data);
extern void irq_domain_free_irqs_common(struct irq_domain *domain,
					unsigned int virq,
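The irqdomain.h changes above do two things: they give struct irq_domain a @dev pointer that the core can use for power management, and they constify the chip pointer taken by irq_domain_set_hwirq_and_chip() and irq_domain_set_info(). A hypothetical hierarchical .alloc callback using the constified helper might look like this:

	#include <linux/irq.h>
	#include <linux/irqdomain.h>

	static const struct irq_chip example_hier_chip = {
		.name = "example-hier",
	};

	static int example_domain_alloc(struct irq_domain *domain, unsigned int virq,
					unsigned int nr_irqs, void *arg)
	{
		struct irq_fwspec *fwspec = arg;
		irq_hw_number_t hwirq = fwspec->param[0];
		unsigned int i;
		int ret;

		for (i = 0; i < nr_irqs; i++) {
			/* The chip can now be const data shared by all lines. */
			ret = irq_domain_set_hwirq_and_chip(domain, virq + i,
							    hwirq + i,
							    &example_hier_chip, NULL);
			if (ret)
				return ret;
		}

		return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, fwspec);
	}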
@@ -38,7 +38,7 @@ struct irqaction chained_action = {
 * @irq: irq number
 * @chip: pointer to irq chip description structure
 */
int irq_set_chip(unsigned int irq, struct irq_chip *chip)
int irq_set_chip(unsigned int irq, const struct irq_chip *chip)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
@@ -46,10 +46,7 @@ int irq_set_chip(unsigned int irq, struct irq_chip *chip)
	if (!desc)
		return -EINVAL;

	if (!chip)
		chip = &no_irq_chip;

	desc->irq_data.chip = chip;
	desc->irq_data.chip = (struct irq_chip *)(chip ?: &no_irq_chip);
	irq_put_desc_unlock(desc, flags);
	/*
	 * For !CONFIG_SPARSE_IRQ make the irq show up in
@@ -1073,7 +1070,7 @@ irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle,
EXPORT_SYMBOL_GPL(irq_set_chained_handler_and_data);

void
irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
irq_set_chip_and_handler_name(unsigned int irq, const struct irq_chip *chip,
			      irq_flow_handler_t handle, const char *name)
{
	irq_set_chip(irq, chip);
@@ -1558,6 +1555,14 @@ int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
	return 0;
}

static struct device *irq_get_parent_device(struct irq_data *data)
{
	if (data->domain)
		return data->domain->dev;

	return NULL;
}

/**
 * irq_chip_pm_get - Enable power for an IRQ chip
 * @data: Pointer to interrupt specific data
@@ -1567,12 +1572,13 @@ int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
 */
int irq_chip_pm_get(struct irq_data *data)
{
	struct device *dev = irq_get_parent_device(data);
	int retval;

	if (IS_ENABLED(CONFIG_PM) && data->chip->parent_device) {
		retval = pm_runtime_get_sync(data->chip->parent_device);
	if (IS_ENABLED(CONFIG_PM) && dev) {
		retval = pm_runtime_get_sync(dev);
		if (retval < 0) {
			pm_runtime_put_noidle(data->chip->parent_device);
			pm_runtime_put_noidle(dev);
			return retval;
		}
	}
@@ -1590,10 +1596,11 @@ int irq_chip_pm_get(struct irq_data *data)
 */
int irq_chip_pm_put(struct irq_data *data)
{
	struct device *dev = irq_get_parent_device(data);
	int retval = 0;

	if (IS_ENABLED(CONFIG_PM) && data->chip->parent_device)
		retval = pm_runtime_put(data->chip->parent_device);
	if (IS_ENABLED(CONFIG_PM) && dev)
		retval = pm_runtime_put(dev);

	return (retval < 0) ? retval : 0;
}
@@ -69,8 +69,12 @@ irq_debug_show_chip(struct seq_file *m, struct irq_data *data, int ind)
		seq_printf(m, "chip: None\n");
		return;
	}
	seq_printf(m, "%*schip: %s\n", ind, "", chip->name);
	seq_printf(m, "%*sflags: 0x%lx\n", ind + 1, "", chip->flags);
	seq_printf(m, "%*schip: ", ind, "");
	if (chip->irq_print_chip)
		chip->irq_print_chip(data, m);
	else
		seq_printf(m, "%s", chip->name);
	seq_printf(m, "\n%*sflags: 0x%lx\n", ind + 1, "", chip->flags);
	irq_debug_show_bits(m, ind, chip->flags, irqchip_flags,
			    ARRAY_SIZE(irqchip_flags));
}
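The debugfs hunk above prefers an optional irq_print_chip() callback over the static name string, so a chip instance can print a dynamic label without allocating a name at runtime. A chip that wants such a label could supply something like this (hypothetical example):

	#include <linux/irq.h>
	#include <linux/seq_file.h>

	static void example_print_chip(struct irq_data *d, struct seq_file *p)
	{
		/* Printed in place of irq_chip::name by the debugfs code above. */
		seq_printf(p, "example-%lu", d->hwirq);
	}

	static const struct irq_chip example_dyn_chip = {
		.irq_print_chip	= example_print_chip,
	};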
@@ -1319,7 +1319,8 @@ EXPORT_SYMBOL_GPL(irq_domain_get_irq_data);
 * @chip_data: The associated chip data
 */
int irq_domain_set_hwirq_and_chip(struct irq_domain *domain, unsigned int virq,
				  irq_hw_number_t hwirq, struct irq_chip *chip,
				  irq_hw_number_t hwirq,
				  const struct irq_chip *chip,
				  void *chip_data)
{
	struct irq_data *irq_data = irq_domain_get_irq_data(domain, virq);
@@ -1328,7 +1329,7 @@ int irq_domain_set_hwirq_and_chip(struct irq_domain *domain, unsigned int virq,
		return -ENOENT;

	irq_data->hwirq = hwirq;
	irq_data->chip = chip ? chip : &no_irq_chip;
	irq_data->chip = (struct irq_chip *)(chip ? chip : &no_irq_chip);
	irq_data->chip_data = chip_data;

	return 0;
@@ -1347,7 +1348,7 @@ EXPORT_SYMBOL_GPL(irq_domain_set_hwirq_and_chip);
 * @handler_name: The interrupt handler name
 */
void irq_domain_set_info(struct irq_domain *domain, unsigned int virq,
			 irq_hw_number_t hwirq, struct irq_chip *chip,
			 irq_hw_number_t hwirq, const struct irq_chip *chip,
			 void *chip_data, irq_flow_handler_t handler,
			 void *handler_data, const char *handler_name)
{
@@ -1853,7 +1854,7 @@ EXPORT_SYMBOL_GPL(irq_domain_get_irq_data);
 * @handler_name: The interrupt handler name
 */
void irq_domain_set_info(struct irq_domain *domain, unsigned int virq,
			 irq_hw_number_t hwirq, struct irq_chip *chip,
			 irq_hw_number_t hwirq, const struct irq_chip *chip,
			 void *chip_data, irq_flow_handler_t handler,
			 void *handler_data, const char *handler_name)
{