linux/arch/arm/kernel/irq.c
Russell King f81309067f ARM: move heavy barrier support out of line
The existing memory barrier macro causes a significant amount of code
to be inserted inline at every call site.  For example, in
gpio_set_irq_type(), we have this for mb():

c0344c08:       f57ff04e        dsb     st
c0344c0c:       e59f8190        ldr     r8, [pc, #400]  ; c0344da4 <gpio_set_irq_type+0x230>
c0344c10:       e3590004        cmp     r9, #4
c0344c14:       e5983014        ldr     r3, [r8, #20]
c0344c18:       0a000054        beq     c0344d70 <gpio_set_irq_type+0x1fc>
c0344c1c:       e3530000        cmp     r3, #0
c0344c20:       0a000004        beq     c0344c38 <gpio_set_irq_type+0xc4>
c0344c24:       e50b2030        str     r2, [fp, #-48]  ; 0xffffffd0
c0344c28:       e50bc034        str     ip, [fp, #-52]  ; 0xffffffcc
c0344c2c:       e12fff33        blx     r3
c0344c30:       e51bc034        ldr     ip, [fp, #-52]  ; 0xffffffcc
c0344c34:       e51b2030        ldr     r2, [fp, #-48]  ; 0xffffffd0
c0344c38:       e5963004        ldr     r3, [r6, #4]

Moving the outer_cache_sync() call out of line reduces the impact of
the barrier:

c0344968:       f57ff04e        dsb     st
c034496c:       e35a0004        cmp     sl, #4
c0344970:       e50b2030        str     r2, [fp, #-48]  ; 0xffffffd0
c0344974:       0a000044        beq     c0344a8c <gpio_set_irq_type+0x1b8>
c0344978:       ebf363dd        bl      c001d8f4 <arm_heavy_mb>
c034497c:       e5953004        ldr     r3, [r5, #4]

This should reduce the cache footprint of this code.  Overall, this
results in a reduction of around 20K in the kernel size:

    text    data      bss      dec     hex filename
10773970  667392 10369656 21811018 14ccf4a ../build/imx6/vmlinux-old
10754219  667392 10369656 21791267 14c8223 ../build/imx6/vmlinux-new

Another advantage of this approach is that we can finally resolve the
issue of SoCs which have their own memory barrier requirements within
multiplatform kernels (such as OMAP). Here, the bus interconnects
need additional handling to ensure that writes become visible in the
correct order (e.g., between dma_map() operations, writes to DMA
coherent memory, and MMIO accesses).
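
The shape of the fix, as a minimal sketch (arm_heavy_mb() is the symbol
visible in the disassembly above; the __arm_heavy_mb/CONFIG_ARM_HEAVY_MB
names here are illustrative assumptions, not necessarily the exact
patch): the dsb remains an inline instruction, while the heavy part
becomes a single out-of-line call:

/* Header side: the dsb stays inline, the rest moves behind a call. */
#ifdef CONFIG_ARM_HEAVY_MB
extern void arm_heavy_mb(void);
#define __arm_heavy_mb(x...)    do { dsb(x); arm_heavy_mb(); } while (0)
#else
#define __arm_heavy_mb(x...)    dsb(x)
#endif

/* Compiled once in a C file, instead of expanded at every mb() site. */
void arm_heavy_mb(void)
{
        if (outer_cache.sync)
                outer_cache.sync();
}

This also gives SoCs with extra interconnect ordering requirements one
well-defined function to hook, rather than growing every mb() call site.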

Acked-by: Tony Lindgren <tony@atomide.com>
Acked-by: Richard Woodruff <r-woodruff2@ti.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
2015-07-25 15:28:05 +01:00

/*
 *  linux/arch/arm/kernel/irq.c
 *
 *  Copyright (C) 1992 Linus Torvalds
 *  Modifications for ARM processor Copyright (C) 1995-2000 Russell King.
 *
 *  Support for Dynamic Tick Timer Copyright (C) 2004-2005 Nokia Corporation.
 *  Dynamic Tick Timer written by Tony Lindgren <tony@atomide.com> and
 *  Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 *
 *  This file contains the code used by various IRQ handling routines:
 *  asking for different IRQ's should be done through these routines
 *  instead of just grabbing them. Thus setups with different IRQ numbers
 *  shouldn't result in any weird surprises, and installing new handlers
 *  should be easier.
 *
 *  IRQ's are in fact implemented a bit like signal handlers for the kernel.
 *  Naturally it's not a 1:1 relation, but there are similarities.
 */
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/ratelimit.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/kallsyms.h>
#include <linux/proc_fs.h>
#include <linux/export.h>
#include <asm/hardware/cache-l2x0.h>
#include <asm/outercache.h>
#include <asm/exception.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>

unsigned long irq_err_count;
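
/*
 * arch_show_interrupts() supplies the architecture-specific lines at the
 * bottom of /proc/interrupts: the FIQ and IPI lists where configured,
 * plus the spurious/error count kept in irq_err_count.
 */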
int arch_show_interrupts(struct seq_file *p, int prec)
{
#ifdef CONFIG_FIQ
        show_fiq_list(p, prec);
#endif
#ifdef CONFIG_SMP
        show_ipi_list(p, prec);
#endif
        seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count);
        return 0;
}

/*
 * handle_IRQ handles all hardware IRQ's. Decoded IRQs should
 * not come via this function. Instead, they should provide their
 * own 'handler'. Used by platform code implementing C-based 1st
 * level decoding.
 */
void handle_IRQ(unsigned int irq, struct pt_regs *regs)
{
        __handle_domain_irq(NULL, irq, false, regs);
}
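
/*
 * A C-based first-level decoder typically reads its controller's pending
 * register and passes each decoded IRQ number here. A hypothetical
 * sketch (register and base names invented), hooked up at boot via
 * set_handle_irq():
 *
 *      static void __exception_irq_entry my_intc_handle_irq(struct pt_regs *regs)
 *      {
 *              handle_IRQ(readl_relaxed(intc_base + INTC_PENDING), regs);
 *      }
 */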

/*
 * asm_do_IRQ is the interface to be used from assembly code.
 */
asmlinkage void __exception_irq_entry
asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
{
        handle_IRQ(irq, regs);
}
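
/*
 * set_irq_flags() maps the legacy ARM IRQF_* flags onto genirq status
 * bits: IRQF_VALID and IRQF_PROBE clear IRQ_NOREQUEST and IRQ_NOPROBE
 * respectively, and IRQ_NOAUTOEN is cleared unless IRQF_NOAUTOEN is
 * passed.
 */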
void set_irq_flags(unsigned int irq, unsigned int iflags)
{
        unsigned long clr = 0, set = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;

        if (irq >= nr_irqs) {
                pr_err("Trying to set irq flags for IRQ%d\n", irq);
                return;
        }

        if (iflags & IRQF_VALID)
                clr |= IRQ_NOREQUEST;
        if (iflags & IRQF_PROBE)
                clr |= IRQ_NOPROBE;
        if (!(iflags & IRQF_NOAUTOEN))
                clr |= IRQ_NOAUTOEN;
        /* Order is clear bits in "clr" then set bits in "set" */
        irq_modify_status(irq, clr, set & ~clr);
}
EXPORT_SYMBOL_GPL(set_irq_flags);
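
/*
 * init_IRQ() runs once at boot: it either lets the irqchip core probe
 * the interrupt controller from the device tree or falls back to the
 * machine descriptor's init_irq() hook, then brings up the L2x0 outer
 * cache when the machine descriptor asks for it.
 */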
void __init init_IRQ(void)
{
        int ret;

        if (IS_ENABLED(CONFIG_OF) && !machine_desc->init_irq)
                irqchip_init();
        else
                machine_desc->init_irq();

        if (IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_CACHE_L2X0) &&
            (machine_desc->l2c_aux_mask || machine_desc->l2c_aux_val)) {
                if (!outer_cache.write_sec)
                        outer_cache.write_sec = machine_desc->l2c_write_sec;
                ret = l2x0_of_init(machine_desc->l2c_aux_val,
                                   machine_desc->l2c_aux_mask);
                if (ret)
                        pr_err("L2C: failed to init: %d\n", ret);
        }
}

#ifdef CONFIG_MULTI_IRQ_HANDLER
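/*
 * set_handle_irq() installs the platform's top-level IRQ dispatcher on
 * multi-IRQ-handler kernels; only the first registration takes effect,
 * later calls are silently ignored.
 */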
void __init set_handle_irq(void (*handle_irq)(struct pt_regs *))
{
        if (handle_arch_irq)
                return;

        handle_arch_irq = handle_irq;
}
#endif

#ifdef CONFIG_SPARSE_IRQ
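/*
 * With sparse IRQs, the genirq core asks the architecture how many IRQ
 * descriptors to preallocate: use the machine descriptor's count when
 * one is given, otherwise fall back to the static NR_IRQS.
 */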
int __init arch_probe_nr_irqs(void)
{
        nr_irqs = machine_desc->nr_irqs ? machine_desc->nr_irqs : NR_IRQS;
        return nr_irqs;
}
#endif

#ifdef CONFIG_HOTPLUG_CPU
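/*
 * migrate_one_irq() pushes a single IRQ off the CPU that is going down,
 * returning true when the IRQ had to be forced outside its original
 * affinity mask.
 */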
static bool migrate_one_irq(struct irq_desc *desc)
{
        struct irq_data *d = irq_desc_get_irq_data(desc);
        const struct cpumask *affinity = d->affinity;
        struct irq_chip *c;
        bool ret = false;

        /*
         * If this is a per-CPU interrupt, or the affinity does not
         * include this CPU, then we have nothing to do.
         */
        if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
                return false;

        if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
                affinity = cpu_online_mask;
                ret = true;
        }

        c = irq_data_get_irq_chip(d);
        if (!c->irq_set_affinity)
                pr_debug("IRQ%u: unable to set affinity\n", d->irq);
        else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
                cpumask_copy(d->affinity, affinity);

        return ret;
}

/*
 * The current CPU has been marked offline. Migrate IRQs off this CPU.
 * If the affinity settings do not allow other CPUs, force them onto any
 * available CPU.
 *
 * Note: we must iterate over all IRQs, whether they have an attached
 * action structure or not, as we need to get chained interrupts too.
 */
void migrate_irqs(void)
{
        unsigned int i;
        struct irq_desc *desc;
        unsigned long flags;

        local_irq_save(flags);

        for_each_irq_desc(i, desc) {
                bool affinity_broken;

                raw_spin_lock(&desc->lock);
                affinity_broken = migrate_one_irq(desc);
                raw_spin_unlock(&desc->lock);

                if (affinity_broken)
                        pr_warn_ratelimited("IRQ%u no longer affine to CPU%u\n",
                                            i, smp_processor_id());
        }

        local_irq_restore(flags);
}
#endif /* CONFIG_HOTPLUG_CPU */