commit e46d51787e
Merge branch 'master' of ssh://master.kernel.org/pub/scm/linux/kernel/git/travis/linux-2.6-cpus4096-for-ingo into cpus4096
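Every hunk in this merge is part of the same conversion: the old cpumask calls that take cpumask_t values (struct assignment, cpus_setall(), cpu_isset(), first_cpu(), any_online_cpu() with its NR_CPUS failure sentinel) are replaced by the pointer-based API (cpumask_copy(), cpumask_setall(), cpumask_test_cpu(), cpumask_first(), cpumask_any_and() with the nr_cpu_ids sentinel), so that irq_desc.affinity can be a cpumask_var_t and stay off the stack on NR_CPUS=4096 builds. A minimal sketch of the new idiom, mirroring the ARM migrate_irqs() hunk below (demo_pick_cpu is an illustrative name, not from this commit):

#include <linux/cpumask.h>

/* Pick an online CPU from an affinity mask using only pointer-based
 * cpumask ops; fall back to all CPUs when none of the mask is online. */
static unsigned int demo_pick_cpu(struct cpumask *affinity)
{
	unsigned int cpu = cpumask_any_and(affinity, cpu_online_mask);

	if (cpu >= nr_cpu_ids) {	/* no online CPU left in the mask */
		cpumask_setall(affinity);
		cpu = cpumask_any_and(affinity, cpu_online_mask);
	}
	return cpu;
}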
@@ -55,7 +55,7 @@ int irq_select_affinity(unsigned int irq)
 	cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0);
 	last_cpu = cpu;
 
-	irq_desc[irq].affinity = cpumask_of_cpu(cpu);
+	cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu));
 	irq_desc[irq].chip->set_affinity(irq, cpumask_of(cpu));
 	return 0;
 }
@@ -104,6 +104,11 @@ static struct irq_desc bad_irq_desc = {
 	.lock = SPIN_LOCK_UNLOCKED
 };
 
+#ifdef CONFIG_CPUMASK_OFFSTACK
+/* We are not allocating bad_irq_desc.affinity or .pending_mask */
+#error "ARM architecture does not support CONFIG_CPUMASK_OFFSTACK."
+#endif
+
 /*
  * do_IRQ handles all hardware IRQ's. Decoded IRQs should not
  * come via this function. Instead, they should provide their
@@ -161,7 +166,7 @@ void __init init_IRQ(void)
 		irq_desc[irq].status |= IRQ_NOREQUEST | IRQ_NOPROBE;
 
 #ifdef CONFIG_SMP
-	bad_irq_desc.affinity = CPU_MASK_ALL;
+	cpumask_setall(bad_irq_desc.affinity);
 	bad_irq_desc.cpu = smp_processor_id();
 #endif
 	init_arch_irq();
@@ -191,15 +196,16 @@ void migrate_irqs(void)
 		struct irq_desc *desc = irq_desc + i;
 
 		if (desc->cpu == cpu) {
-			unsigned int newcpu = any_online_cpu(desc->affinity);
-
-			if (newcpu == NR_CPUS) {
+			unsigned int newcpu = cpumask_any_and(desc->affinity,
+							      cpu_online_mask);
+			if (newcpu >= nr_cpu_ids) {
 				if (printk_ratelimit())
 					printk(KERN_INFO "IRQ%u no longer affine to CPU%u\n",
 					       i, cpu);
 
-				cpus_setall(desc->affinity);
-				newcpu = any_online_cpu(desc->affinity);
+				cpumask_setall(desc->affinity);
+				newcpu = cpumask_any_and(desc->affinity,
+							 cpu_online_mask);
 			}
 
 			route_irq(desc, i, newcpu);
@@ -263,7 +263,7 @@ static void em_route_irq(int irq, unsigned int cpu)
 	const struct cpumask *mask = cpumask_of(cpu);
 
 	spin_lock_irq(&desc->lock);
-	desc->affinity = *mask;
+	cpumask_copy(desc->affinity, mask);
 	desc->chip->set_affinity(irq, mask);
 	spin_unlock_irq(&desc->lock);
 }
@@ -69,6 +69,11 @@ static struct irq_desc bad_irq_desc = {
 #endif
 };
 
+#ifdef CONFIG_CPUMASK_OFFSTACK
+/* We are not allocating a variable-sized bad_irq_desc.affinity */
+#error "Blackfin architecture does not support CONFIG_CPUMASK_OFFSTACK."
+#endif
+
 int show_interrupts(struct seq_file *p, void *v)
 {
 	int i = *(loff_t *) v, j;
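Both #error guards above exist because bad_irq_desc is statically initialized: with CONFIG_CPUMASK_OFFSTACK=y a cpumask_var_t field is a pointer that must be allocated at runtime, which no static initializer can provide. A sketch of the allocation pattern such code would otherwise need (generic cpumask API usage, not code from this commit):

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>

/* cpumask_var_t is a real array when CONFIG_CPUMASK_OFFSTACK=n and a
 * pointer needing allocation when it is y; this pattern handles both. */
static int demo_offstack_mask(void)
{
	cpumask_var_t mask;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_setall(mask);
	/* ... use mask ... */
	free_cpumask_var(mask);
	return 0;
}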
@@ -880,7 +880,7 @@ iosapic_unregister_intr (unsigned int gsi)
 	if (iosapic_intr_info[irq].count == 0) {
 #ifdef CONFIG_SMP
 		/* Clear affinity */
-		cpus_setall(idesc->affinity);
+		cpumask_setall(idesc->affinity);
 #endif
 		/* Clear the interrupt information */
 		iosapic_intr_info[irq].dest = 0;
@@ -103,7 +103,7 @@ static char irq_redir [NR_IRQS]; // = { [0 ... NR_IRQS-1] = 1 };
 void set_irq_affinity_info (unsigned int irq, int hwid, int redir)
 {
 	if (irq < NR_IRQS) {
-		cpumask_copy(&irq_desc[irq].affinity,
+		cpumask_copy(irq_desc[irq].affinity,
 			     cpumask_of(cpu_logical_id(hwid)));
 		irq_redir[irq] = (char) (redir & 0xff);
 	}
@@ -148,7 +148,7 @@ static void migrate_irqs(void)
 		if (desc->status == IRQ_PER_CPU)
 			continue;
 
-		if (cpumask_any_and(&irq_desc[irq].affinity, cpu_online_mask)
+		if (cpumask_any_and(irq_desc[irq].affinity, cpu_online_mask)
 		    >= nr_cpu_ids) {
 			/*
 			 * Save it for phase 2 processing
@@ -75,7 +75,7 @@ static void ia64_set_msi_irq_affinity(unsigned int irq,
 	msg.data = data;
 
 	write_msi_msg(irq, &msg);
-	irq_desc[irq].affinity = cpumask_of_cpu(cpu);
+	cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu));
 }
 #endif /* CONFIG_SMP */
 
@@ -187,7 +187,7 @@ static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
 	msg.address_lo |= MSI_ADDR_DESTID_CPU(cpu_physical_id(cpu));
 
 	dmar_msi_write(irq, &msg);
-	irq_desc[irq].affinity = *mask;
+	cpumask_copy(irq_desc[irq].affinity, mask);
 }
 #endif /* CONFIG_SMP */
 
@@ -205,7 +205,7 @@ static void sn_set_msi_irq_affinity(unsigned int irq,
 	msg.address_lo = (u32)(bus_addr & 0x00000000ffffffff);
 
 	write_msi_msg(irq, &msg);
-	irq_desc[irq].affinity = *cpu_mask;
+	cpumask_copy(irq_desc[irq].affinity, cpu_mask);
 }
 #endif /* CONFIG_SMP */
 
@@ -66,7 +66,7 @@ extern void smtc_forward_irq(unsigned int irq);
  */
 #define IRQ_AFFINITY_HOOK(irq)						\
 do {									\
-    if (!cpu_isset(smp_processor_id(), irq_desc[irq].affinity)) {	\
+    if (!cpumask_test_cpu(smp_processor_id(), irq_desc[irq].affinity)) {\
 	smtc_forward_irq(irq);						\
 	irq_exit();							\
 	return;								\
@@ -187,7 +187,7 @@ static void gic_set_affinity(unsigned int irq, const struct cpumask *cpumask)
 		set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask);
 
 	}
-	irq_desc[irq].affinity = *cpumask;
+	cpumask_copy(irq_desc[irq].affinity, cpumask);
 	spin_unlock_irqrestore(&gic_lock, flags);
 
 }
@@ -686,7 +686,7 @@ void smtc_forward_irq(unsigned int irq)
 	 * and efficiency, we just pick the easiest one to find.
 	 */
 
-	target = first_cpu(irq_desc[irq].affinity);
+	target = cpumask_first(irq_desc[irq].affinity);
 
 	/*
 	 * We depend on the platform code to have correctly processed
@@ -116,7 +116,7 @@ struct plat_smp_ops msmtc_smp_ops = {
 
 void plat_set_irq_affinity(unsigned int irq, const struct cpumask *affinity)
 {
-	cpumask_t tmask = *affinity;
+	cpumask_t tmask;
 	int cpu = 0;
 	void smtc_set_irq_affinity(unsigned int irq, cpumask_t aff);
 
@@ -139,11 +139,12 @@ void plat_set_irq_affinity(unsigned int irq, const struct cpumask *affinity)
 	 * be made to forward to an offline "CPU".
 	 */
 
+	cpumask_copy(&tmask, affinity);
 	for_each_cpu(cpu, affinity) {
 		if ((cpu_data[cpu].vpe_id != 0) || !cpu_online(cpu))
 			cpu_clear(cpu, tmask);
 	}
-	irq_desc[irq].affinity = tmask;
+	cpumask_copy(irq_desc[irq].affinity, &tmask);
 
 	if (cpus_empty(tmask))
 		/*
@@ -120,7 +120,7 @@ int cpu_check_affinity(unsigned int irq, cpumask_t *dest)
 	if (CHECK_IRQ_PER_CPU(irq)) {
 		/* Bad linux design decision. The mask has already
 		 * been set; we must reset it */
-		irq_desc[irq].affinity = CPU_MASK_ALL;
+		cpumask_setall(irq_desc[irq].affinity);
 		return -EINVAL;
 	}
 
@@ -136,7 +136,7 @@ static void cpu_set_affinity_irq(unsigned int irq, const struct cpumask *dest)
 	if (cpu_check_affinity(irq, dest))
 		return;
 
-	irq_desc[irq].affinity = *dest;
+	cpumask_copy(irq_desc[irq].affinity, dest);
 }
 #endif
 
@@ -295,7 +295,7 @@ int txn_alloc_irq(unsigned int bits_wide)
 unsigned long txn_affinity_addr(unsigned int irq, int cpu)
 {
 #ifdef CONFIG_SMP
-	irq_desc[irq].affinity = cpumask_of_cpu(cpu);
+	cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu));
 #endif
 
 	return per_cpu(cpu_data, cpu).txn_addr;
@@ -352,7 +352,7 @@ void do_cpu_irq_mask(struct pt_regs *regs)
 	irq = eirr_to_irq(eirr_val);
 
 #ifdef CONFIG_SMP
-	dest = irq_desc[irq].affinity;
+	cpumask_copy(&dest, irq_desc[irq].affinity);
 	if (CHECK_IRQ_PER_CPU(irq_desc[irq].status) &&
 	    !cpu_isset(smp_processor_id(), dest)) {
 		int cpu = first_cpu(dest);
@@ -231,7 +231,7 @@ void fixup_irqs(cpumask_t map)
 		if (irq_desc[irq].status & IRQ_PER_CPU)
 			continue;
 
-		cpus_and(mask, irq_desc[irq].affinity, map);
+		cpumask_and(&mask, irq_desc[irq].affinity, &map);
 		if (any_online_cpu(mask) == NR_CPUS) {
 			printk("Breaking affinity for irq %i\n", irq);
 			mask = map;
@@ -153,9 +153,10 @@ static int get_irq_server(unsigned int virq, unsigned int strict_check)
 {
 	int server;
 	/* For the moment only implement delivery to all cpus or one cpu */
-	cpumask_t cpumask = irq_desc[virq].affinity;
+	cpumask_t cpumask;
 	cpumask_t tmp = CPU_MASK_NONE;
 
+	cpumask_copy(&cpumask, irq_desc[virq].affinity);
 	if (!distribute_irqs)
 		return default_server;
 
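This hunk, and the two irq_choose_cpu() hunks further down, follow the same recipe whenever a function needs a private, modifiable snapshot of an affinity mask: declare the local uninitialized, then cpumask_copy() from the descriptor, since a struct initializer such as `cpumask_t x = irq_desc[i].affinity;` no longer type-checks once .affinity is accessed through a pointer. A minimal sketch (demo_snapshot is an illustrative name, not from this commit):

#include <linux/cpumask.h>

/* Snapshot the mask, then restrict the copy without touching the
 * descriptor's own affinity setting. */
static unsigned int demo_snapshot(const struct cpumask *affinity)
{
	cpumask_t local;

	cpumask_copy(&local, affinity);
	cpumask_and(&local, &local, cpu_online_mask);
	return cpumask_first(&local);
}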
@@ -869,7 +870,7 @@ void xics_migrate_irqs_away(void)
 				virq, cpu);
 
 		/* Reset affinity to all cpus */
-		irq_desc[virq].affinity = CPU_MASK_ALL;
+		cpumask_setall(irq_desc[virq].affinity);
 		desc->chip->set_affinity(virq, cpu_all_mask);
 unlock:
 		spin_unlock_irqrestore(&desc->lock, flags);
@@ -566,9 +566,10 @@ static void __init mpic_scan_ht_pics(struct mpic *mpic)
 #ifdef CONFIG_SMP
 static int irq_choose_cpu(unsigned int virt_irq)
 {
-	cpumask_t mask = irq_desc[virt_irq].affinity;
+	cpumask_t mask;
 	int cpuid;
 
+	cpumask_copy(&mask, irq_desc[virt_irq].affinity);
 	if (cpus_equal(mask, CPU_MASK_ALL)) {
 		static int irq_rover;
 		static DEFINE_SPINLOCK(irq_rover_lock);
@@ -247,9 +247,10 @@ struct irq_handler_data {
 #ifdef CONFIG_SMP
 static int irq_choose_cpu(unsigned int virt_irq)
 {
-	cpumask_t mask = irq_desc[virt_irq].affinity;
+	cpumask_t mask;
 	int cpuid;
 
+	cpumask_copy(&mask, irq_desc[virt_irq].affinity);
 	if (cpus_equal(mask, CPU_MASK_ALL)) {
 		static int irq_rover;
 		static DEFINE_SPINLOCK(irq_rover_lock);
@@ -854,7 +855,7 @@ void fixup_irqs(void)
 		    !(irq_desc[irq].status & IRQ_PER_CPU)) {
 			if (irq_desc[irq].chip->set_affinity)
 				irq_desc[irq].chip->set_affinity(irq,
-					&irq_desc[irq].affinity);
+					irq_desc[irq].affinity);
 		}
 		spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
 	}
@@ -115,14 +115,11 @@
 # endif
 #else
 
-/* defined as a macro so nr_irqs = max_nr_irqs(nr_cpu_ids) can be used */
-# define max_nr_irqs(nr_cpus)				\
-	((8 * nr_cpus) > (32 * MAX_IO_APICS) ?		\
+# define NR_IRQS					\
+	((8 * NR_CPUS) > (32 * MAX_IO_APICS) ?		\
 		(NR_VECTORS + (8 * NR_CPUS)) :		\
 		(NR_VECTORS + (32 * MAX_IO_APICS)))	\
 
-# define NR_IRQS max_nr_irqs(NR_CPUS)
-
 #endif
 
 #elif defined(CONFIG_X86_VOYAGER)
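For scale, assuming NR_VECTORS = 256 and MAX_IO_APICS = 128 (representative x86_64 values from contemporary trees, not stated in this diff): at NR_CPUS = 4096, 8 * 4096 = 32768 exceeds 32 * 128 = 4096, so NR_IRQS = 256 + 32768 = 33024. The arch_probe_nr_irqs() hunk below evaluates the same expression with the runtime nr_cpu_ids and nr_ioapics instead, so a box with, say, 8 CPUs and 4 I/O APICs can shrink nr_irqs to 256 + 128 = 384.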
@@ -3850,6 +3850,22 @@ void __init probe_nr_irqs_gsi(void)
 	nr_irqs_gsi = nr;
 }
 
+#ifdef CONFIG_SPARSE_IRQ
+int __init arch_probe_nr_irqs(void)
+{
+	int nr;
+
+	nr = ((8 * nr_cpu_ids) > (32 * nr_ioapics) ?
+		(NR_VECTORS + (8 * nr_cpu_ids)) :
+		(NR_VECTORS + (32 * nr_ioapics)));
+
+	if (nr < nr_irqs && nr > nr_irqs_gsi)
+		nr_irqs = nr;
+
+	return 0;
+}
+#endif
+
 /* --------------------------------------------------------------------------
                           ACPI-based IOAPIC Configuration
    -------------------------------------------------------------------------- */
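This arch_probe_nr_irqs() overrides the __weak stub added to the generic code at the end of this diff: whichever strong definition an architecture links in wins, and the stub's return 0 is the fallback. A freestanding user-space illustration of the weak-symbol mechanism (hypothetical demo, not kernel code):

/* weak_demo.c: `cc weak_demo.c && ./a.out` prints 0; link in another
 * file with a strong arch_probe_nr_irqs() and that definition is used. */
#include <stdio.h>

int __attribute__((weak)) arch_probe_nr_irqs(void)
{
	return 0;	/* default, like the kernel's __weak stub */
}

int main(void)
{
	printf("arch_probe_nr_irqs() = %d\n", arch_probe_nr_irqs());
	return 0;
}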
@@ -467,6 +467,7 @@ int show_interrupts(struct seq_file *p, void *v);
 struct irq_desc;
 
 extern int early_irq_init(void);
+extern int arch_probe_nr_irqs(void);
 extern int arch_early_irq_init(void);
 extern int arch_init_chip_data(struct irq_desc *desc, int cpu);
 
@@ -59,10 +59,6 @@ EXPORT_SYMBOL_GPL(nr_irqs);
 
 #ifdef CONFIG_SPARSE_IRQ
 
-#ifndef max_nr_irqs
-#define max_nr_irqs(nr_cpus) NR_IRQS
-#endif
-
 static struct irq_desc irq_desc_init = {
 	.irq = -1,
 	.status = IRQ_DISABLED,
@@ -137,9 +133,8 @@ int __init early_irq_init(void)
 	int legacy_count;
 	int i;
 
-	/* initialize nr_irqs based on nr_cpu_ids */
-	nr_irqs = max_nr_irqs(nr_cpu_ids);
-
+	/* initialize nr_irqs based on nr_cpu_ids */
+	arch_probe_nr_irqs();
 	printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d\n", NR_IRQS, nr_irqs);
 
 	desc = irq_desc_legacy;
@@ -795,6 +795,11 @@ int __init __weak early_irq_init(void)
 	return 0;
 }
 
+int __init __weak arch_probe_nr_irqs(void)
+{
+	return 0;
+}
+
 int __init __weak arch_early_irq_init(void)
 {
 	return 0;