Merge branch 'merge' into next
commit ea4e89afed
@@ -288,13 +288,6 @@ label##_hv:								\
 /* Exception addition: Hard disable interrupts */
 #define DISABLE_INTS	SOFT_DISABLE_INTS(r10,r11)
 
-/* Exception addition: Keep interrupt state */
-#define ENABLE_INTS				\
-	ld	r11,PACAKMSR(r13);		\
-	ld	r12,_MSR(r1);			\
-	rlwimi	r11,r12,0,MSR_EE;		\
-	mtmsrd	r11,1
-
 #define ADD_NVGPRS				\
 	bl	.save_nvgprs
 
@@ -18,10 +18,6 @@
 #include <linux/atomic.h>
 
 
-/* Define a way to iterate across irqs. */
-#define for_each_irq(i) \
-	for ((i) = 0; (i) < NR_IRQS; ++(i))
-
 extern atomic_t ppc_n_lost_interrupts;
 
 /* This number is used when no interrupt has been assigned */
@@ -763,16 +763,6 @@ do_work:
 	SOFT_DISABLE_INTS(r3,r4)
 1:	bl	.preempt_schedule_irq
 
-	/* Hard-disable interrupts again (and update PACA) */
-#ifdef CONFIG_PPC_BOOK3E
-	wrteei	0
-#else
-	ld	r10,PACAKMSR(r13) /* Get kernel MSR without EE */
-	mtmsrd	r10,1
-#endif /* CONFIG_PPC_BOOK3E */
-	li	r0,PACA_IRQ_HARD_DIS
-	stb	r0,PACAIRQHAPPENED(r13)
-
 	/* Re-test flags and eventually loop */
 	clrrdi	r9,r1,THREAD_SHIFT
 	ld	r4,TI_FLAGS(r9)
@@ -783,14 +773,6 @@ do_work:
 user_work:
 #endif /* CONFIG_PREEMPT */
 
-	/* Enable interrupts */
-#ifdef CONFIG_PPC_BOOK3E
-	wrteei	1
-#else
-	ori	r10,r10,MSR_EE
-	mtmsrd	r10,1
-#endif /* CONFIG_PPC_BOOK3E */
-
 	andi.	r0,r4,_TIF_NEED_RESCHED
 	beq	1f
 	bl	.restore_interrupts
@@ -764,8 +764,8 @@ alignment_common:
 	std	r3,_DAR(r1)
 	std	r4,_DSISR(r1)
 	bl	.save_nvgprs
+	DISABLE_INTS
 	addi	r3,r1,STACK_FRAME_OVERHEAD
-	ENABLE_INTS
 	bl	.alignment_exception
 	b	.ret_from_except
 
@@ -260,11 +260,17 @@ EXPORT_SYMBOL(arch_local_irq_restore);
  * if they are currently disabled. This is typically called before
  * schedule() or do_signal() when returning to userspace. We do it
  * in C to avoid the burden of dealing with lockdep etc...
+ *
+ * NOTE: This is called with interrupts hard disabled but not marked
+ * as such in paca->irq_happened, so we need to resync this.
  */
 void restore_interrupts(void)
 {
-	if (irqs_disabled())
+	if (irqs_disabled()) {
+		local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
 		local_irq_enable();
+	} else
+		__hard_irq_enable();
 }
 
 #endif /* CONFIG_PPC64 */
@@ -330,14 +336,10 @@ void migrate_irqs(void)
 
 	alloc_cpumask_var(&mask, GFP_KERNEL);
 
-	for_each_irq(irq) {
+	for_each_irq_desc(irq, desc) {
 		struct irq_data *data;
 		struct irq_chip *chip;
 
-		desc = irq_to_desc(irq);
-		if (!desc)
-			continue;
-
 		data = irq_desc_get_irq_data(desc);
 		if (irqd_is_per_cpu(data))
 			continue;
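For readers less familiar with the genirq API: the for_each_irq() to for_each_irq_desc() conversions in the hunks above and below all follow the same pattern. A minimal sketch under stated assumptions (the iterator and the irq_desc/irq_data accessors are the standard helpers from <linux/irq.h> and <linux/irqdesc.h>; the walk_active_irqs() wrapper itself is only a hypothetical illustration, not code from this merge):

#include <linux/irq.h>
#include <linux/irqdesc.h>

/* Hypothetical illustration: visit every allocated, non-per-CPU IRQ.
 * for_each_irq_desc() only yields descriptors that actually exist, so
 * the "desc = irq_to_desc(irq); if (!desc) continue;" dance from the
 * removed for_each_irq() loops is no longer needed.
 */
static void walk_active_irqs(void)
{
	unsigned int irq;
	struct irq_desc *desc;

	for_each_irq_desc(irq, desc) {
		struct irq_data *data = irq_desc_get_irq_data(desc);

		if (irqd_is_per_cpu(data))
			continue;	/* leave per-CPU interrupts alone */

		/* ... operate on desc/data here ... */
	}
}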
@@ -23,14 +23,11 @@
 
 void machine_kexec_mask_interrupts(void) {
 	unsigned int i;
+	struct irq_desc *desc;
 
-	for_each_irq(i) {
-		struct irq_desc *desc = irq_to_desc(i);
+	for_each_irq_desc(i, desc) {
 		struct irq_chip *chip;
 
-		if (!desc)
-			continue;
-
 		chip = irq_desc_get_chip(desc);
 		if (!chip)
 			continue;
@@ -248,7 +248,7 @@ void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
 				   addr, regs->nip, regs->link, code);
 	}
 
-	if (!arch_irq_disabled_regs(regs))
+	if (arch_irqs_disabled() && !arch_irq_disabled_regs(regs))
 		local_irq_enable();
 
 	memset(&info, 0, sizeof(info));
@@ -1019,7 +1019,9 @@ void __kprobes program_check_exception(struct pt_regs *regs)
 		return;
 	}
 
-	local_irq_enable();
+	/* We restore the interrupt state now */
+	if (!arch_irq_disabled_regs(regs))
+		local_irq_enable();
 
 #ifdef CONFIG_MATH_EMULATION
 	/* (reason & REASON_ILLEGAL) would be the obvious thing here,
@@ -1069,6 +1071,10 @@ void alignment_exception(struct pt_regs *regs)
 {
 	int sig, code, fixed = 0;
 
+	/* We restore the interrupt state now */
+	if (!arch_irq_disabled_regs(regs))
+		local_irq_enable();
+
 	/* we don't implement logging of alignment exceptions */
 	if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS))
 		fixed = fix_alignment(regs);
@@ -114,7 +114,7 @@ static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc)
 		pr_devel("axon_msi: woff %x roff %x msi %x\n",
 			  write_offset, msic->read_offset, msi);
 
-		if (msi < NR_IRQS && irq_get_chip_data(msi) == msic) {
+		if (msi < nr_irqs && irq_get_chip_data(msi) == msic) {
 			generic_handle_irq(msi);
 			msic->fifo_virt[idx] = cpu_to_le32(0xffffffff);
 		} else {
@@ -276,9 +276,6 @@ static int axon_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
 	if (rc)
 		return rc;
 
-	/* We rely on being able to stash a virq in a u16 */
-	BUILD_BUG_ON(NR_IRQS > 65536);
-
 	list_for_each_entry(entry, &dev->msi_list, list) {
 		virq = irq_create_direct_mapping(msic->irq_domain);
 		if (virq == NO_IRQ) {
@@ -392,7 +389,8 @@ static int axon_msi_probe(struct platform_device *device)
 	}
 	memset(msic->fifo_virt, 0xff, MSIC_FIFO_SIZE_BYTES);
 
-	msic->irq_domain = irq_domain_add_nomap(dn, 0, &msic_host_ops, msic);
+	/* We rely on being able to stash a virq in a u16, so limit irqs to < 65536 */
+	msic->irq_domain = irq_domain_add_nomap(dn, 65536, &msic_host_ops, msic);
 	if (!msic->irq_domain) {
 		printk(KERN_ERR "axon_msi: couldn't allocate irq_domain for %s\n",
 		       dn->full_name);
@@ -248,6 +248,6 @@ void beatic_deinit_IRQ(void)
 {
 	int	i;
 
-	for (i = 1; i < NR_IRQS; i++)
+	for (i = 1; i < nr_irqs; i++)
 		beat_destruct_irq_plug(i);
 }
@@ -57,9 +57,9 @@ static int max_real_irqs;
 
 static DEFINE_RAW_SPINLOCK(pmac_pic_lock);
 
-#define NR_MASK_WORDS	((NR_IRQS + 31) / 32)
-static unsigned long ppc_lost_interrupts[NR_MASK_WORDS];
-static unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
+/* The max irq number this driver deals with is 128; see max_irqs */
+static DECLARE_BITMAP(ppc_lost_interrupts, 128);
+static DECLARE_BITMAP(ppc_cached_irq_mask, 128);
 static int pmac_irq_cascade = -1;
 static struct irq_domain *pmac_pic_host;
 
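A side note on the DECLARE_BITMAP() conversion in the hunk above: sizing the bitmap to the driver's own fixed limit decouples it from NR_IRQS. A small self-contained sketch, assuming the standard <linux/bitmap.h> and <linux/bitops.h> helpers (the MAX_PIC_IRQS constant and the two functions are hypothetical, not part of this change):

#include <linux/bitmap.h>
#include <linux/bitops.h>

#define MAX_PIC_IRQS	128	/* fixed per-driver limit, independent of NR_IRQS */

/* Storage: an unsigned long array large enough to hold MAX_PIC_IRQS bits */
static DECLARE_BITMAP(lost_interrupts, MAX_PIC_IRQS);

static void mark_lost(unsigned int hwirq)
{
	if (hwirq < MAX_PIC_IRQS)
		__set_bit(hwirq, lost_interrupts);
}

static int test_and_clear_lost(unsigned int hwirq)
{
	return hwirq < MAX_PIC_IRQS &&
	       __test_and_clear_bit(hwirq, lost_interrupts);
}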
@@ -30,9 +30,9 @@ config PPC_SPLPAR
 	  two or more partitions.
 
 config EEH
-	bool "PCI Extended Error Handling (EEH)" if EXPERT
+	bool
 	depends on PPC_PSERIES && PCI
-	default y if !EXPERT
+	default y
 
 config PSERIES_MSI
        bool
@@ -51,8 +51,7 @@
 static intctl_cpm2_t __iomem *cpm2_intctl;
 
 static struct irq_domain *cpm2_pic_host;
-#define NR_MASK_WORDS   ((NR_IRQS + 31) / 32)
-static unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
+static unsigned long ppc_cached_irq_mask[2]; /* 2 32-bit registers */
 
 static const u_char irq_to_siureg[] = {
 	1, 1, 1, 1, 1, 1, 1, 1,
@@ -18,69 +18,45 @@
 extern int cpm_get_irq(struct pt_regs *regs);
 
 static struct irq_domain *mpc8xx_pic_host;
-#define NR_MASK_WORDS   ((NR_IRQS + 31) / 32)
-static unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
+static unsigned long mpc8xx_cached_irq_mask;
 static sysconf8xx_t __iomem *siu_reg;
 
-int cpm_get_irq(struct pt_regs *regs);
+static inline unsigned long mpc8xx_irqd_to_bit(struct irq_data *d)
+{
+	return 0x80000000 >> irqd_to_hwirq(d);
+}
 
 static void mpc8xx_unmask_irq(struct irq_data *d)
 {
-	int	bit, word;
-	unsigned int irq_nr = (unsigned int)irqd_to_hwirq(d);
-
-	bit = irq_nr & 0x1f;
-	word = irq_nr >> 5;
-
-	ppc_cached_irq_mask[word] |= (1 << (31-bit));
-	out_be32(&siu_reg->sc_simask, ppc_cached_irq_mask[word]);
+	mpc8xx_cached_irq_mask |= mpc8xx_irqd_to_bit(d);
+	out_be32(&siu_reg->sc_simask, mpc8xx_cached_irq_mask);
 }
 
 static void mpc8xx_mask_irq(struct irq_data *d)
 {
-	int	bit, word;
-	unsigned int irq_nr = (unsigned int)irqd_to_hwirq(d);
-
-	bit = irq_nr & 0x1f;
-	word = irq_nr >> 5;
-
-	ppc_cached_irq_mask[word] &= ~(1 << (31-bit));
-	out_be32(&siu_reg->sc_simask, ppc_cached_irq_mask[word]);
+	mpc8xx_cached_irq_mask &= ~mpc8xx_irqd_to_bit(d);
+	out_be32(&siu_reg->sc_simask, mpc8xx_cached_irq_mask);
 }
 
 static void mpc8xx_ack(struct irq_data *d)
 {
-	int	bit;
-	unsigned int irq_nr = (unsigned int)irqd_to_hwirq(d);
-
-	bit = irq_nr & 0x1f;
-	out_be32(&siu_reg->sc_sipend, 1 << (31-bit));
+	out_be32(&siu_reg->sc_sipend, mpc8xx_irqd_to_bit(d));
 }
 
 static void mpc8xx_end_irq(struct irq_data *d)
 {
-	int bit, word;
-	unsigned int irq_nr = (unsigned int)irqd_to_hwirq(d);
-
-	bit = irq_nr & 0x1f;
-	word = irq_nr >> 5;
-
-	ppc_cached_irq_mask[word] |= (1 << (31-bit));
-	out_be32(&siu_reg->sc_simask, ppc_cached_irq_mask[word]);
+	mpc8xx_cached_irq_mask |= mpc8xx_irqd_to_bit(d);
+	out_be32(&siu_reg->sc_simask, mpc8xx_cached_irq_mask);
 }
 
 static int mpc8xx_set_irq_type(struct irq_data *d, unsigned int flow_type)
 {
-	if (flow_type & IRQ_TYPE_EDGE_FALLING) {
-		irq_hw_number_t hw = (unsigned int)irqd_to_hwirq(d);
+	/* only external IRQ senses are programmable */
+	if ((flow_type & IRQ_TYPE_EDGE_FALLING) && !(irqd_to_hwirq(d) & 1)) {
 		unsigned int siel = in_be32(&siu_reg->sc_siel);
-
-		/* only external IRQ senses are programmable */
-		if ((hw & 1) == 0) {
-			siel |= (0x80000000 >> hw);
-			out_be32(&siu_reg->sc_siel, siel);
-			__irq_set_handler_locked(d->irq, handle_edge_irq);
-		}
+		siel |= mpc8xx_irqd_to_bit(d);
+		out_be32(&siu_reg->sc_siel, siel);
+		__irq_set_handler_locked(d->irq, handle_edge_irq);
 	}
 	return 0;
 }
@@ -132,6 +108,9 @@ static int mpc8xx_pic_host_xlate(struct irq_domain *h, struct device_node *ct,
 		IRQ_TYPE_EDGE_FALLING,
 	};
 
+	if (intspec[0] > 0x1f)
+		return 0;
+
 	*out_hwirq = intspec[0];
 	if (intsize > 1 && intspec[1] < 4)
 		*out_flags = map_pic_senses[intspec[1]];
@@ -188,6 +188,7 @@ void xics_migrate_irqs_away(void)
 {
 	int cpu = smp_processor_id(), hw_cpu = hard_smp_processor_id();
 	unsigned int irq, virq;
+	struct irq_desc *desc;
 
 	/* If we used to be the default server, move to the new "boot_cpuid" */
 	if (hw_cpu == xics_default_server)
@@ -202,8 +203,7 @@
 	/* Allow IPIs again... */
 	icp_ops->set_priority(DEFAULT_PRIORITY);
 
-	for_each_irq(virq) {
-		struct irq_desc *desc;
+	for_each_irq_desc(virq, desc) {
 		struct irq_chip *chip;
 		long server;
 		unsigned long flags;
@@ -212,9 +212,8 @@
 		/* We can't set affinity on ISA interrupts */
 		if (virq < NUM_ISA_INTERRUPTS)
 			continue;
-		desc = irq_to_desc(virq);
 		/* We only need to migrate enabled IRQS */
-		if (!desc || !desc->action)
+		if (!desc->action)
 			continue;
 		if (desc->irq_data.domain != xics_host)
 			continue;
@@ -469,7 +469,7 @@ static irqreturn_t pmz_interrupt(int irq, void *dev_id)
 	tty = NULL;
 	if (r3 & (CHAEXT | CHATxIP | CHARxIP)) {
 		if (!ZS_IS_OPEN(uap_a)) {
-			pmz_debug("ChanA interrupt while open !\n");
+			pmz_debug("ChanA interrupt while not open !\n");
 			goto skip_a;
 		}
 		write_zsreg(uap_a, R0, RES_H_IUS);
@@ -493,8 +493,8 @@ static irqreturn_t pmz_interrupt(int irq, void *dev_id)
 	spin_lock(&uap_b->port.lock);
 	tty = NULL;
 	if (r3 & (CHBEXT | CHBTxIP | CHBRxIP)) {
-		if (!ZS_IS_OPEN(uap_a)) {
-			pmz_debug("ChanB interrupt while open !\n");
+		if (!ZS_IS_OPEN(uap_b)) {
+			pmz_debug("ChanB interrupt while not open !\n");
 			goto skip_b;
 		}
 		write_zsreg(uap_b, R0, RES_H_IUS);