linux/arch/arc/include/asm/irqflags.h
Vineet Gupta 4788a5942b ARC: Support for high priority interrupts in the in-core intc
There is a bit of a hack/kludge right now: we disable preemption if an
L2 (high prio) IRQ is taken while an L1 (low prio) IRQ is active.

Need to revisit this

Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
2013-02-15 23:16:01 +05:30
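The preemption kludge mentioned above can be pictured roughly as follows.
This is a conceptual sketch only, not code from the patch; the helper name,
the include, and the STATUS32 "A1 active" mask name are assumptions made
purely for illustration:

	#include <linux/preempt.h>

	/*
	 * Sketch: if the high prio (L2) IRQ interrupted an active low prio
	 * (L1) handler, bump the preempt count so the nested return path is
	 * not preempted; the matching enable would run on L2 exit.
	 */
	static inline void l2_over_l1_enter_sketch(unsigned long intr_status32)
	{
		if (intr_status32 & STATUS_A1_MASK)	/* assumed mask name */
			preempt_disable();
	}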


/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef __ASM_ARC_IRQFLAGS_H
#define __ASM_ARC_IRQFLAGS_H
/* vineetg: March 2010 : local_irq_save( ) optimisation
 *  -Remove explicit mov of current status32 into reg, that is not needed
 *  -Use BIC insn instead of INVERTED + AND
 *  -Conditionally disable interrupts (if they are not enabled, don't disable)
 */
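/*
 * Illustration of the optimisation listed above (a sketch, not from this
 * file's history). A straightforward save-and-disable would look roughly
 * like:
 *
 *	lr    rA, [status32]		; read STATUS32
 *	mov   rB, rA			; explicit copy to return as "flags"
 *	and   rA, rA, rC		; rC = pre-inverted ~(E1|E2) mask
 *	flag  rA			; always rewrite STATUS32
 *
 * arch_local_irq_save() below instead clears E1/E2 with a single BIC and
 * uses a conditional FLAG, so STATUS32 is only rewritten when interrupts
 * were actually enabled.
 */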
#ifdef __KERNEL__
#include <asm/arcregs.h>
#ifndef __ASSEMBLY__
/******************************************************************
 * IRQ Control Macros
 ******************************************************************/

/*
 * Save IRQ state and disable IRQs
 */
static inline long arch_local_irq_save(void)
{
	unsigned long temp, flags;

	__asm__ __volatile__(
	"	lr  %1, [status32]	\n"	/* flags = current STATUS32 */
	"	bic %0, %1, %2		\n"	/* temp = flags with E1/E2 cleared */
	"	and.f 0, %1, %2		\n"	/* Z set if E1/E2 were already clear */
	"	flag.nz %0		\n"	/* rewrite STATUS32 only if irqs were on */
	: "=r"(temp), "=r"(flags)
	: "n"((STATUS_E1_MASK | STATUS_E2_MASK))
	: "cc");

	return flags;
}
/*
 * restore saved IRQ state
 */
static inline void arch_local_irq_restore(unsigned long flags)
{
	__asm__ __volatile__(
	"	flag %0			\n"	/* write saved flag bits (incl. E1/E2) back into STATUS32 */
	:
	: "r"(flags));
}
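/*
 * Typical use (illustrative sketch; generic code reaches these through the
 * local_irq_save()/local_irq_restore() wrappers):
 *
 *	unsigned long flags;
 *
 *	flags = arch_local_irq_save();	(IRQs off, previous state in flags)
 *	... code that must not race with interrupts ...
 *	arch_local_irq_restore(flags);	(previous state written back)
 */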
/*
 * Unconditionally Enable IRQs
 */
extern void arch_local_irq_enable(void);
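/*
 * Out of line, presumably so the interrupt priority level handling can live
 * in one place. A minimal sketch of an unconditional enable, built only from
 * the primitives in this file, would be:
 *
 *	unsigned long flags = arch_local_save_flags();
 *
 *	flags |= (STATUS_E1_MASK | STATUS_E2_MASK);
 *	arch_local_irq_restore(flags);
 */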
/*
 * Unconditionally Disable IRQs
 */
static inline void arch_local_irq_disable(void)
{
	unsigned long temp;

	__asm__ __volatile__(
	"	lr  %0, [status32]	\n"	/* read current STATUS32 */
	"	and %0, %0, %1		\n"	/* clear E1/E2 */
	"	flag %0			\n"	/* write it back: irqs off */
	: "=&r"(temp)
	: "n"(~(STATUS_E1_MASK | STATUS_E2_MASK)));
}
/*
 * save IRQ state
 */
static inline long arch_local_save_flags(void)
{
	unsigned long temp;

	__asm__ __volatile__(
	"	lr  %0, [status32]	\n"
	: "=&r"(temp));

	return temp;
}
/*
 * Query IRQ state
 */
static inline int arch_irqs_disabled_flags(unsigned long flags)
{
	/*
	 * E1 covers the (only) low priority level; E2 matters only when the
	 * high priority (level 2) interrupts are configured in.
	 */
	return !(flags & (STATUS_E1_MASK
#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
			| STATUS_E2_MASK
#endif
			));
}
static inline int arch_irqs_disabled(void)
{
	return arch_irqs_disabled_flags(arch_local_save_flags());
}
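/*
 * Example (sketch): code that relies on interrupts being off can assert it:
 *
 *	WARN_ON(!arch_irqs_disabled());
 *
 * Generic code normally asks the same question through irqs_disabled(),
 * which boils down to this helper.
 */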
static inline void arch_mask_irq(unsigned int irq)
{
	unsigned int ienb;

	ienb = read_aux_reg(AUX_IENABLE);
	ienb &= ~(1 << irq);
	write_aux_reg(AUX_IENABLE, ienb);
}

static inline void arch_unmask_irq(unsigned int irq)
{
	unsigned int ienb;

	ienb = read_aux_reg(AUX_IENABLE);
	ienb |= (1 << irq);
	write_aux_reg(AUX_IENABLE, ienb);
}
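/*
 * Example (sketch): AUX_IENABLE holds one enable bit per interrupt line, so
 * masking/unmasking is just clearing/setting that bit. For a hypothetical
 * line number hw_irq:
 *
 *	arch_mask_irq(hw_irq);		(line can no longer interrupt)
 *	... reprogram the device ...
 *	arch_unmask_irq(hw_irq);	(interrupts flow again)
 */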
#else
.macro IRQ_DISABLE  scratch
	lr	\scratch, [status32]
	bic	\scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
	flag	\scratch
.endm

.macro IRQ_DISABLE_SAVE  scratch, save
	lr	\scratch, [status32]
	mov	\save, \scratch		/* Make a copy */
	bic	\scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
	flag	\scratch
.endm

.macro IRQ_ENABLE  scratch
	lr	\scratch, [status32]
	or	\scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
	flag	\scratch
.endm
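/*
 * Example (sketch): low level entry code can invoke these with any free
 * scratch register, e.g.
 *
 *	IRQ_DISABLE	r9
 *	... touch state that must not be interrupted ...
 *	IRQ_ENABLE	r9
 */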
#endif /* __ASSEMBLY__ */
#endif /* KERNEL */
#endif