Merge branch 'linus' into cpus4096

Ingo Molnar 2009-01-15 15:45:31 +01:00
commit 49a93bc978
396 changed files with 27996 additions and 4472 deletions

View File

@ -23,7 +23,7 @@
#include <linux/err.h>
#include <linux/io.h>
#include <mach/imx-regs.h>
#include <mach/hardware.h>
/*
* Very simple approach: We can't disable clocks, so we do

View File

@ -245,11 +245,11 @@ void __init imx_set_mmc_info(struct imxmmc_platform_data *info)
imx_mmc_device.dev.platform_data = info;
}
static struct imxfb_mach_info imx_fb_info;
static struct imx_fb_platform_data imx_fb_info;
void __init set_imx_fb_info(struct imxfb_mach_info *hard_imx_fb_info)
void __init set_imx_fb_info(struct imx_fb_platform_data *hard_imx_fb_info)
{
memcpy(&imx_fb_info,hard_imx_fb_info,sizeof(struct imxfb_mach_info));
memcpy(&imx_fb_info,hard_imx_fb_info,sizeof(struct imx_fb_platform_data));
}
static struct resource imxfb_resources[] = {

View File

@ -373,110 +373,4 @@
#define TSTAT_CAPT (1<<1)
#define TSTAT_COMP (1<<0)
/*
* LCD Controller
*/
#define LCDC_SSA __REG(IMX_LCDC_BASE+0x00)
#define LCDC_SIZE __REG(IMX_LCDC_BASE+0x04)
#define SIZE_XMAX(x) ((((x) >> 4) & 0x3f) << 20)
#define SIZE_YMAX(y) ( (y) & 0x1ff )
#define LCDC_VPW __REG(IMX_LCDC_BASE+0x08)
#define VPW_VPW(x) ( (x) & 0x3ff )
#define LCDC_CPOS __REG(IMX_LCDC_BASE+0x0C)
#define CPOS_CC1 (1<<31)
#define CPOS_CC0 (1<<30)
#define CPOS_OP (1<<28)
#define CPOS_CXP(x) (((x) & 3ff) << 16)
#define CPOS_CYP(y) ((y) & 0x1ff)
#define LCDC_LCWHB __REG(IMX_LCDC_BASE+0x10)
#define LCWHB_BK_EN (1<<31)
#define LCWHB_CW(w) (((w) & 0x1f) << 24)
#define LCWHB_CH(h) (((h) & 0x1f) << 16)
#define LCWHB_BD(x) ((x) & 0xff)
#define LCDC_LCHCC __REG(IMX_LCDC_BASE+0x14)
#define LCHCC_CUR_COL_R(r) (((r) & 0x1f) << 11)
#define LCHCC_CUR_COL_G(g) (((g) & 0x3f) << 5)
#define LCHCC_CUR_COL_B(b) ((b) & 0x1f)
#define LCDC_PCR __REG(IMX_LCDC_BASE+0x18)
#define PCR_TFT (1<<31)
#define PCR_COLOR (1<<30)
#define PCR_PBSIZ_1 (0<<28)
#define PCR_PBSIZ_2 (1<<28)
#define PCR_PBSIZ_4 (2<<28)
#define PCR_PBSIZ_8 (3<<28)
#define PCR_BPIX_1 (0<<25)
#define PCR_BPIX_2 (1<<25)
#define PCR_BPIX_4 (2<<25)
#define PCR_BPIX_8 (3<<25)
#define PCR_BPIX_12 (4<<25)
#define PCR_BPIX_16 (4<<25)
#define PCR_PIXPOL (1<<24)
#define PCR_FLMPOL (1<<23)
#define PCR_LPPOL (1<<22)
#define PCR_CLKPOL (1<<21)
#define PCR_OEPOL (1<<20)
#define PCR_SCLKIDLE (1<<19)
#define PCR_END_SEL (1<<18)
#define PCR_END_BYTE_SWAP (1<<17)
#define PCR_REV_VS (1<<16)
#define PCR_ACD_SEL (1<<15)
#define PCR_ACD(x) (((x) & 0x7f) << 8)
#define PCR_SCLK_SEL (1<<7)
#define PCR_SHARP (1<<6)
#define PCR_PCD(x) ((x) & 0x3f)
#define LCDC_HCR __REG(IMX_LCDC_BASE+0x1C)
#define HCR_H_WIDTH(x) (((x) & 0x3f) << 26)
#define HCR_H_WAIT_1(x) (((x) & 0xff) << 8)
#define HCR_H_WAIT_2(x) ((x) & 0xff)
#define LCDC_VCR __REG(IMX_LCDC_BASE+0x20)
#define VCR_V_WIDTH(x) (((x) & 0x3f) << 26)
#define VCR_V_WAIT_1(x) (((x) & 0xff) << 8)
#define VCR_V_WAIT_2(x) ((x) & 0xff)
#define LCDC_POS __REG(IMX_LCDC_BASE+0x24)
#define POS_POS(x) ((x) & 1f)
#define LCDC_LSCR1 __REG(IMX_LCDC_BASE+0x28)
#define LSCR1_PS_RISE_DELAY(x) (((x) & 0x7f) << 26)
#define LSCR1_CLS_RISE_DELAY(x) (((x) & 0x3f) << 16)
#define LSCR1_REV_TOGGLE_DELAY(x) (((x) & 0xf) << 8)
#define LSCR1_GRAY2(x) (((x) & 0xf) << 4)
#define LSCR1_GRAY1(x) (((x) & 0xf))
#define LCDC_PWMR __REG(IMX_LCDC_BASE+0x2C)
#define PWMR_CLS(x) (((x) & 0x1ff) << 16)
#define PWMR_LDMSK (1<<15)
#define PWMR_SCR1 (1<<10)
#define PWMR_SCR0 (1<<9)
#define PWMR_CC_EN (1<<8)
#define PWMR_PW(x) ((x) & 0xff)
#define LCDC_DMACR __REG(IMX_LCDC_BASE+0x30)
#define DMACR_BURST (1<<31)
#define DMACR_HM(x) (((x) & 0xf) << 16)
#define DMACR_TM(x) ((x) &0xf)
#define LCDC_RMCR __REG(IMX_LCDC_BASE+0x34)
#define RMCR_LCDC_EN (1<<1)
#define RMCR_SELF_REF (1<<0)
#define LCDC_LCDICR __REG(IMX_LCDC_BASE+0x38)
#define LCDICR_INT_SYN (1<<2)
#define LCDICR_INT_CON (1)
#define LCDC_LCDISR __REG(IMX_LCDC_BASE+0x40)
#define LCDISR_UDR_ERR (1<<3)
#define LCDISR_ERR_RES (1<<2)
#define LCDISR_EOF (1<<1)
#define LCDISR_BOF (1<<0)
#endif // _IMX_REGS_H

View File

@ -29,6 +29,7 @@
#include <asm/mach-types.h>
#include <mach/regs-serial.h>
#include <mach/map.h>
#include "cpu.h"

View File

@ -28,7 +28,6 @@
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <mach/system.h>
#include <mach/map.h>
#include <mach/regs-timer.h>

View File

@ -27,6 +27,7 @@ EXPORT_SYMBOL(__cpuc_flush_kern_all);
EXPORT_SYMBOL(__cpuc_flush_user_all);
EXPORT_SYMBOL(__cpuc_flush_user_range);
EXPORT_SYMBOL(__cpuc_coherent_kern_range);
EXPORT_SYMBOL(dmac_inv_range); /* because of flush_ioremap_region() */
#else
EXPORT_SYMBOL(cpu_cache);
#endif

View File

@ -72,10 +72,14 @@ static struct irq_controller amiga_irq_controller = {
void __init amiga_init_IRQ(void)
{
request_irq(IRQ_AUTO_1, ami_int1, 0, "int1", NULL);
request_irq(IRQ_AUTO_3, ami_int3, 0, "int3", NULL);
request_irq(IRQ_AUTO_4, ami_int4, 0, "int4", NULL);
request_irq(IRQ_AUTO_5, ami_int5, 0, "int5", NULL);
if (request_irq(IRQ_AUTO_1, ami_int1, 0, "int1", NULL))
pr_err("Couldn't register int%d\n", 1);
if (request_irq(IRQ_AUTO_3, ami_int3, 0, "int3", NULL))
pr_err("Couldn't register int%d\n", 3);
if (request_irq(IRQ_AUTO_4, ami_int4, 0, "int4", NULL))
pr_err("Couldn't register int%d\n", 4);
if (request_irq(IRQ_AUTO_5, ami_int5, 0, "int5", NULL))
pr_err("Couldn't register int%d\n", 5);
m68k_setup_irq_controller(&amiga_irq_controller, IRQ_USER, AMI_STD_IRQS);
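The recurring change in these m68k hunks is one small pattern: check the return value of request_irq() and log a failure instead of silently ignoring it. A minimal standalone sketch of that pattern, with the kernel calls stubbed out and the helper name purely illustrative:

#include <stdio.h>

/* Stand-ins for the kernel API; signatures simplified for the sketch. */
typedef int irqreturn_t;
typedef irqreturn_t (*irq_handler_t)(int irq, void *dev_id);

static int request_irq(unsigned int irq, irq_handler_t handler,
                       unsigned long flags, const char *name, void *dev_id)
{
        (void)handler; (void)flags; (void)dev_id;
        return (irq & 1) ? -16 /* pretend -EBUSY for odd IRQs */ : 0;
}

#define pr_err(...) fprintf(stderr, __VA_ARGS__)

/* Hypothetical helper wrapping the pattern applied throughout this merge. */
static void request_irq_or_complain(unsigned int irq, irq_handler_t handler,
                                    const char *name)
{
        if (request_irq(irq, handler, 0, name, NULL))
                pr_err("Couldn't register %s interrupt\n", name);
}

static irqreturn_t dummy_handler(int irq, void *dev_id)
{
        (void)irq; (void)dev_id;
        return 1; /* IRQ_HANDLED */
}

int main(void)
{
        request_irq_or_complain(13, dummy_handler, "int1"); /* logs an error */
        request_irq_or_complain(14, dummy_handler, "int3"); /* succeeds quietly */
        return 0;
}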

View File

@ -176,5 +176,7 @@ void __init cia_init_IRQ(struct ciabase *base)
/* override auto int and install CIA handler */
m68k_setup_irq_controller(&auto_irq_controller, base->handler_irq, 1);
m68k_irq_startup(base->handler_irq);
request_irq(base->handler_irq, cia_handler, IRQF_SHARED, base->name, base);
if (request_irq(base->handler_irq, cia_handler, IRQF_SHARED,
base->name, base))
pr_err("Couldn't register %s interrupt\n", base->name);
}

View File

@ -493,7 +493,8 @@ static void __init amiga_sched_init(irq_handler_t timer_routine)
* Please don't change this to use ciaa, as it interferes with the
* SCSI code. We'll have to take a look at this later
*/
request_irq(IRQ_AMIGA_CIAB_TA, timer_routine, 0, "timer", NULL);
if (request_irq(IRQ_AMIGA_CIAB_TA, timer_routine, 0, "timer", NULL))
pr_err("Couldn't register timer interrupt\n");
/* start timer */
ciab.cra |= 0x11;
}

View File

@ -31,10 +31,6 @@ extern unsigned long dn_gettimeoffset(void);
extern int dn_dummy_hwclk(int, struct rtc_time *);
extern int dn_dummy_set_clock_mmss(unsigned long);
extern void dn_dummy_reset(void);
extern void dn_dummy_waitbut(void);
extern struct fb_info *dn_fb_init(long *);
extern void dn_dummy_debug_init(void);
extern irqreturn_t dn_process_int(int irq, struct pt_regs *fp);
#ifdef CONFIG_HEARTBEAT
static void dn_heartbeat(int on);
#endif
@ -204,7 +200,8 @@ void dn_sched_init(irq_handler_t timer_routine)
printk("*(0x10803) %02x\n",*(volatile unsigned char *)(timer+0x3));
#endif
request_irq(IRQ_APOLLO, dn_timer_int, 0, "time", timer_routine);
if (request_irq(IRQ_APOLLO, dn_timer_int, 0, "time", timer_routine))
pr_err("Couldn't register timer interrupt\n");
}
unsigned long dn_gettimeoffset(void) {

View File

@ -33,7 +33,6 @@
#include <asm/atari_joystick.h>
#include <asm/irq.h>
extern unsigned int keymap_count;
/* Hook for MIDI serial driver */
void (*atari_MIDI_interrupt_hook) (void);
@ -567,14 +566,19 @@ static int atari_keyb_done = 0;
int atari_keyb_init(void)
{
int error;
if (atari_keyb_done)
return 0;
kb_state.state = KEYBOARD;
kb_state.len = 0;
request_irq(IRQ_MFP_ACIA, atari_keyboard_interrupt, IRQ_TYPE_SLOW,
"keyboard/mouse/MIDI", atari_keyboard_interrupt);
error = request_irq(IRQ_MFP_ACIA, atari_keyboard_interrupt,
IRQ_TYPE_SLOW, "keyboard/mouse/MIDI",
atari_keyboard_interrupt);
if (error)
return error;
atari_turnoff_irq(IRQ_MFP_ACIA);
do {

View File

@ -179,8 +179,9 @@ EXPORT_SYMBOL(stdma_islocked);
void __init stdma_init(void)
{
stdma_isr = NULL;
request_irq(IRQ_MFP_FDC, stdma_int, IRQ_TYPE_SLOW | IRQF_SHARED,
"ST-DMA: floppy/ACSI/IDE/Falcon-SCSI", stdma_int);
if (request_irq(IRQ_MFP_FDC, stdma_int, IRQ_TYPE_SLOW | IRQF_SHARED,
"ST-DMA: floppy/ACSI/IDE/Falcon-SCSI", stdma_int))
pr_err("Couldn't register ST-DMA interrupt\n");
}

View File

@ -31,8 +31,9 @@ atari_sched_init(irq_handler_t timer_routine)
/* start timer C, div = 1:100 */
mfp.tim_ct_cd = (mfp.tim_ct_cd & 15) | 0x60;
/* install interrupt service routine for MFP Timer C */
request_irq(IRQ_MFP_TIMC, timer_routine, IRQ_TYPE_SLOW,
"timer", timer_routine);
if (request_irq(IRQ_MFP_TIMC, timer_routine, IRQ_TYPE_SLOW,
"timer", timer_routine))
pr_err("Couldn't register timer interrupt\n");
}
/* ++andreas: gettimeoffset fixed to check for pending interrupt */

View File

@ -43,7 +43,6 @@ extern unsigned long bvme6000_gettimeoffset (void);
extern int bvme6000_hwclk (int, struct rtc_time *);
extern int bvme6000_set_clock_mmss (unsigned long);
extern void bvme6000_reset (void);
extern void bvme6000_waitbut(void);
void bvme6000_set_vectors (void);
/* Save tick handler routine pointer, will point to do_timer() in

View File

@ -70,7 +70,8 @@ void __init hp300_sched_init(irq_handler_t vector)
asm volatile(" movpw %0,%1@(5)" : : "d" (INTVAL), "a" (CLOCKBASE));
request_irq(IRQ_AUTO_6, hp300_tick, IRQ_FLG_STD, "timer tick", vector);
if (request_irq(IRQ_AUTO_6, hp300_tick, IRQ_FLG_STD, "timer tick", vector))
pr_err("Couldn't register timer interrupt\n");
out_8(CLOCKBASE + CLKCR2, 0x1); /* select CR1 */
out_8(CLOCKBASE + CLKCR1, 0x40); /* enable irq */

arch/m68k/kernel/.gitignore (new file, 1 addition)
View File

@ -0,0 +1 @@
vmlinux.lds

View File

@ -424,7 +424,7 @@ resume:
.data
ALIGN
sys_call_table:
.long sys_ni_syscall /* 0 - old "setup()" system call*/
.long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
.long sys_exit
.long sys_fork
.long sys_read

View File

@ -26,6 +26,7 @@
#include <linux/initrd.h>
#include <asm/bootinfo.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/fpu.h>
#include <asm/irq.h>
@ -62,7 +63,6 @@ EXPORT_SYMBOL(vme_brdtype);
int m68k_is040or060;
EXPORT_SYMBOL(m68k_is040or060);
extern int end;
extern unsigned long availmem;
int m68k_num_memory;
@ -215,11 +215,10 @@ static void __init m68k_parse_bootinfo(const struct bi_record *record)
void __init setup_arch(char **cmdline_p)
{
extern int _etext, _edata, _end;
int i;
/* The bootinfo is located right after the kernel bss */
m68k_parse_bootinfo((const struct bi_record *)&_end);
m68k_parse_bootinfo((const struct bi_record *)_end);
if (CPU_IS_040)
m68k_is040or060 = 4;
@ -252,9 +251,9 @@ void __init setup_arch(char **cmdline_p)
}
init_mm.start_code = PAGE_OFFSET;
init_mm.end_code = (unsigned long) &_etext;
init_mm.end_data = (unsigned long) &_edata;
init_mm.brk = (unsigned long) &_end;
init_mm.end_code = (unsigned long)_etext;
init_mm.end_data = (unsigned long)_edata;
init_mm.brk = (unsigned long)_end;
*cmdline_p = m68k_command_line;
memcpy(boot_command_line, *cmdline_p, CL_SIZE);
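Dropping the ampersands from &_etext, &_edata and &_end only works because the symbols are now declared as char arrays, which is how asm/sections.h (included above) declares linker-provided symbols: an array name already decays to the address of its first element, so the explicit & is redundant and the casts line up cleanly. A small userspace sketch of the same idea, using an ordinary array as a stand-in for a linker symbol:

#include <stdio.h>

/* A plain file-scope array standing in for a linker-provided symbol
 * declared, as in asm/sections.h, as an array of char. */
char fake_end[16];

int main(void)
{
        /* With the array declaration the symbol itself is an address:
         * no '&' is needed, and both forms yield the same value. */
        unsigned long brk_new = (unsigned long)fake_end;   /* new style  */
        unsigned long brk_old = (unsigned long)&fake_end;  /* old style  */

        printf("%lx %lx (equal: %d)\n", brk_new, brk_old, brk_new == brk_old);
        return 0;
}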

View File

@ -326,6 +326,9 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *usc, void __u
struct sigcontext context;
int err;
/* Always make any pending restarted system calls return -EINTR */
current_thread_info()->restart_block.fn = do_no_restart_syscall;
/* get previous context */
if (copy_from_user(&context, usc, sizeof(context)))
goto badframe;
@ -411,6 +414,9 @@ rt_restore_ucontext(struct pt_regs *regs, struct switch_stack *sw,
unsigned long usp;
int err;
/* Always make any pending restarted system calls return -EINTR */
current_thread_info()->restart_block.fn = do_no_restart_syscall;
err = __get_user(temp, &uc->uc_mcontext.version);
if (temp != MCONTEXT_VERSION)
goto badframe;
@ -937,6 +943,15 @@ handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler)
regs->d0 = -EINTR;
break;
case -ERESTART_RESTARTBLOCK:
if (!has_handler) {
regs->d0 = __NR_restart_syscall;
regs->pc -= 2;
break;
}
regs->d0 = -EINTR;
break;
case -ERESTARTSYS:
if (has_handler && !(ka->sa.sa_flags & SA_RESTART)) {
regs->d0 = -EINTR;
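Both restore paths now force any pending restarted system call to return -EINTR, and handle_restart() gains the -ERESTART_RESTARTBLOCK case: with no handler installed, d0 is loaded with __NR_restart_syscall (slot 0 of the syscall table above) and the pc is wound back two bytes so the trap instruction re-executes and enters sys_restart_syscall. A rough standalone model of that control flow, using a made-up register struct and error constants:

#include <stdio.h>

/* Illustrative stand-ins; the real values live in the m68k headers. */
#define ERESTART_RESTARTBLOCK 516
#define EINTR                   4
#define NR_restart_syscall      0   /* slot 0 of sys_call_table above */

struct fake_regs {
        long d0;            /* syscall number / return value register */
        unsigned long pc;   /* points just past the 2-byte trap insn  */
};

/* Mirrors the -ERESTART_RESTARTBLOCK branch added to handle_restart(). */
static void handle_restartblock(struct fake_regs *regs, int has_handler)
{
        if (!has_handler) {
                regs->d0 = NR_restart_syscall;
                regs->pc -= 2;          /* re-execute the trap instruction */
        } else {
                regs->d0 = -EINTR;      /* signal handler sees -EINTR      */
        }
}

int main(void)
{
        struct fake_regs regs = { .d0 = -ERESTART_RESTARTBLOCK, .pc = 0x1002 };

        handle_restartblock(&regs, 0);
        printf("d0=%ld pc=0x%lx\n", regs.d0, regs.pc); /* d0=0 pc=0x1000 */
        return 0;
}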

View File

@ -33,6 +33,7 @@ SECTIONS
} :data
/* End of data goes *here* so that freeing init code works properly. */
_edata = .;
NOTES
/* will be freed after init */
. = ALIGN(PAGE_SIZE); /* Init code and data */

View File

@ -92,7 +92,8 @@ static irqreturn_t baboon_irq(int irq, void *dev_id)
void __init baboon_register_interrupts(void)
{
baboon_disabled = 0;
request_irq(IRQ_NUBUS_C, baboon_irq, 0, "baboon", (void *)baboon);
if (request_irq(IRQ_NUBUS_C, baboon_irq, 0, "baboon", (void *)baboon))
pr_err("Couldn't register baboon interrupt\n");
}
/*

View File

@ -47,13 +47,6 @@
struct mac_booter_data mac_bi_data;
/* New m68k bootinfo stuff and videobase */
extern int m68k_num_memory;
extern struct mem_info m68k_memory[NUM_MEMINFO];
extern struct mem_info m68k_ramdisk;
/* The phys. video addr. - might be bogus on some machines */
static unsigned long mac_orig_videoaddr;
@ -61,7 +54,6 @@ static unsigned long mac_orig_videoaddr;
extern unsigned long mac_gettimeoffset(void);
extern int mac_hwclk(int, struct rtc_time *);
extern int mac_set_clock_mmss(unsigned long);
extern int show_mac_interrupts(struct seq_file *, void *);
extern void iop_preinit(void);
extern void iop_init(void);
extern void via_init(void);
@ -805,10 +797,6 @@ static void __init mac_identify(void)
mac_bi_data.boottime, mac_bi_data.gmtbias);
printk(KERN_DEBUG " Machine ID: %ld CPUid: 0x%lx memory size: 0x%lx \n",
mac_bi_data.id, mac_bi_data.cpuid, mac_bi_data.memsize);
#if 0
printk("Ramdisk: addr 0x%lx size 0x%lx\n",
m68k_ramdisk.addr, m68k_ramdisk.size);
#endif
iop_init();
via_init();

View File

@ -27,7 +27,6 @@
#include <asm/macints.h>
extern unsigned long mac_videobase;
extern unsigned long mac_videodepth;
extern unsigned long mac_rowbytes;
extern void mac_serial_print(const char *);

View File

@ -305,14 +305,16 @@ void __init iop_register_interrupts(void)
{
if (iop_ism_present) {
if (oss_present) {
request_irq(OSS_IRQLEV_IOPISM, iop_ism_irq,
if (request_irq(OSS_IRQLEV_IOPISM, iop_ism_irq,
IRQ_FLG_LOCK, "ISM IOP",
(void *) IOP_NUM_ISM);
(void *) IOP_NUM_ISM))
pr_err("Couldn't register ISM IOP interrupt\n");
oss_irq_enable(IRQ_MAC_ADB);
} else {
request_irq(IRQ_VIA2_0, iop_ism_irq,
if (request_irq(IRQ_VIA2_0, iop_ism_irq,
IRQ_FLG_LOCK|IRQ_FLG_FAST, "ISM IOP",
(void *) IOP_NUM_ISM);
(void *) IOP_NUM_ISM))
pr_err("Couldn't register ISM IOP interrupt\n");
}
if (!iop_alive(iop_base[IOP_NUM_ISM])) {
printk("IOP: oh my god, they killed the ISM IOP!\n");

View File

@ -134,6 +134,7 @@
#include <asm/errno.h>
#include <asm/macints.h>
#include <asm/irq_regs.h>
#include <asm/mac_oss.h>
#define DEBUG_SPURIOUS
#define SHUTUP_SONIC
@ -146,7 +147,6 @@ static int scc_mask;
* VIA/RBV hooks
*/
extern void via_init(void);
extern void via_register_interrupts(void);
extern void via_irq_enable(int);
extern void via_irq_disable(int);
@ -157,9 +157,6 @@ extern int via_irq_pending(int);
* OSS hooks
*/
extern int oss_present;
extern void oss_init(void);
extern void oss_register_interrupts(void);
extern void oss_irq_enable(int);
extern void oss_irq_disable(int);
@ -170,9 +167,6 @@ extern int oss_irq_pending(int);
* PSC hooks
*/
extern int psc_present;
extern void psc_init(void);
extern void psc_register_interrupts(void);
extern void psc_irq_enable(int);
extern void psc_irq_disable(int);
@ -191,12 +185,10 @@ extern void iop_register_interrupts(void);
extern int baboon_present;
extern void baboon_init(void);
extern void baboon_register_interrupts(void);
extern void baboon_irq_enable(int);
extern void baboon_irq_disable(int);
extern void baboon_irq_clear(int);
extern int baboon_irq_pending(int);
/*
* SCC interrupt routines
@ -258,8 +250,9 @@ void __init mac_init_IRQ(void)
if (baboon_present)
baboon_register_interrupts();
iop_register_interrupts();
request_irq(IRQ_AUTO_7, mac_nmi_handler, 0, "NMI",
mac_nmi_handler);
if (request_irq(IRQ_AUTO_7, mac_nmi_handler, 0, "NMI",
mac_nmi_handler))
pr_err("Couldn't register NMI\n");
#ifdef DEBUG_MACINTS
printk("mac_init_IRQ(): Done!\n");
#endif

View File

@ -35,7 +35,6 @@
#define RTC_OFFSET 2082844800
extern struct mac_booter_data mac_bi_data;
static void (*rom_reset)(void);
#ifdef CONFIG_ADB_CUDA

View File

@ -66,16 +66,21 @@ void __init oss_init(void)
void __init oss_register_interrupts(void)
{
request_irq(OSS_IRQLEV_SCSI, oss_irq, IRQ_FLG_LOCK,
"scsi", (void *) oss);
request_irq(OSS_IRQLEV_IOPSCC, mac_scc_dispatch, IRQ_FLG_LOCK,
"scc", mac_scc_dispatch);
request_irq(OSS_IRQLEV_NUBUS, oss_nubus_irq, IRQ_FLG_LOCK,
"nubus", (void *) oss);
request_irq(OSS_IRQLEV_SOUND, oss_irq, IRQ_FLG_LOCK,
"sound", (void *) oss);
request_irq(OSS_IRQLEV_VIA1, via1_irq, IRQ_FLG_LOCK,
"via1", (void *) via1);
if (request_irq(OSS_IRQLEV_SCSI, oss_irq, IRQ_FLG_LOCK,
"scsi", (void *) oss))
pr_err("Couldn't register %s interrupt\n", "scsi");
if (request_irq(OSS_IRQLEV_IOPSCC, mac_scc_dispatch, IRQ_FLG_LOCK,
"scc", mac_scc_dispatch))
pr_err("Couldn't register %s interrupt\n", "scc");
if (request_irq(OSS_IRQLEV_NUBUS, oss_nubus_irq, IRQ_FLG_LOCK,
"nubus", (void *) oss))
pr_err("Couldn't register %s interrupt\n", "nubus");
if (request_irq(OSS_IRQLEV_SOUND, oss_irq, IRQ_FLG_LOCK,
"sound", (void *) oss))
pr_err("Couldn't register %s interrupt\n", "sound");
if (request_irq(OSS_IRQLEV_VIA1, via1_irq, IRQ_FLG_LOCK,
"via1", (void *) via1))
pr_err("Couldn't register %s interrupt\n", "via1");
}
/*

View File

@ -117,10 +117,14 @@ void __init psc_init(void)
void __init psc_register_interrupts(void)
{
request_irq(IRQ_AUTO_3, psc_irq, 0, "psc3", (void *) 0x30);
request_irq(IRQ_AUTO_4, psc_irq, 0, "psc4", (void *) 0x40);
request_irq(IRQ_AUTO_5, psc_irq, 0, "psc5", (void *) 0x50);
request_irq(IRQ_AUTO_6, psc_irq, 0, "psc6", (void *) 0x60);
if (request_irq(IRQ_AUTO_3, psc_irq, 0, "psc3", (void *) 0x30))
pr_err("Couldn't register psc%d interrupt\n", 3);
if (request_irq(IRQ_AUTO_4, psc_irq, 0, "psc4", (void *) 0x40))
pr_err("Couldn't register psc%d interrupt\n", 4);
if (request_irq(IRQ_AUTO_5, psc_irq, 0, "psc5", (void *) 0x50))
pr_err("Couldn't register psc%d interrupt\n", 5);
if (request_irq(IRQ_AUTO_6, psc_irq, 0, "psc6", (void *) 0x60))
pr_err("Couldn't register psc%d interrupt\n", 6);
}
/*

View File

@ -34,6 +34,7 @@
#include <asm/macints.h>
#include <asm/mac_via.h>
#include <asm/mac_psc.h>
#include <asm/mac_oss.h>
volatile __u8 *via1, *via2;
int rbv_present;
@ -84,7 +85,6 @@ void via_irq_disable(int irq);
void via_irq_clear(int irq);
extern irqreturn_t mac_scc_dispatch(int, void *);
extern int oss_present;
/*
* Initialize the VIAs
@ -283,7 +283,8 @@ void __init via_init_clock(irq_handler_t func)
via1[vT1CL] = MAC_CLOCK_LOW;
via1[vT1CH] = MAC_CLOCK_HIGH;
request_irq(IRQ_MAC_TIMER_1, func, IRQ_FLG_LOCK, "timer", func);
if (request_irq(IRQ_MAC_TIMER_1, func, IRQ_FLG_LOCK, "timer", func))
pr_err("Couldn't register %s interrupt\n", "timer");
}
/*
@ -293,25 +294,31 @@ void __init via_init_clock(irq_handler_t func)
void __init via_register_interrupts(void)
{
if (via_alt_mapping) {
request_irq(IRQ_AUTO_1, via1_irq,
if (request_irq(IRQ_AUTO_1, via1_irq,
IRQ_FLG_LOCK|IRQ_FLG_FAST, "software",
(void *) via1);
request_irq(IRQ_AUTO_6, via1_irq,
(void *) via1))
pr_err("Couldn't register %s interrupt\n", "software");
if (request_irq(IRQ_AUTO_6, via1_irq,
IRQ_FLG_LOCK|IRQ_FLG_FAST, "via1",
(void *) via1);
(void *) via1))
pr_err("Couldn't register %s interrupt\n", "via1");
} else {
request_irq(IRQ_AUTO_1, via1_irq,
if (request_irq(IRQ_AUTO_1, via1_irq,
IRQ_FLG_LOCK|IRQ_FLG_FAST, "via1",
(void *) via1);
(void *) via1))
pr_err("Couldn't register %s interrupt\n", "via1");
}
request_irq(IRQ_AUTO_2, via2_irq, IRQ_FLG_LOCK|IRQ_FLG_FAST,
"via2", (void *) via2);
if (request_irq(IRQ_AUTO_2, via2_irq, IRQ_FLG_LOCK|IRQ_FLG_FAST,
"via2", (void *) via2))
pr_err("Couldn't register %s interrupt\n", "via2");
if (!psc_present) {
request_irq(IRQ_AUTO_4, mac_scc_dispatch, IRQ_FLG_LOCK,
"scc", mac_scc_dispatch);
if (request_irq(IRQ_AUTO_4, mac_scc_dispatch, IRQ_FLG_LOCK,
"scc", mac_scc_dispatch))
pr_err("Couldn't register %s interrupt\n", "scc");
}
request_irq(IRQ_MAC_NUBUS, via_nubus_irq, IRQ_FLG_LOCK|IRQ_FLG_FAST,
"nubus", (void *) via2);
if (request_irq(IRQ_MAC_NUBUS, via_nubus_irq,
IRQ_FLG_LOCK|IRQ_FLG_FAST, "nubus", (void *) via2))
pr_err("Couldn't register %s interrupt\n", "nubus");
}
/*

View File

@ -24,7 +24,6 @@ static const struct fp_ext fp_one =
extern struct fp_ext *fp_fadd(struct fp_ext *dest, const struct fp_ext *src);
extern struct fp_ext *fp_fdiv(struct fp_ext *dest, const struct fp_ext *src);
extern struct fp_ext *fp_fmul(struct fp_ext *dest, const struct fp_ext *src);
struct fp_ext *
fp_fsqrt(struct fp_ext *dest, struct fp_ext *src)

View File

@ -28,6 +28,7 @@
#ifdef CONFIG_ATARI
#include <asm/atari_stram.h>
#endif
#include <asm/sections.h>
#include <asm/tlb.h>
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
@ -73,9 +74,6 @@ extern void init_pointer_table(unsigned long ptable);
/* References to section boundaries */
extern char _text[], _etext[];
extern char __init_begin[], __init_end[];
extern pmd_t *zero_pgtable;
void __init mem_init(void)

View File

@ -30,6 +30,7 @@
#ifdef CONFIG_ATARI
#include <asm/atari_stram.h>
#endif
#include <asm/sections.h>
#undef DEBUG
@ -301,14 +302,12 @@ void __init paging_init(void)
}
}
extern char __init_begin, __init_end;
void free_initmem(void)
{
unsigned long addr;
addr = (unsigned long)&__init_begin;
for (; addr < (unsigned long)&__init_end; addr += PAGE_SIZE) {
addr = (unsigned long)__init_begin;
for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) {
virt_to_page(addr)->flags &= ~(1 << PG_reserved);
init_page_count(virt_to_page(addr));
free_page(addr);

View File

@ -42,7 +42,6 @@ extern unsigned long mvme147_gettimeoffset (void);
extern int mvme147_hwclk (int, struct rtc_time *);
extern int mvme147_set_clock_mmss (unsigned long);
extern void mvme147_reset (void);
extern void mvme147_waitbut(void);
static int bcd2int (unsigned char b);
@ -115,8 +114,9 @@ static irqreturn_t mvme147_timer_int (int irq, void *dev_id)
void mvme147_sched_init (irq_handler_t timer_routine)
{
tick_handler = timer_routine;
request_irq (PCC_IRQ_TIMER1, mvme147_timer_int,
IRQ_FLG_REPLACE, "timer 1", NULL);
if (request_irq(PCC_IRQ_TIMER1, mvme147_timer_int, IRQ_FLG_REPLACE,
"timer 1", NULL))
pr_err("Couldn't register timer interrupt\n");
/* Init the clock with a value */
/* our clock goes off every 6.25us */

View File

@ -48,7 +48,6 @@ extern unsigned long mvme16x_gettimeoffset (void);
extern int mvme16x_hwclk (int, struct rtc_time *);
extern int mvme16x_set_clock_mmss (unsigned long);
extern void mvme16x_reset (void);
extern void mvme16x_waitbut(void);
int bcd2int (unsigned char b);

View File

@ -36,7 +36,6 @@
#include <asm/machdep.h>
#include <asm/q40_master.h>
extern irqreturn_t q40_process_int(int level, struct pt_regs *regs);
extern void q40_init_IRQ(void);
static void q40_get_model(char *model);
extern void q40_sched_init(irq_handler_t handler);
@ -47,8 +46,6 @@ static unsigned int q40_get_ss(void);
static int q40_set_clock_mmss(unsigned long);
static int q40_get_rtc_pll(struct rtc_pll_info *pll);
static int q40_set_rtc_pll(struct rtc_pll_info *pll);
extern void q40_waitbut(void);
void q40_set_vectors(void);
extern void q40_mksound(unsigned int /*freq*/, unsigned int /*ticks*/);

View File

@ -27,23 +27,21 @@
#include <asm/sun3mmu.h>
#include <asm/rtc.h>
#include <asm/machdep.h>
#include <asm/idprom.h>
#include <asm/intersil.h>
#include <asm/irq.h>
#include <asm/sections.h>
#include <asm/segment.h>
#include <asm/sun3ints.h>
extern char _text, _end;
char sun3_reserved_pmeg[SUN3_PMEGS_NUM];
extern unsigned long sun3_gettimeoffset(void);
static void sun3_sched_init(irq_handler_t handler);
extern void sun3_get_model (char* model);
extern void idprom_init (void);
extern int sun3_hwclk(int set, struct rtc_time *t);
volatile char* clock_va;
extern volatile unsigned char* sun3_intreg;
extern unsigned long availmem;
unsigned long num_pages;
@ -149,7 +147,7 @@ void __init config_sun3(void)
mach_halt = sun3_halt;
mach_get_hardware_list = sun3_get_hardware_list;
memory_start = ((((int)&_end) + 0x2000) & ~0x1fff);
memory_start = ((((unsigned long)_end) + 0x2000) & ~0x1fff);
// PROM seems to want the last couple of physical pages. --m
memory_end = *(romvec->pv_sun3mem) + PAGE_OFFSET - 2*PAGE_SIZE;

View File

@ -27,7 +27,6 @@
#include <asm/mmu_context.h>
#include <asm/dvma.h>
extern void prom_reboot (char *) __attribute__ ((__noreturn__));
#undef DEBUG_MMU_EMU
#define DEBUG_PROM_MAPS

View File

@ -105,7 +105,10 @@ void __init sun3_init_IRQ(void)
m68k_setup_irq_controller(&sun3_irq_controller, IRQ_AUTO_1, 7);
m68k_setup_user_interrupt(VEC_USER, 128, NULL);
request_irq(IRQ_AUTO_5, sun3_int5, 0, "int5", NULL);
request_irq(IRQ_AUTO_7, sun3_int7, 0, "int7", NULL);
request_irq(IRQ_USER+127, sun3_vec255, 0, "vec255", NULL);
if (request_irq(IRQ_AUTO_5, sun3_int5, 0, "int5", NULL))
pr_err("Couldn't register %s interrupt\n", "int5");
if (request_irq(IRQ_AUTO_7, sun3_int7, 0, "int7", NULL))
pr_err("Couldn't register %s interrupt\n", "int7");
if (request_irq(IRQ_USER+127, sun3_vec255, 0, "vec255", NULL))
pr_err("Couldn't register %s interrupt\n", "vec255");
}

View File

@ -23,7 +23,6 @@
#include "time.h"
volatile char *clock_va;
extern volatile unsigned char *sun3_intreg;
extern void sun3_get_model(char *model);

View File

@ -595,6 +595,44 @@ config WR_PPMC
This enables support for the Wind River MIPS32 4KC PPMC evaluation
board, which is based on the GT64120 bridge chip.
config CAVIUM_OCTEON_SIMULATOR
bool "Support for the Cavium Networks Octeon Simulator"
select CEVT_R4K
select 64BIT_PHYS_ADDR
select DMA_COHERENT
select SYS_SUPPORTS_64BIT_KERNEL
select SYS_SUPPORTS_BIG_ENDIAN
select SYS_SUPPORTS_HIGHMEM
select CPU_CAVIUM_OCTEON
help
The Octeon simulator is a software performance model of the Cavium
Octeon Processor. It supports simulating Octeon processors on x86
hardware.
config CAVIUM_OCTEON_REFERENCE_BOARD
bool "Support for the Cavium Networks Octeon reference board"
select CEVT_R4K
select 64BIT_PHYS_ADDR
select DMA_COHERENT
select SYS_SUPPORTS_64BIT_KERNEL
select SYS_SUPPORTS_BIG_ENDIAN
select SYS_SUPPORTS_HIGHMEM
select SYS_HAS_EARLY_PRINTK
select CPU_CAVIUM_OCTEON
select SWAP_IO_SPACE
help
This option supports all of the Octeon reference boards from Cavium
Networks. It builds a kernel that dynamically determines the Octeon
CPU type and supports all known board reference implementations.
Some of the supported boards are:
EBT3000
EBH3000
EBH3100
Thunder
Kodama
Hikari
Say Y here for most Octeon reference boards.
endchoice
source "arch/mips/alchemy/Kconfig"
@ -607,6 +645,7 @@ source "arch/mips/sgi-ip27/Kconfig"
source "arch/mips/sibyte/Kconfig"
source "arch/mips/txx9/Kconfig"
source "arch/mips/vr41xx/Kconfig"
source "arch/mips/cavium-octeon/Kconfig"
endmenu
@ -682,7 +721,11 @@ config CEVT_DS1287
config CEVT_GT641XX
bool
config CEVT_R4K_LIB
bool
config CEVT_R4K
select CEVT_R4K_LIB
bool
config CEVT_SB1250
@ -697,7 +740,11 @@ config CSRC_BCM1480
config CSRC_IOASIC
bool
config CSRC_R4K_LIB
bool
config CSRC_R4K
select CSRC_R4K_LIB
bool
config CSRC_SB1250
@ -835,6 +882,9 @@ config IRQ_GT641XX
config IRQ_GIC
bool
config IRQ_CPU_OCTEON
bool
config MIPS_BOARDS_GEN
bool
@ -924,7 +974,7 @@ config BOOT_ELF32
config MIPS_L1_CACHE_SHIFT
int
default "4" if MACH_DECSTATION || MIKROTIK_RB532
default "7" if SGI_IP22 || SGI_IP27 || SGI_IP28 || SNI_RM
default "7" if SGI_IP22 || SGI_IP27 || SGI_IP28 || SNI_RM || CPU_CAVIUM_OCTEON
default "4" if PMC_MSP4200_EVAL
default "5"
@ -1185,6 +1235,23 @@ config CPU_SB1
select CPU_SUPPORTS_HIGHMEM
select WEAK_ORDERING
config CPU_CAVIUM_OCTEON
bool "Cavium Octeon processor"
select IRQ_CPU
select IRQ_CPU_OCTEON
select CPU_HAS_PREFETCH
select CPU_SUPPORTS_64BIT_KERNEL
select SYS_SUPPORTS_SMP
select NR_CPUS_DEFAULT_16
select WEAK_ORDERING
select WEAK_REORDERING_BEYOND_LLSC
select CPU_SUPPORTS_HIGHMEM
help
The Cavium Octeon processor is a highly integrated chip containing
many ethernet hardware widgets for networking tasks. The processor
can have up to 16 Mips64v2 cores and 8 integrated gigabit ethernets.
Full details can be found at http://www.caviumnetworks.com.
endchoice
config SYS_HAS_CPU_LOONGSON2
@ -1285,7 +1352,7 @@ config CPU_MIPSR1
config CPU_MIPSR2
bool
default y if CPU_MIPS32_R2 || CPU_MIPS64_R2
default y if CPU_MIPS32_R2 || CPU_MIPS64_R2 || CPU_CAVIUM_OCTEON
config SYS_SUPPORTS_32BIT_KERNEL
bool

View File

@ -144,6 +144,10 @@ cflags-$(CONFIG_CPU_SB1) += $(call cc-option,-march=sb1,-march=r5000) \
cflags-$(CONFIG_CPU_R8000) += -march=r8000 -Wa,--trap
cflags-$(CONFIG_CPU_R10000) += $(call cc-option,-march=r10000,-march=r8000) \
-Wa,--trap
cflags-$(CONFIG_CPU_CAVIUM_OCTEON) += $(call cc-option,-march=octeon) -Wa,--trap
ifeq (,$(findstring march=octeon, $(cflags-$(CONFIG_CPU_CAVIUM_OCTEON))))
cflags-$(CONFIG_CPU_CAVIUM_OCTEON) += -Wa,-march=octeon
endif
cflags-$(CONFIG_CPU_R4000_WORKAROUNDS) += $(call cc-option,-mfix-r4000,)
cflags-$(CONFIG_CPU_R4400_WORKAROUNDS) += $(call cc-option,-mfix-r4400,)
@ -184,84 +188,84 @@ cflags-$(CONFIG_SOC_AU1X00) += -I$(srctree)/arch/mips/include/asm/mach-au1x00
#
# AMD Alchemy Pb1000 eval board
#
libs-$(CONFIG_MIPS_PB1000) += arch/mips/alchemy/pb1000/
core-$(CONFIG_MIPS_PB1000) += arch/mips/alchemy/devboards/
cflags-$(CONFIG_MIPS_PB1000) += -I$(srctree)/arch/mips/include/asm/mach-pb1x00
load-$(CONFIG_MIPS_PB1000) += 0xffffffff80100000
#
# AMD Alchemy Pb1100 eval board
#
libs-$(CONFIG_MIPS_PB1100) += arch/mips/alchemy/pb1100/
core-$(CONFIG_MIPS_PB1100) += arch/mips/alchemy/devboards/
cflags-$(CONFIG_MIPS_PB1100) += -I$(srctree)/arch/mips/include/asm/mach-pb1x00
load-$(CONFIG_MIPS_PB1100) += 0xffffffff80100000
#
# AMD Alchemy Pb1500 eval board
#
libs-$(CONFIG_MIPS_PB1500) += arch/mips/alchemy/pb1500/
core-$(CONFIG_MIPS_PB1500) += arch/mips/alchemy/devboards/
cflags-$(CONFIG_MIPS_PB1500) += -I$(srctree)/arch/mips/include/asm/mach-pb1x00
load-$(CONFIG_MIPS_PB1500) += 0xffffffff80100000
#
# AMD Alchemy Pb1550 eval board
#
libs-$(CONFIG_MIPS_PB1550) += arch/mips/alchemy/pb1550/
core-$(CONFIG_MIPS_PB1550) += arch/mips/alchemy/devboards/
cflags-$(CONFIG_MIPS_PB1550) += -I$(srctree)/arch/mips/include/asm/mach-pb1x00
load-$(CONFIG_MIPS_PB1550) += 0xffffffff80100000
#
# AMD Alchemy Pb1200 eval board
#
libs-$(CONFIG_MIPS_PB1200) += arch/mips/alchemy/pb1200/
core-$(CONFIG_MIPS_PB1200) += arch/mips/alchemy/devboards/
cflags-$(CONFIG_MIPS_PB1200) += -I$(srctree)/arch/mips/include/asm/mach-pb1x00
load-$(CONFIG_MIPS_PB1200) += 0xffffffff80100000
#
# AMD Alchemy Db1000 eval board
#
libs-$(CONFIG_MIPS_DB1000) += arch/mips/alchemy/db1x00/
core-$(CONFIG_MIPS_DB1000) += arch/mips/alchemy/devboards/
cflags-$(CONFIG_MIPS_DB1000) += -I$(srctree)/arch/mips/include/asm/mach-db1x00
load-$(CONFIG_MIPS_DB1000) += 0xffffffff80100000
#
# AMD Alchemy Db1100 eval board
#
libs-$(CONFIG_MIPS_DB1100) += arch/mips/alchemy/db1x00/
core-$(CONFIG_MIPS_DB1100) += arch/mips/alchemy/devboards/
cflags-$(CONFIG_MIPS_DB1100) += -I$(srctree)/arch/mips/include/asm/mach-db1x00
load-$(CONFIG_MIPS_DB1100) += 0xffffffff80100000
#
# AMD Alchemy Db1500 eval board
#
libs-$(CONFIG_MIPS_DB1500) += arch/mips/alchemy/db1x00/
core-$(CONFIG_MIPS_DB1500) += arch/mips/alchemy/devboards/
cflags-$(CONFIG_MIPS_DB1500) += -I$(srctree)/arch/mips/include/asm/mach-db1x00
load-$(CONFIG_MIPS_DB1500) += 0xffffffff80100000
#
# AMD Alchemy Db1550 eval board
#
libs-$(CONFIG_MIPS_DB1550) += arch/mips/alchemy/db1x00/
core-$(CONFIG_MIPS_DB1550) += arch/mips/alchemy/devboards/
cflags-$(CONFIG_MIPS_DB1550) += -I$(srctree)/arch/mips/include/asm/mach-db1x00
load-$(CONFIG_MIPS_DB1550) += 0xffffffff80100000
#
# AMD Alchemy Db1200 eval board
#
libs-$(CONFIG_MIPS_DB1200) += arch/mips/alchemy/pb1200/
core-$(CONFIG_MIPS_DB1200) += arch/mips/alchemy/devboards/
cflags-$(CONFIG_MIPS_DB1200) += -I$(srctree)/arch/mips/include/asm/mach-db1x00
load-$(CONFIG_MIPS_DB1200) += 0xffffffff80100000
#
# AMD Alchemy Bosporus eval board
#
libs-$(CONFIG_MIPS_BOSPORUS) += arch/mips/alchemy/db1x00/
core-$(CONFIG_MIPS_BOSPORUS) += arch/mips/alchemy/devboards/
cflags-$(CONFIG_MIPS_BOSPORUS) += -I$(srctree)/arch/mips/include/asm/mach-db1x00
load-$(CONFIG_MIPS_BOSPORUS) += 0xffffffff80100000
#
# AMD Alchemy Mirage eval board
#
libs-$(CONFIG_MIPS_MIRAGE) += arch/mips/alchemy/db1x00/
core-$(CONFIG_MIPS_MIRAGE) += arch/mips/alchemy/devboards/
cflags-$(CONFIG_MIPS_MIRAGE) += -I$(srctree)/arch/mips/include/asm/mach-db1x00
load-$(CONFIG_MIPS_MIRAGE) += 0xffffffff80100000
@ -586,6 +590,18 @@ core-$(CONFIG_TOSHIBA_RBTX4927) += arch/mips/txx9/rbtx4927/
core-$(CONFIG_TOSHIBA_RBTX4938) += arch/mips/txx9/rbtx4938/
core-$(CONFIG_TOSHIBA_RBTX4939) += arch/mips/txx9/rbtx4939/
#
# Cavium Octeon
#
core-$(CONFIG_CPU_CAVIUM_OCTEON) += arch/mips/cavium-octeon/
cflags-$(CONFIG_CPU_CAVIUM_OCTEON) += -I$(srctree)/arch/mips/include/asm/mach-cavium-octeon
core-$(CONFIG_CPU_CAVIUM_OCTEON) += arch/mips/cavium-octeon/executive/
ifdef CONFIG_CAVIUM_OCTEON_2ND_KERNEL
load-$(CONFIG_CPU_CAVIUM_OCTEON) += 0xffffffff84100000
else
load-$(CONFIG_CPU_CAVIUM_OCTEON) += 0xffffffff81100000
endif
cflags-y += -I$(srctree)/arch/mips/include/asm/mach-generic
drivers-$(CONFIG_PCI) += arch/mips/pci/

View File

@ -128,9 +128,10 @@ config SOC_AU1200
config SOC_AU1X00
bool
select 64BIT_PHYS_ADDR
select CEVT_R4K
select CSRC_R4K
select CEVT_R4K_LIB
select CSRC_R4K_LIB
select IRQ_CPU
select SYS_HAS_CPU_MIPS32_R1
select SYS_SUPPORTS_32BIT_KERNEL
select SYS_SUPPORTS_APM_EMULATION
select GENERIC_HARDIRQS_NO__DO_IRQ

View File

@ -6,8 +6,8 @@
#
obj-y += prom.o irq.o puts.o time.o reset.o \
au1xxx_irqmap.o clocks.o platform.o power.o setup.o \
sleeper.o cputable.o dma.o dbdma.o gpio.o
clocks.o platform.o power.o setup.o \
sleeper.o dma.o dbdma.o gpio.o
obj-$(CONFIG_PCI) += pci.o

View File

@ -1,205 +0,0 @@
/*
* BRIEF MODULE DESCRIPTION
* Au1xxx processor specific IRQ tables
*
* Copyright 2004 Embedded Edge, LLC
* dan@embeddededge.com
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
* NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <au1000.h>
/* The IC0 interrupt table. This is processor, rather than
* board dependent, so no reason to keep this info in the board
* dependent files.
*
* Careful if you change match 2 request!
* The interrupt handler is called directly from the low level dispatch code.
*/
struct au1xxx_irqmap __initdata au1xxx_ic0_map[] = {
#if defined(CONFIG_SOC_AU1000)
{ AU1000_UART0_INT, INTC_INT_HIGH_LEVEL, 0 },
{ AU1000_UART1_INT, INTC_INT_HIGH_LEVEL, 0 },
{ AU1000_UART2_INT, INTC_INT_HIGH_LEVEL, 0 },
{ AU1000_UART3_INT, INTC_INT_HIGH_LEVEL, 0 },
{ AU1000_SSI0_INT, INTC_INT_HIGH_LEVEL, 0 },
{ AU1000_SSI1_INT, INTC_INT_HIGH_LEVEL, 0 },
{ AU1000_DMA_INT_BASE, INTC_INT_HIGH_LEVEL, 0 },
{ AU1000_DMA_INT_BASE+1, INTC_INT_HIGH_LEVEL, 0 },
{ AU1000_DMA_INT_BASE+2, INTC_INT_HIGH_LEVEL, 0 },
{ AU1000_DMA_INT_BASE+3, INTC_INT_HIGH_LEVEL, 0 },
{ AU1000_DMA_INT_BASE+4, INTC_INT_HIGH_LEVEL, 0 },
{ AU1000_DMA_INT_BASE+5, INTC_INT_HIGH_LEVEL, 0 },
{ AU1000_DMA_INT_BASE+6, INTC_INT_HIGH_LEVEL, 0 },
{ AU1000_DMA_INT_BASE+7, INTC_INT_HIGH_LEVEL, 0 },
{ AU1000_TOY_INT, INTC_INT_RISE_EDGE, 0 },
{ AU1000_TOY_MATCH0_INT, INTC_INT_RISE_EDGE, 0 },
{ AU1000_TOY_MATCH1_INT, INTC_INT_RISE_EDGE, 0 },
{ AU1000_TOY_MATCH2_INT, INTC_INT_RISE_EDGE, 1 },
{ AU1000_RTC_INT, INTC_INT_RISE_EDGE, 0 },
{ AU1000_RTC_MATCH0_INT, INTC_INT_RISE_EDGE, 0 },
{ AU1000_RTC_MATCH1_INT, INTC_INT_RISE_EDGE, 0 },
{ AU1000_RTC_MATCH2_INT, INTC_INT_RISE_EDGE, 0 },
{ AU1000_IRDA_TX_INT, INTC_INT_HIGH_LEVEL, 0 },
{ AU1000_IRDA_RX_INT, INTC_INT_HIGH_LEVEL, 0 },
{ AU1000_USB_DEV_REQ_INT, INTC_INT_HIGH_LEVEL, 0 },
{ AU1000_USB_DEV_SUS_INT, INTC_INT_RISE_EDGE, 0 },
{ AU1000_USB_HOST_INT, INTC_INT_LOW_LEVEL, 0 },
{ AU1000_ACSYNC_INT, INTC_INT_RISE_EDGE, 0 },
{ AU1000_MAC0_DMA_INT, INTC_INT_HIGH_LEVEL, 0 },
{ AU1000_MAC1_DMA_INT, INTC_INT_HIGH_LEVEL, 0 },
{ AU1000_AC97C_INT, INTC_INT_RISE_EDGE, 0 },
#elif defined(CONFIG_SOC_AU1500)
{ AU1500_UART0_INT, INTC_INT_HIGH_LEVEL, 0 },
{ AU1000_PCI_INTA, INTC_INT_LOW_LEVEL, 0 },
{ AU1000_PCI_INTB, INTC_INT_LOW_LEVEL, 0 },
{ AU1500_UART3_INT, INTC_INT_HIGH_LEVEL, 0 },
{ AU1000_PCI_INTC, INTC_INT_LOW_LEVEL, 0 },
{ AU1000_PCI_INTD, INTC_INT_LOW_LEVEL, 0 },
{ AU1000_DMA_INT_BASE, INTC_INT_HIGH_LEVEL, 0 },
{ AU1000_DMA_INT_BASE+1, INTC_INT_HIGH_LEVEL, 0 },
{ AU1000_DMA_INT_BASE+2, INTC_INT_HIGH_LEVEL, 0 },
{ AU1000_DMA_INT_BASE+3, INTC_INT_HIGH_LEVEL, 0 },
{ AU1000_DMA_INT_BASE+4, INTC_INT_HIGH_LEVEL, 0 },
{ AU1000_DMA_INT_BASE+5, INTC_INT_HIGH_LEVEL, 0 },
{ AU1000_DMA_INT_BASE+6, INTC_INT_HIGH_LEVEL, 0 },
{ AU1000_DMA_INT_BASE+7, INTC_INT_HIGH_LEVEL, 0 },
{ AU1000_TOY_INT, INTC_INT_RISE_EDGE, 0 },
{ AU1000_TOY_MATCH0_INT, INTC_INT_RISE_EDGE, 0 },
{ AU1000_TOY_MATCH1_INT, INTC_INT_RISE_EDGE, 0 },
{ AU1000_TOY_MATCH2_INT, INTC_INT_RISE_EDGE, 1 },
{ AU1000_RTC_INT, INTC_INT_RISE_EDGE, 0 },
{ AU1000_RTC_MATCH0_INT, INTC_INT_RISE_EDGE, 0 },
{ AU1000_RTC_MATCH1_INT, INTC_INT_RISE_EDGE, 0 },
{ AU1000_RTC_MATCH2_INT, INTC_INT_RISE_EDGE, 0 },
{ AU1000_USB_DEV_REQ_INT, INTC_INT_HIGH_LEVEL, 0 },
{ AU1000_USB_DEV_SUS_INT, INTC_INT_RISE_EDGE, 0 },
{ AU1000_USB_HOST_INT, INTC_INT_LOW_LEVEL, 0 },
{ AU1000_ACSYNC_INT, INTC_INT_RISE_EDGE, 0 },
{ AU1500_MAC0_DMA_INT, INTC_INT_HIGH_LEVEL, 0 },
{ AU1500_MAC1_DMA_INT, INTC_INT_HIGH_LEVEL, 0 },
{ AU1000_AC97C_INT, INTC_INT_RISE_EDGE, 0 },
#elif defined(CONFIG_SOC_AU1100)
{ AU1100_UART0_INT, INTC_INT_HIGH_LEVEL, 0 },
{ AU1100_UART1_INT, INTC_INT_HIGH_LEVEL, 0 },
{ AU1100_SD_INT, INTC_INT_HIGH_LEVEL, 0 },
{ AU1100_UART3_INT, INTC_INT_HIGH_LEVEL, 0 },
{ AU1000_SSI0_INT, INTC_INT_HIGH_LEVEL, 0 },
{ AU1000_SSI1_INT, INTC_INT_HIGH_LEVEL, 0 },
{ AU1000_DMA_INT_BASE, INTC_INT_HIGH_LEVEL, 0 },
{ AU1000_DMA_INT_BASE+1, INTC_INT_HIGH_LEVEL, 0 },
{ AU1000_DMA_INT_BASE+2, INTC_INT_HIGH_LEVEL, 0 },
{ AU1000_DMA_INT_BASE+3, INTC_INT_HIGH_LEVEL, 0 },
{ AU1000_DMA_INT_BASE+4, INTC_INT_HIGH_LEVEL, 0 },
{ AU1000_DMA_INT_BASE+5, INTC_INT_HIGH_LEVEL, 0 },
{ AU1000_DMA_INT_BASE+6, INTC_INT_HIGH_LEVEL, 0 },
{ AU1000_DMA_INT_BASE+7, INTC_INT_HIGH_LEVEL, 0 },
{ AU1000_TOY_INT, INTC_INT_RISE_EDGE, 0 },
{ AU1000_TOY_MATCH0_INT, INTC_INT_RISE_EDGE, 0 },
{ AU1000_TOY_MATCH1_INT, INTC_INT_RISE_EDGE, 0 },
{ AU1000_TOY_MATCH2_INT, INTC_INT_RISE_EDGE, 1 },
{ AU1000_RTC_INT, INTC_INT_RISE_EDGE, 0 },
{ AU1000_RTC_MATCH0_INT, INTC_INT_RISE_EDGE, 0 },
{ AU1000_RTC_MATCH1_INT, INTC_INT_RISE_EDGE, 0 },
{ AU1000_RTC_MATCH2_INT, INTC_INT_RISE_EDGE, 0 },
{ AU1000_IRDA_TX_INT, INTC_INT_HIGH_LEVEL, 0 },
{ AU1000_IRDA_RX_INT, INTC_INT_HIGH_LEVEL, 0 },
{ AU1000_USB_DEV_REQ_INT, INTC_INT_HIGH_LEVEL, 0 },
{ AU1000_USB_DEV_SUS_INT, INTC_INT_RISE_EDGE, 0 },
{ AU1000_USB_HOST_INT, INTC_INT_LOW_LEVEL, 0 },
{ AU1000_ACSYNC_INT, INTC_INT_RISE_EDGE, 0 },
{ AU1100_MAC0_DMA_INT, INTC_INT_HIGH_LEVEL, 0 },
/* { AU1000_GPIO215_208_INT, INTC_INT_HIGH_LEVEL, 0 }, */
{ AU1100_LCD_INT, INTC_INT_HIGH_LEVEL, 0 },
{ AU1000_AC97C_INT, INTC_INT_RISE_EDGE, 0 },
#elif defined(CONFIG_SOC_AU1550)
{ AU1550_UART0_INT, INTC_INT_HIGH_LEVEL, 0 },
{ AU1550_PCI_INTA, INTC_INT_LOW_LEVEL, 0 },
{ AU1550_PCI_INTB, INTC_INT_LOW_LEVEL, 0 },
{ AU1550_DDMA_INT, INTC_INT_HIGH_LEVEL, 0 },
{ AU1550_CRYPTO_INT, INTC_INT_HIGH_LEVEL, 0 },
{ AU1550_PCI_INTC, INTC_INT_LOW_LEVEL, 0 },
{ AU1550_PCI_INTD, INTC_INT_LOW_LEVEL, 0 },
{ AU1550_PCI_RST_INT, INTC_INT_LOW_LEVEL, 0 },
{ AU1550_UART1_INT, INTC_INT_HIGH_LEVEL, 0 },
{ AU1550_UART3_INT, INTC_INT_HIGH_LEVEL, 0 },
{ AU1550_PSC0_INT, INTC_INT_HIGH_LEVEL, 0 },
{ AU1550_PSC1_INT, INTC_INT_HIGH_LEVEL, 0 },
{ AU1550_PSC2_INT, INTC_INT_HIGH_LEVEL, 0 },
{ AU1550_PSC3_INT, INTC_INT_HIGH_LEVEL, 0 },
{ AU1000_TOY_INT, INTC_INT_RISE_EDGE, 0 },
{ AU1000_TOY_MATCH0_INT, INTC_INT_RISE_EDGE, 0 },
{ AU1000_TOY_MATCH1_INT, INTC_INT_RISE_EDGE, 0 },
{ AU1000_TOY_MATCH2_INT, INTC_INT_RISE_EDGE, 1 },
{ AU1000_RTC_INT, INTC_INT_RISE_EDGE, 0 },
{ AU1000_RTC_MATCH0_INT, INTC_INT_RISE_EDGE, 0 },
{ AU1000_RTC_MATCH1_INT, INTC_INT_RISE_EDGE, 0 },
{ AU1000_RTC_MATCH2_INT, INTC_INT_RISE_EDGE, 0 },
{ AU1550_NAND_INT, INTC_INT_RISE_EDGE, 0 },
{ AU1550_USB_DEV_REQ_INT, INTC_INT_HIGH_LEVEL, 0 },
{ AU1550_USB_DEV_SUS_INT, INTC_INT_RISE_EDGE, 0 },
{ AU1550_USB_HOST_INT, INTC_INT_LOW_LEVEL, 0 },
{ AU1550_MAC0_DMA_INT, INTC_INT_HIGH_LEVEL, 0 },
{ AU1550_MAC1_DMA_INT, INTC_INT_HIGH_LEVEL, 0 },
#elif defined(CONFIG_SOC_AU1200)
{ AU1200_UART0_INT, INTC_INT_HIGH_LEVEL, 0 },
{ AU1200_SWT_INT, INTC_INT_RISE_EDGE, 0 },
{ AU1200_SD_INT, INTC_INT_HIGH_LEVEL, 0 },
{ AU1200_DDMA_INT, INTC_INT_HIGH_LEVEL, 0 },
{ AU1200_MAE_BE_INT, INTC_INT_HIGH_LEVEL, 0 },
{ AU1200_UART1_INT, INTC_INT_HIGH_LEVEL, 0 },
{ AU1200_MAE_FE_INT, INTC_INT_HIGH_LEVEL, 0 },
{ AU1200_PSC0_INT, INTC_INT_HIGH_LEVEL, 0 },
{ AU1200_PSC1_INT, INTC_INT_HIGH_LEVEL, 0 },
{ AU1200_AES_INT, INTC_INT_HIGH_LEVEL, 0 },
{ AU1200_CAMERA_INT, INTC_INT_HIGH_LEVEL, 0 },
{ AU1000_TOY_INT, INTC_INT_RISE_EDGE, 0 },
{ AU1000_TOY_MATCH0_INT, INTC_INT_RISE_EDGE, 0 },
{ AU1000_TOY_MATCH1_INT, INTC_INT_RISE_EDGE, 0 },
{ AU1000_TOY_MATCH2_INT, INTC_INT_RISE_EDGE, 1 },
{ AU1000_RTC_INT, INTC_INT_RISE_EDGE, 0 },
{ AU1000_RTC_MATCH0_INT, INTC_INT_RISE_EDGE, 0 },
{ AU1000_RTC_MATCH1_INT, INTC_INT_RISE_EDGE, 0 },
{ AU1000_RTC_MATCH2_INT, INTC_INT_RISE_EDGE, 0 },
{ AU1200_NAND_INT, INTC_INT_RISE_EDGE, 0 },
{ AU1200_USB_INT, INTC_INT_HIGH_LEVEL, 0 },
{ AU1200_LCD_INT, INTC_INT_HIGH_LEVEL, 0 },
{ AU1200_MAE_BOTH_INT, INTC_INT_HIGH_LEVEL, 0 },
#else
#error "Error: Unknown Alchemy SOC"
#endif
};
int __initdata au1xxx_ic0_nr_irqs = ARRAY_SIZE(au1xxx_ic0_map);

View File

@ -27,12 +27,21 @@
*/
#include <linux/module.h>
#include <linux/spinlock.h>
#include <asm/time.h>
#include <asm/mach-au1x00/au1000.h>
/*
* I haven't found anyone that doesn't use a 12 MHz source clock,
* but just in case.....
*/
#define AU1000_SRC_CLK 12000000
static unsigned int au1x00_clock; /* Hz */
static unsigned int lcd_clock; /* KHz */
static unsigned long uart_baud_base;
static DEFINE_SPINLOCK(time_lock);
/*
* Set the au1000_clock
*/
@ -63,31 +72,45 @@ void set_au1x00_uart_baud_base(unsigned long new_baud_base)
}
/*
* Calculate the Au1x00's LCD clock based on the current
* cpu clock and the system bus clock, and try to keep it
* below 40 MHz (the Pb1000 board can lock-up if the LCD
* clock is over 40 MHz).
* We read the real processor speed from the PLL. This is important
* because it is more accurate than computing it from the 32 KHz
* counter, if it exists. If we don't have an accurate processor
* speed, all of the peripherals that derive their clocks based on
* this advertised speed will introduce error and sometimes not work
properly. This function is further convoluted to still allow configurations
* to do that in case they have really, really old silicon with a
* write-only PLL register. -- Dan
*/
void set_au1x00_lcd_clock(void)
unsigned long au1xxx_calc_clock(void)
{
unsigned int static_cfg0;
unsigned int sys_busclk = (get_au1x00_speed() / 1000) /
((int)(au_readl(SYS_POWERCTRL) & 0x03) + 2);
unsigned long cpu_speed;
unsigned long flags;
static_cfg0 = au_readl(MEM_STCFG0);
spin_lock_irqsave(&time_lock, flags);
if (static_cfg0 & (1 << 11))
lcd_clock = sys_busclk / 5; /* note: BCLK switching fails with D5 */
/*
* On early Au1000, sys_cpupll was write-only. Since these
* silicon versions of Au1000 are not sold by AMD, we don't bend
* over backwards trying to determine the frequency.
*/
if (au1xxx_cpu_has_pll_wo())
#ifdef CONFIG_SOC_AU1000_FREQUENCY
cpu_speed = CONFIG_SOC_AU1000_FREQUENCY;
#else
cpu_speed = 396000000;
#endif
else
lcd_clock = sys_busclk / 4;
cpu_speed = (au_readl(SYS_CPUPLL) & 0x0000003f) * AU1000_SRC_CLK;
if (lcd_clock > 50000) /* Epson MAX */
printk(KERN_WARNING "warning: LCD clock too high (%u KHz)\n",
lcd_clock);
}
/* On Alchemy CPU:counter ratio is 1:1 */
mips_hpt_frequency = cpu_speed;
/* Equation: Baudrate = CPU / (SD * 2 * CLKDIV * 16) */
set_au1x00_uart_baud_base(cpu_speed / (2 * ((int)(au_readl(SYS_POWERCTRL)
& 0x03) + 2) * 16));
unsigned int get_au1x00_lcd_clock(void)
{
return lcd_clock;
spin_unlock_irqrestore(&time_lock, flags);
set_au1x00_speed(cpu_speed);
return cpu_speed;
}
EXPORT_SYMBOL(get_au1x00_lcd_clock);
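The new au1xxx_calc_clock() derives everything from two registers: the low 6 bits of SYS_CPUPLL are a multiplier of the 12 MHz source clock, and the low 2 bits of SYS_POWERCTRL select the divider used for the UART baud base, per the "Baudrate = CPU / (SD * 2 * CLKDIV * 16)" comment above. A worked example of that arithmetic with hypothetical register values (not read from real hardware):

#include <stdio.h>

#define AU1000_SRC_CLK 12000000UL   /* 12 MHz source clock, as above */

int main(void)
{
        /* Hypothetical register readings for illustration only. */
        unsigned long sys_cpupll    = 0x21;  /* low 6 bits = 33 */
        unsigned long sys_powerctrl = 0x00;  /* low 2 bits = 0  */

        /* cpu_speed = (SYS_CPUPLL & 0x3f) * 12 MHz */
        unsigned long cpu_speed = (sys_cpupll & 0x3f) * AU1000_SRC_CLK;

        /* baud base = CPU / (2 * ((POWERCTRL & 3) + 2) * 16) */
        unsigned long baud_base =
                cpu_speed / (2 * ((sys_powerctrl & 0x03) + 2) * 16);

        /* 33 * 12 MHz = 396 MHz; 396000000 / 64 = 6187500 */
        printf("cpu %lu Hz, uart baud base %lu\n", cpu_speed, baud_base);
        return 0;
}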

View File

@ -1,52 +0,0 @@
/*
* arch/mips/au1000/common/cputable.c
*
* Copyright (C) 2004 Dan Malek (dan@embeddededge.com)
* Copied from PowerPC and updated for Alchemy Au1xxx processors.
*
* Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <asm/mach-au1x00/au1000.h>
struct cpu_spec *cur_cpu_spec[NR_CPUS];
/* With some thought, we can probably use the mask to reduce the
* size of the table.
*/
struct cpu_spec cpu_specs[] = {
{ 0xffffffff, 0x00030100, "Au1000 DA", 1, 0, 1 },
{ 0xffffffff, 0x00030201, "Au1000 HA", 1, 0, 1 },
{ 0xffffffff, 0x00030202, "Au1000 HB", 1, 0, 1 },
{ 0xffffffff, 0x00030203, "Au1000 HC", 1, 1, 0 },
{ 0xffffffff, 0x00030204, "Au1000 HD", 1, 1, 0 },
{ 0xffffffff, 0x01030200, "Au1500 AB", 1, 1, 0 },
{ 0xffffffff, 0x01030201, "Au1500 AC", 0, 1, 0 },
{ 0xffffffff, 0x01030202, "Au1500 AD", 0, 1, 0 },
{ 0xffffffff, 0x02030200, "Au1100 AB", 1, 1, 0 },
{ 0xffffffff, 0x02030201, "Au1100 BA", 1, 1, 0 },
{ 0xffffffff, 0x02030202, "Au1100 BC", 1, 1, 0 },
{ 0xffffffff, 0x02030203, "Au1100 BD", 0, 1, 0 },
{ 0xffffffff, 0x02030204, "Au1100 BE", 0, 1, 0 },
{ 0xffffffff, 0x03030200, "Au1550 AA", 0, 1, 0 },
{ 0xffffffff, 0x04030200, "Au1200 AB", 0, 0, 0 },
{ 0xffffffff, 0x04030201, "Au1200 AC", 1, 0, 0 },
{ 0x00000000, 0x00000000, "Unknown Au1xxx", 1, 0, 0 }
};
void set_cpuspec(void)
{
struct cpu_spec *sp;
u32 prid;
prid = read_c0_prid();
sp = cpu_specs;
while ((prid & sp->prid_mask) != sp->prid_value)
sp++;
cur_cpu_spec[0] = sp;
}

View File

@ -174,6 +174,11 @@ static dbdev_tab_t dbdev_tab[] = {
#define DBDEV_TAB_SIZE ARRAY_SIZE(dbdev_tab)
#ifdef CONFIG_PM
static u32 au1xxx_dbdma_pm_regs[NUM_DBDMA_CHANS + 1][8];
#endif
static chan_tab_t *chan_tab_ptr[NUM_DBDMA_CHANS];
static dbdev_tab_t *find_dbdev_id(u32 id)
@ -975,4 +980,64 @@ u32 au1xxx_dbdma_put_dscr(u32 chanid, au1x_ddma_desc_t *dscr)
return nbytes;
}
#ifdef CONFIG_PM
void au1xxx_dbdma_suspend(void)
{
int i;
u32 addr;
addr = DDMA_GLOBAL_BASE;
au1xxx_dbdma_pm_regs[0][0] = au_readl(addr + 0x00);
au1xxx_dbdma_pm_regs[0][1] = au_readl(addr + 0x04);
au1xxx_dbdma_pm_regs[0][2] = au_readl(addr + 0x08);
au1xxx_dbdma_pm_regs[0][3] = au_readl(addr + 0x0c);
/* save channel configurations */
for (i = 1, addr = DDMA_CHANNEL_BASE; i < NUM_DBDMA_CHANS; i++) {
au1xxx_dbdma_pm_regs[i][0] = au_readl(addr + 0x00);
au1xxx_dbdma_pm_regs[i][1] = au_readl(addr + 0x04);
au1xxx_dbdma_pm_regs[i][2] = au_readl(addr + 0x08);
au1xxx_dbdma_pm_regs[i][3] = au_readl(addr + 0x0c);
au1xxx_dbdma_pm_regs[i][4] = au_readl(addr + 0x10);
au1xxx_dbdma_pm_regs[i][5] = au_readl(addr + 0x14);
au1xxx_dbdma_pm_regs[i][6] = au_readl(addr + 0x18);
/* halt channel */
au_writel(au1xxx_dbdma_pm_regs[i][0] & ~1, addr + 0x00);
au_sync();
while (!(au_readl(addr + 0x14) & 1))
au_sync();
addr += 0x100; /* next channel base */
}
/* disable channel interrupts */
au_writel(0, DDMA_GLOBAL_BASE + 0x0c);
au_sync();
}
void au1xxx_dbdma_resume(void)
{
int i;
u32 addr;
addr = DDMA_GLOBAL_BASE;
au_writel(au1xxx_dbdma_pm_regs[0][0], addr + 0x00);
au_writel(au1xxx_dbdma_pm_regs[0][1], addr + 0x04);
au_writel(au1xxx_dbdma_pm_regs[0][2], addr + 0x08);
au_writel(au1xxx_dbdma_pm_regs[0][3], addr + 0x0c);
/* restore channel configurations */
for (i = 1, addr = DDMA_CHANNEL_BASE; i < NUM_DBDMA_CHANS; i++) {
au_writel(au1xxx_dbdma_pm_regs[i][0], addr + 0x00);
au_writel(au1xxx_dbdma_pm_regs[i][1], addr + 0x04);
au_writel(au1xxx_dbdma_pm_regs[i][2], addr + 0x08);
au_writel(au1xxx_dbdma_pm_regs[i][3], addr + 0x0c);
au_writel(au1xxx_dbdma_pm_regs[i][4], addr + 0x10);
au_writel(au1xxx_dbdma_pm_regs[i][5], addr + 0x14);
au_writel(au1xxx_dbdma_pm_regs[i][6], addr + 0x18);
au_sync();
addr += 0x100; /* next channel base */
}
}
#endif /* CONFIG_PM */
#endif /* defined(CONFIG_SOC_AU1550) || defined(CONFIG_SOC_AU1200) */
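The suspend path above saves the global and per-channel register blocks into au1xxx_dbdma_pm_regs and halts each channel by clearing the enable bit, then polling the status word until the halted bit appears; resume simply writes the saved values back. The halt handshake is the only subtle step; here is a toy standalone model of it, with the MMIO accessors replaced by a plain array and the hardware behaviour simulated:

#include <stdio.h>

/* Toy model of one channel's register block: word 0 holds the enable
 * bit (bit 0), word 5 (offset 0x14 above) holds the halted status bit. */
static unsigned int chan_regs[8];

static unsigned int toy_readl(int word)
{
        /* simulated hardware reports "halted" once enable is clear */
        if (word == 5 && !(chan_regs[0] & 1))
                chan_regs[5] |= 1;
        return chan_regs[word];
}

static void toy_writel(unsigned int val, int word)
{
        chan_regs[word] = val;
}

int main(void)
{
        chan_regs[0] = 0x13;                    /* channel running      */

        unsigned int saved = toy_readl(0);      /* save configuration   */
        toy_writel(saved & ~1u, 0);             /* clear the enable bit */
        while (!(toy_readl(5) & 1))             /* wait until halted    */
                ;                               /* (au_sync() in the real code) */

        printf("saved cfg 0x%x, channel halted\n", saved);
        return 0;
}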

View File

@ -24,6 +24,7 @@
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/bitops.h>
#include <linux/init.h>
#include <linux/interrupt.h>
@ -36,15 +37,172 @@
#include <asm/mach-pb1x00/pb1000.h>
#endif
#define EXT_INTC0_REQ0 2 /* IP 2 */
#define EXT_INTC0_REQ1 3 /* IP 3 */
#define EXT_INTC1_REQ0 4 /* IP 4 */
#define EXT_INTC1_REQ1 5 /* IP 5 */
#define MIPS_TIMER_IP 7 /* IP 7 */
static int au1x_ic_settype(unsigned int irq, unsigned int flow_type);
void (*board_init_irq)(void) __initdata = NULL;
/* per-processor fixed function irqs */
struct au1xxx_irqmap au1xxx_ic0_map[] __initdata = {
#if defined(CONFIG_SOC_AU1000)
{ AU1000_UART0_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
{ AU1000_UART1_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
{ AU1000_UART2_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
{ AU1000_UART3_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
{ AU1000_SSI0_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
{ AU1000_SSI1_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
{ AU1000_DMA_INT_BASE, IRQ_TYPE_LEVEL_HIGH, 0 },
{ AU1000_DMA_INT_BASE+1, IRQ_TYPE_LEVEL_HIGH, 0 },
{ AU1000_DMA_INT_BASE+2, IRQ_TYPE_LEVEL_HIGH, 0 },
{ AU1000_DMA_INT_BASE+3, IRQ_TYPE_LEVEL_HIGH, 0 },
{ AU1000_DMA_INT_BASE+4, IRQ_TYPE_LEVEL_HIGH, 0 },
{ AU1000_DMA_INT_BASE+5, IRQ_TYPE_LEVEL_HIGH, 0 },
{ AU1000_DMA_INT_BASE+6, IRQ_TYPE_LEVEL_HIGH, 0 },
{ AU1000_DMA_INT_BASE+7, IRQ_TYPE_LEVEL_HIGH, 0 },
{ AU1000_TOY_INT, IRQ_TYPE_EDGE_RISING, 0 },
{ AU1000_TOY_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 0 },
{ AU1000_TOY_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 0 },
{ AU1000_TOY_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 1 },
{ AU1000_RTC_INT, IRQ_TYPE_EDGE_RISING, 0 },
{ AU1000_RTC_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 0 },
{ AU1000_RTC_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 0 },
{ AU1000_RTC_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 0 },
{ AU1000_IRDA_TX_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
{ AU1000_IRDA_RX_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
{ AU1000_USB_DEV_REQ_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
{ AU1000_USB_DEV_SUS_INT, IRQ_TYPE_EDGE_RISING, 0 },
{ AU1000_USB_HOST_INT, IRQ_TYPE_LEVEL_LOW, 0 },
{ AU1000_ACSYNC_INT, IRQ_TYPE_EDGE_RISING, 0 },
{ AU1000_MAC0_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
{ AU1000_MAC1_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
{ AU1000_AC97C_INT, IRQ_TYPE_EDGE_RISING, 0 },
#elif defined(CONFIG_SOC_AU1500)
{ AU1500_UART0_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
{ AU1000_PCI_INTA, IRQ_TYPE_LEVEL_LOW, 0 },
{ AU1000_PCI_INTB, IRQ_TYPE_LEVEL_LOW, 0 },
{ AU1500_UART3_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
{ AU1000_PCI_INTC, IRQ_TYPE_LEVEL_LOW, 0 },
{ AU1000_PCI_INTD, IRQ_TYPE_LEVEL_LOW, 0 },
{ AU1000_DMA_INT_BASE, IRQ_TYPE_LEVEL_HIGH, 0 },
{ AU1000_DMA_INT_BASE+1, IRQ_TYPE_LEVEL_HIGH, 0 },
{ AU1000_DMA_INT_BASE+2, IRQ_TYPE_LEVEL_HIGH, 0 },
{ AU1000_DMA_INT_BASE+3, IRQ_TYPE_LEVEL_HIGH, 0 },
{ AU1000_DMA_INT_BASE+4, IRQ_TYPE_LEVEL_HIGH, 0 },
{ AU1000_DMA_INT_BASE+5, IRQ_TYPE_LEVEL_HIGH, 0 },
{ AU1000_DMA_INT_BASE+6, IRQ_TYPE_LEVEL_HIGH, 0 },
{ AU1000_DMA_INT_BASE+7, IRQ_TYPE_LEVEL_HIGH, 0 },
{ AU1000_TOY_INT, IRQ_TYPE_EDGE_RISING, 0 },
{ AU1000_TOY_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 0 },
{ AU1000_TOY_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 0 },
{ AU1000_TOY_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 1 },
{ AU1000_RTC_INT, IRQ_TYPE_EDGE_RISING, 0 },
{ AU1000_RTC_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 0 },
{ AU1000_RTC_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 0 },
{ AU1000_RTC_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 0 },
{ AU1000_USB_DEV_REQ_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
{ AU1000_USB_DEV_SUS_INT, IRQ_TYPE_EDGE_RISING, 0 },
{ AU1000_USB_HOST_INT, IRQ_TYPE_LEVEL_LOW, 0 },
{ AU1000_ACSYNC_INT, IRQ_TYPE_EDGE_RISING, 0 },
{ AU1500_MAC0_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
{ AU1500_MAC1_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
{ AU1000_AC97C_INT, IRQ_TYPE_EDGE_RISING, 0 },
#elif defined(CONFIG_SOC_AU1100)
{ AU1100_UART0_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
{ AU1100_UART1_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
{ AU1100_SD_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
{ AU1100_UART3_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
{ AU1000_SSI0_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
{ AU1000_SSI1_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
{ AU1000_DMA_INT_BASE, IRQ_TYPE_LEVEL_HIGH, 0 },
{ AU1000_DMA_INT_BASE+1, IRQ_TYPE_LEVEL_HIGH, 0 },
{ AU1000_DMA_INT_BASE+2, IRQ_TYPE_LEVEL_HIGH, 0 },
{ AU1000_DMA_INT_BASE+3, IRQ_TYPE_LEVEL_HIGH, 0 },
{ AU1000_DMA_INT_BASE+4, IRQ_TYPE_LEVEL_HIGH, 0 },
{ AU1000_DMA_INT_BASE+5, IRQ_TYPE_LEVEL_HIGH, 0 },
{ AU1000_DMA_INT_BASE+6, IRQ_TYPE_LEVEL_HIGH, 0 },
{ AU1000_DMA_INT_BASE+7, IRQ_TYPE_LEVEL_HIGH, 0 },
{ AU1000_TOY_INT, IRQ_TYPE_EDGE_RISING, 0 },
{ AU1000_TOY_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 0 },
{ AU1000_TOY_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 0 },
{ AU1000_TOY_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 1 },
{ AU1000_RTC_INT, IRQ_TYPE_EDGE_RISING, 0 },
{ AU1000_RTC_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 0 },
{ AU1000_RTC_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 0 },
{ AU1000_RTC_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 0 },
{ AU1000_IRDA_TX_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
{ AU1000_IRDA_RX_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
{ AU1000_USB_DEV_REQ_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
{ AU1000_USB_DEV_SUS_INT, IRQ_TYPE_EDGE_RISING, 0 },
{ AU1000_USB_HOST_INT, IRQ_TYPE_LEVEL_LOW, 0 },
{ AU1000_ACSYNC_INT, IRQ_TYPE_EDGE_RISING, 0 },
{ AU1100_MAC0_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
{ AU1100_LCD_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
{ AU1000_AC97C_INT, IRQ_TYPE_EDGE_RISING, 0 },
#elif defined(CONFIG_SOC_AU1550)
{ AU1550_UART0_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
{ AU1550_PCI_INTA, IRQ_TYPE_LEVEL_LOW, 0 },
{ AU1550_PCI_INTB, IRQ_TYPE_LEVEL_LOW, 0 },
{ AU1550_DDMA_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
{ AU1550_CRYPTO_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
{ AU1550_PCI_INTC, IRQ_TYPE_LEVEL_LOW, 0 },
{ AU1550_PCI_INTD, IRQ_TYPE_LEVEL_LOW, 0 },
{ AU1550_PCI_RST_INT, IRQ_TYPE_LEVEL_LOW, 0 },
{ AU1550_UART1_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
{ AU1550_UART3_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
{ AU1550_PSC0_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
{ AU1550_PSC1_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
{ AU1550_PSC2_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
{ AU1550_PSC3_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
{ AU1000_TOY_INT, IRQ_TYPE_EDGE_RISING, 0 },
{ AU1000_TOY_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 0 },
{ AU1000_TOY_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 0 },
{ AU1000_TOY_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 1 },
{ AU1000_RTC_INT, IRQ_TYPE_EDGE_RISING, 0 },
{ AU1000_RTC_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 0 },
{ AU1000_RTC_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 0 },
{ AU1000_RTC_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 0 },
{ AU1550_NAND_INT, IRQ_TYPE_EDGE_RISING, 0 },
{ AU1550_USB_DEV_REQ_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
{ AU1550_USB_DEV_SUS_INT, IRQ_TYPE_EDGE_RISING, 0 },
{ AU1550_USB_HOST_INT, IRQ_TYPE_LEVEL_LOW, 0 },
{ AU1550_MAC0_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
{ AU1550_MAC1_DMA_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
#elif defined(CONFIG_SOC_AU1200)
{ AU1200_UART0_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
{ AU1200_SWT_INT, IRQ_TYPE_EDGE_RISING, 0 },
{ AU1200_SD_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
{ AU1200_DDMA_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
{ AU1200_MAE_BE_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
{ AU1200_UART1_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
{ AU1200_MAE_FE_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
{ AU1200_PSC0_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
{ AU1200_PSC1_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
{ AU1200_AES_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
{ AU1200_CAMERA_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
{ AU1000_TOY_INT, IRQ_TYPE_EDGE_RISING, 0 },
{ AU1000_TOY_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 0 },
{ AU1000_TOY_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 0 },
{ AU1000_TOY_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 1 },
{ AU1000_RTC_INT, IRQ_TYPE_EDGE_RISING, 0 },
{ AU1000_RTC_MATCH0_INT, IRQ_TYPE_EDGE_RISING, 0 },
{ AU1000_RTC_MATCH1_INT, IRQ_TYPE_EDGE_RISING, 0 },
{ AU1000_RTC_MATCH2_INT, IRQ_TYPE_EDGE_RISING, 0 },
{ AU1200_NAND_INT, IRQ_TYPE_EDGE_RISING, 0 },
{ AU1200_USB_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
{ AU1200_LCD_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
{ AU1200_MAE_BOTH_INT, IRQ_TYPE_LEVEL_HIGH, 0 },
#else
#error "Error: Unknown Alchemy SOC"
#endif
};
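/* Each entry above is { source irq, trigger type, request line select }:
* a nonzero third field assigns the source to interrupt request 1 rather
* than request 0 (see au1xxx_setup_irqmap() below).
*/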
static DEFINE_SPINLOCK(irq_lock);
#ifdef CONFIG_PM
@ -130,67 +288,47 @@ void restore_au1xxx_intctl(void)
#endif /* CONFIG_PM */
inline void local_enable_irq(unsigned int irq_nr)
static void au1x_ic0_unmask(unsigned int irq_nr)
{
unsigned int bit = irq_nr - AU1000_INTC0_INT_BASE;
if (bit >= 32) {
au_writel(1 << (bit - 32), IC1_MASKSET);
au_writel(1 << (bit - 32), IC1_WAKESET);
} else {
au_writel(1 << bit, IC0_MASKSET);
au_writel(1 << bit, IC0_WAKESET);
}
au_writel(1 << bit, IC0_MASKSET);
au_writel(1 << bit, IC0_WAKESET);
au_sync();
}
inline void local_disable_irq(unsigned int irq_nr)
static void au1x_ic1_unmask(unsigned int irq_nr)
{
unsigned int bit = irq_nr - AU1000_INTC0_INT_BASE;
unsigned int bit = irq_nr - AU1000_INTC1_INT_BASE;
au_writel(1 << bit, IC1_MASKSET);
au_writel(1 << bit, IC1_WAKESET);
if (bit >= 32) {
au_writel(1 << (bit - 32), IC1_MASKCLR);
au_writel(1 << (bit - 32), IC1_WAKECLR);
} else {
au_writel(1 << bit, IC0_MASKCLR);
au_writel(1 << bit, IC0_WAKECLR);
}
/* very hacky. does the pb1000 cpld auto-disable this int?
* nowhere in the current kernel sources is it disabled. --mlau
*/
#if defined(CONFIG_MIPS_PB1000)
if (irq_nr == AU1000_GPIO_15)
au_writel(0x4000, PB1000_MDR); /* enable int */
#endif
au_sync();
}
static inline void mask_and_ack_rise_edge_irq(unsigned int irq_nr)
static void au1x_ic0_mask(unsigned int irq_nr)
{
unsigned int bit = irq_nr - AU1000_INTC0_INT_BASE;
if (bit >= 32) {
au_writel(1 << (bit - 32), IC1_RISINGCLR);
au_writel(1 << (bit - 32), IC1_MASKCLR);
} else {
au_writel(1 << bit, IC0_RISINGCLR);
au_writel(1 << bit, IC0_MASKCLR);
}
au_writel(1 << bit, IC0_MASKCLR);
au_writel(1 << bit, IC0_WAKECLR);
au_sync();
}
static inline void mask_and_ack_fall_edge_irq(unsigned int irq_nr)
static void au1x_ic1_mask(unsigned int irq_nr)
{
unsigned int bit = irq_nr - AU1000_INTC0_INT_BASE;
if (bit >= 32) {
au_writel(1 << (bit - 32), IC1_FALLINGCLR);
au_writel(1 << (bit - 32), IC1_MASKCLR);
} else {
au_writel(1 << bit, IC0_FALLINGCLR);
au_writel(1 << bit, IC0_MASKCLR);
}
unsigned int bit = irq_nr - AU1000_INTC1_INT_BASE;
au_writel(1 << bit, IC1_MASKCLR);
au_writel(1 << bit, IC1_WAKECLR);
au_sync();
}
static inline void mask_and_ack_either_edge_irq(unsigned int irq_nr)
static void au1x_ic0_ack(unsigned int irq_nr)
{
unsigned int bit = irq_nr - AU1000_INTC0_INT_BASE;
@ -198,349 +336,229 @@ static inline void mask_and_ack_either_edge_irq(unsigned int irq_nr)
* This may assume that we don't get interrupts from
* both edges at once, or if we do, that we don't care.
*/
if (bit >= 32) {
au_writel(1 << (bit - 32), IC1_FALLINGCLR);
au_writel(1 << (bit - 32), IC1_RISINGCLR);
au_writel(1 << (bit - 32), IC1_MASKCLR);
} else {
au_writel(1 << bit, IC0_FALLINGCLR);
au_writel(1 << bit, IC0_RISINGCLR);
au_writel(1 << bit, IC0_MASKCLR);
}
au_writel(1 << bit, IC0_FALLINGCLR);
au_writel(1 << bit, IC0_RISINGCLR);
au_sync();
}
static inline void mask_and_ack_level_irq(unsigned int irq_nr)
static void au1x_ic1_ack(unsigned int irq_nr)
{
local_disable_irq(irq_nr);
unsigned int bit = irq_nr - AU1000_INTC1_INT_BASE;
/*
* This may assume that we don't get interrupts from
* both edges at once, or if we do, that we don't care.
*/
au_writel(1 << bit, IC1_FALLINGCLR);
au_writel(1 << bit, IC1_RISINGCLR);
au_sync();
#if defined(CONFIG_MIPS_PB1000)
if (irq_nr == AU1000_GPIO_15) {
au_writel(0x8000, PB1000_MDR); /* ack int */
au_sync();
}
#endif
}
static void end_irq(unsigned int irq_nr)
static int au1x_ic1_setwake(unsigned int irq, unsigned int on)
{
if (!(irq_desc[irq_nr].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
local_enable_irq(irq_nr);
unsigned int bit = irq - AU1000_INTC1_INT_BASE;
unsigned long wakemsk, flags;
#if defined(CONFIG_MIPS_PB1000)
if (irq_nr == AU1000_GPIO_15) {
au_writel(0x4000, PB1000_MDR); /* enable int */
au_sync();
}
#endif
}
/* only GPIO 0-7 can act as wakeup source: */
if ((irq < AU1000_GPIO_0) || (irq > AU1000_GPIO_7))
return -EINVAL;
unsigned long save_local_and_disable(int controller)
{
int i;
unsigned long flags, mask;
spin_lock_irqsave(&irq_lock, flags);
if (controller) {
mask = au_readl(IC1_MASKSET);
for (i = 32; i < 64; i++)
local_disable_irq(i);
} else {
mask = au_readl(IC0_MASKSET);
for (i = 0; i < 32; i++)
local_disable_irq(i);
}
spin_unlock_irqrestore(&irq_lock, flags);
return mask;
}
void restore_local_and_enable(int controller, unsigned long mask)
{
int i;
unsigned long flags, new_mask;
spin_lock_irqsave(&irq_lock, flags);
for (i = 0; i < 32; i++)
if (mask & (1 << i)) {
if (controller)
local_enable_irq(i + 32);
else
local_enable_irq(i);
}
if (controller)
new_mask = au_readl(IC1_MASKSET);
local_irq_save(flags);
wakemsk = au_readl(SYS_WAKEMSK);
if (on)
wakemsk |= 1 << bit;
else
new_mask = au_readl(IC0_MASKSET);
spin_unlock_irqrestore(&irq_lock, flags);
}
static struct irq_chip rise_edge_irq_type = {
.name = "Au1000 Rise Edge",
.ack = mask_and_ack_rise_edge_irq,
.mask = local_disable_irq,
.mask_ack = mask_and_ack_rise_edge_irq,
.unmask = local_enable_irq,
.end = end_irq,
};
static struct irq_chip fall_edge_irq_type = {
.name = "Au1000 Fall Edge",
.ack = mask_and_ack_fall_edge_irq,
.mask = local_disable_irq,
.mask_ack = mask_and_ack_fall_edge_irq,
.unmask = local_enable_irq,
.end = end_irq,
};
static struct irq_chip either_edge_irq_type = {
.name = "Au1000 Rise or Fall Edge",
.ack = mask_and_ack_either_edge_irq,
.mask = local_disable_irq,
.mask_ack = mask_and_ack_either_edge_irq,
.unmask = local_enable_irq,
.end = end_irq,
};
static struct irq_chip level_irq_type = {
.name = "Au1000 Level",
.ack = mask_and_ack_level_irq,
.mask = local_disable_irq,
.mask_ack = mask_and_ack_level_irq,
.unmask = local_enable_irq,
.end = end_irq,
};
static void __init setup_local_irq(unsigned int irq_nr, int type, int int_req)
{
unsigned int bit = irq_nr - AU1000_INTC0_INT_BASE;
if (irq_nr > AU1000_MAX_INTR)
return;
/* Config2[n], Config1[n], Config0[n] */
if (bit >= 32) {
switch (type) {
case INTC_INT_RISE_EDGE: /* 0:0:1 */
au_writel(1 << (bit - 32), IC1_CFG2CLR);
au_writel(1 << (bit - 32), IC1_CFG1CLR);
au_writel(1 << (bit - 32), IC1_CFG0SET);
set_irq_chip(irq_nr, &rise_edge_irq_type);
break;
case INTC_INT_FALL_EDGE: /* 0:1:0 */
au_writel(1 << (bit - 32), IC1_CFG2CLR);
au_writel(1 << (bit - 32), IC1_CFG1SET);
au_writel(1 << (bit - 32), IC1_CFG0CLR);
set_irq_chip(irq_nr, &fall_edge_irq_type);
break;
case INTC_INT_RISE_AND_FALL_EDGE: /* 0:1:1 */
au_writel(1 << (bit - 32), IC1_CFG2CLR);
au_writel(1 << (bit - 32), IC1_CFG1SET);
au_writel(1 << (bit - 32), IC1_CFG0SET);
set_irq_chip(irq_nr, &either_edge_irq_type);
break;
case INTC_INT_HIGH_LEVEL: /* 1:0:1 */
au_writel(1 << (bit - 32), IC1_CFG2SET);
au_writel(1 << (bit - 32), IC1_CFG1CLR);
au_writel(1 << (bit - 32), IC1_CFG0SET);
set_irq_chip(irq_nr, &level_irq_type);
break;
case INTC_INT_LOW_LEVEL: /* 1:1:0 */
au_writel(1 << (bit - 32), IC1_CFG2SET);
au_writel(1 << (bit - 32), IC1_CFG1SET);
au_writel(1 << (bit - 32), IC1_CFG0CLR);
set_irq_chip(irq_nr, &level_irq_type);
break;
case INTC_INT_DISABLED: /* 0:0:0 */
au_writel(1 << (bit - 32), IC1_CFG0CLR);
au_writel(1 << (bit - 32), IC1_CFG1CLR);
au_writel(1 << (bit - 32), IC1_CFG2CLR);
break;
default: /* disable the interrupt */
printk(KERN_WARNING "unexpected int type %d (irq %d)\n",
type, irq_nr);
au_writel(1 << (bit - 32), IC1_CFG0CLR);
au_writel(1 << (bit - 32), IC1_CFG1CLR);
au_writel(1 << (bit - 32), IC1_CFG2CLR);
return;
}
if (int_req) /* assign to interrupt request 1 */
au_writel(1 << (bit - 32), IC1_ASSIGNCLR);
else /* assign to interrupt request 0 */
au_writel(1 << (bit - 32), IC1_ASSIGNSET);
au_writel(1 << (bit - 32), IC1_SRCSET);
au_writel(1 << (bit - 32), IC1_MASKCLR);
au_writel(1 << (bit - 32), IC1_WAKECLR);
} else {
switch (type) {
case INTC_INT_RISE_EDGE: /* 0:0:1 */
au_writel(1 << bit, IC0_CFG2CLR);
au_writel(1 << bit, IC0_CFG1CLR);
au_writel(1 << bit, IC0_CFG0SET);
set_irq_chip(irq_nr, &rise_edge_irq_type);
break;
case INTC_INT_FALL_EDGE: /* 0:1:0 */
au_writel(1 << bit, IC0_CFG2CLR);
au_writel(1 << bit, IC0_CFG1SET);
au_writel(1 << bit, IC0_CFG0CLR);
set_irq_chip(irq_nr, &fall_edge_irq_type);
break;
case INTC_INT_RISE_AND_FALL_EDGE: /* 0:1:1 */
au_writel(1 << bit, IC0_CFG2CLR);
au_writel(1 << bit, IC0_CFG1SET);
au_writel(1 << bit, IC0_CFG0SET);
set_irq_chip(irq_nr, &either_edge_irq_type);
break;
case INTC_INT_HIGH_LEVEL: /* 1:0:1 */
au_writel(1 << bit, IC0_CFG2SET);
au_writel(1 << bit, IC0_CFG1CLR);
au_writel(1 << bit, IC0_CFG0SET);
set_irq_chip(irq_nr, &level_irq_type);
break;
case INTC_INT_LOW_LEVEL: /* 1:1:0 */
au_writel(1 << bit, IC0_CFG2SET);
au_writel(1 << bit, IC0_CFG1SET);
au_writel(1 << bit, IC0_CFG0CLR);
set_irq_chip(irq_nr, &level_irq_type);
break;
case INTC_INT_DISABLED: /* 0:0:0 */
au_writel(1 << bit, IC0_CFG0CLR);
au_writel(1 << bit, IC0_CFG1CLR);
au_writel(1 << bit, IC0_CFG2CLR);
break;
default: /* disable the interrupt */
printk(KERN_WARNING "unexpected int type %d (irq %d)\n",
type, irq_nr);
au_writel(1 << bit, IC0_CFG0CLR);
au_writel(1 << bit, IC0_CFG1CLR);
au_writel(1 << bit, IC0_CFG2CLR);
return;
}
if (int_req) /* assign to interrupt request 1 */
au_writel(1 << bit, IC0_ASSIGNCLR);
else /* assign to interrupt request 0 */
au_writel(1 << bit, IC0_ASSIGNSET);
au_writel(1 << bit, IC0_SRCSET);
au_writel(1 << bit, IC0_MASKCLR);
au_writel(1 << bit, IC0_WAKECLR);
}
wakemsk &= ~(1 << bit);
au_writel(wakemsk, SYS_WAKEMSK);
au_sync();
local_irq_restore(flags);
return 0;
}
/*
* Interrupts are nested. Even if an interrupt handler is registered
* as "fast", we might get another interrupt before we return from
* intcX_reqX_irqdispatch().
* irq_chips for both ICs; this way the mask handlers can be
* as short as possible.
*
* NOTE: the ->ack() callback is used by the handle_edge_irq
* flowhandler only, the ->mask_ack() one by handle_level_irq,
* so no need for an irq_chip for each type of irq (level/edge).
*/
static struct irq_chip au1x_ic0_chip = {
.name = "Alchemy-IC0",
.ack = au1x_ic0_ack, /* edge */
.mask = au1x_ic0_mask,
.mask_ack = au1x_ic0_mask, /* level */
.unmask = au1x_ic0_unmask,
.set_type = au1x_ic_settype,
};
static void intc0_req0_irqdispatch(void)
static struct irq_chip au1x_ic1_chip = {
.name = "Alchemy-IC1",
.ack = au1x_ic1_ack, /* edge */
.mask = au1x_ic1_mask,
.mask_ack = au1x_ic1_mask, /* level */
.unmask = au1x_ic1_unmask,
.set_type = au1x_ic_settype,
.set_wake = au1x_ic1_setwake,
};
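/* Note that only the IC1 chip carries a set_wake hook: as
* au1x_ic1_setwake() above checks, only GPIO 0-7 (IC1 sources) can be
* programmed into SYS_WAKEMSK as wakeup events.
*/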
static int au1x_ic_settype(unsigned int irq, unsigned int flow_type)
{
static unsigned long intc0_req0;
unsigned int bit;
struct irq_chip *chip;
unsigned long icr[6];
unsigned int bit, ic;
int ret;
intc0_req0 |= au_readl(IC0_REQ0INT);
if (irq >= AU1000_INTC1_INT_BASE) {
bit = irq - AU1000_INTC1_INT_BASE;
chip = &au1x_ic1_chip;
ic = 1;
} else {
bit = irq - AU1000_INTC0_INT_BASE;
chip = &au1x_ic0_chip;
ic = 0;
}
if (!intc0_req0)
if (bit > 31)
return -EINVAL;
icr[0] = ic ? IC1_CFG0SET : IC0_CFG0SET;
icr[1] = ic ? IC1_CFG1SET : IC0_CFG1SET;
icr[2] = ic ? IC1_CFG2SET : IC0_CFG2SET;
icr[3] = ic ? IC1_CFG0CLR : IC0_CFG0CLR;
icr[4] = ic ? IC1_CFG1CLR : IC0_CFG1CLR;
icr[5] = ic ? IC1_CFG2CLR : IC0_CFG2CLR;
ret = 0;
switch (flow_type) { /* cfgregs 2:1:0 */
case IRQ_TYPE_EDGE_RISING: /* 0:0:1 */
au_writel(1 << bit, icr[5]);
au_writel(1 << bit, icr[4]);
au_writel(1 << bit, icr[0]);
set_irq_chip_and_handler_name(irq, chip,
handle_edge_irq, "riseedge");
break;
case IRQ_TYPE_EDGE_FALLING: /* 0:1:0 */
au_writel(1 << bit, icr[5]);
au_writel(1 << bit, icr[1]);
au_writel(1 << bit, icr[3]);
set_irq_chip_and_handler_name(irq, chip,
handle_edge_irq, "falledge");
break;
case IRQ_TYPE_EDGE_BOTH: /* 0:1:1 */
au_writel(1 << bit, icr[5]);
au_writel(1 << bit, icr[1]);
au_writel(1 << bit, icr[0]);
set_irq_chip_and_handler_name(irq, chip,
handle_edge_irq, "bothedge");
break;
case IRQ_TYPE_LEVEL_HIGH: /* 1:0:1 */
au_writel(1 << bit, icr[2]);
au_writel(1 << bit, icr[4]);
au_writel(1 << bit, icr[0]);
set_irq_chip_and_handler_name(irq, chip,
handle_level_irq, "hilevel");
break;
case IRQ_TYPE_LEVEL_LOW: /* 1:1:0 */
au_writel(1 << bit, icr[2]);
au_writel(1 << bit, icr[1]);
au_writel(1 << bit, icr[3]);
set_irq_chip_and_handler_name(irq, chip,
handle_level_irq, "lowlevel");
break;
case IRQ_TYPE_NONE: /* 0:0:0 */
au_writel(1 << bit, icr[5]);
au_writel(1 << bit, icr[4]);
au_writel(1 << bit, icr[3]);
/* set at least chip so we can call set_irq_type() on it */
set_irq_chip(irq, chip);
break;
default:
ret = -EINVAL;
}
au_sync();
return ret;
}
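/* Example (hypothetical caller): a board map entry such as
* { AU1000_GPIO_3, IRQ_TYPE_LEVEL_LOW, 0 } reaches this function via
* au1xxx_setup_irqmap(), programs the 1:1:0 cfg pattern and installs
* handle_level_irq for that source; drivers can change the trigger
* later through set_irq_type().
*/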
asmlinkage void plat_irq_dispatch(void)
{
unsigned int pending = read_c0_status() & read_c0_cause();
unsigned long s, off, bit;
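/* Cause bit wiring on Alchemy parts: IP7 is the CP0 count/compare
* timer, IP2/IP3 carry IC0 request 0/1 and IP4/IP5 carry IC1
* request 0/1. Only the highest-priority pending source is
* dispatched per invocation.
*/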
if (pending & CAUSEF_IP7) {
do_IRQ(MIPS_CPU_IRQ_BASE + 7);
return;
} else if (pending & CAUSEF_IP2) {
s = IC0_REQ0INT;
off = AU1000_INTC0_INT_BASE;
} else if (pending & CAUSEF_IP3) {
s = IC0_REQ1INT;
off = AU1000_INTC0_INT_BASE;
} else if (pending & CAUSEF_IP4) {
s = IC1_REQ0INT;
off = AU1000_INTC1_INT_BASE;
} else if (pending & CAUSEF_IP5) {
s = IC1_REQ1INT;
off = AU1000_INTC1_INT_BASE;
} else
goto spurious;
bit = 0;
s = au_readl(s);
if (unlikely(!s)) {
spurious:
spurious_interrupt();
return;
}
#ifdef AU1000_USB_DEV_REQ_INT
/*
* Because of the tight timing of SETUP token to reply
* transactions, the USB device-side packet complete
* interrupt needs the highest priority.
*/
if ((intc0_req0 & (1 << AU1000_USB_DEV_REQ_INT))) {
intc0_req0 &= ~(1 << AU1000_USB_DEV_REQ_INT);
bit = 1 << (AU1000_USB_DEV_REQ_INT - AU1000_INTC0_INT_BASE);
if ((pending & CAUSEF_IP2) && (s & bit)) {
do_IRQ(AU1000_USB_DEV_REQ_INT);
return;
}
#endif
bit = __ffs(intc0_req0);
intc0_req0 &= ~(1 << bit);
do_IRQ(AU1000_INTC0_INT_BASE + bit);
do_IRQ(__ffs(s) + off);
}
static void intc0_req1_irqdispatch(void)
/* setup edge/level and assign request 0/1 */
void __init au1xxx_setup_irqmap(struct au1xxx_irqmap *map, int count)
{
static unsigned long intc0_req1;
unsigned int bit;
unsigned int bit, irq_nr;
intc0_req1 |= au_readl(IC0_REQ1INT);
while (count--) {
irq_nr = map[count].im_irq;
if (!intc0_req1)
return;
if (((irq_nr < AU1000_INTC0_INT_BASE) ||
(irq_nr >= AU1000_INTC0_INT_BASE + 32)) &&
((irq_nr < AU1000_INTC1_INT_BASE) ||
(irq_nr >= AU1000_INTC1_INT_BASE + 32)))
continue;
bit = __ffs(intc0_req1);
intc0_req1 &= ~(1 << bit);
do_IRQ(AU1000_INTC0_INT_BASE + bit);
}
if (irq_nr >= AU1000_INTC1_INT_BASE) {
bit = irq_nr - AU1000_INTC1_INT_BASE;
if (map[count].im_request)
au_writel(1 << bit, IC1_ASSIGNCLR);
} else {
bit = irq_nr - AU1000_INTC0_INT_BASE;
if (map[count].im_request)
au_writel(1 << bit, IC0_ASSIGNCLR);
}
/*
* Interrupt Controller 1:
* interrupts 32 - 63
*/
static void intc1_req0_irqdispatch(void)
{
static unsigned long intc1_req0;
unsigned int bit;
intc1_req0 |= au_readl(IC1_REQ0INT);
if (!intc1_req0)
return;
bit = __ffs(intc1_req0);
intc1_req0 &= ~(1 << bit);
do_IRQ(AU1000_INTC1_INT_BASE + bit);
}
static void intc1_req1_irqdispatch(void)
{
static unsigned long intc1_req1;
unsigned int bit;
intc1_req1 |= au_readl(IC1_REQ1INT);
if (!intc1_req1)
return;
bit = __ffs(intc1_req1);
intc1_req1 &= ~(1 << bit);
do_IRQ(AU1000_INTC1_INT_BASE + bit);
}
asmlinkage void plat_irq_dispatch(void)
{
unsigned int pending = read_c0_status() & read_c0_cause();
if (pending & CAUSEF_IP7)
do_IRQ(MIPS_CPU_IRQ_BASE + 7);
else if (pending & CAUSEF_IP2)
intc0_req0_irqdispatch();
else if (pending & CAUSEF_IP3)
intc0_req1_irqdispatch();
else if (pending & CAUSEF_IP4)
intc1_req0_irqdispatch();
else if (pending & CAUSEF_IP5)
intc1_req1_irqdispatch();
else
spurious_interrupt();
au1x_ic_settype(irq_nr, map[count].im_type);
}
}
void __init arch_init_irq(void)
{
int i;
struct au1xxx_irqmap *imp;
extern struct au1xxx_irqmap au1xxx_irq_map[];
extern struct au1xxx_irqmap au1xxx_ic0_map[];
extern int au1xxx_nr_irqs;
extern int au1xxx_ic0_nr_irqs;
/*
* Initialize interrupt controllers to a safe state.
@ -569,28 +587,25 @@ void __init arch_init_irq(void)
mips_cpu_irq_init();
/* register all 64 possible IC0+IC1 irq sources as type "none".
* Use set_irq_type() to set edge/level behaviour at runtime.
*/
for (i = AU1000_INTC0_INT_BASE;
(i < AU1000_INTC0_INT_BASE + 32); i++)
au1x_ic_settype(i, IRQ_TYPE_NONE);
for (i = AU1000_INTC1_INT_BASE;
(i < AU1000_INTC1_INT_BASE + 32); i++)
au1x_ic_settype(i, IRQ_TYPE_NONE);
/*
* Initialize IC0, which is fixed per processor.
*/
imp = au1xxx_ic0_map;
for (i = 0; i < au1xxx_ic0_nr_irqs; i++) {
setup_local_irq(imp->im_irq, imp->im_type, imp->im_request);
imp++;
}
au1xxx_setup_irqmap(au1xxx_ic0_map, ARRAY_SIZE(au1xxx_ic0_map));
/*
* Now set up the irq mapping for the board.
*/
imp = au1xxx_irq_map;
for (i = 0; i < au1xxx_nr_irqs; i++) {
setup_local_irq(imp->im_irq, imp->im_type, imp->im_request);
imp++;
}
set_c0_status(IE_IRQ0 | IE_IRQ1 | IE_IRQ2 | IE_IRQ3 | IE_IRQ4);
/* Board specific IRQ initialization.
/* Boards can register additional (GPIO-based) IRQs.
*/
if (board_init_irq)
board_init_irq();
board_init_irq();
set_c0_status(IE_IRQ0 | IE_IRQ1 | IE_IRQ2 | IE_IRQ3);
}

View File

@ -35,25 +35,12 @@
#include <linux/jiffies.h>
#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/mach-au1x00/au1000.h>
#ifdef CONFIG_PM
#define DEBUG 1
#ifdef DEBUG
#define DPRINTK(fmt, args...) printk(KERN_DEBUG "%s: " fmt, __func__, ## args)
#else
#define DPRINTK(fmt, args...)
#if defined(CONFIG_SOC_AU1550) || defined(CONFIG_SOC_AU1200)
#include <asm/mach-au1x00/au1xxx_dbdma.h>
#endif
static void au1000_calibrate_delay(void);
extern unsigned long save_local_and_disable(int controller);
extern void restore_local_and_enable(int controller, unsigned long mask);
extern void local_enable_irq(unsigned int irq_nr);
static DEFINE_SPINLOCK(pm_lock);
#ifdef CONFIG_PM
/*
* We need to save/restore a bunch of core registers that are
@ -65,29 +52,16 @@ static DEFINE_SPINLOCK(pm_lock);
* We only have to save/restore registers that aren't otherwise
* done as part of a driver pm_* function.
*/
static unsigned int sleep_aux_pll_cntrl;
static unsigned int sleep_cpu_pll_cntrl;
static unsigned int sleep_pin_function;
static unsigned int sleep_uart0_inten;
static unsigned int sleep_uart0_fifoctl;
static unsigned int sleep_uart0_linectl;
static unsigned int sleep_uart0_clkdiv;
static unsigned int sleep_uart0_enable;
static unsigned int sleep_usbhost_enable;
static unsigned int sleep_usbdev_enable;
static unsigned int sleep_static_memctlr[4][3];
static unsigned int sleep_uart0_inten;
static unsigned int sleep_uart0_fifoctl;
static unsigned int sleep_uart0_linectl;
static unsigned int sleep_uart0_clkdiv;
static unsigned int sleep_uart0_enable;
static unsigned int sleep_usb[2];
static unsigned int sleep_sys_clocks[5];
static unsigned int sleep_sys_pinfunc;
static unsigned int sleep_static_memctlr[4][3];
/*
* Define this to cause the value you write to /proc/sys/pm/sleep to
* set the TOY timer for the amount of time you want to sleep.
* This is done mainly for testing, but may be useful in other cases.
* The value is the number of 32KHz ticks to sleep.
*/
#define SLEEP_TEST_TIMEOUT 1
#ifdef SLEEP_TEST_TIMEOUT
static int sleep_ticks;
void wakeup_counter0_set(int ticks);
#endif
static void save_core_regs(void)
{
@ -105,31 +79,45 @@ static void save_core_regs(void)
sleep_uart0_linectl = au_readl(UART0_ADDR + UART_LCR);
sleep_uart0_clkdiv = au_readl(UART0_ADDR + UART_CLK);
sleep_uart0_enable = au_readl(UART0_ADDR + UART_MOD_CNTRL);
au_sync();
#ifndef CONFIG_SOC_AU1200
/* Shutdown USB host/device. */
sleep_usbhost_enable = au_readl(USB_HOST_CONFIG);
sleep_usb[0] = au_readl(USB_HOST_CONFIG);
/* There appears to be some undocumented reset register.... */
au_writel(0, 0xb0100004); au_sync();
au_writel(0, USB_HOST_CONFIG); au_sync();
au_writel(0, 0xb0100004);
au_sync();
au_writel(0, USB_HOST_CONFIG);
au_sync();
sleep_usbdev_enable = au_readl(USBD_ENABLE);
au_writel(0, USBD_ENABLE); au_sync();
sleep_usb[1] = au_readl(USBD_ENABLE);
au_writel(0, USBD_ENABLE);
au_sync();
#else /* AU1200 */
/* enable access to OTG mmio so we can save OTG CAP/MUX.
* FIXME: write an OTG driver and move this stuff there!
*/
au_writel(au_readl(USB_MSR_BASE + 4) | (1 << 6), USB_MSR_BASE + 4);
au_sync();
sleep_usb[0] = au_readl(0xb4020020); /* OTG_CAP */
sleep_usb[1] = au_readl(0xb4020024); /* OTG_MUX */
#endif
/* Save interrupt controller state. */
save_au1xxx_intctl();
/* Clocks and PLLs. */
sleep_aux_pll_cntrl = au_readl(SYS_AUXPLL);
sleep_sys_clocks[0] = au_readl(SYS_FREQCTRL0);
sleep_sys_clocks[1] = au_readl(SYS_FREQCTRL1);
sleep_sys_clocks[2] = au_readl(SYS_CLKSRC);
sleep_sys_clocks[3] = au_readl(SYS_CPUPLL);
sleep_sys_clocks[4] = au_readl(SYS_AUXPLL);
/*
* We don't really need to do this one, but unless we
* write it again it won't have a valid value if we
* happen to read it.
*/
sleep_cpu_pll_cntrl = au_readl(SYS_CPUPLL);
sleep_pin_function = au_readl(SYS_PINFUNC);
/* pin mux config */
sleep_sys_pinfunc = au_readl(SYS_PINFUNC);
/* Save the static memory controller configuration. */
sleep_static_memctlr[0][0] = au_readl(MEM_STCFG0);
@ -144,16 +132,45 @@ static void save_core_regs(void)
sleep_static_memctlr[3][0] = au_readl(MEM_STCFG3);
sleep_static_memctlr[3][1] = au_readl(MEM_STTIME3);
sleep_static_memctlr[3][2] = au_readl(MEM_STADDR3);
#if defined(CONFIG_SOC_AU1550) || defined(CONFIG_SOC_AU1200)
au1xxx_dbdma_suspend();
#endif
}
static void restore_core_regs(void)
{
extern void restore_au1xxx_intctl(void);
extern void wakeup_counter0_adjust(void);
/* restore clock configuration. Writing CPUPLL last will
* stall a bit and stabilize other clocks (unless this is
* one of those Au1000s with a write-only PLL, where we don't
* have a valid value)
*/
au_writel(sleep_sys_clocks[0], SYS_FREQCTRL0);
au_writel(sleep_sys_clocks[1], SYS_FREQCTRL1);
au_writel(sleep_sys_clocks[2], SYS_CLKSRC);
au_writel(sleep_sys_clocks[4], SYS_AUXPLL);
if (!au1xxx_cpu_has_pll_wo())
au_writel(sleep_sys_clocks[3], SYS_CPUPLL);
au_sync();
au_writel(sleep_aux_pll_cntrl, SYS_AUXPLL); au_sync();
au_writel(sleep_cpu_pll_cntrl, SYS_CPUPLL); au_sync();
au_writel(sleep_pin_function, SYS_PINFUNC); au_sync();
au_writel(sleep_sys_pinfunc, SYS_PINFUNC);
au_sync();
#ifndef CONFIG_SOC_AU1200
au_writel(sleep_usb[0], USB_HOST_CONFIG);
au_writel(sleep_usb[1], USBD_ENABLE);
au_sync();
#else
/* enable access to OTG memory */
au_writel(au_readl(USB_MSR_BASE + 4) | (1 << 6), USB_MSR_BASE + 4);
au_sync();
/* restore OTG caps and port mux. */
au_writel(sleep_usb[0], 0xb4020020 + 0); /* OTG_CAP */
au_sync();
au_writel(sleep_usb[1], 0xb4020020 + 4); /* OTG_MUX */
au_sync();
#endif
/* Restore the static memory controller configuration. */
au_writel(sleep_static_memctlr[0][0], MEM_STCFG0);
@ -184,282 +201,17 @@ static void restore_core_regs(void)
}
restore_au1xxx_intctl();
wakeup_counter0_adjust();
#if defined(CONFIG_SOC_AU1550) || defined(CONFIG_SOC_AU1200)
au1xxx_dbdma_resume();
#endif
}
unsigned long suspend_mode;
void wakeup_from_suspend(void)
void au_sleep(void)
{
suspend_mode = 0;
}
int au_sleep(void)
{
unsigned long wakeup, flags;
extern void save_and_sleep(void);
spin_lock_irqsave(&pm_lock, flags);
save_core_regs();
flush_cache_all();
/**
** The code below is all system dependent and we should probably
** have a function call out of here to set this up. You need
** to configure the GPIO or timer interrupts that will bring
** you out of sleep.
** For testing, the TOY counter wakeup is useful.
**/
#if 0
au_writel(au_readl(SYS_PINSTATERD) & ~(1 << 11), SYS_PINSTATERD);
/* GPIO 6 can cause a wake up event */
wakeup = au_readl(SYS_WAKEMSK);
wakeup &= ~(1 << 8); /* turn off match20 wakeup */
wakeup |= 1 << 6; /* turn on GPIO 6 wakeup */
#else
/* For testing, allow match20 to wake us up. */
#ifdef SLEEP_TEST_TIMEOUT
wakeup_counter0_set(sleep_ticks);
#endif
wakeup = 1 << 8; /* turn on match20 wakeup */
wakeup = 0;
#endif
au_writel(1, SYS_WAKESRC); /* clear cause */
au_sync();
au_writel(wakeup, SYS_WAKEMSK);
au_sync();
save_and_sleep();
/*
* After a wakeup, the cpu vectors back to 0x1fc00000, so
* it's up to the boot code to get us back here.
*/
au1xxx_save_and_sleep();
restore_core_regs();
spin_unlock_irqrestore(&pm_lock, flags);
return 0;
}
static int pm_do_sleep(ctl_table *ctl, int write, struct file *file,
void __user *buffer, size_t *len, loff_t *ppos)
{
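/* sysctl handler for /proc/sys/pm/sleep: any write (optionally the
* number of 32KHz ticks to use as the test wakeup timeout) puts the
* system to sleep; reads return nothing.
*/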
#ifdef SLEEP_TEST_TIMEOUT
#define TMPBUFLEN2 16
char buf[TMPBUFLEN2], *p;
#endif
if (!write)
*len = 0;
else {
#ifdef SLEEP_TEST_TIMEOUT
if (*len > TMPBUFLEN2 - 1)
return -EFAULT;
if (copy_from_user(buf, buffer, *len))
return -EFAULT;
buf[*len] = 0;
p = buf;
sleep_ticks = simple_strtoul(p, &p, 0);
#endif
au_sleep();
}
return 0;
}
static int pm_do_freq(ctl_table *ctl, int write, struct file *file,
void __user *buffer, size_t *len, loff_t *ppos)
{
int retval = 0, i;
unsigned long val, pll;
#define TMPBUFLEN 64
#define MAX_CPU_FREQ 396
char buf[TMPBUFLEN], *p;
unsigned long flags, intc0_mask, intc1_mask;
unsigned long old_baud_base, old_cpu_freq, old_clk, old_refresh;
unsigned long new_baud_base, new_cpu_freq, new_clk, new_refresh;
unsigned long baud_rate;
spin_lock_irqsave(&pm_lock, flags);
if (!write)
*len = 0;
else {
/* Parse the new frequency */
if (*len > TMPBUFLEN - 1) {
spin_unlock_irqrestore(&pm_lock, flags);
return -EFAULT;
}
if (copy_from_user(buf, buffer, *len)) {
spin_unlock_irqrestore(&pm_lock, flags);
return -EFAULT;
}
buf[*len] = 0;
p = buf;
val = simple_strtoul(p, &p, 0);
if (val > MAX_CPU_FREQ) {
spin_unlock_irqrestore(&pm_lock, flags);
return -EFAULT;
}
pll = val / 12;
if ((pll > 33) || (pll < 7)) { /* 396 MHz max, 84 MHz min */
/* Revisit this for higher speed CPUs */
spin_unlock_irqrestore(&pm_lock, flags);
return -EFAULT;
}
old_baud_base = get_au1x00_uart_baud_base();
old_cpu_freq = get_au1x00_speed();
new_cpu_freq = pll * 12 * 1000000;
new_baud_base = (new_cpu_freq / (2 * ((int)(au_readl(SYS_POWERCTRL)
& 0x03) + 2) * 16));
set_au1x00_speed(new_cpu_freq);
set_au1x00_uart_baud_base(new_baud_base);
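/* Scale the SDRAM refresh interval in step with the new CPU clock so
* the refresh period in wall-clock time stays roughly unchanged.
*/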
old_refresh = au_readl(MEM_SDREFCFG) & 0x1ffffff;
new_refresh = ((old_refresh * new_cpu_freq) / old_cpu_freq) |
(au_readl(MEM_SDREFCFG) & ~0x1ffffff);
au_writel(pll, SYS_CPUPLL);
au_sync_delay(1);
au_writel(new_refresh, MEM_SDREFCFG);
au_sync_delay(1);
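/* Walk the four UART blocks and rescale the clock divisor of any UART
* that is switched on, so its configured baud rate survives the PLL
* change (divisor = baud_base / baud_rate, snapped to the nearest
* standard rate to absorb rounding error).
*/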
for (i = 0; i < 4; i++)
if (au_readl(UART_BASE + UART_MOD_CNTRL +
i * 0x00100000) == 3) {
old_clk = au_readl(UART_BASE + UART_CLK +
i * 0x00100000);
baud_rate = old_baud_base / old_clk;
/*
* We won't get an exact baud rate and the error
* could be significant enough that our new
* calculation will result in a clock that will
* give us a baud rate that's too far off from
* what we really want.
*/
if (baud_rate > 100000)
baud_rate = 115200;
else if (baud_rate > 50000)
baud_rate = 57600;
else if (baud_rate > 30000)
baud_rate = 38400;
else if (baud_rate > 17000)
baud_rate = 19200;
else
baud_rate = 9600;
new_clk = new_baud_base / baud_rate;
au_writel(new_clk, UART_BASE + UART_CLK +
i * 0x00100000);
au_sync_delay(10);
}
}
/*
* We don't want _any_ interrupts other than match20. Otherwise our
* au1000_calibrate_delay() calculation will be off, potentially a lot.
*/
intc0_mask = save_local_and_disable(0);
intc1_mask = save_local_and_disable(1);
local_enable_irq(AU1000_TOY_MATCH2_INT);
spin_unlock_irqrestore(&pm_lock, flags);
au1000_calibrate_delay();
restore_local_and_enable(0, intc0_mask);
restore_local_and_enable(1, intc1_mask);
return retval;
}
static struct ctl_table pm_table[] = {
{
.ctl_name = CTL_UNNUMBERED,
.procname = "sleep",
.data = NULL,
.maxlen = 0,
.mode = 0600,
.proc_handler = &pm_do_sleep
},
{
.ctl_name = CTL_UNNUMBERED,
.procname = "freq",
.data = NULL,
.maxlen = 0,
.mode = 0600,
.proc_handler = &pm_do_freq
},
{}
};
static struct ctl_table pm_dir_table[] = {
{
.ctl_name = CTL_UNNUMBERED,
.procname = "pm",
.mode = 0555,
.child = pm_table
},
{}
};
/*
* Initialize power interface
*/
static int __init pm_init(void)
{
register_sysctl_table(pm_dir_table);
return 0;
}
__initcall(pm_init);
/*
* This is right out of init/main.c
*/
/*
* This is the number of bits of precision for the loops_per_jiffy.
* Each bit takes on average 1.5/HZ seconds. This (like the original)
* is a little better than 1%.
*/
#define LPS_PREC 8
static void au1000_calibrate_delay(void)
{
unsigned long ticks, loopbit;
int lps_precision = LPS_PREC;
loops_per_jiffy = 1 << 12;
while (loops_per_jiffy <<= 1) {
/* Wait for "start of" clock tick */
ticks = jiffies;
while (ticks == jiffies)
/* nothing */ ;
/* Go ... */
ticks = jiffies;
__delay(loops_per_jiffy);
ticks = jiffies - ticks;
if (ticks)
break;
}
/*
* Do a binary approximation to get loops_per_jiffy set to be equal
* to one clock tick (up to lps_precision bits)
*/
loops_per_jiffy >>= 1;
loopbit = loops_per_jiffy;
while (lps_precision-- && (loopbit >>= 1)) {
loops_per_jiffy |= loopbit;
ticks = jiffies;
while (ticks == jiffies);
ticks = jiffies;
__delay(loops_per_jiffy);
if (jiffies != ticks) /* longer than 1 tick */
loops_per_jiffy &= ~loopbit;
}
}
#endif /* CONFIG_PM */

View File

@ -31,8 +31,6 @@
#include <asm/mach-au1x00/au1000.h>
extern int au_sleep(void);
void au1000_restart(char *command)
{
/* Set all integrated peripherals to disabled states */

View File

@ -35,7 +35,6 @@
#include <asm/time.h>
#include <au1000.h>
#include <prom.h>
extern void __init board_setup(void);
extern void au1000_restart(char *);
@ -45,80 +44,34 @@ extern void set_cpuspec(void);
void __init plat_mem_setup(void)
{
struct cpu_spec *sp;
char *argptr;
unsigned long prid, cpufreq, bclk;
unsigned long est_freq;
set_cpuspec();
sp = cur_cpu_spec[0];
/* determine core clock */
est_freq = au1xxx_calc_clock();
est_freq += 5000; /* round */
est_freq -= est_freq % 10000;
printk(KERN_INFO "(PRId %08x) @ %lu.%02lu MHz\n", read_c0_prid(),
est_freq / 1000000, ((est_freq % 1000000) * 100) / 1000000);
_machine_restart = au1000_restart;
_machine_halt = au1000_halt;
pm_power_off = au1000_power_off;
board_setup(); /* board specific setup */
prid = read_c0_prid();
if (sp->cpu_pll_wo)
#ifdef CONFIG_SOC_AU1000_FREQUENCY
cpufreq = CONFIG_SOC_AU1000_FREQUENCY / 1000000;
#else
cpufreq = 396;
#endif
else
cpufreq = (au_readl(SYS_CPUPLL) & 0x3F) * 12;
printk(KERN_INFO "(PRID %08lx) @ %ld MHz\n", prid, cpufreq);
if (sp->cpu_bclk) {
/* Enable BCLK switching */
bclk = au_readl(SYS_POWERCTRL);
au_writel(bclk | 0x60, SYS_POWERCTRL);
printk(KERN_INFO "BCLK switching enabled!\n");
}
if (sp->cpu_od)
if (au1xxx_cpu_needs_config_od())
/* Various early Au1xx0 errata corrected by this */
set_c0_config(1 << 19); /* Set Config[OD] */
else
/* Clear to obtain best system bus performance */
clear_c0_config(1 << 19); /* Clear Config[OD] */
argptr = prom_getcmdline();
#ifdef CONFIG_SERIAL_8250_CONSOLE
argptr = strstr(argptr, "console=");
if (argptr == NULL) {
argptr = prom_getcmdline();
strcat(argptr, " console=ttyS0,115200");
}
#endif
#ifdef CONFIG_FB_AU1100
argptr = strstr(argptr, "video=");
if (argptr == NULL) {
argptr = prom_getcmdline();
/* default panel */
/*strcat(argptr, " video=au1100fb:panel:Sharp_320x240_16");*/
}
#endif
#if defined(CONFIG_SOUND_AU1X00) && !defined(CONFIG_SOC_AU1000)
/* au1000 does not support vra, au1500 and au1100 do */
strcat(argptr, " au1000_audio=vra");
argptr = prom_getcmdline();
#endif
_machine_restart = au1000_restart;
_machine_halt = au1000_halt;
pm_power_off = au1000_power_off;
/* IO/MEM resources. */
set_io_port_base(0);
ioport_resource.start = IOPORT_RESOURCE_START;
ioport_resource.end = IOPORT_RESOURCE_END;
iomem_resource.start = IOMEM_RESOURCE_START;
iomem_resource.end = IOMEM_RESOURCE_END;
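/* Bring up the 32KHz counter block: wait for any pending control write,
* enable the oscillator (E0) and TOY counter (EN0), then zero the TOY
* trim so it ticks at the full 32.768 kHz rate. plat_time_init() later
* checks these bits to decide whether the 32KHz clocksource is usable.
*/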
while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_E0S);
au_writel(SYS_CNTRL_E0 | SYS_CNTRL_EN0, SYS_COUNTER_CNTRL);
au_sync();
while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_T0S);
au_writel(0, SYS_TOYTRIM);
}
#if defined(CONFIG_64BIT_PHYS_ADDR)

View File

@ -15,16 +15,17 @@
#include <asm/regdef.h>
#include <asm/stackframe.h>
.extern __flush_cache_all
.text
.set macro
.set noat
.set noreorder
.set noat
.align 5
/* Save all of the processor general registers and go to sleep.
* A wakeup condition will get us back here to restore the registers.
*/
LEAF(save_and_sleep)
LEAF(au1xxx_save_and_sleep)
subu sp, PT_SIZE
sw $1, PT_R1(sp)
sw $2, PT_R2(sp)
@ -33,14 +34,6 @@ LEAF(save_and_sleep)
sw $5, PT_R5(sp)
sw $6, PT_R6(sp)
sw $7, PT_R7(sp)
sw $8, PT_R8(sp)
sw $9, PT_R9(sp)
sw $10, PT_R10(sp)
sw $11, PT_R11(sp)
sw $12, PT_R12(sp)
sw $13, PT_R13(sp)
sw $14, PT_R14(sp)
sw $15, PT_R15(sp)
sw $16, PT_R16(sp)
sw $17, PT_R17(sp)
sw $18, PT_R18(sp)
@ -49,12 +42,9 @@ LEAF(save_and_sleep)
sw $21, PT_R21(sp)
sw $22, PT_R22(sp)
sw $23, PT_R23(sp)
sw $24, PT_R24(sp)
sw $25, PT_R25(sp)
sw $26, PT_R26(sp)
sw $27, PT_R27(sp)
sw $28, PT_R28(sp)
sw $29, PT_R29(sp)
sw $30, PT_R30(sp)
sw $31, PT_R31(sp)
mfc0 k0, CP0_STATUS
@ -66,20 +56,26 @@ LEAF(save_and_sleep)
mfc0 k0, CP0_CONFIG
sw k0, 0x14(sp)
/* flush caches to make sure context is in memory */
la t1, __flush_cache_all
lw t0, 0(t1)
jalr t0
nop
/* Now set up the scratch registers so the boot rom will
* return to this point upon wakeup.
* sys_scratch0 : SP
* sys_scratch1 : RA
*/
la k0, 1f
lui k1, 0xb190
ori k1, 0x18
sw sp, 0(k1)
ori k1, 0x1c
sw k0, 0(k1)
lui t3, 0xb190 /* sys_xxx */
sw sp, 0x0018(t3)
la k0, 3f /* resume path */
sw k0, 0x001c(t3)
/* Put SDRAM into self refresh. Preload instructions into cache,
* issue a precharge, then auto refresh, then sleep commands to it.
*/
la t0, sdsleep
/* Put SDRAM into self refresh: Preload instructions into cache,
* issue a precharge, auto/self refresh, then sleep commands to it.
*/
la t0, 1f
.set mips3
cache 0x14, 0(t0)
cache 0x14, 32(t0)
@ -87,24 +83,57 @@ LEAF(save_and_sleep)
cache 0x14, 96(t0)
.set mips0
sdsleep:
lui k0, 0xb400
sw zero, 0x001c(k0) /* Precharge */
sw zero, 0x0020(k0) /* Auto refresh */
sw zero, 0x0030(k0) /* SDRAM sleep */
1: lui a0, 0xb400 /* mem_xxx */
#if defined(CONFIG_SOC_AU1000) || defined(CONFIG_SOC_AU1100) || \
defined(CONFIG_SOC_AU1500)
sw zero, 0x001c(a0) /* Precharge */
sync
sw zero, 0x0020(a0) /* Auto Refresh */
sync
sw zero, 0x0030(a0) /* Sleep */
sync
#endif
#if defined(CONFIG_SOC_AU1550) || defined(CONFIG_SOC_AU1200)
sw zero, 0x08c0(a0) /* Precharge */
sync
sw zero, 0x08d0(a0) /* Self Refresh */
sync
lui k1, 0xb190
sw zero, 0x0078(k1) /* get ready to sleep */
/* wait for sdram to enter self-refresh mode */
lui t0, 0x0100
2: lw t1, 0x0850(a0) /* mem_sdstat */
and t2, t1, t0
beq t2, zero, 2b
nop
/* disable SDRAM clocks */
lui t0, 0xcfff
ori t0, t0, 0xffff
lw t1, 0x0840(a0) /* mem_sdconfiga */
and t1, t0, t1 /* clear CE[1:0] */
sw t1, 0x0840(a0) /* mem_sdconfiga */
sync
sw zero, 0x007c(k1) /* Put processor to sleep */
#endif
/* put power supply and processor to sleep */
sw zero, 0x0078(t3) /* sys_slppwr */
sync
sw zero, 0x007c(t3) /* sys_sleep */
sync
nop
nop
nop
nop
nop
nop
nop
nop
/* This is where we return upon wakeup.
* Reload all of the registers and return.
*/
1: nop
lw k0, 0x20(sp)
3: lw k0, 0x20(sp)
mtc0 k0, CP0_STATUS
lw k0, 0x1c(sp)
mtc0 k0, CP0_CONTEXT
@ -113,10 +142,11 @@ sdsleep:
lw k0, 0x14(sp)
mtc0 k0, CP0_CONFIG
/* We need to catch the ealry Alchemy SOCs with
/* We need to catch the early Alchemy SOCs with
* the write-only Config[OD] bit and set it back to one...
*/
jal au1x00_fixup_config_od
nop
lw $1, PT_R1(sp)
lw $2, PT_R2(sp)
lw $3, PT_R3(sp)
@ -124,14 +154,6 @@ sdsleep:
lw $5, PT_R5(sp)
lw $6, PT_R6(sp)
lw $7, PT_R7(sp)
lw $8, PT_R8(sp)
lw $9, PT_R9(sp)
lw $10, PT_R10(sp)
lw $11, PT_R11(sp)
lw $12, PT_R12(sp)
lw $13, PT_R13(sp)
lw $14, PT_R14(sp)
lw $15, PT_R15(sp)
lw $16, PT_R16(sp)
lw $17, PT_R17(sp)
lw $18, PT_R18(sp)
@ -140,15 +162,11 @@ sdsleep:
lw $21, PT_R21(sp)
lw $22, PT_R22(sp)
lw $23, PT_R23(sp)
lw $24, PT_R24(sp)
lw $25, PT_R25(sp)
lw $26, PT_R26(sp)
lw $27, PT_R27(sp)
lw $28, PT_R28(sp)
lw $29, PT_R29(sp)
lw $30, PT_R30(sp)
lw $31, PT_R31(sp)
addiu sp, PT_SIZE
jr ra
END(save_and_sleep)
addiu sp, PT_SIZE
END(au1xxx_save_and_sleep)

View File

@ -1,5 +1,7 @@
/*
* Copyright (C) 2008 Manuel Lauss <mano@roarinelk.homelinux.net>
*
* Previous incarnations were:
* Copyright (C) 2001, 2006, 2008 MontaVista Software, <source@mvista.com>
* Copied and modified Carsten Langgaard's time.c
*
@ -23,244 +25,141 @@
*
* ########################################################################
*
* Setting up the clock on the MIPS boards.
*
* We provide the clock interrupt processing and the timer offset compute
* functions. If CONFIG_PM is selected, we also ensure the 32KHz timer is
* available. -- Dan
* Clocksource/event using the 32.768kHz-clocked Counter1 ('RTC' in the
* databooks). Firmware/Board init code must enable the counters in the
* counter control register, otherwise the CP0 counter clocksource/event
* will be installed instead (and use of the 'wait' instruction is prohibited).
*/
#include <linux/types.h>
#include <linux/init.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <asm/mipsregs.h>
#include <asm/time.h>
#include <asm/mach-au1x00/au1000.h>
static int no_au1xxx_32khz;
/* 32kHz clock enabled and detected */
#define CNTR_OK (SYS_CNTRL_E0 | SYS_CNTRL_32S)
extern int allow_au1k_wait; /* default off for CP0 Counter */
#ifdef CONFIG_PM
#if HZ < 100 || HZ > 1000
#error "unsupported HZ value! Must be in [100,1000]"
#endif
#define MATCH20_INC (328 * 100 / HZ) /* magic number 328 is for HZ=100... */
static unsigned long last_pc0, last_match20;
#endif
static DEFINE_SPINLOCK(time_lock);
unsigned long wtimer;
#ifdef CONFIG_PM
static irqreturn_t counter0_irq(int irq, void *dev_id)
static cycle_t au1x_counter1_read(void)
{
unsigned long pc0;
int time_elapsed;
static int jiffie_drift;
return au_readl(SYS_RTCREAD);
}
if (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_M20) {
/* should never happen! */
printk(KERN_WARNING "counter 0 w status error\n");
return IRQ_NONE;
}
static struct clocksource au1x_counter1_clocksource = {
.name = "alchemy-counter1",
.read = au1x_counter1_read,
.mask = CLOCKSOURCE_MASK(32),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
.rating = 100,
};
pc0 = au_readl(SYS_TOYREAD);
if (pc0 < last_match20)
/* counter overflowed */
time_elapsed = (0xffffffff - last_match20) + pc0;
else
time_elapsed = pc0 - last_match20;
while (time_elapsed > 0) {
do_timer(1);
#ifndef CONFIG_SMP
update_process_times(user_mode(get_irq_regs()));
#endif
time_elapsed -= MATCH20_INC;
last_match20 += MATCH20_INC;
jiffie_drift++;
}
last_pc0 = pc0;
au_writel(last_match20 + MATCH20_INC, SYS_TOYMATCH2);
static int au1x_rtcmatch2_set_next_event(unsigned long delta,
struct clock_event_device *cd)
{
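/* Clockevent requests arrive as a tick delta; convert it to an absolute
* Counter1 (RTC) match value before programming match register 2.
*/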
delta += au_readl(SYS_RTCREAD);
/* wait for register access */
while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_M21)
;
au_writel(delta, SYS_RTCMATCH2);
au_sync();
/*
* Our counter ticks at 10.009765625 ms/tick, so we're running
* almost 10 uS too slow per tick.
*/
return 0;
}
if (jiffie_drift >= 999) {
jiffie_drift -= 999;
do_timer(1); /* increment jiffies by one */
#ifndef CONFIG_SMP
update_process_times(user_mode(get_irq_regs()));
#endif
}
static void au1x_rtcmatch2_set_mode(enum clock_event_mode mode,
struct clock_event_device *cd)
{
}
static irqreturn_t au1x_rtcmatch2_irq(int irq, void *dev_id)
{
struct clock_event_device *cd = dev_id;
cd->event_handler(cd);
return IRQ_HANDLED;
}
struct irqaction counter0_action = {
.handler = counter0_irq,
.flags = IRQF_DISABLED,
.name = "alchemy-toy",
.dev_id = NULL,
static struct clock_event_device au1x_rtcmatch2_clockdev = {
.name = "rtcmatch2",
.features = CLOCK_EVT_FEAT_ONESHOT,
.rating = 100,
.irq = AU1000_RTC_MATCH2_INT,
.set_next_event = au1x_rtcmatch2_set_next_event,
.set_mode = au1x_rtcmatch2_set_mode,
.cpumask = CPU_MASK_ALL,
};
/* When we wake up from sleep, we have to "catch up" on all of the
* timer ticks we have missed.
*/
void wakeup_counter0_adjust(void)
{
unsigned long pc0;
int time_elapsed;
pc0 = au_readl(SYS_TOYREAD);
if (pc0 < last_match20)
/* counter overflowed */
time_elapsed = (0xffffffff - last_match20) + pc0;
else
time_elapsed = pc0 - last_match20;
while (time_elapsed > 0) {
time_elapsed -= MATCH20_INC;
last_match20 += MATCH20_INC;
}
last_pc0 = pc0;
au_writel(last_match20 + MATCH20_INC, SYS_TOYMATCH2);
au_sync();
}
/* This is just for debugging to set the timer for a sleep delay. */
void wakeup_counter0_set(int ticks)
{
unsigned long pc0;
pc0 = au_readl(SYS_TOYREAD);
last_pc0 = pc0;
au_writel(last_match20 + (MATCH20_INC * ticks), SYS_TOYMATCH2);
au_sync();
}
#endif
/*
* I haven't found anyone that doesn't use a 12 MHz source clock,
* but just in case.....
*/
#define AU1000_SRC_CLK 12000000
/*
* We read the real processor speed from the PLL. This is important
* because it is more accurate than computing it from the 32 KHz
* counter, if it exists. If we don't have an accurate processor
* speed, all of the peripherals that derive their clocks based on
* this advertised speed will introduce error and sometimes not work
* properly. This function is further convoluted to still allow configurations
* to do that in case they have really, really old silicon with a
* write-only PLL register, that we need the 32 KHz when power management
* "wait" is enabled, and we need to detect if the 32 KHz isn't present
* but requested......got it? :-) -- Dan
*/
unsigned long calc_clock(void)
{
unsigned long cpu_speed;
unsigned long flags;
unsigned long counter;
spin_lock_irqsave(&time_lock, flags);
/* Power management cares if we don't have a 32 KHz counter. */
no_au1xxx_32khz = 0;
counter = au_readl(SYS_COUNTER_CNTRL);
if (counter & SYS_CNTRL_E0) {
int trim_divide = 16;
au_writel(counter | SYS_CNTRL_EN1, SYS_COUNTER_CNTRL);
while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_T1S);
/* RTC now ticks at 32.768/16 kHz */
au_writel(trim_divide - 1, SYS_RTCTRIM);
while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_T1S);
while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_C1S);
au_writel(0, SYS_TOYWRITE);
while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_C1S);
} else
no_au1xxx_32khz = 1;
/*
* On early Au1000, sys_cpupll was write-only. Since these
* silicon versions of Au1000 are not sold by AMD, we don't bend
* over backwards trying to determine the frequency.
*/
if (cur_cpu_spec[0]->cpu_pll_wo)
#ifdef CONFIG_SOC_AU1000_FREQUENCY
cpu_speed = CONFIG_SOC_AU1000_FREQUENCY;
#else
cpu_speed = 396000000;
#endif
else
cpu_speed = (au_readl(SYS_CPUPLL) & 0x0000003f) * AU1000_SRC_CLK;
/* On Alchemy CPU:counter ratio is 1:1 */
mips_hpt_frequency = cpu_speed;
/* Equation: Baudrate = CPU / (SD * 2 * CLKDIV * 16) */
set_au1x00_uart_baud_base(cpu_speed / (2 * ((int)(au_readl(SYS_POWERCTRL)
& 0x03) + 2) * 16));
spin_unlock_irqrestore(&time_lock, flags);
return cpu_speed;
}
static struct irqaction au1x_rtcmatch2_irqaction = {
.handler = au1x_rtcmatch2_irq,
.flags = IRQF_DISABLED | IRQF_TIMER,
.name = "timer",
.dev_id = &au1x_rtcmatch2_clockdev,
};
void __init plat_time_init(void)
{
unsigned int est_freq = calc_clock();
struct clock_event_device *cd = &au1x_rtcmatch2_clockdev;
unsigned long t;
est_freq += 5000; /* round */
est_freq -= est_freq%10000;
printk(KERN_INFO "CPU frequency %u.%02u MHz\n",
est_freq / 1000000, ((est_freq % 1000000) * 100) / 1000000);
set_au1x00_speed(est_freq);
set_au1x00_lcd_clock(); /* program the LCD clock */
#ifdef CONFIG_PM
/*
* setup counter 0, since it keeps ticking after a
* 'wait' instruction has been executed. The CP0 timer and
* counter 1 do NOT continue running after 'wait'
*
* It's too early to call request_irq() here, so we handle
* counter 0 interrupt as a special irq and it doesn't show
* up under /proc/interrupts.
*
* Check to ensure we really have a 32 KHz oscillator before
* we do this.
/* Check if firmware (YAMON, ...) has enabled 32kHz and clock
* has been detected. If so install the rtcmatch2 clocksource,
* otherwise don't bother. Note that both bits being set is by
* no means a definite guarantee that the counters actually work
* (the 32S bit seems to be stuck set to 1 once a single clock-
* edge is detected, hence the timeouts).
*/
if (no_au1xxx_32khz)
printk(KERN_WARNING "WARNING: no 32KHz clock found.\n");
else {
while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_C0S);
au_writel(0, SYS_TOYWRITE);
while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_C0S);
if (CNTR_OK != (au_readl(SYS_COUNTER_CNTRL) & CNTR_OK))
goto cntr_err;
au_writel(au_readl(SYS_WAKEMSK) | (1 << 8), SYS_WAKEMSK);
au_writel(~0, SYS_WAKESRC);
au_sync();
while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_M20);
/*
* setup counter 1 (RTC) to tick at full speed
*/
t = 0xffffff;
while ((au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_T1S) && t--)
asm volatile ("nop");
if (!t)
goto cntr_err;
/* Setup match20 to interrupt once every HZ */
last_pc0 = last_match20 = au_readl(SYS_TOYREAD);
au_writel(last_match20 + MATCH20_INC, SYS_TOYMATCH2);
au_sync();
while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_M20);
setup_irq(AU1000_TOY_MATCH2_INT, &counter0_action);
au_writel(0, SYS_RTCTRIM); /* 32.768 kHz */
au_sync();
/* We can use the real 'wait' instruction. */
allow_au1k_wait = 1;
}
t = 0xffffff;
while ((au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_C1S) && t--)
asm volatile ("nop");
if (!t)
goto cntr_err;
au_writel(0, SYS_RTCWRITE);
au_sync();
#endif
t = 0xffffff;
while ((au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_C1S) && t--)
asm volatile ("nop");
if (!t)
goto cntr_err;
/* register counter1 clocksource and event device */
clocksource_set_clock(&au1x_counter1_clocksource, 32768);
clocksource_register(&au1x_counter1_clocksource);
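/* mult/shift convert a nanosecond delta into 32.768kHz ticks for the
* clockevent core: ticks ~= (ns * mult) >> shift.
*/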
cd->shift = 32;
cd->mult = div_sc(32768, NSEC_PER_SEC, cd->shift);
cd->max_delta_ns = clockevent_delta2ns(0xffffffff, cd);
cd->min_delta_ns = clockevent_delta2ns(8, cd); /* ~0.25ms */
clockevents_register_device(cd);
setup_irq(AU1000_RTC_MATCH2_INT, &au1x_rtcmatch2_irqaction);
printk(KERN_INFO "Alchemy clocksource installed\n");
/* can now use 'wait' */
allow_au1k_wait = 1;
return;
cntr_err:
/* counters unusable, use C0 counter */
r4k_clockevent_init();
init_r4k_clocksource();
allow_au1k_wait = 0;
}

View File

@ -1,62 +0,0 @@
/*
* BRIEF MODULE DESCRIPTION
* PB1000 board setup
*
* Copyright 2001, 2008 MontaVista Software Inc.
* Author: MontaVista Software, Inc. <source@mvista.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
* NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/bootinfo.h>
#include <prom.h>
const char *get_system_type(void)
{
#ifdef CONFIG_MIPS_BOSPORUS
return "Alchemy Bosporus Gateway Reference";
#else
return "Alchemy Db1x00";
#endif
}
void __init prom_init(void)
{
unsigned char *memsize_str;
unsigned long memsize;
prom_argc = fw_arg0;
prom_argv = (char **)fw_arg1;
prom_envp = (char **)fw_arg2;
prom_init_cmdline();
memsize_str = prom_getenv("memsize");
if (!memsize_str)
memsize = 0x04000000;
else
strict_strtol(memsize_str, 0, &memsize);
add_memory_region(0, memsize, BOOT_MEM_RAM);
}

View File

@ -0,0 +1,18 @@
#
# Alchemy Develboards
#
obj-y += prom.o
obj-$(CONFIG_PM) += pm.o
obj-$(CONFIG_MIPS_PB1000) += pb1000/
obj-$(CONFIG_MIPS_PB1100) += pb1100/
obj-$(CONFIG_MIPS_PB1200) += pb1200/
obj-$(CONFIG_MIPS_PB1500) += pb1500/
obj-$(CONFIG_MIPS_PB1550) += pb1550/
obj-$(CONFIG_MIPS_DB1000) += db1x00/
obj-$(CONFIG_MIPS_DB1100) += db1x00/
obj-$(CONFIG_MIPS_DB1200) += pb1200/
obj-$(CONFIG_MIPS_DB1500) += db1x00/
obj-$(CONFIG_MIPS_DB1550) += db1x00/
obj-$(CONFIG_MIPS_BOSPORUS) += db1x00/
obj-$(CONFIG_MIPS_MIRAGE) += db1x00/

View File

@ -5,4 +5,4 @@
# Makefile for the Alchemy Semiconductor DBAu1xx0 boards.
#
lib-y := init.o board_setup.o irqmap.o
obj-y := board_setup.o irqmap.o

View File

@ -32,8 +32,20 @@
#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-db1x00/db1x00.h>
#include <prom.h>
static BCSR * const bcsr = (BCSR *)BCSR_KSEG1_ADDR;
const char *get_system_type(void)
{
#ifdef CONFIG_MIPS_BOSPORUS
return "Alchemy Bosporus Gateway Reference";
#else
return "Alchemy Db1x00";
#endif
}
void board_reset(void)
{
/* Hit BCSR.SW_RESET[RESET] */
@ -43,6 +55,31 @@ void board_reset(void)
void __init board_setup(void)
{
u32 pin_func = 0;
char *argptr;
argptr = prom_getcmdline();
#ifdef CONFIG_SERIAL_8250_CONSOLE
argptr = strstr(argptr, "console=");
if (argptr == NULL) {
argptr = prom_getcmdline();
strcat(argptr, " console=ttyS0,115200");
}
#endif
#ifdef CONFIG_FB_AU1100
argptr = strstr(argptr, "video=");
if (argptr == NULL) {
argptr = prom_getcmdline();
/* default panel */
/*strcat(argptr, " video=au1100fb:panel:Sharp_320x240_16");*/
}
#endif
#if defined(CONFIG_SOUND_AU1X00) && !defined(CONFIG_SOC_AU1000)
/* au1000 does not support vra, au1500 and au1100 do */
strcat(argptr, " au1000_audio=vra");
argptr = prom_getcmdline();
#endif
/* Not valid for Au1550 */
#if defined(CONFIG_IRDA) && \

View File

@ -27,6 +27,7 @@
*/
#include <linux/init.h>
#include <linux/interrupt.h>
#include <asm/mach-au1x00/au1000.h>
@ -66,21 +67,24 @@ struct au1xxx_irqmap __initdata au1xxx_irq_map[] = {
#ifndef CONFIG_MIPS_MIRAGE
#ifdef CONFIG_MIPS_DB1550
{ AU1000_GPIO_3, INTC_INT_LOW_LEVEL, 0 }, /* PCMCIA Card 0 IRQ# */
{ AU1000_GPIO_5, INTC_INT_LOW_LEVEL, 0 }, /* PCMCIA Card 1 IRQ# */
{ AU1000_GPIO_3, IRQF_TRIGGER_LOW, 0 }, /* PCMCIA Card 0 IRQ# */
{ AU1000_GPIO_5, IRQF_TRIGGER_LOW, 0 }, /* PCMCIA Card 1 IRQ# */
#else
{ AU1000_GPIO_0, INTC_INT_LOW_LEVEL, 0 }, /* PCMCIA Card 0 Fully_Inserted# */
{ AU1000_GPIO_1, INTC_INT_LOW_LEVEL, 0 }, /* PCMCIA Card 0 STSCHG# */
{ AU1000_GPIO_2, INTC_INT_LOW_LEVEL, 0 }, /* PCMCIA Card 0 IRQ# */
{ AU1000_GPIO_0, IRQF_TRIGGER_LOW, 0 }, /* PCMCIA Card 0 Fully_Inserted# */
{ AU1000_GPIO_1, IRQF_TRIGGER_LOW, 0 }, /* PCMCIA Card 0 STSCHG# */
{ AU1000_GPIO_2, IRQF_TRIGGER_LOW, 0 }, /* PCMCIA Card 0 IRQ# */
{ AU1000_GPIO_3, INTC_INT_LOW_LEVEL, 0 }, /* PCMCIA Card 1 Fully_Inserted# */
{ AU1000_GPIO_4, INTC_INT_LOW_LEVEL, 0 }, /* PCMCIA Card 1 STSCHG# */
{ AU1000_GPIO_5, INTC_INT_LOW_LEVEL, 0 }, /* PCMCIA Card 1 IRQ# */
{ AU1000_GPIO_3, IRQF_TRIGGER_LOW, 0 }, /* PCMCIA Card 1 Fully_Inserted# */
{ AU1000_GPIO_4, IRQF_TRIGGER_LOW, 0 }, /* PCMCIA Card 1 STSCHG# */
{ AU1000_GPIO_5, IRQF_TRIGGER_LOW, 0 }, /* PCMCIA Card 1 IRQ# */
#endif
#else
{ AU1000_GPIO_7, INTC_INT_RISE_EDGE, 0 }, /* touchscreen pen down */
{ AU1000_GPIO_7, IRQF_TRIGGER_RISING, 0 }, /* touchscreen pen down */
#endif
};
int __initdata au1xxx_nr_irqs = ARRAY_SIZE(au1xxx_irq_map);
void __init board_init_irq(void)
{
au1xxx_setup_irqmap(au1xxx_irq_map, ARRAY_SIZE(au1xxx_irq_map));
}

View File

@ -5,4 +5,4 @@
# Makefile for the Alchemy Semiconductor Pb1000 board.
#
lib-y := init.o board_setup.o irqmap.o
obj-y := board_setup.o

View File

@ -23,22 +23,48 @@
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-pb1x00/pb1000.h>
#include <prom.h>
struct au1xxx_irqmap __initdata au1xxx_irq_map[] = {
{ AU1000_GPIO_15, IRQF_TRIGGER_LOW, 0 },
};
const char *get_system_type(void)
{
return "Alchemy Pb1000";
}
void board_reset(void)
{
}
void __init board_init_irq(void)
{
au1xxx_setup_irqmap(au1xxx_irq_map, ARRAY_SIZE(au1xxx_irq_map));
}
void __init board_setup(void)
{
u32 pin_func, static_cfg0;
u32 sys_freqctrl, sys_clksrc;
u32 prid = read_c0_prid();
#ifdef CONFIG_SERIAL_8250_CONSOLE
char *argptr = prom_getcmdline();
argptr = strstr(argptr, "console=");
if (argptr == NULL) {
argptr = prom_getcmdline();
strcat(argptr, " console=ttyS0,115200");
}
#endif
/* Set AUX clock to 12 MHz * 8 = 96 MHz */
au_writel(8, SYS_AUXPLL);
au_writel(0, SYS_PINSTATERD);

View File

@ -5,4 +5,4 @@
# Makefile for the Alchemy Semiconductor Pb1100 board.
#
lib-y := init.o board_setup.o irqmap.o
obj-y := board_setup.o

View File

@ -25,19 +25,66 @@
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-pb1x00/pb1100.h>
#include <prom.h>
struct au1xxx_irqmap __initdata au1xxx_irq_map[] = {
{ AU1000_GPIO_9, IRQF_TRIGGER_LOW, 0 }, /* PCMCIA Card Fully_Inserted# */
{ AU1000_GPIO_10, IRQF_TRIGGER_LOW, 0 }, /* PCMCIA Card STSCHG# */
{ AU1000_GPIO_11, IRQF_TRIGGER_LOW, 0 }, /* PCMCIA Card IRQ# */
{ AU1000_GPIO_13, IRQF_TRIGGER_LOW, 0 }, /* DC_IRQ# */
};
const char *get_system_type(void)
{
return "Alchemy Pb1100";
}
void board_reset(void)
{
/* Hit BCSR.RST_VDDI[SOFT_RESET] */
au_writel(0x00000000, PB1100_RST_VDDI);
}
void __init board_init_irq(void)
{
au1xxx_setup_irqmap(au1xxx_irq_map, ARRAY_SIZE(au1xxx_irq_map));
}
void __init board_setup(void)
{
volatile void __iomem *base = (volatile void __iomem *)0xac000000UL;
char *argptr;
argptr = prom_getcmdline();
#ifdef CONFIG_SERIAL_8250_CONSOLE
argptr = strstr(argptr, "console=");
if (argptr == NULL) {
argptr = prom_getcmdline();
strcat(argptr, " console=ttyS0,115200");
}
#endif
#ifdef CONFIG_FB_AU1100
argptr = strstr(argptr, "video=");
if (argptr == NULL) {
argptr = prom_getcmdline();
/* default panel */
/*strcat(argptr, " video=au1100fb:panel:Sharp_320x240_16");*/
}
#endif
#if defined(CONFIG_SOUND_AU1X00) && !defined(CONFIG_SOC_AU1000)
/* au1000 does not support vra, au1500 and au1100 do */
strcat(argptr, " au1000_audio=vra");
argptr = prom_getcmdline();
#endif
/* Set AUX clock to 12 MHz * 8 = 96 MHz */
au_writel(8, SYS_AUXPLL);

View File

@ -2,7 +2,6 @@
# Makefile for the Alchemy Semiconductor Pb1200/DBAu1200 boards.
#
lib-y := init.o board_setup.o irqmap.o
obj-y += platform.o
obj-y := board_setup.o irqmap.o platform.o
EXTRA_CFLAGS += -Werror

View File

@ -30,8 +30,11 @@
#include <prom.h>
#include <au1xxx.h>
extern void _board_init_irq(void);
extern void (*board_init_irq)(void);
const char *get_system_type(void)
{
return "Alchemy Pb1200";
}
void board_reset(void)
{
@ -41,7 +44,19 @@ void board_reset(void)
void __init board_setup(void)
{
char *argptr = NULL;
char *argptr;
argptr = prom_getcmdline();
#ifdef CONFIG_SERIAL_8250_CONSOLE
argptr = strstr(argptr, "console=");
if (argptr == NULL) {
argptr = prom_getcmdline();
strcat(argptr, " console=ttyS0,115200");
}
#endif
#ifdef CONFIG_FB_AU1200
strcat(argptr, " video=au1200fb:panel:bs");
#endif
#if 0
{
@ -99,16 +114,6 @@ void __init board_setup(void)
}
#endif
#ifdef CONFIG_FB_AU1200
argptr = prom_getcmdline();
#ifdef CONFIG_MIPS_PB1200
strcat(argptr, " video=au1200fb:panel:bs");
#endif
#ifdef CONFIG_MIPS_DB1200
strcat(argptr, " video=au1200fb:panel:bs");
#endif
#endif
/*
* The Pb1200 development board uses external MUX for PSC0 to
* support SMB/SPI. bcsr->resets bit 12: 0=SMB 1=SPI
@ -124,9 +129,6 @@ void __init board_setup(void)
#ifdef CONFIG_MIPS_DB1200
printk(KERN_INFO "AMD Alchemy Db1200 Board\n");
#endif
/* Setup Pb1200 External Interrupt Controller */
board_init_irq = _board_init_irq;
}
int board_au1200fb_panel(void)

View File

@ -40,91 +40,65 @@
struct au1xxx_irqmap __initdata au1xxx_irq_map[] = {
/* This is external interrupt cascade */
{ AU1000_GPIO_7, INTC_INT_LOW_LEVEL, 0 },
{ AU1000_GPIO_7, IRQF_TRIGGER_LOW, 0 },
};
int __initdata au1xxx_nr_irqs = ARRAY_SIZE(au1xxx_irq_map);
/*
* Support for External interrupts on the Pb1200 Development platform.
*/
static volatile int pb1200_cascade_en;
irqreturn_t pb1200_cascade_handler(int irq, void *dev_id)
static void pb1200_cascade_handler(unsigned int irq, struct irq_desc *d)
{
unsigned short bisr = bcsr->int_status;
int extirq_nr = 0;
/* Clear all the edge interrupts. This has no effect on level. */
bcsr->int_status = bisr;
for ( ; bisr; bisr &= bisr - 1) {
extirq_nr = PB1200_INT_BEGIN + __ffs(bisr);
/* Ack and dispatch IRQ */
do_IRQ(extirq_nr);
}
return IRQ_RETVAL(1);
for ( ; bisr; bisr &= bisr - 1)
generic_handle_irq(PB1200_INT_BEGIN + __ffs(bisr));
}
inline void pb1200_enable_irq(unsigned int irq_nr)
{
bcsr->intset_mask = 1 << (irq_nr - PB1200_INT_BEGIN);
bcsr->intset = 1 << (irq_nr - PB1200_INT_BEGIN);
}
inline void pb1200_disable_irq(unsigned int irq_nr)
/* NOTE: both the enable and mask bits must be cleared, otherwise the
* CPLD generates tons of spurious interrupts (at least on the DB1200).
*/
static void pb1200_mask_irq(unsigned int irq_nr)
{
bcsr->intclr_mask = 1 << (irq_nr - PB1200_INT_BEGIN);
bcsr->intclr = 1 << (irq_nr - PB1200_INT_BEGIN);
au_sync();
}
static unsigned int pb1200_setup_cascade(void)
static void pb1200_maskack_irq(unsigned int irq_nr)
{
return request_irq(AU1000_GPIO_7, &pb1200_cascade_handler,
0, "Pb1200 Cascade", &pb1200_cascade_handler);
bcsr->intclr_mask = 1 << (irq_nr - PB1200_INT_BEGIN);
bcsr->intclr = 1 << (irq_nr - PB1200_INT_BEGIN);
bcsr->int_status = 1 << (irq_nr - PB1200_INT_BEGIN); /* ack */
au_sync();
}
static unsigned int pb1200_startup_irq(unsigned int irq)
static void pb1200_unmask_irq(unsigned int irq_nr)
{
if (++pb1200_cascade_en == 1) {
int res;
res = pb1200_setup_cascade();
if (res)
return res;
}
pb1200_enable_irq(irq);
return 0;
bcsr->intset = 1 << (irq_nr - PB1200_INT_BEGIN);
bcsr->intset_mask = 1 << (irq_nr - PB1200_INT_BEGIN);
au_sync();
}
static void pb1200_shutdown_irq(unsigned int irq)
{
pb1200_disable_irq(irq);
if (--pb1200_cascade_en == 0)
free_irq(AU1000_GPIO_7, &pb1200_cascade_handler);
}
static struct irq_chip external_irq_type = {
static struct irq_chip pb1200_cpld_irq_type = {
#ifdef CONFIG_MIPS_PB1200
.name = "Pb1200 Ext",
#endif
#ifdef CONFIG_MIPS_DB1200
.name = "Db1200 Ext",
#endif
.startup = pb1200_startup_irq,
.shutdown = pb1200_shutdown_irq,
.ack = pb1200_disable_irq,
.mask = pb1200_disable_irq,
.mask_ack = pb1200_disable_irq,
.unmask = pb1200_enable_irq,
.mask = pb1200_mask_irq,
.mask_ack = pb1200_maskack_irq,
.unmask = pb1200_unmask_irq,
};
void _board_init_irq(void)
void __init board_init_irq(void)
{
unsigned int irq;
au1xxx_setup_irqmap(au1xxx_irq_map, ARRAY_SIZE(au1xxx_irq_map));
#ifdef CONFIG_MIPS_PB1200
/* We have a problem with CPLD rev 3. */
if (((bcsr->whoami & BCSR_WHOAMI_CPLD) >> 4) <= 3) {
@ -146,15 +120,15 @@ void _board_init_irq(void)
panic("Game over. Your score is 0.");
}
#endif
/* mask & disable & ack all */
bcsr->intclr_mask = 0xffff;
bcsr->intclr = 0xffff;
bcsr->int_status = 0xffff;
au_sync();
for (irq = PB1200_INT_BEGIN; irq <= PB1200_INT_END; irq++) {
set_irq_chip_and_handler(irq, &external_irq_type,
handle_level_irq);
pb1200_disable_irq(irq);
}
for (irq = PB1200_INT_BEGIN; irq <= PB1200_INT_END; irq++)
set_irq_chip_and_handler_name(irq, &pb1200_cpld_irq_type,
handle_level_irq, "level");
/*
* GPIO_7 can not be hooked here, so it is hooked upon first
* request of any source attached to the cascade.
*/
set_irq_chained_handler(AU1000_GPIO_7, pb1200_cascade_handler);
}

View File

@ -5,4 +5,4 @@
# Makefile for the Alchemy Semiconductor Pb1500 board.
#
lib-y := init.o board_setup.o irqmap.o
obj-y := board_setup.o

View File

@ -25,20 +25,64 @@
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-pb1x00/pb1500.h>
#include <prom.h>
char irq_tab_alchemy[][5] __initdata = {
[12] = { -1, INTA, INTX, INTX, INTX }, /* IDSEL 12 - HPT370 */
[13] = { -1, INTA, INTB, INTC, INTD }, /* IDSEL 13 - PCI slot */
};
struct au1xxx_irqmap __initdata au1xxx_irq_map[] = {
{ AU1500_GPIO_204, IRQF_TRIGGER_HIGH, 0 },
{ AU1500_GPIO_201, IRQF_TRIGGER_LOW, 0 },
{ AU1500_GPIO_202, IRQF_TRIGGER_LOW, 0 },
{ AU1500_GPIO_203, IRQF_TRIGGER_LOW, 0 },
{ AU1500_GPIO_205, IRQF_TRIGGER_LOW, 0 },
};
const char *get_system_type(void)
{
return "Alchemy Pb1500";
}
void board_reset(void)
{
/* Hit BCSR.RST_VDDI[SOFT_RESET] */
au_writel(0x00000000, PB1500_RST_VDDI);
}
void __init board_init_irq(void)
{
au1xxx_setup_irqmap(au1xxx_irq_map, ARRAY_SIZE(au1xxx_irq_map));
}
void __init board_setup(void)
{
u32 pin_func;
u32 sys_freqctrl, sys_clksrc;
char *argptr;
argptr = prom_getcmdline();
#ifdef CONFIG_SERIAL_8250_CONSOLE
argptr = strstr(argptr, "console=");
if (argptr == NULL) {
argptr = prom_getcmdline();
strcat(argptr, " console=ttyS0,115200");
}
#endif
#if defined(CONFIG_SOUND_AU1X00) && !defined(CONFIG_SOC_AU1000)
/* au1000 does not support vra, au1500 and au1100 do */
strcat(argptr, " au1000_audio=vra");
argptr = prom_getcmdline();
#endif
sys_clksrc = sys_freqctrl = pin_func = 0;
/* Set AUX clock to 12 MHz * 8 = 96 MHz */

View File

@ -5,4 +5,4 @@
# Makefile for the Alchemy Semiconductor Pb1550 board.
#
lib-y := init.o board_setup.o irqmap.o
obj-y := board_setup.o

View File

@ -28,20 +28,54 @@
*/
#include <linux/init.h>
#include <linux/interrupt.h>
#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-pb1x00/pb1550.h>
#include <prom.h>
char irq_tab_alchemy[][5] __initdata = {
[12] = { -1, INTB, INTC, INTD, INTA }, /* IDSEL 12 - PCI slot 2 (left) */
[13] = { -1, INTA, INTB, INTC, INTD }, /* IDSEL 13 - PCI slot 1 (right) */
};
struct au1xxx_irqmap __initdata au1xxx_irq_map[] = {
{ AU1000_GPIO_0, IRQF_TRIGGER_LOW, 0 },
{ AU1000_GPIO_1, IRQF_TRIGGER_LOW, 0 },
};
const char *get_system_type(void)
{
return "Alchemy Pb1550";
}
void board_reset(void)
{
/* Hit BCSR.SYSTEM[RESET] */
au_writew(au_readw(0xAF00001C) & ~BCSR_SYSTEM_RESET, 0xAF00001C);
}
void __init board_init_irq(void)
{
au1xxx_setup_irqmap(au1xxx_irq_map, ARRAY_SIZE(au1xxx_irq_map));
}
void __init board_setup(void)
{
u32 pin_func;
#ifdef CONFIG_SERIAL_8250_CONSOLE
char *argptr;
argptr = prom_getcmdline();
argptr = strstr(argptr, "console=");
if (argptr == NULL) {
argptr = prom_getcmdline();
strcat(argptr, " console=ttyS0,115200");
}
#endif
/*
* Enable PSC1 SYNC for AC'97. Normally done in the audio driver,
* but it is board specific code, so put it here.

View File

@ -0,0 +1,229 @@
/*
* Alchemy Development Board example suspend userspace interface.
*
* (c) 2008 Manuel Lauss <mano@roarinelk.homelinux.net>
*/
#include <linux/init.h>
#include <linux/kobject.h>
#include <linux/suspend.h>
#include <linux/sysfs.h>
#include <asm/mach-au1x00/au1000.h>
/*
* Generic suspend userspace interface for Alchemy development boards.
* This code exports a few sysfs nodes under /sys/power/db1x/ which
* can be used by userspace to en/disable all au1x-provided wakeup
* sources and configure the timeout after which the TOYMATCH2 irq
* is to trigger a wakeup.
*/
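/*
 * Rough userspace sketch (not part of this file): arm the GPIO0 and
 * timer wakeup sources through the sysfs nodes created below, then
 * enter suspend via the standard /sys/power/state interface.  Paths
 * follow the attribute names defined further down; error handling is
 * omitted for brevity.
 *
 *	#include <stdio.h>
 *
 *	static void put(const char *path, const char *val)
 *	{
 *		FILE *f = fopen(path, "w");
 *		if (f) {
 *			fputs(val, f);
 *			fclose(f);
 *		}
 *	}
 *
 *	int main(void)
 *	{
 *		put("/sys/power/db1x/gpio0", "1");          // wake on GPIO0
 *		put("/sys/power/db1x/timer_timeout", "30"); // wake after 30 s
 *		put("/sys/power/db1x/timer", "1");          // enable timer wakeup
 *		put("/sys/power/state", "mem");             // suspend-to-RAM
 *		return 0;
 *	}
 */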
static unsigned long db1x_pm_sleep_secs;
static unsigned long db1x_pm_wakemsk;
static unsigned long db1x_pm_last_wakesrc;
static int db1x_pm_enter(suspend_state_t state)
{
/* enable GPIO based wakeup */
au_writel(1, SYS_PININPUTEN);
/* clear and setup wake cause and source */
au_writel(0, SYS_WAKEMSK);
au_sync();
au_writel(0, SYS_WAKESRC);
au_sync();
au_writel(db1x_pm_wakemsk, SYS_WAKEMSK);
au_sync();
/* setup 1Hz-timer-based wakeup: wait for reg access */
while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_M20)
asm volatile ("nop");
au_writel(au_readl(SYS_TOYREAD) + db1x_pm_sleep_secs, SYS_TOYMATCH2);
au_sync();
/* wait for value to really hit the register */
while (au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_M20)
asm volatile ("nop");
/* ...and now the sandman can come! */
au_sleep();
return 0;
}
static int db1x_pm_begin(suspend_state_t state)
{
if (!db1x_pm_wakemsk) {
printk(KERN_ERR "db1x: no wakeup source activated!\n");
return -EINVAL;
}
return 0;
}
static void db1x_pm_end(void)
{
/* read and store wakeup source, then clear the register. To
* be able to clear it, WAKEMSK must be cleared first.
*/
db1x_pm_last_wakesrc = au_readl(SYS_WAKESRC);
au_writel(0, SYS_WAKEMSK);
au_writel(0, SYS_WAKESRC);
au_sync();
}
static struct platform_suspend_ops db1x_pm_ops = {
.valid = suspend_valid_only_mem,
.begin = db1x_pm_begin,
.enter = db1x_pm_enter,
.end = db1x_pm_end,
};
#define ATTRCMP(x) (0 == strcmp(attr->attr.name, #x))
static ssize_t db1x_pmattr_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
int idx;
if (ATTRCMP(timer_timeout))
return sprintf(buf, "%lu\n", db1x_pm_sleep_secs);
else if (ATTRCMP(timer))
return sprintf(buf, "%u\n",
!!(db1x_pm_wakemsk & SYS_WAKEMSK_M2));
else if (ATTRCMP(wakesrc))
return sprintf(buf, "%lu\n", db1x_pm_last_wakesrc);
else if (ATTRCMP(gpio0) || ATTRCMP(gpio1) || ATTRCMP(gpio2) ||
ATTRCMP(gpio3) || ATTRCMP(gpio4) || ATTRCMP(gpio5) ||
ATTRCMP(gpio6) || ATTRCMP(gpio7)) {
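/* attribute names are "gpio0".."gpio7"; character 4 encodes the index */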
idx = (attr->attr.name)[4] - '0';
return sprintf(buf, "%d\n",
!!(db1x_pm_wakemsk & SYS_WAKEMSK_GPIO(idx)));
} else if (ATTRCMP(wakemsk)) {
return sprintf(buf, "%08lx\n", db1x_pm_wakemsk);
}
return -ENOENT;
}
static ssize_t db1x_pmattr_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *instr,
size_t bytes)
{
unsigned long l;
int tmp;
if (ATTRCMP(timer_timeout)) {
tmp = strict_strtoul(instr, 0, &l);
if (tmp)
return tmp;
db1x_pm_sleep_secs = l;
} else if (ATTRCMP(timer)) {
if (instr[0] != '0')
db1x_pm_wakemsk |= SYS_WAKEMSK_M2;
else
db1x_pm_wakemsk &= ~SYS_WAKEMSK_M2;
} else if (ATTRCMP(gpio0) || ATTRCMP(gpio1) || ATTRCMP(gpio2) ||
ATTRCMP(gpio3) || ATTRCMP(gpio4) || ATTRCMP(gpio5) ||
ATTRCMP(gpio6) || ATTRCMP(gpio7)) {
tmp = (attr->attr.name)[4] - '0';
if (instr[0] != '0') {
db1x_pm_wakemsk |= SYS_WAKEMSK_GPIO(tmp);
} else {
db1x_pm_wakemsk &= ~SYS_WAKEMSK_GPIO(tmp);
}
} else if (ATTRCMP(wakemsk)) {
tmp = strict_strtoul(instr, 0, &l);
if (tmp)
return tmp;
db1x_pm_wakemsk = l & 0x0000003f;
} else
bytes = -ENOENT;
return bytes;
}
#define ATTR(x) \
static struct kobj_attribute x##_attribute = \
__ATTR(x, 0664, db1x_pmattr_show, \
db1x_pmattr_store);
ATTR(gpio0) /* GPIO-based wakeup enable */
ATTR(gpio1)
ATTR(gpio2)
ATTR(gpio3)
ATTR(gpio4)
ATTR(gpio5)
ATTR(gpio6)
ATTR(gpio7)
ATTR(timer) /* TOYMATCH2-based wakeup enable */
ATTR(timer_timeout) /* timer-based wakeup timeout value, in seconds */
ATTR(wakesrc) /* contents of SYS_WAKESRC after last wakeup */
ATTR(wakemsk) /* direct access to SYS_WAKEMSK */
#define ATTR_LIST(x) & x ## _attribute.attr
static struct attribute *db1x_pmattrs[] = {
ATTR_LIST(gpio0),
ATTR_LIST(gpio1),
ATTR_LIST(gpio2),
ATTR_LIST(gpio3),
ATTR_LIST(gpio4),
ATTR_LIST(gpio5),
ATTR_LIST(gpio6),
ATTR_LIST(gpio7),
ATTR_LIST(timer),
ATTR_LIST(timer_timeout),
ATTR_LIST(wakesrc),
ATTR_LIST(wakemsk),
NULL, /* terminator */
};
static struct attribute_group db1x_pmattr_group = {
.name = "db1x",
.attrs = db1x_pmattrs,
};
/*
* Initialize suspend interface
*/
static int __init pm_init(void)
{
/* init TOY to tick at 1Hz if not already done. No need to wait
* for confirmation since there's plenty of time from here to
* the next suspend cycle.
*/
if (au_readl(SYS_TOYTRIM) != 32767) {
au_writel(32767, SYS_TOYTRIM);
au_sync();
}
db1x_pm_last_wakesrc = au_readl(SYS_WAKESRC);
au_writel(0, SYS_WAKESRC);
au_sync();
au_writel(0, SYS_WAKEMSK);
au_sync();
suspend_set_ops(&db1x_pm_ops);
return sysfs_create_group(power_kobj, &db1x_pmattr_group);
}
late_initcall(pm_init);

View File

@ -1,9 +1,9 @@
/*
* Common code used by all Alchemy develboards.
*
* BRIEF MODULE DESCRIPTION
* Pb1550 board setup
* Extracted from files which had this to say:
*
* Copyright 2001, 2008 MontaVista Software Inc.
* Copyright 2000, 2008 MontaVista Software Inc.
* Author: MontaVista Software, Inc. <source@mvista.com>
*
* This program is free software; you can redistribute it and/or modify it
@ -29,15 +29,19 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/bootinfo.h>
#include <asm/mach-au1x00/au1000.h>
#include <prom.h>
const char *get_system_type(void)
{
return "Alchemy Pb1550";
}
#if defined(CONFIG_MIPS_PB1000) || defined(CONFIG_MIPS_DB1000) || \
defined(CONFIG_MIPS_PB1100) || defined(CONFIG_MIPS_DB1100) || \
defined(CONFIG_MIPS_PB1500) || defined(CONFIG_MIPS_DB1500) || \
defined(CONFIG_MIPS_BOSPORUS) || defined(CONFIG_MIPS_MIRAGE)
#define ALCHEMY_BOARD_DEFAULT_MEMSIZE 0x04000000
#else /* Au1550/Au1200-based develboards */
#define ALCHEMY_BOARD_DEFAULT_MEMSIZE 0x08000000
#endif
void __init prom_init(void)
{
@ -51,8 +55,8 @@ void __init prom_init(void)
prom_init_cmdline();
memsize_str = prom_getenv("memsize");
if (!memsize_str)
memsize = 0x08000000;
memsize = ALCHEMY_BOARD_DEFAULT_MEMSIZE;
else
strict_strtol(memsize_str, 0, &memsize);
strict_strtoul(memsize_str, 0, &memsize);
add_memory_region(0, memsize, BOOT_MEM_RAM);
}

View File

@ -32,6 +32,8 @@
#include <asm/mach-au1x00/au1000.h>
#include <prom.h>
extern int (*board_pci_idsel)(unsigned int devsel, int assert);
int mtx1_pci_idsel(unsigned int devsel, int assert);
@ -43,6 +45,16 @@ void board_reset(void)
void __init board_setup(void)
{
#ifdef CONFIG_SERIAL_8250_CONSOLE
char *argptr;
argptr = prom_getcmdline();
argptr = strstr(argptr, "console=");
if (argptr == NULL) {
argptr = prom_getcmdline();
strcat(argptr, " console=ttyS0,115200");
}
#endif
#if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
/* Enable USB power switch */
au_writel(au_readl(GPIO2_DIR) | 0x10, GPIO2_DIR);

View File

@ -55,6 +55,6 @@ void __init prom_init(void)
if (!memsize_str)
memsize = 0x04000000;
else
strict_strtol(memsize_str, 0, &memsize);
strict_strtoul(memsize_str, 0, &memsize);
add_memory_region(0, memsize, BOOT_MEM_RAM);
}

View File

@ -27,7 +27,7 @@
*/
#include <linux/init.h>
#include <linux/interrupt.h>
#include <asm/mach-au1x00/au1000.h>
char irq_tab_alchemy[][5] __initdata = {
@ -42,11 +42,15 @@ char irq_tab_alchemy[][5] __initdata = {
};
struct au1xxx_irqmap __initdata au1xxx_irq_map[] = {
{ AU1500_GPIO_204, INTC_INT_HIGH_LEVEL, 0 },
{ AU1500_GPIO_201, INTC_INT_LOW_LEVEL, 0 },
{ AU1500_GPIO_202, INTC_INT_LOW_LEVEL, 0 },
{ AU1500_GPIO_203, INTC_INT_LOW_LEVEL, 0 },
{ AU1500_GPIO_205, INTC_INT_LOW_LEVEL, 0 },
{ AU1500_GPIO_204, IRQF_TRIGGER_HIGH, 0 },
{ AU1500_GPIO_201, IRQF_TRIGGER_LOW, 0 },
{ AU1500_GPIO_202, IRQF_TRIGGER_LOW, 0 },
{ AU1500_GPIO_203, IRQF_TRIGGER_LOW, 0 },
{ AU1500_GPIO_205, IRQF_TRIGGER_LOW, 0 },
};
int __initdata au1xxx_nr_irqs = ARRAY_SIZE(au1xxx_irq_map);
void __init board_init_irq(void)
{
au1xxx_setup_irqmap(au1xxx_irq_map, ARRAY_SIZE(au1xxx_irq_map));
}

View File

@ -1,57 +0,0 @@
/*
* BRIEF MODULE DESCRIPTION
* Pb1000 board setup
*
* Copyright 2001, 2008 MontaVista Software Inc.
* Author: MontaVista Software, Inc. <source@mvista.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
* NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/bootinfo.h>
#include <prom.h>
const char *get_system_type(void)
{
return "Alchemy Pb1000";
}
void __init prom_init(void)
{
unsigned char *memsize_str;
unsigned long memsize;
prom_argc = (int)fw_arg0;
prom_argv = (char **)fw_arg1;
prom_envp = (char **)fw_arg2;
prom_init_cmdline();
memsize_str = prom_getenv("memsize");
if (!memsize_str)
memsize = 0x04000000;
else
strict_strtol(memsize_str, 0, &memsize);
add_memory_region(0, memsize, BOOT_MEM_RAM);
}

View File

@ -1,38 +0,0 @@
/*
* BRIEF MODULE DESCRIPTION
* Au1xxx irq map table
*
* Copyright 2003 Embedded Edge, LLC
* dan@embeddededge.com
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
* NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/init.h>
#include <linux/interrupt.h>
#include <asm/mach-au1x00/au1000.h>
struct au1xxx_irqmap __initdata au1xxx_irq_map[] = {
{ AU1000_GPIO_15, INTC_INT_LOW_LEVEL, 0 },
};
int __initdata au1xxx_nr_irqs = ARRAY_SIZE(au1xxx_irq_map);

View File

@ -1,60 +0,0 @@
/*
*
* BRIEF MODULE DESCRIPTION
* Pb1100 board setup
*
* Copyright 2002, 2008 MontaVista Software Inc.
* Author: MontaVista Software, Inc. <source@mvista.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
* NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/bootinfo.h>
#include <prom.h>
const char *get_system_type(void)
{
return "Alchemy Pb1100";
}
void __init prom_init(void)
{
unsigned char *memsize_str;
unsigned long memsize;
prom_argc = fw_arg0;
prom_argv = (char **)fw_arg1;
prom_envp = (char **)fw_arg3;
prom_init_cmdline();
memsize_str = prom_getenv("memsize");
if (!memsize_str)
memsize = 0x04000000;
else
strict_strtol(memsize_str, 0, &memsize);
add_memory_region(0, memsize, BOOT_MEM_RAM);
}

View File

@ -1,40 +0,0 @@
/*
* BRIEF MODULE DESCRIPTION
* Au1xx0 IRQ map table
*
* Copyright 2003 Embedded Edge, LLC
* dan@embeddededge.com
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
* NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/init.h>
#include <asm/mach-au1x00/au1000.h>
struct au1xxx_irqmap __initdata au1xxx_irq_map[] = {
{ AU1000_GPIO_9, INTC_INT_LOW_LEVEL, 0 }, /* PCMCIA Card Fully_Inserted# */
{ AU1000_GPIO_10, INTC_INT_LOW_LEVEL, 0 }, /* PCMCIA Card STSCHG# */
{ AU1000_GPIO_11, INTC_INT_LOW_LEVEL, 0 }, /* PCMCIA Card IRQ# */
{ AU1000_GPIO_13, INTC_INT_LOW_LEVEL, 0 }, /* DC_IRQ# */
};
int __initdata au1xxx_nr_irqs = ARRAY_SIZE(au1xxx_irq_map);

View File

@ -1,58 +0,0 @@
/*
*
* BRIEF MODULE DESCRIPTION
* PB1200 board setup
*
* Copyright 2001, 2008 MontaVista Software Inc.
* Author: MontaVista Software, Inc. <source@mvista.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
* NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/bootinfo.h>
#include <prom.h>
const char *get_system_type(void)
{
return "Alchemy Pb1200";
}
void __init prom_init(void)
{
unsigned char *memsize_str;
unsigned long memsize;
prom_argc = (int)fw_arg0;
prom_argv = (char **)fw_arg1;
prom_envp = (char **)fw_arg2;
prom_init_cmdline();
memsize_str = prom_getenv("memsize");
if (!memsize_str)
memsize = 0x08000000;
else
strict_strtol(memsize_str, 0, &memsize);
add_memory_region(0, memsize, BOOT_MEM_RAM);
}

View File

@ -1,58 +0,0 @@
/*
*
* BRIEF MODULE DESCRIPTION
* Pb1500 board setup
*
* Copyright 2001, 2008 MontaVista Software Inc.
* Author: MontaVista Software, Inc. <source@mvista.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
* NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/bootinfo.h>
#include <prom.h>
const char *get_system_type(void)
{
return "Alchemy Pb1500";
}
void __init prom_init(void)
{
unsigned char *memsize_str;
unsigned long memsize;
prom_argc = (int)fw_arg0;
prom_argv = (char **)fw_arg1;
prom_envp = (char **)fw_arg2;
prom_init_cmdline();
memsize_str = prom_getenv("memsize");
if (!memsize_str)
memsize = 0x04000000;
else
strict_strtol(memsize_str, 0, &memsize);
add_memory_region(0, memsize, BOOT_MEM_RAM);
}

View File

@ -1,46 +0,0 @@
/*
* BRIEF MODULE DESCRIPTION
* Au1xxx irq map table
*
* Copyright 2003 Embedded Edge, LLC
* dan@embeddededge.com
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
* NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/init.h>
#include <asm/mach-au1x00/au1000.h>
char irq_tab_alchemy[][5] __initdata = {
[12] = { -1, INTA, INTX, INTX, INTX }, /* IDSEL 12 - HPT370 */
[13] = { -1, INTA, INTB, INTC, INTD }, /* IDSEL 13 - PCI slot */
};
struct au1xxx_irqmap __initdata au1xxx_irq_map[] = {
{ AU1500_GPIO_204, INTC_INT_HIGH_LEVEL, 0 },
{ AU1500_GPIO_201, INTC_INT_LOW_LEVEL, 0 },
{ AU1500_GPIO_202, INTC_INT_LOW_LEVEL, 0 },
{ AU1500_GPIO_203, INTC_INT_LOW_LEVEL, 0 },
{ AU1500_GPIO_205, INTC_INT_LOW_LEVEL, 0 },
};
int __initdata au1xxx_nr_irqs = ARRAY_SIZE(au1xxx_irq_map);

View File

@ -1,43 +0,0 @@
/*
* BRIEF MODULE DESCRIPTION
* Au1xx0 IRQ map table
*
* Copyright 2003 Embedded Edge, LLC
* dan@embeddededge.com
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
* NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/init.h>
#include <asm/mach-au1x00/au1000.h>
char irq_tab_alchemy[][5] __initdata = {
[12] = { -1, INTB, INTC, INTD, INTA }, /* IDSEL 12 - PCI slot 2 (left) */
[13] = { -1, INTA, INTB, INTC, INTD }, /* IDSEL 13 - PCI slot 1 (right) */
};
struct au1xxx_irqmap __initdata au1xxx_irq_map[] = {
{ AU1000_GPIO_0, INTC_INT_LOW_LEVEL, 0 },
{ AU1000_GPIO_1, INTC_INT_LOW_LEVEL, 0 },
};
int __initdata au1xxx_nr_irqs = ARRAY_SIZE(au1xxx_irq_map);

View File

@ -28,6 +28,8 @@
#include <asm/mach-au1x00/au1000.h>
#include <prom.h>
void board_reset(void)
{
/* Hit BCSR.SYSTEM_CONTROL[SW_RST] */
@ -38,6 +40,16 @@ void __init board_setup(void)
{
u32 pin_func;
#ifdef CONFIG_SERIAL_8250_CONSOLE
char *argptr;
argptr = prom_getcmdline();
argptr = strstr(argptr, "console=");
if (argptr == NULL) {
argptr = prom_getcmdline();
strcat(argptr, " console=ttyS0,115200");
}
#endif
/* Set multiple use pins (UART3/GPIO) to UART (it's used as UART too) */
pin_func = au_readl(SYS_PINFUNC) & ~SYS_PF_UR3;
pin_func |= SYS_PF_UR3;

View File

@ -53,6 +53,6 @@ void __init prom_init(void)
if (!memsize_str)
memsize = 0x04000000;
else
strict_strtol(memsize_str, 0, &memsize);
strict_strtoul(memsize_str, 0, &memsize);
add_memory_region(0, memsize, BOOT_MEM_RAM);
}

View File

@ -27,23 +27,26 @@
*/
#include <linux/init.h>
#include <linux/interrupt.h>
#include <asm/mach-au1x00/au1000.h>
struct au1xxx_irqmap __initdata au1xxx_irq_map[] = {
{ AU1500_GPIO_204, INTC_INT_HIGH_LEVEL, 0 },
{ AU1500_GPIO_201, INTC_INT_LOW_LEVEL, 0 },
{ AU1500_GPIO_202, INTC_INT_LOW_LEVEL, 0 },
{ AU1500_GPIO_203, INTC_INT_LOW_LEVEL, 0 },
{ AU1500_GPIO_205, INTC_INT_LOW_LEVEL, 0 },
{ AU1500_GPIO_207, INTC_INT_LOW_LEVEL, 0 },
{ AU1500_GPIO_204, IRQF_TRIGGER_HIGH, 0 },
{ AU1500_GPIO_201, IRQF_TRIGGER_LOW, 0 },
{ AU1500_GPIO_202, IRQF_TRIGGER_LOW, 0 },
{ AU1500_GPIO_203, IRQF_TRIGGER_LOW, 0 },
{ AU1500_GPIO_205, IRQF_TRIGGER_LOW, 0 },
{ AU1500_GPIO_207, IRQF_TRIGGER_LOW, 0 },
{ AU1000_GPIO_0, INTC_INT_LOW_LEVEL, 0 },
{ AU1000_GPIO_1, INTC_INT_LOW_LEVEL, 0 },
{ AU1000_GPIO_2, INTC_INT_LOW_LEVEL, 0 },
{ AU1000_GPIO_3, INTC_INT_LOW_LEVEL, 0 },
{ AU1000_GPIO_4, INTC_INT_LOW_LEVEL, 0 }, /* CF interrupt */
{ AU1000_GPIO_5, INTC_INT_LOW_LEVEL, 0 },
{ AU1000_GPIO_0, IRQF_TRIGGER_LOW, 0 },
{ AU1000_GPIO_1, IRQF_TRIGGER_LOW, 0 },
{ AU1000_GPIO_2, IRQF_TRIGGER_LOW, 0 },
{ AU1000_GPIO_3, IRQF_TRIGGER_LOW, 0 },
{ AU1000_GPIO_4, IRQF_TRIGGER_LOW, 0 }, /* CF interrupt */
{ AU1000_GPIO_5, IRQF_TRIGGER_LOW, 0 },
};
int __initdata au1xxx_nr_irqs = ARRAY_SIZE(au1xxx_irq_map);
void __init board_init_irq(void)
{
au1xxx_setup_irqmap(au1xxx_irq_map, ARRAY_SIZE(au1xxx_irq_map));
}

View File

@ -0,0 +1,85 @@
config CAVIUM_OCTEON_SPECIFIC_OPTIONS
bool "Enable Octeon specific options"
depends on CPU_CAVIUM_OCTEON
default "y"
config CAVIUM_OCTEON_2ND_KERNEL
bool "Build the kernel to be used as a 2nd kernel on the same chip"
depends on CAVIUM_OCTEON_SPECIFIC_OPTIONS
default "n"
help
This option configures this kernel to be linked at a different
address and use the 2nd uart for output. This allows a kernel built
with this option to be run at the same time as one built without this
option.
config CAVIUM_OCTEON_HW_FIX_UNALIGNED
bool "Enable hardware fixups of unaligned loads and stores"
depends on CAVIUM_OCTEON_SPECIFIC_OPTIONS
default "y"
help
Configure the Octeon hardware to automatically fix unaligned loads
and stores. Normally unaligned accesses are fixed using a kernel
exception handler. This option enables the hardware automatic fixups,
which require only an extra 3 cycles. Disable this option if you
are running code that relies on address exceptions on unaligned
accesses.
config CAVIUM_OCTEON_CVMSEG_SIZE
int "Number of L1 cache lines reserved for CVMSEG memory"
depends on CAVIUM_OCTEON_SPECIFIC_OPTIONS
range 0 54
default 1
help
CVMSEG LM is a segment that accesses portions of the dcache as a
local memory; the larger CVMSEG is, the smaller the cache is.
This selects the size of CVMSEG LM, which is in cache blocks. The
legal range is from zero to 54 cache blocks, i.e. CVMSEG LM is
between zero and 6912 bytes (54 blocks of 128 bytes each).
config CAVIUM_OCTEON_LOCK_L2
bool "Lock often used kernel code in the L2"
depends on CAVIUM_OCTEON_SPECIFIC_OPTIONS
default "y"
help
Enable locking parts of the kernel into the L2 cache.
config CAVIUM_OCTEON_LOCK_L2_TLB
bool "Lock the TLB handler in L2"
depends on CAVIUM_OCTEON_LOCK_L2
default "y"
help
Lock the low level TLB fast path into L2.
config CAVIUM_OCTEON_LOCK_L2_EXCEPTION
bool "Lock the exception handler in L2"
depends on CAVIUM_OCTEON_LOCK_L2
default "y"
help
Lock the low level exception handler into L2.
config CAVIUM_OCTEON_LOCK_L2_LOW_LEVEL_INTERRUPT
bool "Lock the interrupt handler in L2"
depends on CAVIUM_OCTEON_LOCK_L2
default "y"
help
Lock the low level interrupt handler into L2.
config CAVIUM_OCTEON_LOCK_L2_INTERRUPT
bool "Lock the 2nd level interrupt handler in L2"
depends on CAVIUM_OCTEON_LOCK_L2
default "y"
help
Lock the 2nd level interrupt handler in L2.
config CAVIUM_OCTEON_LOCK_L2_MEMCPY
bool "Lock memcpy() in L2"
depends on CAVIUM_OCTEON_LOCK_L2
default "y"
help
Lock the kernel's implementation of memcpy() into L2.
config ARCH_SPARSEMEM_ENABLE
def_bool y
select SPARSEMEM_STATIC
depends on CPU_CAVIUM_OCTEON

View File

@ -0,0 +1,16 @@
#
# Makefile for the Cavium Octeon specific kernel interface routines
# under Linux.
#
# This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive
# for more details.
#
# Copyright (C) 2005-2008 Cavium Networks
#
obj-y := setup.o serial.o octeon-irq.o csrc-octeon.o
obj-y += dma-octeon.o flash_setup.o
obj-y += octeon-memcpy.o
obj-$(CONFIG_SMP) += smp.o

View File

@ -0,0 +1,58 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2007 by Ralf Baechle
*/
#include <linux/clocksource.h>
#include <linux/init.h>
#include <asm/time.h>
#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-ipd-defs.h>
/*
* Set the current core's cvmcount counter to the value of the
* IPD_CLK_COUNT. We do this on all cores as they are brought
* on-line. This allows for a read from a local cpu register to
* access a synchronized counter.
*
*/
void octeon_init_cvmcount(void)
{
unsigned long flags;
unsigned loops = 2;
/* Clobber loops so GCC will not unroll the following while loop. */
asm("" : "+r" (loops));
local_irq_save(flags);
/*
* Loop several times so we are executing from the cache,
* which should give more deterministic timing.
*/
while (loops--)
write_c0_cvmcount(cvmx_read_csr(CVMX_IPD_CLK_COUNT));
local_irq_restore(flags);
}
static cycle_t octeon_cvmcount_read(void)
{
return read_c0_cvmcount();
}
static struct clocksource clocksource_mips = {
.name = "OCTEON_CVMCOUNT",
.read = octeon_cvmcount_read,
.mask = CLOCKSOURCE_MASK(64),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
void __init plat_time_init(void)
{
clocksource_mips.rating = 300;
clocksource_set_clock(&clocksource_mips, mips_hpt_frequency);
clocksource_register(&clocksource_mips);
}

View File

@ -0,0 +1,32 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com>
* Copyright (C) 2000, 2001 Ralf Baechle <ralf@gnu.org>
* Copyright (C) 2005 Ilya A. Volynets-Evenbakh <ilya@total-knowledge.com>
* swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
* IP32 changes by Ilya.
* Cavium Networks: Create new dma setup for Cavium Networks Octeon based on
* the kernel's original.
*/
#include <linux/types.h>
#include <linux/mm.h>
#include <dma-coherence.h>
dma_addr_t octeon_map_dma_mem(struct device *dev, void *ptr, size_t size)
{
/* Without PCI/PCIe this function can be called for Octeon internal
devices such as USB. These devices all support 64bit addressing */
mb();
return virt_to_phys(ptr);
}
void octeon_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr)
{
/* Without PCI/PCIe this function can be called for Octeon internal
* devices such as USB. These devices all support 64bit addressing */
return;
}

View File

@ -0,0 +1,13 @@
#
# Makefile for the Cavium Octeon specific kernel interface routines
# under Linux.
#
# This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive
# for more details.
#
# Copyright (C) 2005-2008 Cavium Networks
#
obj-y += cvmx-bootmem.o cvmx-l2c.o cvmx-sysinfo.o octeon-model.o

View File

@ -0,0 +1,586 @@
/***********************license start***************
* Author: Cavium Networks
*
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2008 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
/*
* Simple allocate-only memory allocator. Used to allocate memory at
* application start time.
*/
#include <linux/kernel.h>
#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-spinlock.h>
#include <asm/octeon/cvmx-bootmem.h>
/*#define DEBUG */
static struct cvmx_bootmem_desc *cvmx_bootmem_desc;
/* See header file for descriptions of functions */
/*
* Wrapper functions are provided for reading/writing the size and
* next block values as these may not be directly addressable (in
* 32-bit applications, for instance). Offsets of data elements in
* the bootmem list must match cvmx_bootmem_block_header_t.
*/
#define NEXT_OFFSET 0
#define SIZE_OFFSET 8
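/*
 * Illustrative sketch (not part of this file): the offsets above mirror a
 * block header laid out roughly as below.  The real definition lives in
 * cvmx-bootmem.h; the field names here are assumptions.
 *
 *	typedef struct {
 *		uint64_t next_block_addr;	// NEXT_OFFSET == 0
 *		uint64_t size;			// SIZE_OFFSET == 8
 *	} cvmx_bootmem_block_header_t;
 */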
static void cvmx_bootmem_phy_set_size(uint64_t addr, uint64_t size)
{
cvmx_write64_uint64((addr + SIZE_OFFSET) | (1ull << 63), size);
}
static void cvmx_bootmem_phy_set_next(uint64_t addr, uint64_t next)
{
cvmx_write64_uint64((addr + NEXT_OFFSET) | (1ull << 63), next);
}
static uint64_t cvmx_bootmem_phy_get_size(uint64_t addr)
{
return cvmx_read64_uint64((addr + SIZE_OFFSET) | (1ull << 63));
}
static uint64_t cvmx_bootmem_phy_get_next(uint64_t addr)
{
return cvmx_read64_uint64((addr + NEXT_OFFSET) | (1ull << 63));
}
void *cvmx_bootmem_alloc_range(uint64_t size, uint64_t alignment,
uint64_t min_addr, uint64_t max_addr)
{
int64_t address;
address =
cvmx_bootmem_phy_alloc(size, min_addr, max_addr, alignment, 0);
if (address > 0)
return cvmx_phys_to_ptr(address);
else
return NULL;
}
void *cvmx_bootmem_alloc_address(uint64_t size, uint64_t address,
uint64_t alignment)
{
return cvmx_bootmem_alloc_range(size, alignment, address,
address + size);
}
void *cvmx_bootmem_alloc(uint64_t size, uint64_t alignment)
{
return cvmx_bootmem_alloc_range(size, alignment, 0, 0);
}
int cvmx_bootmem_free_named(char *name)
{
return cvmx_bootmem_phy_named_block_free(name, 0);
}
struct cvmx_bootmem_named_block_desc *cvmx_bootmem_find_named_block(char *name)
{
return cvmx_bootmem_phy_named_block_find(name, 0);
}
void cvmx_bootmem_lock(void)
{
cvmx_spinlock_lock((cvmx_spinlock_t *) &(cvmx_bootmem_desc->lock));
}
void cvmx_bootmem_unlock(void)
{
cvmx_spinlock_unlock((cvmx_spinlock_t *) &(cvmx_bootmem_desc->lock));
}
int cvmx_bootmem_init(void *mem_desc_ptr)
{
/* Here we set the global pointer to the bootmem descriptor
* block. This pointer will be used directly, so we will set
* it up to be directly usable by the application. It is set
* up as follows for the various runtime/ABI combinations:
*
* Linux 64 bit: Set XKPHYS bit
* Linux 32 bit: use mmap to create mapping, use virtual address
* CVMX 64 bit: use physical address directly
* CVMX 32 bit: use physical address directly
*
* Note that the CVMX environment assumes the use of 1-1 TLB
* mappings so that the physical addresses can be used
* directly
*/
if (!cvmx_bootmem_desc) {
#if defined(CVMX_ABI_64)
/* Set XKPHYS bit */
cvmx_bootmem_desc = cvmx_phys_to_ptr(CAST64(mem_desc_ptr));
#else
cvmx_bootmem_desc = (struct cvmx_bootmem_desc *) mem_desc_ptr;
#endif
}
return 0;
}
/*
* The cvmx_bootmem_phy* functions below return 64-bit physical
* addresses, and expose more features than the cvmx_bootmem functions
* above. These are required for full memory space access in 32-bit
* applications, as well as for using some advanced features. Most
* applications should not need to use these.
*/
int64_t cvmx_bootmem_phy_alloc(uint64_t req_size, uint64_t address_min,
uint64_t address_max, uint64_t alignment,
uint32_t flags)
{
uint64_t head_addr;
uint64_t ent_addr;
/* points to previous list entry, NULL current entry is head of list */
uint64_t prev_addr = 0;
uint64_t new_ent_addr = 0;
uint64_t desired_min_addr;
#ifdef DEBUG
cvmx_dprintf("cvmx_bootmem_phy_alloc: req_size: 0x%llx, "
"min_addr: 0x%llx, max_addr: 0x%llx, align: 0x%llx\n",
(unsigned long long)req_size,
(unsigned long long)address_min,
(unsigned long long)address_max,
(unsigned long long)alignment);
#endif
if (cvmx_bootmem_desc->major_version > 3) {
cvmx_dprintf("ERROR: Incompatible bootmem descriptor "
"version: %d.%d at addr: %p\n",
(int)cvmx_bootmem_desc->major_version,
(int)cvmx_bootmem_desc->minor_version,
cvmx_bootmem_desc);
goto error_out;
}
/*
* Do a variety of checks to validate the arguments. The
* allocator code will later assume that these checks have
* been made. We validate that the requested constraints are
* not self-contradictory before we look through the list of
* available memory.
*/
/* 0 is not a valid req_size for this allocator */
if (!req_size)
goto error_out;
/* Round req_size up to mult of minimum alignment bytes */
req_size = (req_size + (CVMX_BOOTMEM_ALIGNMENT_SIZE - 1)) &
~(CVMX_BOOTMEM_ALIGNMENT_SIZE - 1);
/*
* Convert !0 address_min and 0 address_max to special case of
* range that specifies an exact memory block to allocate. Do
* this before other checks and adjustments so that this
* transformation will be validated.
*/
if (address_min && !address_max)
address_max = address_min + req_size;
else if (!address_min && !address_max)
address_max = ~0ull; /* If no limits given, use max limits */
/*
* Enforce minimum alignment (this also keeps the minimum free block
* req_size the same as the alignment req_size).
*/
if (alignment < CVMX_BOOTMEM_ALIGNMENT_SIZE)
alignment = CVMX_BOOTMEM_ALIGNMENT_SIZE;
/*
* Adjust address minimum based on requested alignment (round
* up to meet alignment). Do this here so we can reject
* impossible requests up front. (NOP for address_min == 0)
*/
if (alignment)
address_min = __ALIGN_MASK(address_min, (alignment - 1));
/*
* Reject inconsistent args. We have adjusted these, so this
* may fail due to our internal changes even if this check
* would pass for the values the user supplied.
*/
if (req_size > address_max - address_min)
goto error_out;
/* Walk through the list entries - first fit found is returned */
if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
cvmx_bootmem_lock();
head_addr = cvmx_bootmem_desc->head_addr;
ent_addr = head_addr;
for (; ent_addr;
prev_addr = ent_addr,
ent_addr = cvmx_bootmem_phy_get_next(ent_addr)) {
uint64_t usable_base, usable_max;
uint64_t ent_size = cvmx_bootmem_phy_get_size(ent_addr);
if (cvmx_bootmem_phy_get_next(ent_addr)
&& ent_addr > cvmx_bootmem_phy_get_next(ent_addr)) {
cvmx_dprintf("Internal bootmem_alloc() error: ent: "
"0x%llx, next: 0x%llx\n",
(unsigned long long)ent_addr,
(unsigned long long)
cvmx_bootmem_phy_get_next(ent_addr));
goto error_out;
}
/*
* Determine if this is an entry that can satisfy the
* request. Check to make sure the entry is large enough
* to satisfy the request.
*/
usable_base =
__ALIGN_MASK(max(address_min, ent_addr), alignment - 1);
usable_max = min(address_max, ent_addr + ent_size);
/*
* We should be able to allocate block at address
* usable_base.
*/
desired_min_addr = usable_base;
/*
* Determine if request can be satisfied from the
* current entry.
*/
if (!((ent_addr + ent_size) > usable_base
&& ent_addr < address_max
&& req_size <= usable_max - usable_base))
continue;
/*
* We have found an entry that has room to satisfy the
* request, so allocate it from this entry. If
* CVMX_BOOTMEM_FLAG_END_ALLOC is set, then allocate from
* the end of this block rather than the beginning.
*/
if (flags & CVMX_BOOTMEM_FLAG_END_ALLOC) {
desired_min_addr = usable_max - req_size;
/*
* Align desired address down to required
* alignment.
*/
desired_min_addr &= ~(alignment - 1);
}
/* Match at start of entry */
if (desired_min_addr == ent_addr) {
if (req_size < ent_size) {
/*
* big enough to create a new block
* from top portion of block.
*/
new_ent_addr = ent_addr + req_size;
cvmx_bootmem_phy_set_next(new_ent_addr,
cvmx_bootmem_phy_get_next(ent_addr));
cvmx_bootmem_phy_set_size(new_ent_addr,
ent_size -
req_size);
/*
* Adjust next pointer as following
* code uses this.
*/
cvmx_bootmem_phy_set_next(ent_addr,
new_ent_addr);
}
/*
* adjust prev ptr or head to remove this
* entry from list.
*/
if (prev_addr)
cvmx_bootmem_phy_set_next(prev_addr,
cvmx_bootmem_phy_get_next(ent_addr));
else
/*
* head of list being returned, so
* update head ptr.
*/
cvmx_bootmem_desc->head_addr =
cvmx_bootmem_phy_get_next(ent_addr);
if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
cvmx_bootmem_unlock();
return desired_min_addr;
}
/*
* block returned doesn't start at beginning of entry,
* so we know that we will be splitting a block off
* the front of this one. Create a new block from the
* beginning, add to list, and go to top of loop
* again.
*
* create new block from high portion of
* block, so that top block starts at desired
* addr.
*/
new_ent_addr = desired_min_addr;
cvmx_bootmem_phy_set_next(new_ent_addr,
cvmx_bootmem_phy_get_next
(ent_addr));
cvmx_bootmem_phy_set_size(new_ent_addr,
cvmx_bootmem_phy_get_size
(ent_addr) -
(desired_min_addr -
ent_addr));
cvmx_bootmem_phy_set_size(ent_addr,
desired_min_addr - ent_addr);
cvmx_bootmem_phy_set_next(ent_addr, new_ent_addr);
/* Loop again to handle actual alloc from new block */
}
error_out:
/* We didn't find anything, so return error */
if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
cvmx_bootmem_unlock();
return -1;
}
int __cvmx_bootmem_phy_free(uint64_t phy_addr, uint64_t size, uint32_t flags)
{
uint64_t cur_addr;
uint64_t prev_addr = 0; /* zero is invalid */
int retval = 0;
#ifdef DEBUG
cvmx_dprintf("__cvmx_bootmem_phy_free addr: 0x%llx, size: 0x%llx\n",
(unsigned long long)phy_addr, (unsigned long long)size);
#endif
if (cvmx_bootmem_desc->major_version > 3) {
cvmx_dprintf("ERROR: Incompatible bootmem descriptor "
"version: %d.%d at addr: %p\n",
(int)cvmx_bootmem_desc->major_version,
(int)cvmx_bootmem_desc->minor_version,
cvmx_bootmem_desc);
return 0;
}
/* 0 is not a valid size for this allocator */
if (!size)
return 0;
if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
cvmx_bootmem_lock();
cur_addr = cvmx_bootmem_desc->head_addr;
if (cur_addr == 0 || phy_addr < cur_addr) {
/* add at front of list - special case with changing head ptr */
if (cur_addr && phy_addr + size > cur_addr)
goto bootmem_free_done; /* error, overlapping section */
else if (phy_addr + size == cur_addr) {
/* Add to front of existing first block */
cvmx_bootmem_phy_set_next(phy_addr,
cvmx_bootmem_phy_get_next
(cur_addr));
cvmx_bootmem_phy_set_size(phy_addr,
cvmx_bootmem_phy_get_size
(cur_addr) + size);
cvmx_bootmem_desc->head_addr = phy_addr;
} else {
/* New block before first block. OK if cur_addr is 0 */
cvmx_bootmem_phy_set_next(phy_addr, cur_addr);
cvmx_bootmem_phy_set_size(phy_addr, size);
cvmx_bootmem_desc->head_addr = phy_addr;
}
retval = 1;
goto bootmem_free_done;
}
/* Find place in list to add block */
while (cur_addr && phy_addr > cur_addr) {
prev_addr = cur_addr;
cur_addr = cvmx_bootmem_phy_get_next(cur_addr);
}
if (!cur_addr) {
/*
* We have reached the end of the list, add on to end,
* checking to see if we need to combine with last
* block
*/
if (prev_addr + cvmx_bootmem_phy_get_size(prev_addr) ==
phy_addr) {
cvmx_bootmem_phy_set_size(prev_addr,
cvmx_bootmem_phy_get_size
(prev_addr) + size);
} else {
cvmx_bootmem_phy_set_next(prev_addr, phy_addr);
cvmx_bootmem_phy_set_size(phy_addr, size);
cvmx_bootmem_phy_set_next(phy_addr, 0);
}
retval = 1;
goto bootmem_free_done;
} else {
/*
* insert between prev and cur nodes, checking for
* merge with either/both.
*/
if (prev_addr + cvmx_bootmem_phy_get_size(prev_addr) ==
phy_addr) {
/* Merge with previous */
cvmx_bootmem_phy_set_size(prev_addr,
cvmx_bootmem_phy_get_size
(prev_addr) + size);
if (phy_addr + size == cur_addr) {
/* Also merge with current */
cvmx_bootmem_phy_set_size(prev_addr,
cvmx_bootmem_phy_get_size(cur_addr) +
cvmx_bootmem_phy_get_size(prev_addr));
cvmx_bootmem_phy_set_next(prev_addr,
cvmx_bootmem_phy_get_next(cur_addr));
}
retval = 1;
goto bootmem_free_done;
} else if (phy_addr + size == cur_addr) {
/* Merge with current */
cvmx_bootmem_phy_set_size(phy_addr,
cvmx_bootmem_phy_get_size
(cur_addr) + size);
cvmx_bootmem_phy_set_next(phy_addr,
cvmx_bootmem_phy_get_next
(cur_addr));
cvmx_bootmem_phy_set_next(prev_addr, phy_addr);
retval = 1;
goto bootmem_free_done;
}
/* It is a standalone block, add in between prev and cur */
cvmx_bootmem_phy_set_size(phy_addr, size);
cvmx_bootmem_phy_set_next(phy_addr, cur_addr);
cvmx_bootmem_phy_set_next(prev_addr, phy_addr);
}
retval = 1;
bootmem_free_done:
if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
cvmx_bootmem_unlock();
return retval;
}
struct cvmx_bootmem_named_block_desc *
cvmx_bootmem_phy_named_block_find(char *name, uint32_t flags)
{
unsigned int i;
struct cvmx_bootmem_named_block_desc *named_block_array_ptr;
#ifdef DEBUG
cvmx_dprintf("cvmx_bootmem_phy_named_block_find: %s\n", name);
#endif
/*
* Lock the structure to make sure that it is not being
* changed while we are examining it.
*/
if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
cvmx_bootmem_lock();
/* Use XKPHYS for 64 bit linux */
named_block_array_ptr = (struct cvmx_bootmem_named_block_desc *)
cvmx_phys_to_ptr(cvmx_bootmem_desc->named_block_array_addr);
#ifdef DEBUG
cvmx_dprintf
("cvmx_bootmem_phy_named_block_find: named_block_array_ptr: %p\n",
named_block_array_ptr);
#endif
if (cvmx_bootmem_desc->major_version == 3) {
for (i = 0;
i < cvmx_bootmem_desc->named_block_num_blocks; i++) {
if ((name && named_block_array_ptr[i].size
&& !strncmp(name, named_block_array_ptr[i].name,
cvmx_bootmem_desc->named_block_name_len
- 1))
|| (!name && !named_block_array_ptr[i].size)) {
if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
cvmx_bootmem_unlock();
return &(named_block_array_ptr[i]);
}
}
} else {
cvmx_dprintf("ERROR: Incompatible bootmem descriptor "
"version: %d.%d at addr: %p\n",
(int)cvmx_bootmem_desc->major_version,
(int)cvmx_bootmem_desc->minor_version,
cvmx_bootmem_desc);
}
if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING))
cvmx_bootmem_unlock();
return NULL;
}
int cvmx_bootmem_phy_named_block_free(char *name, uint32_t flags)
{
struct cvmx_bootmem_named_block_desc *named_block_ptr;
if (cvmx_bootmem_desc->major_version != 3) {
cvmx_dprintf("ERROR: Incompatible bootmem descriptor version: "
"%d.%d at addr: %p\n",
(int)cvmx_bootmem_desc->major_version,
(int)cvmx_bootmem_desc->minor_version,
cvmx_bootmem_desc);
return 0;
}
#ifdef DEBUG
cvmx_dprintf("cvmx_bootmem_phy_named_block_free: %s\n", name);
#endif
/*
* Take lock here, as name lookup/block free/name free need to
* be atomic.
*/
cvmx_bootmem_lock();
named_block_ptr =
cvmx_bootmem_phy_named_block_find(name,
CVMX_BOOTMEM_FLAG_NO_LOCKING);
if (named_block_ptr) {
#ifdef DEBUG
cvmx_dprintf("cvmx_bootmem_phy_named_block_free: "
"%s, base: 0x%llx, size: 0x%llx\n",
name,
(unsigned long long)named_block_ptr->base_addr,
(unsigned long long)named_block_ptr->size);
#endif
__cvmx_bootmem_phy_free(named_block_ptr->base_addr,
named_block_ptr->size,
CVMX_BOOTMEM_FLAG_NO_LOCKING);
named_block_ptr->size = 0;
/* Set size to zero to indicate block not used. */
}
cvmx_bootmem_unlock();
return named_block_ptr != NULL; /* 0 on failure, 1 on success */
}

View File

@ -0,0 +1,734 @@
/***********************license start***************
* Author: Cavium Networks
*
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2008 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
/*
* Implementation of the Level 2 Cache (L2C) control, measurement, and
* debugging facilities.
*/
#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-l2c.h>
#include <asm/octeon/cvmx-spinlock.h>
/*
* This spinlock is used internally to ensure that only one core is
* performing certain L2 operations at a time.
*
* NOTE: This only protects calls from within a single application -
* if multiple applications or operating systems are running, then it
* is up to the user program to coordinate between them.
*/
static cvmx_spinlock_t cvmx_l2c_spinlock;
static inline int l2_size_half(void)
{
uint64_t val = cvmx_read_csr(CVMX_L2D_FUS3);
return !!(val & (1ull << 34));
}
int cvmx_l2c_get_core_way_partition(uint32_t core)
{
uint32_t field;
/* Validate the core number */
if (core >= cvmx_octeon_num_cores())
return -1;
/*
* Use the lower two bits of the coreNumber to determine the
* bit offset of the UMSK[] field in the L2C_SPAR register.
*/
field = (core & 0x3) * 8;
/*
* Return the UMSK[] field from the appropriate L2C_SPAR
* register based on the coreNumber.
*/
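/* Worked example: for core 5, field = (5 & 0x3) * 8 = 8 and (core & 0xC)
 * is 0x4, so the UMSK bits for core 5 are read from bits 15:8 of
 * L2C_SPAR1. */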
switch (core & 0xC) {
case 0x0:
return (cvmx_read_csr(CVMX_L2C_SPAR0) & (0xFF << field)) >>
field;
case 0x4:
return (cvmx_read_csr(CVMX_L2C_SPAR1) & (0xFF << field)) >>
field;
case 0x8:
return (cvmx_read_csr(CVMX_L2C_SPAR2) & (0xFF << field)) >>
field;
case 0xC:
return (cvmx_read_csr(CVMX_L2C_SPAR3) & (0xFF << field)) >>
field;
}
return 0;
}
int cvmx_l2c_set_core_way_partition(uint32_t core, uint32_t mask)
{
uint32_t field;
uint32_t valid_mask;
valid_mask = (0x1 << cvmx_l2c_get_num_assoc()) - 1;
mask &= valid_mask;
/* A UMSK setting which blocks all L2C Ways is an error. */
if (mask == valid_mask)
return -1;
/* Validate the core number */
if (core >= cvmx_octeon_num_cores())
return -1;
/* Check to make sure current mask & new mask don't block all ways */
if (((mask | cvmx_l2c_get_core_way_partition(core)) & valid_mask) ==
valid_mask)
return -1;
/* Use the lower two bits of core to determine the bit offset of the
* UMSK[] field in the L2C_SPAR register.
*/
field = (core & 0x3) * 8;
/* Assign the new mask setting to the UMSK[] field in the appropriate
* L2C_SPAR register based on the core_num.
*
*/
switch (core & 0xC) {
case 0x0:
cvmx_write_csr(CVMX_L2C_SPAR0,
(cvmx_read_csr(CVMX_L2C_SPAR0) &
~(0xFF << field)) | mask << field);
break;
case 0x4:
cvmx_write_csr(CVMX_L2C_SPAR1,
(cvmx_read_csr(CVMX_L2C_SPAR1) &
~(0xFF << field)) | mask << field);
break;
case 0x8:
cvmx_write_csr(CVMX_L2C_SPAR2,
(cvmx_read_csr(CVMX_L2C_SPAR2) &
~(0xFF << field)) | mask << field);
break;
case 0xC:
cvmx_write_csr(CVMX_L2C_SPAR3,
(cvmx_read_csr(CVMX_L2C_SPAR3) &
~(0xFF << field)) | mask << field);
break;
}
return 0;
}
int cvmx_l2c_set_hw_way_partition(uint32_t mask)
{
uint32_t valid_mask;
valid_mask = 0xff;
if (OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN38XX)) {
if (l2_size_half())
valid_mask = 0xf;
} else if (l2_size_half())
valid_mask = 0x3;
mask &= valid_mask;
/* A UMSK setting which blocks all L2C Ways is an error. */
if (mask == valid_mask)
return -1;
/* Check to make sure current mask & new mask don't block all ways */
if (((mask | cvmx_l2c_get_hw_way_partition()) & valid_mask) ==
valid_mask)
return -1;
cvmx_write_csr(CVMX_L2C_SPAR4,
(cvmx_read_csr(CVMX_L2C_SPAR4) & ~0xFF) | mask);
return 0;
}
int cvmx_l2c_get_hw_way_partition(void)
{
return cvmx_read_csr(CVMX_L2C_SPAR4) & (0xFF);
}
void cvmx_l2c_config_perf(uint32_t counter, enum cvmx_l2c_event event,
uint32_t clear_on_read)
{
union cvmx_l2c_pfctl pfctl;
pfctl.u64 = cvmx_read_csr(CVMX_L2C_PFCTL);
switch (counter) {
case 0:
pfctl.s.cnt0sel = event;
pfctl.s.cnt0ena = 1;
if (!cvmx_octeon_is_pass1())
pfctl.s.cnt0rdclr = clear_on_read;
break;
case 1:
pfctl.s.cnt1sel = event;
pfctl.s.cnt1ena = 1;
if (!cvmx_octeon_is_pass1())
pfctl.s.cnt1rdclr = clear_on_read;
break;
case 2:
pfctl.s.cnt2sel = event;
pfctl.s.cnt2ena = 1;
if (!cvmx_octeon_is_pass1())
pfctl.s.cnt2rdclr = clear_on_read;
break;
case 3:
default:
pfctl.s.cnt3sel = event;
pfctl.s.cnt3ena = 1;
if (!cvmx_octeon_is_pass1())
pfctl.s.cnt3rdclr = clear_on_read;
break;
}
cvmx_write_csr(CVMX_L2C_PFCTL, pfctl.u64);
}
uint64_t cvmx_l2c_read_perf(uint32_t counter)
{
switch (counter) {
case 0:
return cvmx_read_csr(CVMX_L2C_PFC0);
case 1:
return cvmx_read_csr(CVMX_L2C_PFC1);
case 2:
return cvmx_read_csr(CVMX_L2C_PFC2);
case 3:
default:
return cvmx_read_csr(CVMX_L2C_PFC3);
}
}
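/*
* Illustrative sketch, not part of the original file: one way the two
* helpers above could be paired to sample an L2C performance event.
* CVMX_L2C_EVENT_CYCLES is assumed to be one of the enum cvmx_l2c_event
* values declared in cvmx-l2c.h; substitute whichever event is needed.
*/
static inline uint64_t example_l2c_sample_event(void)
{
/* Program counter 0 to count the event and clear it on read. */
cvmx_l2c_config_perf(0, CVMX_L2C_EVENT_CYCLES, 1);
/* ... run the workload of interest here ... */
return cvmx_l2c_read_perf(0);
}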
/**
* @INTERNAL
* Helper function used to fault in cache lines for L2 cache locking
*
* @addr: Address of base of memory region to read into L2 cache
* @len: Length (in bytes) of region to fault in
*/
static void fault_in(uint64_t addr, int len)
{
volatile char *ptr;
volatile char dummy;
/*
* Adjust addr and length so we get all cache lines even for
* small ranges spanning two cache lines
*/
len += addr & CVMX_CACHE_LINE_MASK;
addr &= ~CVMX_CACHE_LINE_MASK;
ptr = (volatile char *)cvmx_phys_to_ptr(addr);
/*
* Invalidate L1 cache to make sure all loads result in data
* being in L2.
*/
CVMX_DCACHE_INVALIDATE;
while (len > 0) {
dummy += *ptr;
len -= CVMX_CACHE_LINE_SIZE;
ptr += CVMX_CACHE_LINE_SIZE;
}
}
int cvmx_l2c_lock_line(uint64_t addr)
{
int retval = 0;
union cvmx_l2c_dbg l2cdbg;
union cvmx_l2c_lckbase lckbase;
union cvmx_l2c_lckoff lckoff;
union cvmx_l2t_err l2t_err;
l2cdbg.u64 = 0;
lckbase.u64 = 0;
lckoff.u64 = 0;
cvmx_spinlock_lock(&cvmx_l2c_spinlock);
/* Clear l2t error bits if set */
l2t_err.u64 = cvmx_read_csr(CVMX_L2T_ERR);
l2t_err.s.lckerr = 1;
l2t_err.s.lckerr2 = 1;
cvmx_write_csr(CVMX_L2T_ERR, l2t_err.u64);
addr &= ~CVMX_CACHE_LINE_MASK;
/* Set this core as debug core */
l2cdbg.s.ppnum = cvmx_get_core_num();
CVMX_SYNC;
cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64);
cvmx_read_csr(CVMX_L2C_DBG);
lckoff.s.lck_offset = 0; /* Only lock 1 line at a time */
cvmx_write_csr(CVMX_L2C_LCKOFF, lckoff.u64);
cvmx_read_csr(CVMX_L2C_LCKOFF);
if (((union cvmx_l2c_cfg) (cvmx_read_csr(CVMX_L2C_CFG))).s.idxalias) {
int alias_shift =
CVMX_L2C_IDX_ADDR_SHIFT + 2 * CVMX_L2_SET_BITS - 1;
uint64_t addr_tmp =
addr ^ (addr & ((1 << alias_shift) - 1)) >>
CVMX_L2_SET_BITS;
lckbase.s.lck_base = addr_tmp >> 7;
} else {
lckbase.s.lck_base = addr >> 7;
}
lckbase.s.lck_ena = 1;
cvmx_write_csr(CVMX_L2C_LCKBASE, lckbase.u64);
cvmx_read_csr(CVMX_L2C_LCKBASE); /* Make sure it gets there */
fault_in(addr, CVMX_CACHE_LINE_SIZE);
lckbase.s.lck_ena = 0;
cvmx_write_csr(CVMX_L2C_LCKBASE, lckbase.u64);
cvmx_read_csr(CVMX_L2C_LCKBASE); /* Make sure it gets there */
/* Stop being debug core */
cvmx_write_csr(CVMX_L2C_DBG, 0);
cvmx_read_csr(CVMX_L2C_DBG);
l2t_err.u64 = cvmx_read_csr(CVMX_L2T_ERR);
if (l2t_err.s.lckerr || l2t_err.s.lckerr2)
retval = 1; /* We were unable to lock the line */
cvmx_spinlock_unlock(&cvmx_l2c_spinlock);
return retval;
}
int cvmx_l2c_lock_mem_region(uint64_t start, uint64_t len)
{
int retval = 0;
/* Round start/end to cache line boundaries */
len += start & CVMX_CACHE_LINE_MASK;
start &= ~CVMX_CACHE_LINE_MASK;
len = (len + CVMX_CACHE_LINE_MASK) & ~CVMX_CACHE_LINE_MASK;
while (len) {
retval += cvmx_l2c_lock_line(start);
start += CVMX_CACHE_LINE_SIZE;
len -= CVMX_CACHE_LINE_SIZE;
}
return retval;
}
void cvmx_l2c_flush(void)
{
uint64_t assoc, set;
uint64_t n_assoc, n_set;
union cvmx_l2c_dbg l2cdbg;
cvmx_spinlock_lock(&cvmx_l2c_spinlock);
l2cdbg.u64 = 0;
if (!OCTEON_IS_MODEL(OCTEON_CN30XX))
l2cdbg.s.ppnum = cvmx_get_core_num();
l2cdbg.s.finv = 1;
n_set = CVMX_L2_SETS;
n_assoc = l2_size_half() ? (CVMX_L2_ASSOC / 2) : CVMX_L2_ASSOC;
for (set = 0; set < n_set; set++) {
for (assoc = 0; assoc < n_assoc; assoc++) {
l2cdbg.s.set = assoc;
/*
* Enter debug mode, and make sure all other writes
* complete before we enter debug mode.
*/
CVMX_SYNCW;
cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64);
cvmx_read_csr(CVMX_L2C_DBG);
CVMX_PREPARE_FOR_STORE(CVMX_ADD_SEG
(CVMX_MIPS_SPACE_XKPHYS,
set * CVMX_CACHE_LINE_SIZE), 0);
CVMX_SYNCW; /* Push STF out to L2 */
/* Exit debug mode */
CVMX_SYNC;
cvmx_write_csr(CVMX_L2C_DBG, 0);
cvmx_read_csr(CVMX_L2C_DBG);
}
}
cvmx_spinlock_unlock(&cvmx_l2c_spinlock);
}
int cvmx_l2c_unlock_line(uint64_t address)
{
int assoc;
union cvmx_l2c_tag tag;
union cvmx_l2c_dbg l2cdbg;
uint32_t tag_addr;
uint32_t index = cvmx_l2c_address_to_index(address);
cvmx_spinlock_lock(&cvmx_l2c_spinlock);
/* Compute portion of address that is stored in tag */
tag_addr =
((address >> CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) &
((1 << CVMX_L2C_TAG_ADDR_ALIAS_SHIFT) - 1));
for (assoc = 0; assoc < CVMX_L2_ASSOC; assoc++) {
tag = cvmx_get_l2c_tag(assoc, index);
if (tag.s.V && (tag.s.addr == tag_addr)) {
l2cdbg.u64 = 0;
l2cdbg.s.ppnum = cvmx_get_core_num();
l2cdbg.s.set = assoc;
l2cdbg.s.finv = 1;
CVMX_SYNC;
/* Enter debug mode */
cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64);
cvmx_read_csr(CVMX_L2C_DBG);
CVMX_PREPARE_FOR_STORE(CVMX_ADD_SEG
(CVMX_MIPS_SPACE_XKPHYS,
address), 0);
CVMX_SYNC;
/* Exit debug mode */
cvmx_write_csr(CVMX_L2C_DBG, 0);
cvmx_read_csr(CVMX_L2C_DBG);
cvmx_spinlock_unlock(&cvmx_l2c_spinlock);
return tag.s.L;
}
}
cvmx_spinlock_unlock(&cvmx_l2c_spinlock);
return 0;
}
int cvmx_l2c_unlock_mem_region(uint64_t start, uint64_t len)
{
int num_unlocked = 0;
/* Round start/end to cache line boundaries */
len += start & CVMX_CACHE_LINE_MASK;
start &= ~CVMX_CACHE_LINE_MASK;
len = (len + CVMX_CACHE_LINE_MASK) & ~CVMX_CACHE_LINE_MASK;
while (len > 0) {
num_unlocked += cvmx_l2c_unlock_line(start);
start += CVMX_CACHE_LINE_SIZE;
len -= CVMX_CACHE_LINE_SIZE;
}
return num_unlocked;
}
/*
* Internal l2c tag types. These are converted to a generic structure
* that can be used on all chips.
*/
union __cvmx_l2c_tag {
uint64_t u64;
struct cvmx_l2c_tag_cn50xx {
uint64_t reserved:40;
uint64_t V:1; /* Line valid */
uint64_t D:1; /* Line dirty */
uint64_t L:1; /* Line locked */
uint64_t U:1; /* Use, LRU eviction */
uint64_t addr:20; /* Phys mem addr (33..14) */
} cn50xx;
struct cvmx_l2c_tag_cn30xx {
uint64_t reserved:41;
uint64_t V:1; /* Line valid */
uint64_t D:1; /* Line dirty */
uint64_t L:1; /* Line locked */
uint64_t U:1; /* Use, LRU eviction */
uint64_t addr:19; /* Phys mem addr (33..15) */
} cn30xx;
struct cvmx_l2c_tag_cn31xx {
uint64_t reserved:42;
uint64_t V:1; /* Line valid */
uint64_t D:1; /* Line dirty */
uint64_t L:1; /* Line locked */
uint64_t U:1; /* Use, LRU eviction */
uint64_t addr:18; /* Phys mem addr (33..16) */
} cn31xx;
struct cvmx_l2c_tag_cn38xx {
uint64_t reserved:43;
uint64_t V:1; /* Line valid */
uint64_t D:1; /* Line dirty */
uint64_t L:1; /* Line locked */
uint64_t U:1; /* Use, LRU eviction */
uint64_t addr:17; /* Phys mem addr (33..17) */
} cn38xx;
struct cvmx_l2c_tag_cn58xx {
uint64_t reserved:44;
uint64_t V:1; /* Line valid */
uint64_t D:1; /* Line dirty */
uint64_t L:1; /* Line locked */
uint64_t U:1; /* Use, LRU eviction */
uint64_t addr:16; /* Phys mem addr (33..18) */
} cn58xx;
struct cvmx_l2c_tag_cn58xx cn56xx; /* 2048 sets */
struct cvmx_l2c_tag_cn31xx cn52xx; /* 512 sets */
};
/**
* @INTERNAL
* Function to read an L2C tag. This code makes the current core
* the 'debug core' for the L2. This code must only be executed by
* one core at a time.
*
* @assoc: Association (way) of the tag to dump
* @index: Index of the cacheline
*
* Returns The Octeon model specific tag structure. This is
* translated by a wrapper function to a generic form that is
* easier for applications to use.
*/
static union __cvmx_l2c_tag __read_l2_tag(uint64_t assoc, uint64_t index)
{
uint64_t debug_tag_addr = (((1ULL << 63) | (index << 7)) + 96);
uint64_t core = cvmx_get_core_num();
union __cvmx_l2c_tag tag_val;
uint64_t dbg_addr = CVMX_L2C_DBG;
unsigned long flags;
union cvmx_l2c_dbg debug_val;
debug_val.u64 = 0;
/*
* For low core count parts, the core number is always small enough
* to stay in the correct field and not set any reserved bits.
*/
debug_val.s.ppnum = core;
debug_val.s.l2t = 1;
debug_val.s.set = assoc;
/*
* Make sure core is quiet (no prefetches, etc.) before
* entering debug mode.
*/
CVMX_SYNC;
/* Flush L1 to make sure debug load misses L1 */
CVMX_DCACHE_INVALIDATE;
local_irq_save(flags);
/*
* The following must be done in assembly as when in debug
* mode all data loads from L2 return special debug data, not
* normal memory contents. Also, interrupts must be
* disabled, since if an interrupt occurs while in debug mode
* the ISR will get debug data from all its memory reads
* instead of the contents of memory
*/
asm volatile (".set push \n"
" .set mips64 \n"
" .set noreorder \n"
/* Enter debug mode, wait for store */
" sd %[dbg_val], 0(%[dbg_addr]) \n"
" ld $0, 0(%[dbg_addr]) \n"
/* Read L2C tag data */
" ld %[tag_val], 0(%[tag_addr]) \n"
/* Exit debug mode, wait for store */
" sd $0, 0(%[dbg_addr]) \n"
" ld $0, 0(%[dbg_addr]) \n"
/* Invalidate dcache to discard debug data */
" cache 9, 0($0) \n"
" .set pop" :
[tag_val] "=r"(tag_val.u64) : [dbg_addr] "r"(dbg_addr),
[dbg_val] "r"(debug_val.u64),
[tag_addr] "r"(debug_tag_addr) : "memory");
local_irq_restore(flags);
return tag_val;
}
union cvmx_l2c_tag cvmx_l2c_get_tag(uint32_t association, uint32_t index)
{
union __cvmx_l2c_tag tmp_tag;
union cvmx_l2c_tag tag;
tag.u64 = 0;
if ((int)association >= cvmx_l2c_get_num_assoc()) {
cvmx_dprintf
("ERROR: cvmx_get_l2c_tag association out of range\n");
return tag;
}
if ((int)index >= cvmx_l2c_get_num_sets()) {
cvmx_dprintf("ERROR: cvmx_get_l2c_tag "
"index out of range (arg: %d, max: %d\n",
index, cvmx_l2c_get_num_sets());
return tag;
}
/* __read_l2_tag is intended for internal use only */
tmp_tag = __read_l2_tag(association, index);
/*
* Convert all tag structure types to generic version, as it
* can represent all models.
*/
if (OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)) {
tag.s.V = tmp_tag.cn58xx.V;
tag.s.D = tmp_tag.cn58xx.D;
tag.s.L = tmp_tag.cn58xx.L;
tag.s.U = tmp_tag.cn58xx.U;
tag.s.addr = tmp_tag.cn58xx.addr;
} else if (OCTEON_IS_MODEL(OCTEON_CN38XX)) {
tag.s.V = tmp_tag.cn38xx.V;
tag.s.D = tmp_tag.cn38xx.D;
tag.s.L = tmp_tag.cn38xx.L;
tag.s.U = tmp_tag.cn38xx.U;
tag.s.addr = tmp_tag.cn38xx.addr;
} else if (OCTEON_IS_MODEL(OCTEON_CN31XX)
|| OCTEON_IS_MODEL(OCTEON_CN52XX)) {
tag.s.V = tmp_tag.cn31xx.V;
tag.s.D = tmp_tag.cn31xx.D;
tag.s.L = tmp_tag.cn31xx.L;
tag.s.U = tmp_tag.cn31xx.U;
tag.s.addr = tmp_tag.cn31xx.addr;
} else if (OCTEON_IS_MODEL(OCTEON_CN30XX)) {
tag.s.V = tmp_tag.cn30xx.V;
tag.s.D = tmp_tag.cn30xx.D;
tag.s.L = tmp_tag.cn30xx.L;
tag.s.U = tmp_tag.cn30xx.U;
tag.s.addr = tmp_tag.cn30xx.addr;
} else if (OCTEON_IS_MODEL(OCTEON_CN50XX)) {
tag.s.V = tmp_tag.cn50xx.V;
tag.s.D = tmp_tag.cn50xx.D;
tag.s.L = tmp_tag.cn50xx.L;
tag.s.U = tmp_tag.cn50xx.U;
tag.s.addr = tmp_tag.cn50xx.addr;
} else {
cvmx_dprintf("Unsupported OCTEON Model in %s\n", __func__);
}
return tag;
}
uint32_t cvmx_l2c_address_to_index(uint64_t addr)
{
uint64_t idx = addr >> CVMX_L2C_IDX_ADDR_SHIFT;
union cvmx_l2c_cfg l2c_cfg;
l2c_cfg.u64 = cvmx_read_csr(CVMX_L2C_CFG);
if (l2c_cfg.s.idxalias) {
idx ^=
((addr & CVMX_L2C_ALIAS_MASK) >>
CVMX_L2C_TAG_ADDR_ALIAS_SHIFT);
}
idx &= CVMX_L2C_IDX_MASK;
return idx;
}
int cvmx_l2c_get_cache_size_bytes(void)
{
return cvmx_l2c_get_num_sets() * cvmx_l2c_get_num_assoc() *
CVMX_CACHE_LINE_SIZE;
}
/**
* Return log base 2 of the number of sets in the L2 cache.
*
* Returns the number of set index bits
*/
int cvmx_l2c_get_set_bits(void)
{
int l2_set_bits;
if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN58XX))
l2_set_bits = 11; /* 2048 sets */
else if (OCTEON_IS_MODEL(OCTEON_CN38XX))
l2_set_bits = 10; /* 1024 sets */
else if (OCTEON_IS_MODEL(OCTEON_CN31XX)
|| OCTEON_IS_MODEL(OCTEON_CN52XX))
l2_set_bits = 9; /* 512 sets */
else if (OCTEON_IS_MODEL(OCTEON_CN30XX))
l2_set_bits = 8; /* 256 sets */
else if (OCTEON_IS_MODEL(OCTEON_CN50XX))
l2_set_bits = 7; /* 128 sets */
else {
cvmx_dprintf("Unsupported OCTEON Model in %s\n", __func__);
l2_set_bits = 11; /* 2048 sets */
}
return l2_set_bits;
}
/* Return the number of sets in the L2 Cache */
int cvmx_l2c_get_num_sets(void)
{
return 1 << cvmx_l2c_get_set_bits();
}
/* Return the number of associations in the L2 Cache */
int cvmx_l2c_get_num_assoc(void)
{
int l2_assoc;
if (OCTEON_IS_MODEL(OCTEON_CN56XX) ||
OCTEON_IS_MODEL(OCTEON_CN52XX) ||
OCTEON_IS_MODEL(OCTEON_CN58XX) ||
OCTEON_IS_MODEL(OCTEON_CN50XX) || OCTEON_IS_MODEL(OCTEON_CN38XX))
l2_assoc = 8;
else if (OCTEON_IS_MODEL(OCTEON_CN31XX) ||
OCTEON_IS_MODEL(OCTEON_CN30XX))
l2_assoc = 4;
else {
cvmx_dprintf("Unsupported OCTEON Model in %s\n", __func__);
l2_assoc = 8;
}
/* Check to see if part of the cache is disabled */
if (cvmx_fuse_read(265))
l2_assoc = l2_assoc >> 2;
else if (cvmx_fuse_read(264))
l2_assoc = l2_assoc >> 1;
return l2_assoc;
}
/**
* Flush a line from the L2 cache
* This should only be called from one core at a time, as this routine
* sets the core to the 'debug' core in order to flush the line.
*
* @assoc: Association (or way) to flush
* @index: Index to flush
*/
void cvmx_l2c_flush_line(uint32_t assoc, uint32_t index)
{
union cvmx_l2c_dbg l2cdbg;
l2cdbg.u64 = 0;
l2cdbg.s.ppnum = cvmx_get_core_num();
l2cdbg.s.finv = 1;
l2cdbg.s.set = assoc;
/*
* Enter debug mode, and make sure all other writes complete
* before we enter debug mode.
*/
asm volatile ("sync" : : : "memory");
cvmx_write_csr(CVMX_L2C_DBG, l2cdbg.u64);
cvmx_read_csr(CVMX_L2C_DBG);
CVMX_PREPARE_FOR_STORE(((1ULL << 63) + (index) * 128), 0);
/* Exit debug mode */
asm volatile ("sync" : : : "memory");
cvmx_write_csr(CVMX_L2C_DBG, 0);
cvmx_read_csr(CVMX_L2C_DBG);
}
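/*
* Illustrative sketch, not part of the original file: locking a buffer
* into the L2 with the helpers defined above. cvmx_ptr_to_phys() is
* assumed to be the executive counterpart of the cvmx_phys_to_ptr()
* call used in fault_in(); the buffer itself is hypothetical.
*/
static int example_l2c_lock_buffer(void *buf, int size)
{
/* Refuse a request that could never fit in the cache. */
if (size > cvmx_l2c_get_cache_size_bytes())
return -1;
/* Returns the number of cache lines that could not be locked. */
return cvmx_l2c_lock_mem_region(cvmx_ptr_to_phys(buf), size);
}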

View File

@ -0,0 +1,116 @@
/***********************license start***************
* Author: Cavium Networks
*
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2008 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
/*
* This module provides system/board/application information obtained
* by the bootloader.
*/
#include <asm/octeon/cvmx.h>
#include <asm/octeon/cvmx-spinlock.h>
#include <asm/octeon/cvmx-sysinfo.h>
/**
* This structure defines the private state maintained by the sysinfo module.
*
*/
static struct {
struct cvmx_sysinfo sysinfo; /* system information */
cvmx_spinlock_t lock; /* mutex spinlock */
} state = {
.lock = CVMX_SPINLOCK_UNLOCKED_INITIALIZER
};
/*
* Global variables that define the min/max of the memory region set
* up for 32 bit userspace access.
*/
uint64_t linux_mem32_min;
uint64_t linux_mem32_max;
uint64_t linux_mem32_wired;
uint64_t linux_mem32_offset;
/**
* This function returns the application information as obtained
* by the bootloader. This provides the core mask of the cores
* running the same application image, as well as the physical
* memory regions available to the core.
*
* Returns Pointer to the boot information structure
*
*/
struct cvmx_sysinfo *cvmx_sysinfo_get(void)
{
return &(state.sysinfo);
}
/**
* This function is used in non-simple executive environments (such as
* Linux kernel, u-boot, etc.) to configure the minimal fields that
* are required to use simple executive files directly.
*
* Locking (if required) must be handled outside of this
* function
*
* @phy_mem_desc_ptr:
* Pointer to global physical memory descriptor
* (bootmem descriptor)
* @board_type:
* Octeon board type enumeration
*
* @board_rev_major:
* Board major revision
* @board_rev_minor:
* Board minor revision
* @cpu_clock_hz:
* CPU clock frequency in hertz
*
* Returns 0: Failure
* 1: success
*/
int cvmx_sysinfo_minimal_initialize(void *phy_mem_desc_ptr,
uint16_t board_type,
uint8_t board_rev_major,
uint8_t board_rev_minor,
uint32_t cpu_clock_hz)
{
/* The sysinfo structure was already initialized */
if (state.sysinfo.board_type)
return 0;
memset(&(state.sysinfo), 0x0, sizeof(state.sysinfo));
state.sysinfo.phy_mem_desc_ptr = phy_mem_desc_ptr;
state.sysinfo.board_type = board_type;
state.sysinfo.board_rev_major = board_rev_major;
state.sysinfo.board_rev_minor = board_rev_minor;
state.sysinfo.cpu_clock_hz = cpu_clock_hz;
return 1;
}
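/*
* Illustrative sketch, not part of the original file: how a bootstrap
* path might seed the sysinfo state and read it back. The descriptor
* pointer, board type and clock value are placeholders.
*/
static void example_sysinfo_usage(void *bootmem_desc)
{
struct cvmx_sysinfo *sysinfo;
/* Returns 0 on failure, 1 on success (see above). */
if (!cvmx_sysinfo_minimal_initialize(bootmem_desc, 0, 1, 0, 500000000))
return;
sysinfo = cvmx_sysinfo_get();
cvmx_dprintf("Octeon CPU clock: %u Hz\n", sysinfo->cpu_clock_hz);
}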

View File

@ -0,0 +1,358 @@
/***********************license start***************
* Author: Cavium Networks
*
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2008 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
/*
* File defining functions for working with different Octeon
* models.
*/
#include <asm/octeon/octeon.h>
/**
* Given the chip processor ID from COP0, this function returns a
* string representing the chip model number. The string is of the
* form CNXXXXpX.X-FREQ-SUFFIX.
* - XXXX = The chip model number
* - X.X = Chip pass number
* - FREQ = Current frequency in MHz
* - SUFFIX = NSP, EXP, SCP, SSP, or CP
*
* @chip_id: Chip ID
*
* Returns Model string
*/
const char *octeon_model_get_string(uint32_t chip_id)
{
static char buffer[32];
return octeon_model_get_string_buffer(chip_id, buffer);
}
/*
* Version of octeon_model_get_string() that takes the buffer as an
* argument, since static/global variables don't work when running
* early in u-boot from flash.
*/
const char *octeon_model_get_string_buffer(uint32_t chip_id, char *buffer)
{
const char *family;
const char *core_model;
char pass[4];
int clock_mhz;
const char *suffix;
union cvmx_l2d_fus3 fus3;
int num_cores;
union cvmx_mio_fus_dat2 fus_dat2;
union cvmx_mio_fus_dat3 fus_dat3;
char fuse_model[10];
uint32_t fuse_data = 0;
fus3.u64 = cvmx_read_csr(CVMX_L2D_FUS3);
fus_dat2.u64 = cvmx_read_csr(CVMX_MIO_FUS_DAT2);
fus_dat3.u64 = cvmx_read_csr(CVMX_MIO_FUS_DAT3);
num_cores = cvmx_octeon_num_cores();
/* Make sure the non-existent devices look disabled */
switch ((chip_id >> 8) & 0xff) {
case 6: /* CN50XX */
case 2: /* CN30XX */
fus_dat3.s.nodfa_dte = 1;
fus_dat3.s.nozip = 1;
break;
case 4: /* CN57XX or CN56XX */
fus_dat3.s.nodfa_dte = 1;
break;
default:
break;
}
/* Make a guess at the suffix */
/* NSP = everything */
/* EXP = No crypto */
/* SCP = No DFA, No zip */
/* CP = No DFA, No crypto, No zip */
if (fus_dat3.s.nodfa_dte) {
if (fus_dat2.s.nocrypto)
suffix = "CP";
else
suffix = "SCP";
} else if (fus_dat2.s.nocrypto)
suffix = "EXP";
else
suffix = "NSP";
/*
* Assume pass number is encoded using <5:3><2:0>. Exceptions
* will be fixed later.
*/
sprintf(pass, "%u.%u", ((chip_id >> 3) & 7) + 1, chip_id & 7);
/*
* Use the number of cores to determine the last 2 digits of
* the model number. There are some exceptions that are fixed
* later.
*/
switch (num_cores) {
case 16:
core_model = "60";
break;
case 15:
core_model = "58";
break;
case 14:
core_model = "55";
break;
case 13:
core_model = "52";
break;
case 12:
core_model = "50";
break;
case 11:
core_model = "48";
break;
case 10:
core_model = "45";
break;
case 9:
core_model = "42";
break;
case 8:
core_model = "40";
break;
case 7:
core_model = "38";
break;
case 6:
core_model = "34";
break;
case 5:
core_model = "32";
break;
case 4:
core_model = "30";
break;
case 3:
core_model = "25";
break;
case 2:
core_model = "20";
break;
case 1:
core_model = "10";
break;
default:
core_model = "XX";
break;
}
/* Now figure out the family, the first two digits */
switch ((chip_id >> 8) & 0xff) {
case 0: /* CN38XX, CN37XX or CN36XX */
if (fus3.cn38xx.crip_512k) {
/*
* For some unknown reason, the 16 core one is
* called 37 instead of 36.
*/
if (num_cores >= 16)
family = "37";
else
family = "36";
} else
family = "38";
/*
* This series of chips didn't follow the standard
* pass numbering.
*/
switch (chip_id & 0xf) {
case 0:
strcpy(pass, "1.X");
break;
case 1:
strcpy(pass, "2.X");
break;
case 3:
strcpy(pass, "3.X");
break;
default:
strcpy(pass, "X.X");
break;
}
break;
case 1: /* CN31XX or CN3020 */
if ((chip_id & 0x10) || fus3.cn31xx.crip_128k)
family = "30";
else
family = "31";
/*
* This series of chips didn't follow the standard
* pass numbering.
*/
switch (chip_id & 0xf) {
case 0:
strcpy(pass, "1.0");
break;
case 2:
strcpy(pass, "1.1");
break;
default:
strcpy(pass, "X.X");
break;
}
break;
case 2: /* CN3010 or CN3005 */
family = "30";
/* A chip with half cache is an 05 */
if (fus3.cn30xx.crip_64k)
core_model = "05";
/*
* This series of chips didn't follow the standard
* pass numbering.
*/
switch (chip_id & 0xf) {
case 0:
strcpy(pass, "1.0");
break;
case 2:
strcpy(pass, "1.1");
break;
default:
strcpy(pass, "X.X");
break;
}
break;
case 3: /* CN58XX */
family = "58";
/* Special case. 4 core, no crypto */
if ((num_cores == 4) && fus_dat2.cn38xx.nocrypto)
core_model = "29";
/* Pass 1 uses different encodings for pass numbers */
if ((chip_id & 0xFF) < 0x8) {
switch (chip_id & 0x3) {
case 0:
strcpy(pass, "1.0");
break;
case 1:
strcpy(pass, "1.1");
break;
case 3:
strcpy(pass, "1.2");
break;
default:
strcpy(pass, "1.X");
break;
}
}
break;
case 4: /* CN57XX, CN56XX, CN55XX, CN54XX */
if (fus_dat2.cn56xx.raid_en) {
if (fus3.cn56xx.crip_1024k)
family = "55";
else
family = "57";
if (fus_dat2.cn56xx.nocrypto)
suffix = "SP";
else
suffix = "SSP";
} else {
if (fus_dat2.cn56xx.nocrypto)
suffix = "CP";
else {
suffix = "NSP";
if (fus_dat3.s.nozip)
suffix = "SCP";
}
if (fus3.cn56xx.crip_1024k)
family = "54";
else
family = "56";
}
break;
case 6: /* CN50XX */
family = "50";
break;
case 7: /* CN52XX */
if (fus3.cn52xx.crip_256k)
family = "51";
else
family = "52";
break;
default:
family = "XX";
core_model = "XX";
strcpy(pass, "X.X");
suffix = "XXX";
break;
}
clock_mhz = octeon_get_clock_rate() / 1000000;
if (family[0] != '3') {
/* Check for model in fuses, overrides normal decode */
/* This is _not_ valid for Octeon CN3XXX models */
fuse_data |= cvmx_fuse_read_byte(51);
fuse_data = fuse_data << 8;
fuse_data |= cvmx_fuse_read_byte(50);
fuse_data = fuse_data << 8;
fuse_data |= cvmx_fuse_read_byte(49);
fuse_data = fuse_data << 8;
fuse_data |= cvmx_fuse_read_byte(48);
if (fuse_data & 0x7ffff) {
int model = fuse_data & 0x3fff;
int suffix = (fuse_data >> 14) & 0x1f;
if (suffix && model) {
/*
* Have both number and suffix in
* fuses, so both
*/
sprintf(fuse_model, "%d%c",
model, 'A' + suffix - 1);
core_model = "";
family = fuse_model;
} else if (suffix && !model) {
/*
* Only have suffix, so add suffix to
* 'normal' model number.
*/
sprintf(fuse_model, "%s%c", core_model,
'A' + suffix - 1);
core_model = fuse_model;
} else {
/*
* Don't have suffix, so just use
* model from fuses.
*/
sprintf(fuse_model, "%d", model);
core_model = "";
family = fuse_model;
}
}
}
sprintf(buffer, "CN%s%sp%s-%d-%s",
family, core_model, pass, clock_mhz, suffix);
return buffer;
}
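/*
* Illustrative sketch, not part of the original file: printing the model
* string for the chip we are running on. read_c0_prid() is the standard
* MIPS accessor for the COP0 PRId register and is assumed to be visible
* through <asm/mipsregs.h>.
*/
static void example_print_model(void)
{
cvmx_dprintf("Running on %s\n",
octeon_model_get_string(read_c0_prid()));
}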

View File

@ -0,0 +1,84 @@
/*
* Octeon Bootbus flash setup
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2007, 2008 Cavium Networks
*/
#include <linux/kernel.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/map.h>
#include <linux/mtd/partitions.h>
#include <asm/octeon/octeon.h>
static struct map_info flash_map;
static struct mtd_info *mymtd;
#ifdef CONFIG_MTD_PARTITIONS
static int nr_parts;
static struct mtd_partition *parts;
static const char *part_probe_types[] = {
"cmdlinepart",
#ifdef CONFIG_MTD_REDBOOT_PARTS
"RedBoot",
#endif
NULL
};
#endif
/**
* Module/driver initialization.
*
* Returns Zero on success
*/
static int __init flash_init(void)
{
/*
* Read the bootbus region 0 setup to determine the base
* address of the flash.
*/
union cvmx_mio_boot_reg_cfgx region_cfg;
region_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(0));
if (region_cfg.s.en) {
/*
* The bootloader always takes the flash and sets its
* address so the entire flash fits below
* 0x1fc00000. This way the flash aliases to
* 0x1fc00000 for booting. Software can access the
* full flash at the true address, while core boot can
* access 4MB.
*/
/* Use this name so old part lines work */
flash_map.name = "phys_mapped_flash";
flash_map.phys = region_cfg.s.base << 16;
flash_map.size = 0x1fc00000 - flash_map.phys;
flash_map.bankwidth = 1;
flash_map.virt = ioremap(flash_map.phys, flash_map.size);
pr_notice("Bootbus flash: Setting flash for %luMB flash at "
"0x%08lx\n", flash_map.size >> 20, flash_map.phys);
simple_map_init(&flash_map);
mymtd = do_map_probe("cfi_probe", &flash_map);
if (mymtd) {
mymtd->owner = THIS_MODULE;
#ifdef CONFIG_MTD_PARTITIONS
nr_parts = parse_mtd_partitions(mymtd,
part_probe_types,
&parts, 0);
if (nr_parts > 0)
add_mtd_partitions(mymtd, parts, nr_parts);
else
add_mtd_device(mymtd);
#else
add_mtd_device(mymtd);
#endif
} else {
pr_err("Failed to register MTD device for flash\n");
}
}
return 0;
}
late_initcall(flash_init);

View File

@ -0,0 +1,497 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2004-2008 Cavium Networks
*/
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/hardirq.h>
#include <asm/octeon/octeon.h>
DEFINE_RWLOCK(octeon_irq_ciu0_rwlock);
DEFINE_RWLOCK(octeon_irq_ciu1_rwlock);
DEFINE_SPINLOCK(octeon_irq_msi_lock);
static void octeon_irq_core_ack(unsigned int irq)
{
unsigned int bit = irq - OCTEON_IRQ_SW0;
/*
* We don't need to disable IRQs to make these atomic since
* they are already disabled earlier in the low level
* interrupt code.
*/
clear_c0_status(0x100 << bit);
/* The two user interrupts must be cleared manually. */
if (bit < 2)
clear_c0_cause(0x100 << bit);
}
static void octeon_irq_core_eoi(unsigned int irq)
{
irq_desc_t *desc = irq_desc + irq;
unsigned int bit = irq - OCTEON_IRQ_SW0;
/*
* If an IRQ is being processed while we are disabling it, the
* handler will attempt to unmask the interrupt after it has
* been disabled.
*/
if (desc->status & IRQ_DISABLED)
return;
/* There is a race here. We should fix it. */
/*
* We don't need to disable IRQs to make these atomic since
* they are already disabled earlier in the low level
* interrupt code.
*/
set_c0_status(0x100 << bit);
}
static void octeon_irq_core_enable(unsigned int irq)
{
unsigned long flags;
unsigned int bit = irq - OCTEON_IRQ_SW0;
/*
* We need to disable interrupts to make sure our updates are
* atomic.
*/
local_irq_save(flags);
set_c0_status(0x100 << bit);
local_irq_restore(flags);
}
static void octeon_irq_core_disable_local(unsigned int irq)
{
unsigned long flags;
unsigned int bit = irq - OCTEON_IRQ_SW0;
/*
* We need to disable interrupts to make sure our updates are
* atomic.
*/
local_irq_save(flags);
clear_c0_status(0x100 << bit);
local_irq_restore(flags);
}
static void octeon_irq_core_disable(unsigned int irq)
{
#ifdef CONFIG_SMP
on_each_cpu((void (*)(void *)) octeon_irq_core_disable_local,
(void *) (long) irq, 1);
#else
octeon_irq_core_disable_local(irq);
#endif
}
static struct irq_chip octeon_irq_chip_core = {
.name = "Core",
.enable = octeon_irq_core_enable,
.disable = octeon_irq_core_disable,
.ack = octeon_irq_core_ack,
.eoi = octeon_irq_core_eoi,
};
static void octeon_irq_ciu0_ack(unsigned int irq)
{
/*
* In order to avoid any locking accessing the CIU, we
* acknowledge CIU interrupts by disabling all of them. This
* way we can use a per core register and avoid any out of
* core locking requirements. This has the side effect that
* CIU interrupts can't be processed recursively.
*
* We don't need to disable IRQs to make these atomic since
* they are already disabled earlier in the low level
* interrupt code.
*/
clear_c0_status(0x100 << 2);
}
static void octeon_irq_ciu0_eoi(unsigned int irq)
{
/*
* Enable all CIU interrupts again. We don't need to disable
* IRQs to make these atomic since they are already disabled
* earlier in the low level interrupt code.
*/
set_c0_status(0x100 << 2);
}
static void octeon_irq_ciu0_enable(unsigned int irq)
{
int coreid = cvmx_get_core_num();
unsigned long flags;
uint64_t en0;
int bit = irq - OCTEON_IRQ_WORKQ0; /* Bit 0-63 of EN0 */
/*
* A read lock is used here to make sure only one core is ever
* updating the CIU enable bits at a time. During an enable
* the cores don't interfere with each other. During a disable
* the write lock stops any enables that might cause a
* problem.
*/
read_lock_irqsave(&octeon_irq_ciu0_rwlock, flags);
en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
en0 |= 1ull << bit;
cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
read_unlock_irqrestore(&octeon_irq_ciu0_rwlock, flags);
}
static void octeon_irq_ciu0_disable(unsigned int irq)
{
int bit = irq - OCTEON_IRQ_WORKQ0; /* Bit 0-63 of EN0 */
unsigned long flags;
uint64_t en0;
#ifdef CONFIG_SMP
int cpu;
write_lock_irqsave(&octeon_irq_ciu0_rwlock, flags);
for_each_online_cpu(cpu) {
int coreid = cpu_logical_map(cpu);
en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
en0 &= ~(1ull << bit);
cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
}
/*
* We need to do a read after the last update to make sure all
* of them are done.
*/
cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
write_unlock_irqrestore(&octeon_irq_ciu0_rwlock, flags);
#else
int coreid = cvmx_get_core_num();
local_irq_save(flags);
en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
en0 &= ~(1ull << bit);
cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
local_irq_restore(flags);
#endif
}
#ifdef CONFIG_SMP
static void octeon_irq_ciu0_set_affinity(unsigned int irq, const struct cpumask *dest)
{
int cpu;
int bit = irq - OCTEON_IRQ_WORKQ0; /* Bit 0-63 of EN0 */
write_lock(&octeon_irq_ciu0_rwlock);
for_each_online_cpu(cpu) {
int coreid = cpu_logical_map(cpu);
uint64_t en0 =
cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
if (cpumask_test_cpu(cpu, dest))
en0 |= 1ull << bit;
else
en0 &= ~(1ull << bit);
cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
}
/*
* We need to do a read after the last update to make sure all
* of them are done.
*/
cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
write_unlock(&octeon_irq_ciu0_rwlock);
}
#endif
static struct irq_chip octeon_irq_chip_ciu0 = {
.name = "CIU0",
.enable = octeon_irq_ciu0_enable,
.disable = octeon_irq_ciu0_disable,
.ack = octeon_irq_ciu0_ack,
.eoi = octeon_irq_ciu0_eoi,
#ifdef CONFIG_SMP
.set_affinity = octeon_irq_ciu0_set_affinity,
#endif
};
static void octeon_irq_ciu1_ack(unsigned int irq)
{
/*
* In order to avoid any locking accessing the CIU, we
* acknowledge CIU interrupts by disabling all of them. This
* way we can use a per core register and avoid any out of
* core locking requirements. This has the side effect that
* CIU interrupts can't be processed recursively. We don't
* need to disable IRQs to make these atomic since they are
* already disabled earlier in the low level interrupt code.
*/
clear_c0_status(0x100 << 3);
}
static void octeon_irq_ciu1_eoi(unsigned int irq)
{
/*
* Enable all CIU interrupts again. We don't need to disable
* IRQs to make these atomic since they are already disabled
* earlier in the low level interrupt code.
*/
set_c0_status(0x100 << 3);
}
static void octeon_irq_ciu1_enable(unsigned int irq)
{
int coreid = cvmx_get_core_num();
unsigned long flags;
uint64_t en1;
int bit = irq - OCTEON_IRQ_WDOG0; /* Bit 0-63 of EN1 */
/*
* A read lock is used here to make sure only one core is ever
* updating the CIU enable bits at a time. During an enable
* the cores don't interfere with each other. During a disable
* the write lock stops any enables that might cause a
* problem.
*/
read_lock_irqsave(&octeon_irq_ciu1_rwlock, flags);
en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
en1 |= 1ull << bit;
cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
read_unlock_irqrestore(&octeon_irq_ciu1_rwlock, flags);
}
static void octeon_irq_ciu1_disable(unsigned int irq)
{
int bit = irq - OCTEON_IRQ_WDOG0; /* Bit 0-63 of EN1 */
unsigned long flags;
uint64_t en1;
#ifdef CONFIG_SMP
int cpu;
write_lock_irqsave(&octeon_irq_ciu1_rwlock, flags);
for_each_online_cpu(cpu) {
int coreid = cpu_logical_map(cpu);
en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
en1 &= ~(1ull << bit);
cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
}
/*
* We need to do a read after the last update to make sure all
* of them are done.
*/
cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
write_unlock_irqrestore(&octeon_irq_ciu1_rwlock, flags);
#else
int coreid = cvmx_get_core_num();
local_irq_save(flags);
en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
en1 &= ~(1ull << bit);
cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
local_irq_restore(flags);
#endif
}
#ifdef CONFIG_SMP
static void octeon_irq_ciu1_set_affinity(unsigned int irq, const struct cpumask *dest)
{
int cpu;
int bit = irq - OCTEON_IRQ_WDOG0; /* Bit 0-63 of EN1 */
write_lock(&octeon_irq_ciu1_rwlock);
for_each_online_cpu(cpu) {
int coreid = cpu_logical_map(cpu);
uint64_t en1 =
cvmx_read_csr(CVMX_CIU_INTX_EN1
(coreid * 2 + 1));
if (cpumask_test_cpu(cpu, dest))
en1 |= 1ull << bit;
else
en1 &= ~(1ull << bit);
cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
}
/*
* We need to do a read after the last update to make sure all
* of them are done.
*/
cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
write_unlock(&octeon_irq_ciu1_rwlock);
}
#endif
static struct irq_chip octeon_irq_chip_ciu1 = {
.name = "CIU1",
.enable = octeon_irq_ciu1_enable,
.disable = octeon_irq_ciu1_disable,
.ack = octeon_irq_ciu1_ack,
.eoi = octeon_irq_ciu1_eoi,
#ifdef CONFIG_SMP
.set_affinity = octeon_irq_ciu1_set_affinity,
#endif
};
#ifdef CONFIG_PCI_MSI
static void octeon_irq_msi_ack(unsigned int irq)
{
if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
/* These chips have PCI */
cvmx_write_csr(CVMX_NPI_NPI_MSI_RCV,
1ull << (irq - OCTEON_IRQ_MSI_BIT0));
} else {
/*
* These chips have PCIe. Thankfully the ACK doesn't
* need any locking.
*/
cvmx_write_csr(CVMX_PEXP_NPEI_MSI_RCV0,
1ull << (irq - OCTEON_IRQ_MSI_BIT0));
}
}
static void octeon_irq_msi_eoi(unsigned int irq)
{
/* Nothing needed */
}
static void octeon_irq_msi_enable(unsigned int irq)
{
if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
/*
* Octeon PCI doesn't have the ability to mask/unmask
* MSI interrupts individually. Instead of
* masking/unmasking them in groups of 16, we simply
* assume MSI devices are well behaved. MSI
* interrupts are always enabled and the ACK is assumed
* to be enough.
*/
} else {
/* These chips have PCIe. Note that we only support
* the first 64 MSI interrupts. Unfortunately all the
* MSI enables are in the same register. We use
* MSI0's lock to control access to them all.
*/
uint64_t en;
unsigned long flags;
spin_lock_irqsave(&octeon_irq_msi_lock, flags);
en = cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
en |= 1ull << (irq - OCTEON_IRQ_MSI_BIT0);
cvmx_write_csr(CVMX_PEXP_NPEI_MSI_ENB0, en);
cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
spin_unlock_irqrestore(&octeon_irq_msi_lock, flags);
}
}
static void octeon_irq_msi_disable(unsigned int irq)
{
if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
/* See comment in enable */
} else {
/*
* These chips have PCIe. Note that we only support
* the first 64 MSI interrupts. Unfortunately all the
* MSI enables are in the same register. We use
* MSI0's lock to control access to them all.
*/
uint64_t en;
unsigned long flags;
spin_lock_irqsave(&octeon_irq_msi_lock, flags);
en = cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
en &= ~(1ull << (irq - OCTEON_IRQ_MSI_BIT0));
cvmx_write_csr(CVMX_PEXP_NPEI_MSI_ENB0, en);
cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
spin_unlock_irqrestore(&octeon_irq_msi_lock, flags);
}
}
static struct irq_chip octeon_irq_chip_msi = {
.name = "MSI",
.enable = octeon_irq_msi_enable,
.disable = octeon_irq_msi_disable,
.ack = octeon_irq_msi_ack,
.eoi = octeon_irq_msi_eoi,
};
#endif
void __init arch_init_irq(void)
{
int irq;
#ifdef CONFIG_SMP
/* Set the default affinity to the boot cpu. */
cpumask_clear(irq_default_affinity);
cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
#endif
if (NR_IRQS < OCTEON_IRQ_LAST)
pr_err("octeon_irq_init: NR_IRQS is set too low\n");
/* 0 - 15 reserved for i8259 master and slave controller. */
/* 17 - 23 MIPS internal */
for (irq = OCTEON_IRQ_SW0; irq <= OCTEON_IRQ_TIMER; irq++) {
set_irq_chip_and_handler(irq, &octeon_irq_chip_core,
handle_percpu_irq);
}
/* 24 - 87 CIU_INT_SUM0 */
for (irq = OCTEON_IRQ_WORKQ0; irq <= OCTEON_IRQ_BOOTDMA; irq++) {
set_irq_chip_and_handler(irq, &octeon_irq_chip_ciu0,
handle_percpu_irq);
}
/* 88 - 151 CIU_INT_SUM1 */
for (irq = OCTEON_IRQ_WDOG0; irq <= OCTEON_IRQ_RESERVED151; irq++) {
set_irq_chip_and_handler(irq, &octeon_irq_chip_ciu1,
handle_percpu_irq);
}
#ifdef CONFIG_PCI_MSI
/* 152 - 215 PCI/PCIe MSI interrupts */
for (irq = OCTEON_IRQ_MSI_BIT0; irq <= OCTEON_IRQ_MSI_BIT63; irq++) {
set_irq_chip_and_handler(irq, &octeon_irq_chip_msi,
handle_percpu_irq);
}
#endif
set_c0_status(0x300 << 2);
}
asmlinkage void plat_irq_dispatch(void)
{
const unsigned long core_id = cvmx_get_core_num();
const uint64_t ciu_sum0_address = CVMX_CIU_INTX_SUM0(core_id * 2);
const uint64_t ciu_en0_address = CVMX_CIU_INTX_EN0(core_id * 2);
const uint64_t ciu_sum1_address = CVMX_CIU_INT_SUM1;
const uint64_t ciu_en1_address = CVMX_CIU_INTX_EN1(core_id * 2 + 1);
unsigned long cop0_cause;
unsigned long cop0_status;
uint64_t ciu_en;
uint64_t ciu_sum;
while (1) {
cop0_cause = read_c0_cause();
cop0_status = read_c0_status();
cop0_cause &= cop0_status;
cop0_cause &= ST0_IM;
if (unlikely(cop0_cause & STATUSF_IP2)) {
ciu_sum = cvmx_read_csr(ciu_sum0_address);
ciu_en = cvmx_read_csr(ciu_en0_address);
ciu_sum &= ciu_en;
if (likely(ciu_sum))
do_IRQ(fls64(ciu_sum) + OCTEON_IRQ_WORKQ0 - 1);
else
spurious_interrupt();
} else if (unlikely(cop0_cause & STATUSF_IP3)) {
ciu_sum = cvmx_read_csr(ciu_sum1_address);
ciu_en = cvmx_read_csr(ciu_en1_address);
ciu_sum &= ciu_en;
if (likely(ciu_sum))
do_IRQ(fls64(ciu_sum) + OCTEON_IRQ_WDOG0 - 1);
else
spurious_interrupt();
} else if (likely(cop0_cause)) {
do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE);
} else {
break;
}
}
}
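/*
* Illustrative sketch, not part of the original file: a driver consumes
* one of the CIU lines wired up above through the normal kernel API.
* The choice of OCTEON_IRQ_WORKQ0 and the empty handler are placeholders.
*/
static irqreturn_t example_handler(int irq, void *dev_id)
{
return IRQ_HANDLED;
}
static int example_request_workq0(void *dev_id)
{
return request_irq(OCTEON_IRQ_WORKQ0, example_handler, 0,
"example", dev_id);
}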

View File

@ -0,0 +1,521 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Unified implementation of memcpy, memmove and the __copy_user backend.
*
* Copyright (C) 1998, 99, 2000, 01, 2002 Ralf Baechle (ralf@gnu.org)
* Copyright (C) 1999, 2000, 01, 2002 Silicon Graphics, Inc.
* Copyright (C) 2002 Broadcom, Inc.
* memcpy/copy_user author: Mark Vandevoorde
*
* Mnemonic names for arguments to memcpy/__copy_user
*/
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/regdef.h>
#define dst a0
#define src a1
#define len a2
/*
* Spec
*
* memcpy copies len bytes from src to dst and sets v0 to dst.
* It assumes that
* - src and dst don't overlap
* - src is readable
* - dst is writable
* memcpy uses the standard calling convention
*
* __copy_user copies up to len bytes from src to dst and sets a2 (len) to
* the number of uncopied bytes due to an exception caused by a read or write.
* __copy_user assumes that src and dst don't overlap, and that the call is
* implementing one of the following:
* copy_to_user
* - src is readable (no exceptions when reading src)
* copy_from_user
* - dst is writable (no exceptions when writing dst)
* __copy_user uses a non-standard calling convention; see
* arch/mips/include/asm/uaccess.h
*
* When an exception happens on a load, the handler must
* ensure that all of the destination buffer is overwritten to prevent
* leaking information to user mode programs.
*/
/*
* Implementation
*/
/*
* The exception handler for loads requires that:
* 1- AT contains the address of the byte just past the end of the source
* of the copy,
* 2- src_entry <= src < AT, and
* 3- (dst - src) == (dst_entry - src_entry),
* The _entry suffix denotes values when __copy_user was called.
*
* (1) is set up by uaccess.h and maintained by not writing AT in copy_user
* (2) is met by incrementing src by the number of bytes copied
* (3) is met by not doing loads between a pair of increments of dst and src
*
* The exception handlers for stores adjust len (if necessary) and return.
* These handlers do not need to overwrite any data.
*
* For __rmemcpy and memmove an exception is always a kernel bug, therefore
* they're not protected.
*/
#define EXC(inst_reg,addr,handler) \
9: inst_reg, addr; \
.section __ex_table,"a"; \
PTR 9b, handler; \
.previous
/*
* Only on the 64-bit kernel can we make use of 64-bit registers.
*/
#ifdef CONFIG_64BIT
#define USE_DOUBLE
#endif
#ifdef USE_DOUBLE
#define LOAD ld
#define LOADL ldl
#define LOADR ldr
#define STOREL sdl
#define STORER sdr
#define STORE sd
#define ADD daddu
#define SUB dsubu
#define SRL dsrl
#define SRA dsra
#define SLL dsll
#define SLLV dsllv
#define SRLV dsrlv
#define NBYTES 8
#define LOG_NBYTES 3
/*
* As we are sharing the code base with the mips32 tree (which uses the
* o32 ABI register definitions), we need to redefine the register
* definitions from the n64 ABI register naming to the o32 ABI register
* naming.
*/
#undef t0
#undef t1
#undef t2
#undef t3
#define t0 $8
#define t1 $9
#define t2 $10
#define t3 $11
#define t4 $12
#define t5 $13
#define t6 $14
#define t7 $15
#else
#define LOAD lw
#define LOADL lwl
#define LOADR lwr
#define STOREL swl
#define STORER swr
#define STORE sw
#define ADD addu
#define SUB subu
#define SRL srl
#define SLL sll
#define SRA sra
#define SLLV sllv
#define SRLV srlv
#define NBYTES 4
#define LOG_NBYTES 2
#endif /* USE_DOUBLE */
#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define LDFIRST LOADR
#define LDREST LOADL
#define STFIRST STORER
#define STREST STOREL
#define SHIFT_DISCARD SLLV
#else
#define LDFIRST LOADL
#define LDREST LOADR
#define STFIRST STOREL
#define STREST STORER
#define SHIFT_DISCARD SRLV
#endif
#define FIRST(unit) ((unit)*NBYTES)
#define REST(unit) (FIRST(unit)+NBYTES-1)
#define UNIT(unit) FIRST(unit)
#define ADDRMASK (NBYTES-1)
.text
.set noreorder
.set noat
/*
* A combined memcpy/__copy_user
* __copy_user sets len to 0 for success; else to an upper bound of
* the number of uncopied bytes.
* memcpy sets v0 to dst.
*/
.align 5
LEAF(memcpy) /* a0=dst a1=src a2=len */
move v0, dst /* return value */
__memcpy:
FEXPORT(__copy_user)
/*
* Note: dst & src may be unaligned, len may be 0
* Temps
*/
#
# Octeon doesn't care if the destination is unaligned. The hardware
# can fix it faster than we can special case the assembly.
#
pref 0, 0(src)
sltu t0, len, NBYTES # Check if < 1 word
bnez t0, copy_bytes_checklen
and t0, src, ADDRMASK # Check if src unaligned
bnez t0, src_unaligned
sltu t0, len, 4*NBYTES # Check if < 4 words
bnez t0, less_than_4units
sltu t0, len, 8*NBYTES # Check if < 8 words
bnez t0, less_than_8units
sltu t0, len, 16*NBYTES # Check if < 16 words
bnez t0, cleanup_both_aligned
sltu t0, len, 128+1 # Check if len < 129
bnez t0, 1f # Skip prefetch if len is too short
sltu t0, len, 256+1 # Check if len < 257
bnez t0, 1f # Skip prefetch if len is too short
pref 0, 128(src) # We must not prefetch invalid addresses
#
# This is where we loop if there is more than 128 bytes left
2: pref 0, 256(src) # We must not prefetch invalid addresses
#
# This is where we loop if we can't prefetch anymore
1:
EXC( LOAD t0, UNIT(0)(src), l_exc)
EXC( LOAD t1, UNIT(1)(src), l_exc_copy)
EXC( LOAD t2, UNIT(2)(src), l_exc_copy)
EXC( LOAD t3, UNIT(3)(src), l_exc_copy)
SUB len, len, 16*NBYTES
EXC( STORE t0, UNIT(0)(dst), s_exc_p16u)
EXC( STORE t1, UNIT(1)(dst), s_exc_p15u)
EXC( STORE t2, UNIT(2)(dst), s_exc_p14u)
EXC( STORE t3, UNIT(3)(dst), s_exc_p13u)
EXC( LOAD t0, UNIT(4)(src), l_exc_copy)
EXC( LOAD t1, UNIT(5)(src), l_exc_copy)
EXC( LOAD t2, UNIT(6)(src), l_exc_copy)
EXC( LOAD t3, UNIT(7)(src), l_exc_copy)
EXC( STORE t0, UNIT(4)(dst), s_exc_p12u)
EXC( STORE t1, UNIT(5)(dst), s_exc_p11u)
EXC( STORE t2, UNIT(6)(dst), s_exc_p10u)
ADD src, src, 16*NBYTES
EXC( STORE t3, UNIT(7)(dst), s_exc_p9u)
ADD dst, dst, 16*NBYTES
EXC( LOAD t0, UNIT(-8)(src), l_exc_copy)
EXC( LOAD t1, UNIT(-7)(src), l_exc_copy)
EXC( LOAD t2, UNIT(-6)(src), l_exc_copy)
EXC( LOAD t3, UNIT(-5)(src), l_exc_copy)
EXC( STORE t0, UNIT(-8)(dst), s_exc_p8u)
EXC( STORE t1, UNIT(-7)(dst), s_exc_p7u)
EXC( STORE t2, UNIT(-6)(dst), s_exc_p6u)
EXC( STORE t3, UNIT(-5)(dst), s_exc_p5u)
EXC( LOAD t0, UNIT(-4)(src), l_exc_copy)
EXC( LOAD t1, UNIT(-3)(src), l_exc_copy)
EXC( LOAD t2, UNIT(-2)(src), l_exc_copy)
EXC( LOAD t3, UNIT(-1)(src), l_exc_copy)
EXC( STORE t0, UNIT(-4)(dst), s_exc_p4u)
EXC( STORE t1, UNIT(-3)(dst), s_exc_p3u)
EXC( STORE t2, UNIT(-2)(dst), s_exc_p2u)
EXC( STORE t3, UNIT(-1)(dst), s_exc_p1u)
sltu t0, len, 256+1 # See if we can prefetch more
beqz t0, 2b
sltu t0, len, 128 # See if we can loop more times
beqz t0, 1b
nop
#
# Jump here if there are less than 16*NBYTES left.
#
cleanup_both_aligned:
beqz len, done
sltu t0, len, 8*NBYTES
bnez t0, less_than_8units
nop
EXC( LOAD t0, UNIT(0)(src), l_exc)
EXC( LOAD t1, UNIT(1)(src), l_exc_copy)
EXC( LOAD t2, UNIT(2)(src), l_exc_copy)
EXC( LOAD t3, UNIT(3)(src), l_exc_copy)
SUB len, len, 8*NBYTES
EXC( STORE t0, UNIT(0)(dst), s_exc_p8u)
EXC( STORE t1, UNIT(1)(dst), s_exc_p7u)
EXC( STORE t2, UNIT(2)(dst), s_exc_p6u)
EXC( STORE t3, UNIT(3)(dst), s_exc_p5u)
EXC( LOAD t0, UNIT(4)(src), l_exc_copy)
EXC( LOAD t1, UNIT(5)(src), l_exc_copy)
EXC( LOAD t2, UNIT(6)(src), l_exc_copy)
EXC( LOAD t3, UNIT(7)(src), l_exc_copy)
EXC( STORE t0, UNIT(4)(dst), s_exc_p4u)
EXC( STORE t1, UNIT(5)(dst), s_exc_p3u)
EXC( STORE t2, UNIT(6)(dst), s_exc_p2u)
EXC( STORE t3, UNIT(7)(dst), s_exc_p1u)
ADD src, src, 8*NBYTES
beqz len, done
ADD dst, dst, 8*NBYTES
#
# Jump here if there are less than 8*NBYTES left.
#
less_than_8units:
sltu t0, len, 4*NBYTES
bnez t0, less_than_4units
nop
EXC( LOAD t0, UNIT(0)(src), l_exc)
EXC( LOAD t1, UNIT(1)(src), l_exc_copy)
EXC( LOAD t2, UNIT(2)(src), l_exc_copy)
EXC( LOAD t3, UNIT(3)(src), l_exc_copy)
SUB len, len, 4*NBYTES
EXC( STORE t0, UNIT(0)(dst), s_exc_p4u)
EXC( STORE t1, UNIT(1)(dst), s_exc_p3u)
EXC( STORE t2, UNIT(2)(dst), s_exc_p2u)
EXC( STORE t3, UNIT(3)(dst), s_exc_p1u)
ADD src, src, 4*NBYTES
beqz len, done
ADD dst, dst, 4*NBYTES
#
# Jump here if there are less than 4*NBYTES left. This means
# we may need to copy up to 3 NBYTES words.
#
less_than_4units:
sltu t0, len, 1*NBYTES
bnez t0, copy_bytes_checklen
nop
#
# 1) Copy NBYTES, then check length again
#
EXC( LOAD t0, 0(src), l_exc)
SUB len, len, NBYTES
sltu t1, len, 8
EXC( STORE t0, 0(dst), s_exc_p1u)
ADD src, src, NBYTES
bnez t1, copy_bytes_checklen
ADD dst, dst, NBYTES
#
# 2) Copy NBYTES, then check length again
#
EXC( LOAD t0, 0(src), l_exc)
SUB len, len, NBYTES
sltu t1, len, 8
EXC( STORE t0, 0(dst), s_exc_p1u)
ADD src, src, NBYTES
bnez t1, copy_bytes_checklen
ADD dst, dst, NBYTES
#
# 3) Copy NBYTES, then check length again
#
EXC( LOAD t0, 0(src), l_exc)
SUB len, len, NBYTES
ADD src, src, NBYTES
ADD dst, dst, NBYTES
b copy_bytes_checklen
EXC( STORE t0, -8(dst), s_exc_p1u)
src_unaligned:
#define rem t8
SRL t0, len, LOG_NBYTES+2 # +2 for 4 units/iter
beqz t0, cleanup_src_unaligned
and rem, len, (4*NBYTES-1) # rem = len % 4*NBYTES
1:
/*
* Avoid consecutive LD*'s to the same register since some mips
* implementations can't issue them in the same cycle.
* It's OK to load FIRST(N+1) before REST(N) because the two addresses
* are to the same unit (unless src is aligned, but it's not).
*/
EXC( LDFIRST t0, FIRST(0)(src), l_exc)
EXC( LDFIRST t1, FIRST(1)(src), l_exc_copy)
SUB len, len, 4*NBYTES
EXC( LDREST t0, REST(0)(src), l_exc_copy)
EXC( LDREST t1, REST(1)(src), l_exc_copy)
EXC( LDFIRST t2, FIRST(2)(src), l_exc_copy)
EXC( LDFIRST t3, FIRST(3)(src), l_exc_copy)
EXC( LDREST t2, REST(2)(src), l_exc_copy)
EXC( LDREST t3, REST(3)(src), l_exc_copy)
ADD src, src, 4*NBYTES
EXC( STORE t0, UNIT(0)(dst), s_exc_p4u)
EXC( STORE t1, UNIT(1)(dst), s_exc_p3u)
EXC( STORE t2, UNIT(2)(dst), s_exc_p2u)
EXC( STORE t3, UNIT(3)(dst), s_exc_p1u)
bne len, rem, 1b
ADD dst, dst, 4*NBYTES
cleanup_src_unaligned:
beqz len, done
and rem, len, NBYTES-1 # rem = len % NBYTES
beq rem, len, copy_bytes
nop
1:
EXC( LDFIRST t0, FIRST(0)(src), l_exc)
EXC( LDREST t0, REST(0)(src), l_exc_copy)
SUB len, len, NBYTES
EXC( STORE t0, 0(dst), s_exc_p1u)
ADD src, src, NBYTES
bne len, rem, 1b
ADD dst, dst, NBYTES
copy_bytes_checklen:
beqz len, done
nop
copy_bytes:
/* 0 < len < NBYTES */
#define COPY_BYTE(N) \
EXC( lb t0, N(src), l_exc); \
SUB len, len, 1; \
beqz len, done; \
EXC( sb t0, N(dst), s_exc_p1)
COPY_BYTE(0)
COPY_BYTE(1)
#ifdef USE_DOUBLE
COPY_BYTE(2)
COPY_BYTE(3)
COPY_BYTE(4)
COPY_BYTE(5)
#endif
EXC( lb t0, NBYTES-2(src), l_exc)
SUB len, len, 1
jr ra
EXC( sb t0, NBYTES-2(dst), s_exc_p1)
done:
jr ra
nop
END(memcpy)
l_exc_copy:
/*
* Copy bytes from src until faulting load address (or until a
* lb faults)
*
* When reached by a faulting LDFIRST/LDREST, THREAD_BUADDR($28)
* may be more than a byte beyond the last address.
* Hence, the lb below may get an exception.
*
* Assumes src < THREAD_BUADDR($28)
*/
LOAD t0, TI_TASK($28)
nop
LOAD t0, THREAD_BUADDR(t0)
1:
EXC( lb t1, 0(src), l_exc)
ADD src, src, 1
sb t1, 0(dst) # can't fault -- we're copy_from_user
bne src, t0, 1b
ADD dst, dst, 1
l_exc:
LOAD t0, TI_TASK($28)
nop
LOAD t0, THREAD_BUADDR(t0) # t0 is just past last good address
nop
SUB len, AT, t0 # len number of uncopied bytes
/*
* Here's where we rely on src and dst being incremented in tandem,
* See (3) above.
* dst += (fault addr - src) to put dst at first byte to clear
*/
ADD dst, t0 # compute start address in a1
SUB dst, src
/*
* Clear len bytes starting at dst. Can't call __bzero because it
* might modify len. An inefficient loop for these rare times...
*/
beqz len, done
SUB src, len, 1
1: sb zero, 0(dst)
ADD dst, dst, 1
bnez src, 1b
SUB src, src, 1
jr ra
nop
#define SEXC(n) \
s_exc_p ## n ## u: \
jr ra; \
ADD len, len, n*NBYTES
SEXC(16)
SEXC(15)
SEXC(14)
SEXC(13)
SEXC(12)
SEXC(11)
SEXC(10)
SEXC(9)
SEXC(8)
SEXC(7)
SEXC(6)
SEXC(5)
SEXC(4)
SEXC(3)
SEXC(2)
SEXC(1)
s_exc_p1:
jr ra
ADD len, len, 1
s_exc:
jr ra
nop
.align 5
LEAF(memmove)
ADD t0, a0, a2
ADD t1, a1, a2
sltu t0, a1, t0 # dst + len <= src -> memcpy
sltu t1, a0, t1 # dst >= src + len -> memcpy
and t0, t1
beqz t0, __memcpy
move v0, a0 /* return value */
beqz a2, r_out
END(memmove)
/* fall through to __rmemcpy */
LEAF(__rmemcpy) /* a0=dst a1=src a2=len */
sltu t0, a1, a0
beqz t0, r_end_bytes_up # src >= dst
nop
ADD a0, a2 # dst = dst + len
ADD a1, a2 # src = src + len
r_end_bytes:
lb t0, -1(a1)
SUB a2, a2, 0x1
sb t0, -1(a0)
SUB a1, a1, 0x1
bnez a2, r_end_bytes
SUB a0, a0, 0x1
r_out:
jr ra
move a2, zero
r_end_bytes_up:
lb t0, (a1)
SUB a2, a2, 0x1
sb t0, (a0)
ADD a1, a1, 0x1
bnez a2, r_end_bytes_up
ADD a0, a0, 0x1
jr ra
move a2, zero
END(__rmemcpy)
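/*
* Illustrative sketch, not part of the original file: the __copy_user
* contract documented in the Spec comment above, seen from C. A caller
* such as copy_from_user() gets back the number of bytes that could not
* be copied, which is exactly how len is adjusted by the store exception
* handlers. The struct-free wrapper and <linux/uaccess.h> include are
* assumptions for the sake of a self-contained example.
*/
#include <linux/uaccess.h>
static long example_copy_in(void *dst, const void __user *src,
unsigned long len)
{
unsigned long not_copied = copy_from_user(dst, src, len);
return not_copied ? -EFAULT : (long)len;
}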

View File

@ -0,0 +1,136 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2004-2007 Cavium Networks
*/
#include <linux/console.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <linux/serial_reg.h>
#include <linux/tty.h>
#include <asm/time.h>
#include <asm/octeon/octeon.h>
#ifdef CONFIG_GDB_CONSOLE
#define DEBUG_UART 0
#else
#define DEBUG_UART 1
#endif
unsigned int octeon_serial_in(struct uart_port *up, int offset)
{
int rv = cvmx_read_csr((uint64_t)(up->membase + (offset << 3)));
if (offset == UART_IIR && (rv & 0xf) == 7) {
/* Busy interrupt, read the USR (39) and try again. */
cvmx_read_csr((uint64_t)(up->membase + (39 << 3)));
rv = cvmx_read_csr((uint64_t)(up->membase + (offset << 3)));
}
return rv;
}
void octeon_serial_out(struct uart_port *up, int offset, int value)
{
/*
* If bits 6 or 7 of the OCTEON UART's LCR are set, it quits
* working.
*/
if (offset == UART_LCR)
value &= 0x9f;
cvmx_write_csr((uint64_t)(up->membase + (offset << 3)), (u8)value);
}
/*
* Allocated in .bss, so it is all zeroed.
*/
#define OCTEON_MAX_UARTS 3
static struct plat_serial8250_port octeon_uart8250_data[OCTEON_MAX_UARTS + 1];
static struct platform_device octeon_uart8250_device = {
.name = "serial8250",
.id = PLAT8250_DEV_PLATFORM,
.dev = {
.platform_data = octeon_uart8250_data,
},
};
static void __init octeon_uart_set_common(struct plat_serial8250_port *p)
{
p->flags = ASYNC_SKIP_TEST | UPF_SHARE_IRQ | UPF_FIXED_TYPE;
p->type = PORT_OCTEON;
p->iotype = UPIO_MEM;
p->regshift = 3; /* I/O addresses are every 8 bytes */
p->uartclk = mips_hpt_frequency;
p->serial_in = octeon_serial_in;
p->serial_out = octeon_serial_out;
}
static int __init octeon_serial_init(void)
{
int enable_uart0;
int enable_uart1;
int enable_uart2;
struct plat_serial8250_port *p;
#ifdef CONFIG_CAVIUM_OCTEON_2ND_KERNEL
/*
* If we are configured to run as the second of two kernels,
* disable uart0 and enable uart1. Uart0 is owned by the first
* kernel
*/
enable_uart0 = 0;
enable_uart1 = 1;
#else
/*
* We are configured for the first kernel. We'll enable uart0
* if the bootloader told us to use 0, otherwise we will enable
* uart1.
*/
enable_uart0 = (octeon_get_boot_uart() == 0);
enable_uart1 = (octeon_get_boot_uart() == 1);
#ifdef CONFIG_KGDB
enable_uart1 = 1;
#endif
#endif
/* Right now CN52XX is the only chip with a third uart */
enable_uart2 = OCTEON_IS_MODEL(OCTEON_CN52XX);
p = octeon_uart8250_data;
if (enable_uart0) {
/* Add a ttyS device for hardware uart 0 */
octeon_uart_set_common(p);
p->membase = (void *) CVMX_MIO_UARTX_RBR(0);
p->mapbase = CVMX_MIO_UARTX_RBR(0) & ((1ull << 49) - 1);
p->irq = OCTEON_IRQ_UART0;
p++;
}
if (enable_uart1) {
/* Add a ttyS device for hardware uart 1 */
octeon_uart_set_common(p);
p->membase = (void *) CVMX_MIO_UARTX_RBR(1);
p->mapbase = CVMX_MIO_UARTX_RBR(1) & ((1ull << 49) - 1);
p->irq = OCTEON_IRQ_UART1;
p++;
}
if (enable_uart2) {
/* Add a ttyS device for hardware uart 2 */
octeon_uart_set_common(p);
p->membase = (void *) CVMX_MIO_UART2_RBR;
p->mapbase = CVMX_MIO_UART2_RBR & ((1ull << 49) - 1);
p->irq = OCTEON_IRQ_UART2;
p++;
}
BUG_ON(p > &octeon_uart8250_data[OCTEON_MAX_UARTS]);
return platform_device_register(&octeon_uart8250_device);
}
device_initcall(octeon_serial_init);

View File

@ -0,0 +1,929 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2004-2007 Cavium Networks
* Copyright (C) 2008 Wind River Systems
*/
#include <linux/init.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/serial.h>
#include <linux/types.h>
#include <linux/string.h> /* for memset */
#include <linux/serial.h>
#include <linux/tty.h>
#include <linux/time.h>
#include <linux/platform_device.h>
#include <linux/serial_core.h>
#include <linux/serial_8250.h>
#include <linux/string.h>
#include <asm/processor.h>
#include <asm/reboot.h>
#include <asm/smp-ops.h>
#include <asm/system.h>
#include <asm/irq_cpu.h>
#include <asm/mipsregs.h>
#include <asm/bootinfo.h>
#include <asm/sections.h>
#include <asm/time.h>
#include <asm/octeon/octeon.h>
#ifdef CONFIG_CAVIUM_DECODE_RSL
extern void cvmx_interrupt_rsl_decode(void);
extern int __cvmx_interrupt_ecc_report_single_bit_errors;
extern void cvmx_interrupt_rsl_enable(void);
#endif
extern struct plat_smp_ops octeon_smp_ops;
#ifdef CONFIG_PCI
extern void pci_console_init(const char *arg);
#endif
#ifdef CONFIG_CAVIUM_RESERVE32
extern uint64_t octeon_reserve32_memory;
#endif
static unsigned long long MAX_MEMORY = 512ull << 20;
struct octeon_boot_descriptor *octeon_boot_desc_ptr;
struct cvmx_bootinfo *octeon_bootinfo;
EXPORT_SYMBOL(octeon_bootinfo);
#ifdef CONFIG_CAVIUM_RESERVE32
uint64_t octeon_reserve32_memory;
EXPORT_SYMBOL(octeon_reserve32_memory);
#endif
static int octeon_uart;
extern asmlinkage void handle_int(void);
extern asmlinkage void plat_irq_dispatch(void);
/**
 * Return non-zero if we are currently running in the Octeon simulator
 *
 * Returns non-zero when running under the simulator, zero otherwise
*/
int octeon_is_simulation(void)
{
return octeon_bootinfo->board_type == CVMX_BOARD_TYPE_SIM;
}
EXPORT_SYMBOL(octeon_is_simulation);
/**
* Return true if Octeon is in PCI Host mode. This means
* Linux can control the PCI bus.
*
 * Returns non-zero if Octeon is in host mode.
*/
int octeon_is_pci_host(void)
{
#ifdef CONFIG_PCI
return octeon_bootinfo->config_flags & CVMX_BOOTINFO_CFG_FLAG_PCI_HOST;
#else
return 0;
#endif
}
/**
* Get the clock rate of Octeon
*
* Returns Clock rate in HZ
*/
uint64_t octeon_get_clock_rate(void)
{
if (octeon_is_simulation())
octeon_bootinfo->eclock_hz = 6000000;
return octeon_bootinfo->eclock_hz;
}
EXPORT_SYMBOL(octeon_get_clock_rate);
/**
* Write to the LCD display connected to the bootbus. This display
* exists on most Cavium evaluation boards. If it doesn't exist, then
* this function doesn't do anything.
*
* @s: String to write
*/
void octeon_write_lcd(const char *s)
{
if (octeon_bootinfo->led_display_base_addr) {
void __iomem *lcd_address =
ioremap_nocache(octeon_bootinfo->led_display_base_addr,
8);
int i;
for (i = 0; i < 8; i++, s++) {
if (*s)
iowrite8(*s, lcd_address + i);
else
iowrite8(' ', lcd_address + i);
}
iounmap(lcd_address);
}
}
/**
* Return the console uart passed by the bootloader
*
* Returns uart (0 or 1)
*/
int octeon_get_boot_uart(void)
{
int uart;
#ifdef CONFIG_CAVIUM_OCTEON_2ND_KERNEL
uart = 1;
#else
uart = (octeon_boot_desc_ptr->flags & OCTEON_BL_FLAG_CONSOLE_UART1) ?
1 : 0;
#endif
return uart;
}
/**
* Get the coremask Linux was booted on.
*
* Returns Core mask
*/
int octeon_get_boot_coremask(void)
{
return octeon_boot_desc_ptr->core_mask;
}
/**
* Check the hardware BIST results for a CPU
*/
void octeon_check_cpu_bist(void)
{
const int coreid = cvmx_get_core_num();
unsigned long long mask;
unsigned long long bist_val;
/* Check BIST results for COP0 registers */
mask = 0x1f00000000ull;
bist_val = read_octeon_c0_icacheerr();
if (bist_val & mask)
pr_err("Core%d BIST Failure: CacheErr(icache) = 0x%llx\n",
coreid, bist_val);
bist_val = read_octeon_c0_dcacheerr();
if (bist_val & 1)
pr_err("Core%d L1 Dcache parity error: "
"CacheErr(dcache) = 0x%llx\n",
coreid, bist_val);
mask = 0xfc00000000000000ull;
bist_val = read_c0_cvmmemctl();
if (bist_val & mask)
pr_err("Core%d BIST Failure: COP0_CVM_MEM_CTL = 0x%llx\n",
coreid, bist_val);
write_octeon_c0_dcacheerr(0);
}
#ifdef CONFIG_CAVIUM_RESERVE32_USE_WIRED_TLB
/**
* Called on every core to setup the wired tlb entry needed
* if CONFIG_CAVIUM_RESERVE32_USE_WIRED_TLB is set.
*
*/
static void octeon_hal_setup_per_cpu_reserved32(void *unused)
{
/*
* The config has selected to wire the reserve32 memory for all
* userspace applications. We need to put a wired TLB entry in for each
* 512MB of reserve32 memory. We only handle double 256MB pages here,
	 * so reserve32 must be a multiple of 512MB.
*/
uint32_t size = CONFIG_CAVIUM_RESERVE32;
uint32_t entrylo0 =
0x7 | ((octeon_reserve32_memory & ((1ul << 40) - 1)) >> 6);
uint32_t entrylo1 = entrylo0 + (256 << 14);
uint32_t entryhi = (0x80000000UL - (CONFIG_CAVIUM_RESERVE32 << 20));
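	/*
	 * EntryLo flags 0x7 set the Global, Valid and Dirty bits; the PFN
	 * field starts at bit 6, so a physical address shifted right by 6
	 * lands in place. Each 256MB half of a pair advances EntryLo by
	 * 256MB >> 6 (256 << 14) and each 512MB pair advances EntryHi by
	 * 512MB.
	 */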
while (size >= 512) {
#if 0
pr_info("CPU%d: Adding double wired TLB entry for 0x%lx\n",
smp_processor_id(), entryhi);
#endif
add_wired_entry(entrylo0, entrylo1, entryhi, PM_256M);
entrylo0 += 512 << 14;
entrylo1 += 512 << 14;
entryhi += 512 << 20;
size -= 512;
}
}
#endif /* CONFIG_CAVIUM_RESERVE32_USE_WIRED_TLB */
/**
 * Called to release the named block which was used to make sure
* that nobody used the memory for something else during
* init. Now we'll free it so userspace apps can use this
* memory region with bootmem_alloc.
*
* This function is called only once from prom_free_prom_memory().
*/
void octeon_hal_setup_reserved32(void)
{
#ifdef CONFIG_CAVIUM_RESERVE32_USE_WIRED_TLB
	on_each_cpu(octeon_hal_setup_per_cpu_reserved32, NULL, 1);
#endif
}
/**
* Reboot Octeon
*
* @command: Command to pass to the bootloader. Currently ignored.
*/
static void octeon_restart(char *command)
{
/* Disable all watchdogs before soft reset. They don't get cleared */
#ifdef CONFIG_SMP
int cpu;
for_each_online_cpu(cpu)
cvmx_write_csr(CVMX_CIU_WDOGX(cpu_logical_map(cpu)), 0);
#else
cvmx_write_csr(CVMX_CIU_WDOGX(cvmx_get_core_num()), 0);
#endif
mb();
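	/*
	 * Writing CIU_SOFT_RST triggers the chip-wide soft reset; spin
	 * until it takes effect.
	 */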
while (1)
cvmx_write_csr(CVMX_CIU_SOFT_RST, 1);
}
/**
* Permanently stop a core.
*
* @arg: Ignored.
*/
static void octeon_kill_core(void *arg)
{
mb();
if (octeon_is_simulation()) {
/* The simulator needs the watchdog to stop for dead cores */
cvmx_write_csr(CVMX_CIU_WDOGX(cvmx_get_core_num()), 0);
		/* A break instruction causes the simulator to stop a core */
asm volatile ("sync\nbreak");
}
}
/**
* Halt the system
*/
static void octeon_halt(void)
{
smp_call_function(octeon_kill_core, NULL, 0);
switch (octeon_bootinfo->board_type) {
case CVMX_BOARD_TYPE_NAO38:
/* Driving a 1 to GPIO 12 shuts off this board */
cvmx_write_csr(CVMX_GPIO_BIT_CFGX(12), 1);
cvmx_write_csr(CVMX_GPIO_TX_SET, 0x1000);
break;
default:
octeon_write_lcd("PowerOff");
break;
}
octeon_kill_core(NULL);
}
#if 0
/**
* Platform time init specifics.
* Returns
*/
void __init plat_time_init(void)
{
/* Nothing special here, but we are required to have one */
}
#endif
/**
* Handle all the error condition interrupts that might occur.
*
*/
#ifdef CONFIG_CAVIUM_DECODE_RSL
static irqreturn_t octeon_rlm_interrupt(int cpl, void *dev_id)
{
cvmx_interrupt_rsl_decode();
return IRQ_HANDLED;
}
#endif
/**
* Return a string representing the system type
*
 * Returns a static string naming the board type and Octeon model
*/
const char *octeon_board_type_string(void)
{
static char name[80];
sprintf(name, "%s (%s)",
cvmx_board_type_to_string(octeon_bootinfo->board_type),
octeon_model_get_string(read_c0_prid()));
return name;
}
const char *get_system_type(void)
__attribute__ ((alias("octeon_board_type_string")));
void octeon_user_io_init(void)
{
union octeon_cvmemctl cvmmemctl;
union cvmx_iob_fau_timeout fau_timeout;
union cvmx_pow_nw_tim nm_tim;
uint64_t cvmctl;
/* Get the current settings for CP0_CVMMEMCTL_REG */
cvmmemctl.u64 = read_c0_cvmmemctl();
/* R/W If set, marked write-buffer entries time out the same
	 * as other entries; if clear, marked write-buffer entries
* use the maximum timeout. */
cvmmemctl.s.dismarkwblongto = 1;
/* R/W If set, a merged store does not clear the write-buffer
* entry timeout state. */
cvmmemctl.s.dismrgclrwbto = 0;
/* R/W Two bits that are the MSBs of the resultant CVMSEG LM
* word location for an IOBDMA. The other 8 bits come from the
* SCRADDR field of the IOBDMA. */
cvmmemctl.s.iobdmascrmsb = 0;
/* R/W If set, SYNCWS and SYNCS only order marked stores; if
* clear, SYNCWS and SYNCS only order unmarked
* stores. SYNCWSMARKED has no effect when DISSYNCWS is
* set. */
cvmmemctl.s.syncwsmarked = 0;
/* R/W If set, SYNCWS acts as SYNCW and SYNCS acts as SYNC. */
cvmmemctl.s.dissyncws = 0;
/* R/W If set, no stall happens on write buffer full. */
if (OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2))
cvmmemctl.s.diswbfst = 1;
else
cvmmemctl.s.diswbfst = 0;
/* R/W If set (and SX set), supervisor-level loads/stores can
* use XKPHYS addresses with <48>==0 */
cvmmemctl.s.xkmemenas = 0;
/* R/W If set (and UX set), user-level loads/stores can use
* XKPHYS addresses with VA<48>==0 */
cvmmemctl.s.xkmemenau = 0;
/* R/W If set (and SX set), supervisor-level loads/stores can
* use XKPHYS addresses with VA<48>==1 */
cvmmemctl.s.xkioenas = 0;
/* R/W If set (and UX set), user-level loads/stores can use
* XKPHYS addresses with VA<48>==1 */
cvmmemctl.s.xkioenau = 0;
/* R/W If set, all stores act as SYNCW (NOMERGE must be set
* when this is set) RW, reset to 0. */
cvmmemctl.s.allsyncw = 0;
/* R/W If set, no stores merge, and all stores reach the
* coherent bus in order. */
cvmmemctl.s.nomerge = 0;
	/* R/W Selects the bit in the counter used for DID time-outs 0
	 * = 2^31, 1 = 2^30, 2 = 2^29, 3 = 2^14. Actual time-out is
* between 1x and 2x this interval. For example, with
* DIDTTO=3, expiration interval is between 16K and 32K. */
cvmmemctl.s.didtto = 0;
/* R/W If set, the (mem) CSR clock never turns off. */
cvmmemctl.s.csrckalwys = 0;
/* R/W If set, mclk never turns off. */
cvmmemctl.s.mclkalwys = 0;
/* R/W Selects the bit in the counter used for write buffer
* flush time-outs (WBFLT+11) is the bit position in an
* internal counter used to determine expiration. The write
* buffer expires between 1x and 2x this interval. For
* example, with WBFLT = 0, a write buffer expires between 2K
* and 4K cycles after the write buffer entry is allocated. */
cvmmemctl.s.wbfltime = 0;
/* R/W If set, do not put Istream in the L2 cache. */
cvmmemctl.s.istrnol2 = 0;
/* R/W The write buffer threshold. */
cvmmemctl.s.wbthresh = 10;
/* R/W If set, CVMSEG is available for loads/stores in
* kernel/debug mode. */
#if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0
cvmmemctl.s.cvmsegenak = 1;
#else
cvmmemctl.s.cvmsegenak = 0;
#endif
/* R/W If set, CVMSEG is available for loads/stores in
* supervisor mode. */
cvmmemctl.s.cvmsegenas = 0;
/* R/W If set, CVMSEG is available for loads/stores in user
* mode. */
cvmmemctl.s.cvmsegenau = 0;
/* R/W Size of local memory in cache blocks, 54 (6912 bytes)
* is max legal value. */
cvmmemctl.s.lmemsz = CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE;
if (smp_processor_id() == 0)
pr_notice("CVMSEG size: %d cache lines (%d bytes)\n",
CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE,
CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE * 128);
write_c0_cvmmemctl(cvmmemctl.u64);
/* Move the performance counter interrupts to IRQ 6 */
cvmctl = read_c0_cvmctl();
cvmctl &= ~(7 << 7);
cvmctl |= 6 << 7;
write_c0_cvmctl(cvmctl);
/* Set a default for the hardware timeouts */
fau_timeout.u64 = 0;
fau_timeout.s.tout_val = 0xfff;
/* Disable tagwait FAU timeout */
fau_timeout.s.tout_enb = 0;
cvmx_write_csr(CVMX_IOB_FAU_TIMEOUT, fau_timeout.u64);
nm_tim.u64 = 0;
/* 4096 cycles */
nm_tim.s.nw_tim = 3;
cvmx_write_csr(CVMX_POW_NW_TIM, nm_tim.u64);
write_octeon_c0_icacheerr(0);
write_c0_derraddr1(0);
}
/**
* Early entry point for arch setup
*/
void __init prom_init(void)
{
struct cvmx_sysinfo *sysinfo;
const int coreid = cvmx_get_core_num();
int i;
int argc;
struct uart_port octeon_port;
#ifdef CONFIG_CAVIUM_RESERVE32
int64_t addr = -1;
#endif
/*
* The bootloader passes a pointer to the boot descriptor in
* $a3, this is available as fw_arg3.
*/
octeon_boot_desc_ptr = (struct octeon_boot_descriptor *)fw_arg3;
octeon_bootinfo =
cvmx_phys_to_ptr(octeon_boot_desc_ptr->cvmx_desc_vaddr);
cvmx_bootmem_init(cvmx_phys_to_ptr(octeon_bootinfo->phy_mem_desc_addr));
/*
* Only enable the LED controller if we're running on a CN38XX, CN58XX,
* or CN56XX. The CN30XX and CN31XX don't have an LED controller.
*/
if (!octeon_is_simulation() &&
octeon_has_feature(OCTEON_FEATURE_LED_CONTROLLER)) {
cvmx_write_csr(CVMX_LED_EN, 0);
cvmx_write_csr(CVMX_LED_PRT, 0);
cvmx_write_csr(CVMX_LED_DBG, 0);
cvmx_write_csr(CVMX_LED_PRT_FMT, 0);
cvmx_write_csr(CVMX_LED_UDD_CNTX(0), 32);
cvmx_write_csr(CVMX_LED_UDD_CNTX(1), 32);
cvmx_write_csr(CVMX_LED_UDD_DATX(0), 0);
cvmx_write_csr(CVMX_LED_UDD_DATX(1), 0);
cvmx_write_csr(CVMX_LED_EN, 1);
}
#ifdef CONFIG_CAVIUM_RESERVE32
/*
* We need to temporarily allocate all memory in the reserve32
* region. This makes sure the kernel doesn't allocate this
* memory when it is getting memory from the
* bootloader. Later, after the memory allocations are
* complete, the reserve32 will be freed.
*/
#ifdef CONFIG_CAVIUM_RESERVE32_USE_WIRED_TLB
if (CONFIG_CAVIUM_RESERVE32 & 0x1ff)
pr_err("CAVIUM_RESERVE32 isn't a multiple of 512MB. "
"This is required if CAVIUM_RESERVE32_USE_WIRED_TLB "
"is set\n");
else
addr = cvmx_bootmem_phy_named_block_alloc(CONFIG_CAVIUM_RESERVE32 << 20,
0, 0, 512 << 20,
"CAVIUM_RESERVE32", 0);
#else
/*
* Allocate memory for RESERVED32 aligned on 2MB boundary. This
* is in case we later use hugetlb entries with it.
*/
addr = cvmx_bootmem_phy_named_block_alloc(CONFIG_CAVIUM_RESERVE32 << 20,
0, 0, 2 << 20,
"CAVIUM_RESERVE32", 0);
#endif
if (addr < 0)
pr_err("Failed to allocate CAVIUM_RESERVE32 memory area\n");
else
octeon_reserve32_memory = addr;
#endif
#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2
if (cvmx_read_csr(CVMX_L2D_FUS3) & (3ull << 34)) {
pr_info("Skipping L2 locking due to reduced L2 cache size\n");
} else {
uint32_t ebase = read_c0_ebase() & 0x3ffff000;
#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_TLB
/* TLB refill */
cvmx_l2c_lock_mem_region(ebase, 0x100);
#endif
#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_EXCEPTION
/* General exception */
cvmx_l2c_lock_mem_region(ebase + 0x180, 0x80);
#endif
#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_LOW_LEVEL_INTERRUPT
/* Interrupt handler */
cvmx_l2c_lock_mem_region(ebase + 0x200, 0x80);
#endif
#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_INTERRUPT
cvmx_l2c_lock_mem_region(__pa_symbol(handle_int), 0x100);
cvmx_l2c_lock_mem_region(__pa_symbol(plat_irq_dispatch), 0x80);
#endif
#ifdef CONFIG_CAVIUM_OCTEON_LOCK_L2_MEMCPY
cvmx_l2c_lock_mem_region(__pa_symbol(memcpy), 0x480);
#endif
}
#endif
sysinfo = cvmx_sysinfo_get();
memset(sysinfo, 0, sizeof(*sysinfo));
sysinfo->system_dram_size = octeon_bootinfo->dram_size << 20;
sysinfo->phy_mem_desc_ptr =
cvmx_phys_to_ptr(octeon_bootinfo->phy_mem_desc_addr);
sysinfo->core_mask = octeon_bootinfo->core_mask;
sysinfo->exception_base_addr = octeon_bootinfo->exception_base_addr;
sysinfo->cpu_clock_hz = octeon_bootinfo->eclock_hz;
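	/* DDR transfers data on both clock edges, so the data rate is
	 * twice the DRAM clock. */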
sysinfo->dram_data_rate_hz = octeon_bootinfo->dclock_hz * 2;
sysinfo->board_type = octeon_bootinfo->board_type;
sysinfo->board_rev_major = octeon_bootinfo->board_rev_major;
sysinfo->board_rev_minor = octeon_bootinfo->board_rev_minor;
memcpy(sysinfo->mac_addr_base, octeon_bootinfo->mac_addr_base,
sizeof(sysinfo->mac_addr_base));
sysinfo->mac_addr_count = octeon_bootinfo->mac_addr_count;
memcpy(sysinfo->board_serial_number,
octeon_bootinfo->board_serial_number,
sizeof(sysinfo->board_serial_number));
sysinfo->compact_flash_common_base_addr =
octeon_bootinfo->compact_flash_common_base_addr;
sysinfo->compact_flash_attribute_base_addr =
octeon_bootinfo->compact_flash_attribute_base_addr;
sysinfo->led_display_base_addr = octeon_bootinfo->led_display_base_addr;
sysinfo->dfa_ref_clock_hz = octeon_bootinfo->dfa_ref_clock_hz;
sysinfo->bootloader_config_flags = octeon_bootinfo->config_flags;
octeon_check_cpu_bist();
octeon_uart = octeon_get_boot_uart();
/*
* Disable All CIU Interrupts. The ones we need will be
* enabled later. Read the SUM register so we know the write
* completed.
*/
cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2)), 0);
cvmx_write_csr(CVMX_CIU_INTX_EN0((coreid * 2 + 1)), 0);
cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2)), 0);
cvmx_write_csr(CVMX_CIU_INTX_EN1((coreid * 2 + 1)), 0);
cvmx_read_csr(CVMX_CIU_INTX_SUM0((coreid * 2)));
#ifdef CONFIG_SMP
octeon_write_lcd("LinuxSMP");
#else
octeon_write_lcd("Linux");
#endif
#ifdef CONFIG_CAVIUM_GDB
/*
* When debugging the linux kernel, force the cores to enter
* the debug exception handler to break in.
*/
if (octeon_get_boot_debug_flag()) {
cvmx_write_csr(CVMX_CIU_DINT, 1 << cvmx_get_core_num());
cvmx_read_csr(CVMX_CIU_DINT);
}
#endif
/*
* BIST should always be enabled when doing a soft reset. L2
* Cache locking for instance is not cleared unless BIST is
	 * enabled. Unfortunately, due to chip errata G-200 for
	 * CN38XX and CN31XX, BIST must be disabled on these parts.
*/
if (OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2) ||
OCTEON_IS_MODEL(OCTEON_CN31XX))
cvmx_write_csr(CVMX_CIU_SOFT_BIST, 0);
else
cvmx_write_csr(CVMX_CIU_SOFT_BIST, 1);
/* Default to 64MB in the simulator to speed things up */
if (octeon_is_simulation())
MAX_MEMORY = 64ull << 20;
arcs_cmdline[0] = 0;
argc = octeon_boot_desc_ptr->argc;
for (i = 0; i < argc; i++) {
const char *arg =
cvmx_phys_to_ptr(octeon_boot_desc_ptr->argv[i]);
if ((strncmp(arg, "MEM=", 4) == 0) ||
(strncmp(arg, "mem=", 4) == 0)) {
sscanf(arg + 4, "%llu", &MAX_MEMORY);
MAX_MEMORY <<= 20;
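			/* A mem= value of 0 falls back to a 32GB cap. */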
if (MAX_MEMORY == 0)
MAX_MEMORY = 32ull << 30;
} else if (strcmp(arg, "ecc_verbose") == 0) {
#ifdef CONFIG_CAVIUM_REPORT_SINGLE_BIT_ECC
__cvmx_interrupt_ecc_report_single_bit_errors = 1;
pr_notice("Reporting of single bit ECC errors is "
"turned on\n");
#endif
} else if (strlen(arcs_cmdline) + strlen(arg) + 1 <
sizeof(arcs_cmdline) - 1) {
strcat(arcs_cmdline, " ");
strcat(arcs_cmdline, arg);
}
}
if (strstr(arcs_cmdline, "console=") == NULL) {
#ifdef CONFIG_GDB_CONSOLE
strcat(arcs_cmdline, " console=gdb");
#else
#ifdef CONFIG_CAVIUM_OCTEON_2ND_KERNEL
strcat(arcs_cmdline, " console=ttyS0,115200");
#else
if (octeon_uart == 1)
strcat(arcs_cmdline, " console=ttyS1,115200");
else
strcat(arcs_cmdline, " console=ttyS0,115200");
#endif
#endif
}
if (octeon_is_simulation()) {
/*
		 * The simulator uses an mtdram device pre-filled with
* the filesystem. Also specify the calibration delay
* to avoid calculating it every time.
*/
strcat(arcs_cmdline, " rw root=1f00"
" lpj=60176 slram=root,0x40000000,+1073741824");
}
mips_hpt_frequency = octeon_get_clock_rate();
octeon_init_cvmcount();
_machine_restart = octeon_restart;
_machine_halt = octeon_halt;
memset(&octeon_port, 0, sizeof(octeon_port));
/*
* For early_serial_setup we don't set the port type or
* UPF_FIXED_TYPE.
*/
octeon_port.flags = ASYNC_SKIP_TEST | UPF_SHARE_IRQ;
octeon_port.iotype = UPIO_MEM;
/* I/O addresses are every 8 bytes */
octeon_port.regshift = 3;
/* Clock rate of the chip */
octeon_port.uartclk = mips_hpt_frequency;
octeon_port.fifosize = 64;
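	/* The MIO UART CSR blocks are 1KB apart, starting at UART0's RBR. */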
octeon_port.mapbase = 0x0001180000000800ull + (1024 * octeon_uart);
octeon_port.membase = cvmx_phys_to_ptr(octeon_port.mapbase);
octeon_port.serial_in = octeon_serial_in;
octeon_port.serial_out = octeon_serial_out;
#ifdef CONFIG_CAVIUM_OCTEON_2ND_KERNEL
octeon_port.line = 0;
#else
octeon_port.line = octeon_uart;
#endif
octeon_port.irq = 42 + octeon_uart;
early_serial_setup(&octeon_port);
octeon_user_io_init();
register_smp_ops(&octeon_smp_ops);
}
void __init plat_mem_setup(void)
{
uint64_t mem_alloc_size;
uint64_t total;
int64_t memory;
total = 0;
/* First add the init memory we will be returning. */
memory = __pa_symbol(&__init_begin) & PAGE_MASK;
mem_alloc_size = (__pa_symbol(&__init_end) & PAGE_MASK) - memory;
if (mem_alloc_size > 0) {
add_memory_region(memory, mem_alloc_size, BOOT_MEM_RAM);
total += mem_alloc_size;
}
/*
* The Mips memory init uses the first memory location for
* some memory vectors. When SPARSEMEM is in use, it doesn't
* verify that the size is big enough for the final
	 * vectors. Making the smallest chunk 4MB seems to be enough
	 * to work consistently.
*/
mem_alloc_size = 4 << 20;
if (mem_alloc_size > MAX_MEMORY)
mem_alloc_size = MAX_MEMORY;
/*
* When allocating memory, we want incrementing addresses from
* bootmem_alloc so the code in add_memory_region can merge
* regions next to each other.
*/
cvmx_bootmem_lock();
while ((boot_mem_map.nr_map < BOOT_MEM_MAP_MAX)
&& (total < MAX_MEMORY)) {
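		/*
		 * 64-bit kernels may take memory anywhere above __init_end;
		 * with HIGHMEM the allocations must stay below 2GB, and
		 * plain 32-bit kernels are limited to the first 512MB.
		 */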
#if defined(CONFIG_64BIT) || defined(CONFIG_64BIT_PHYS_ADDR)
memory = cvmx_bootmem_phy_alloc(mem_alloc_size,
__pa_symbol(&__init_end), -1,
0x100000,
CVMX_BOOTMEM_FLAG_NO_LOCKING);
#elif defined(CONFIG_HIGHMEM)
memory = cvmx_bootmem_phy_alloc(mem_alloc_size, 0, 1ull << 31,
0x100000,
CVMX_BOOTMEM_FLAG_NO_LOCKING);
#else
memory = cvmx_bootmem_phy_alloc(mem_alloc_size, 0, 512 << 20,
0x100000,
CVMX_BOOTMEM_FLAG_NO_LOCKING);
#endif
if (memory >= 0) {
/*
* This function automatically merges address
* regions next to each other if they are
* received in incrementing order.
*/
add_memory_region(memory, mem_alloc_size, BOOT_MEM_RAM);
total += mem_alloc_size;
} else {
break;
}
}
cvmx_bootmem_unlock();
#ifdef CONFIG_CAVIUM_RESERVE32
/*
* Now that we've allocated the kernel memory it is safe to
* free the reserved region. We free it here so that builtin
* drivers can use the memory.
*/
if (octeon_reserve32_memory)
cvmx_bootmem_free_named("CAVIUM_RESERVE32");
#endif /* CONFIG_CAVIUM_RESERVE32 */
if (total == 0)
panic("Unable to allocate memory from "
"cvmx_bootmem_phy_alloc\n");
}
int prom_putchar(char c)
{
uint64_t lsrval;
	/* Spin until the transmit holding register is empty (LSR bit 5) */
do {
lsrval = cvmx_read_csr(CVMX_MIO_UARTX_LSR(octeon_uart));
} while ((lsrval & 0x20) == 0);
/* Write the byte */
cvmx_write_csr(CVMX_MIO_UARTX_THR(octeon_uart), c);
return 1;
}
void prom_free_prom_memory(void)
{
#ifdef CONFIG_CAVIUM_DECODE_RSL
cvmx_interrupt_rsl_enable();
/* Add an interrupt handler for general failures. */
if (request_irq(OCTEON_IRQ_RML, octeon_rlm_interrupt, IRQF_SHARED,
"RML/RSL", octeon_rlm_interrupt)) {
panic("Unable to request_irq(OCTEON_IRQ_RML)\n");
}
#endif
/* This call is here so that it is performed after any TLB
initializations. It needs to be after these in case the
CONFIG_CAVIUM_RESERVE32_USE_WIRED_TLB option is set */
octeon_hal_setup_reserved32();
}
static struct octeon_cf_data octeon_cf_data;
static int __init octeon_cf_device_init(void)
{
union cvmx_mio_boot_reg_cfgx mio_boot_reg_cfg;
unsigned long base_ptr, region_base, region_size;
struct platform_device *pd;
struct resource cf_resources[3];
unsigned int num_resources;
int i;
int ret = 0;
/* Setup octeon-cf platform device if present. */
base_ptr = 0;
if (octeon_bootinfo->major_version == 1
&& octeon_bootinfo->minor_version >= 1) {
if (octeon_bootinfo->compact_flash_common_base_addr)
base_ptr =
octeon_bootinfo->compact_flash_common_base_addr;
} else {
base_ptr = 0x1d000800;
}
if (!base_ptr)
return ret;
/* Find CS0 region. */
for (i = 0; i < 8; i++) {
mio_boot_reg_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(i));
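		/* Boot-bus region base and size registers are in 64KB units. */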
region_base = mio_boot_reg_cfg.s.base << 16;
region_size = (mio_boot_reg_cfg.s.size + 1) << 16;
if (mio_boot_reg_cfg.s.en && base_ptr >= region_base
&& base_ptr < region_base + region_size)
break;
}
if (i >= 7) {
/* i and i + 1 are CS0 and CS1, both must be less than 8. */
goto out;
}
octeon_cf_data.base_region = i;
octeon_cf_data.is16bit = mio_boot_reg_cfg.s.width;
octeon_cf_data.base_region_bias = base_ptr - region_base;
memset(cf_resources, 0, sizeof(cf_resources));
num_resources = 0;
cf_resources[num_resources].flags = IORESOURCE_MEM;
cf_resources[num_resources].start = region_base;
cf_resources[num_resources].end = region_base + region_size - 1;
num_resources++;
if (!(base_ptr & 0xfffful)) {
/*
* Boot loader signals availability of DMA (true_ide
* mode) by setting low order bits of base_ptr to
* zero.
*/
		/* Assume that CS1 immediately follows. */
mio_boot_reg_cfg.u64 =
cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(i + 1));
region_base = mio_boot_reg_cfg.s.base << 16;
region_size = (mio_boot_reg_cfg.s.size + 1) << 16;
if (!mio_boot_reg_cfg.s.en)
goto out;
cf_resources[num_resources].flags = IORESOURCE_MEM;
cf_resources[num_resources].start = region_base;
cf_resources[num_resources].end = region_base + region_size - 1;
num_resources++;
octeon_cf_data.dma_engine = 0;
cf_resources[num_resources].flags = IORESOURCE_IRQ;
cf_resources[num_resources].start = OCTEON_IRQ_BOOTDMA;
cf_resources[num_resources].end = OCTEON_IRQ_BOOTDMA;
num_resources++;
} else {
octeon_cf_data.dma_engine = -1;
}
pd = platform_device_alloc("pata_octeon_cf", -1);
if (!pd) {
ret = -ENOMEM;
goto out;
}
pd->dev.platform_data = &octeon_cf_data;
ret = platform_device_add_resources(pd, cf_resources, num_resources);
if (ret)
goto fail;
ret = platform_device_add(pd);
if (ret)
goto fail;
return ret;
fail:
platform_device_put(pd);
out:
return ret;
}
device_initcall(octeon_cf_device_init);

Some files were not shown because too many files have changed in this diff