Merge branch 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc

* 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc: (34 commits)
  powerpc/mpic: Fix mask/unmask timeout message
  powerpc/pseries: Add BNX2=m to defconfig
  powerpc: Enable 64kB pages and 1024 threads in pseries config
  powerpc: Disable mcount tracers in pseries defconfig
  powerpc/boot/dts: Install dts from the right directory
  powerpc: machine_check_generic is wrong on 64bit
  powerpc: Check RTAS extended log flag before checking length
  powerpc: Fix corruption when grabbing FWNMI data
  powerpc: Rework pseries machine check handler
  powerpc: Don't silently handle machine checks from userspace
  powerpc: Remove duplicate debugger hook in machine_check_exception
  powerpc: Never halt RTAS error logging after receiving an unrecoverable machine check
  powerpc: Don't force MSR_RI in machine_check_exception
  powerpc: Print 32 bits of DSISR in show_regs
  powerpc/kdump: Disable ftrace during kexec
  powerpc/kdump: Move crash_kexec_stop_spus to kdump crash handler
  powerpc/kexec: Remove empty ppc_md.machine_kexec_prepare
  powerpc/kexec: Don't initialise kexec hooks to default handlers
  powerpc/kdump: Remove ppc_md.machine_crash_shutdown
  powerpc/kexec: Remove ppc_md.machine_kexec
  ...
Linus Torvalds 2011-01-21 13:23:52 -08:00
commit d41ad6df44
35 changed files with 335 additions and 347 deletions

View File

@ -368,7 +368,7 @@ INSTALL := install
extra-installed := $(patsubst $(obj)/%, $(DESTDIR)$(WRAPPER_OBJDIR)/%, $(extra-y))
hostprogs-installed := $(patsubst %, $(DESTDIR)$(WRAPPER_BINDIR)/%, $(hostprogs-y))
wrapper-installed := $(DESTDIR)$(WRAPPER_BINDIR)/wrapper
dts-installed := $(patsubst $(obj)/dts/%, $(DESTDIR)$(WRAPPER_DTSDIR)/%, $(wildcard $(obj)/dts/*.dts))
dts-installed := $(patsubst $(dtstree)/%, $(DESTDIR)$(WRAPPER_DTSDIR)/%, $(wildcard $(dtstree)/*.dts))
all-installed := $(extra-installed) $(hostprogs-installed) $(wrapper-installed) $(dts-installed)

View File

@ -109,7 +109,7 @@
#address-cells = <1>;
#size-cells = <1>;
device_type = "soc";
compatible = "fsl,mpc8315-immr", "simple-bus";
compatible = "fsl,mpc8308-immr", "simple-bus";
ranges = <0 0xe0000000 0x00100000>;
reg = <0xe0000000 0x00000200>;
bus-frequency = <0>;

View File

@ -291,13 +291,13 @@
ranges = <0x0 0xc100 0x200>;
cell-index = <1>;
dma00: dma-channel@0 {
compatible = "fsl,eloplus-dma-channel";
compatible = "fsl,ssi-dma-channel";
reg = <0x0 0x80>;
cell-index = <0>;
interrupts = <76 2>;
};
dma01: dma-channel@80 {
compatible = "fsl,eloplus-dma-channel";
compatible = "fsl,ssi-dma-channel";
reg = <0x80 0x80>;
cell-index = <1>;
interrupts = <77 2>;

View File

@ -2,7 +2,7 @@ CONFIG_PPC64=y
CONFIG_ALTIVEC=y
CONFIG_VSX=y
CONFIG_SMP=y
CONFIG_NR_CPUS=128
CONFIG_NR_CPUS=1024
CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
@ -45,6 +45,8 @@ CONFIG_KEXEC=y
CONFIG_IRQ_ALL_CPUS=y
CONFIG_MEMORY_HOTPLUG=y
CONFIG_MEMORY_HOTREMOVE=y
CONFIG_PPC_64K_PAGES=y
CONFIG_PPC_SUBPAGE_PROT=y
CONFIG_SCHED_SMT=y
CONFIG_HOTPLUG_PCI=m
CONFIG_HOTPLUG_PCI_RPA=m
@ -184,6 +186,7 @@ CONFIG_ACENIC_OMIT_TIGON_I=y
CONFIG_E1000=y
CONFIG_E1000E=y
CONFIG_TIGON3=y
CONFIG_BNX2=m
CONFIG_CHELSIO_T1=m
CONFIG_CHELSIO_T3=m
CONFIG_EHEA=y
@ -311,9 +314,7 @@ CONFIG_DEBUG_KERNEL=y
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
CONFIG_LATENCYTOP=y
CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_IRQSOFF_TRACER=y
CONFIG_SCHED_TRACER=y
CONFIG_STACK_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_DEBUG_STACKOVERFLOW=y
CONFIG_DEBUG_STACK_USAGE=y

View File

@ -37,18 +37,21 @@ label##2: \
.align 2; \
label##3:
#define MAKE_FTR_SECTION_ENTRY(msk, val, label, sect) \
label##4: \
.popsection; \
.pushsection sect,"a"; \
.align 3; \
label##5: \
FTR_ENTRY_LONG msk; \
FTR_ENTRY_LONG val; \
FTR_ENTRY_OFFSET label##1b-label##5b; \
FTR_ENTRY_OFFSET label##2b-label##5b; \
FTR_ENTRY_OFFSET label##3b-label##5b; \
FTR_ENTRY_OFFSET label##4b-label##5b; \
#define MAKE_FTR_SECTION_ENTRY(msk, val, label, sect) \
label##4: \
.popsection; \
.pushsection sect,"a"; \
.align 3; \
label##5: \
FTR_ENTRY_LONG msk; \
FTR_ENTRY_LONG val; \
FTR_ENTRY_OFFSET label##1b-label##5b; \
FTR_ENTRY_OFFSET label##2b-label##5b; \
FTR_ENTRY_OFFSET label##3b-label##5b; \
FTR_ENTRY_OFFSET label##4b-label##5b; \
.ifgt (label##4b-label##3b)-(label##2b-label##1b); \
.error "Feature section else case larger than body"; \
.endif; \
.popsection;

View File

@ -467,13 +467,22 @@ struct qe_immap {
extern struct qe_immap __iomem *qe_immr;
extern phys_addr_t get_qe_base(void);
static inline unsigned long immrbar_virt_to_phys(void *address)
/*
* Returns the offset within the QE address space of the given pointer.
*
* Note that the QE does not support 36-bit physical addresses, so if
* get_qe_base() returns a number above 4GB, the caller will probably fail.
*/
static inline phys_addr_t immrbar_virt_to_phys(void *address)
{
if ( ((u32)address >= (u32)qe_immr) &&
((u32)address < ((u32)qe_immr + QE_IMMAP_SIZE)) )
return (unsigned long)(address - (u32)qe_immr +
(u32)get_qe_base());
return (unsigned long)virt_to_phys(address);
void *q = (void *)qe_immr;
/* Is it a MURAM address? */
if ((address >= q) && (address < (q + QE_IMMAP_SIZE)))
return get_qe_base() + (address - q);
/* It's an address returned by kmalloc */
return virt_to_phys(address);
}
#endif /* __KERNEL__ */

View File

@ -12,24 +12,44 @@
#else
#ifdef CONFIG_TRACE_IRQFLAGS
#ifdef CONFIG_IRQSOFF_TRACER
/*
* Since the ftrace irqsoff latency trace checks CALLER_ADDR1,
* which is the stack frame here, we need to force a stack frame
* in case we came from user space.
*/
#define TRACE_WITH_FRAME_BUFFER(func) \
mflr r0; \
stdu r1, -32(r1); \
std r0, 16(r1); \
stdu r1, -32(r1); \
bl func; \
ld r1, 0(r1); \
ld r1, 0(r1);
#else
#define TRACE_WITH_FRAME_BUFFER(func) \
bl func;
#endif
/*
* Most of the CPU's IRQ-state tracing is done from assembly code; we
* have to call a C function so call a wrapper that saves all the
* C-clobbered registers.
*/
#define TRACE_ENABLE_INTS bl .trace_hardirqs_on
#define TRACE_DISABLE_INTS bl .trace_hardirqs_off
#define TRACE_AND_RESTORE_IRQ_PARTIAL(en,skip) \
cmpdi en,0; \
bne 95f; \
stb en,PACASOFTIRQEN(r13); \
bl .trace_hardirqs_off; \
b skip; \
95: bl .trace_hardirqs_on; \
#define TRACE_ENABLE_INTS TRACE_WITH_FRAME_BUFFER(.trace_hardirqs_on)
#define TRACE_DISABLE_INTS TRACE_WITH_FRAME_BUFFER(.trace_hardirqs_off)
#define TRACE_AND_RESTORE_IRQ_PARTIAL(en,skip) \
cmpdi en,0; \
bne 95f; \
stb en,PACASOFTIRQEN(r13); \
TRACE_WITH_FRAME_BUFFER(.trace_hardirqs_off) \
b skip; \
95: TRACE_WITH_FRAME_BUFFER(.trace_hardirqs_on) \
li en,1;
#define TRACE_AND_RESTORE_IRQ(en) \
TRACE_AND_RESTORE_IRQ_PARTIAL(en,96f); \
stb en,PACASOFTIRQEN(r13); \
stb en,PACASOFTIRQEN(r13); \
96:
#else
#define TRACE_ENABLE_INTS

View File

@ -116,9 +116,6 @@ struct machdep_calls {
* If for some reason there is no irq, but the interrupt
* shouldn't be counted as spurious, return NO_IRQ_IGNORE. */
unsigned int (*get_irq)(void);
#ifdef CONFIG_KEXEC
void (*kexec_cpu_down)(int crash_shutdown, int secondary);
#endif
/* PCI stuff */
/* Called after scanning the bus, before allocating resources */
@ -235,11 +232,7 @@ struct machdep_calls {
void (*machine_shutdown)(void);
#ifdef CONFIG_KEXEC
/* Called to do the minimal shutdown needed to run a kexec'd kernel
* to run successfully.
* XXX Should we move this one out of kexec scope?
*/
void (*machine_crash_shutdown)(struct pt_regs *regs);
void (*kexec_cpu_down)(int crash_shutdown, int secondary);
/* Called to do what every setup is needed on image and the
* reboot code buffer. Returns 0 on success.
@ -247,15 +240,6 @@ struct machdep_calls {
* claims to support kexec.
*/
int (*machine_kexec_prepare)(struct kimage *image);
/* Called to handle any machine specific cleanup on image */
void (*machine_kexec_cleanup)(struct kimage *image);
/* Called to perform the _real_ kexec.
* Do NOT allocate memory or fail here. We are past the point of
* no return.
*/
void (*machine_kexec)(struct kimage *image);
#endif /* CONFIG_KEXEC */
#ifdef CONFIG_SUSPEND

View File

@ -283,6 +283,7 @@
#define HID0_NOPTI (1<<0) /* No-op dcbt and dcbst instr. */
#define SPRN_HID1 0x3F1 /* Hardware Implementation Register 1 */
#ifdef CONFIG_6xx
#define HID1_EMCP (1<<31) /* 7450 Machine Check Pin Enable */
#define HID1_DFS (1<<22) /* 7447A Dynamic Frequency Scaling */
#define HID1_PC0 (1<<16) /* 7450 PLL_CFG[0] */
@ -292,6 +293,7 @@
#define HID1_SYNCBE (1<<11) /* 7450 ABE for sync, eieio */
#define HID1_ABE (1<<10) /* 7450 Address Broadcast Enable */
#define HID1_PS (1<<16) /* 750FX PLL selection */
#endif
#define SPRN_HID2 0x3F8 /* Hardware Implementation Register 2 */
#define SPRN_HID2_GEKKO 0x398 /* Gekko HID2 Register */
#define SPRN_IABR 0x3F2 /* Instruction Address Breakpoint Register */

View File

@ -246,6 +246,20 @@
store or cache line push */
#endif
/* Bit definitions for the HID1 */
#ifdef CONFIG_E500
/* e500v1/v2 */
#define HID1_PLL_CFG_MASK 0xfc000000 /* PLL_CFG input pins */
#define HID1_RFXE 0x00020000 /* Read fault exception enable */
#define HID1_R1DPE 0x00008000 /* R1 data bus parity enable */
#define HID1_R2DPE 0x00004000 /* R2 data bus parity enable */
#define HID1_ASTME 0x00002000 /* Address bus streaming mode enable */
#define HID1_ABE 0x00001000 /* Address broadcast enable */
#define HID1_MPXTT 0x00000400 /* MPX re-map transfer type */
#define HID1_ATS 0x00000080 /* Atomic status */
#define HID1_MID_MASK 0x0000000f /* MID input pins */
#endif
/* Bit definitions for the DBSR. */
/*
* DBSR bits which have conflicting definitions on true Book E versus IBM 40x.

View File

@ -203,14 +203,6 @@ void spu_irq_setaffinity(struct spu *spu, int cpu);
void spu_setup_kernel_slbs(struct spu *spu, struct spu_lscsa *lscsa,
void *code, int code_size);
#ifdef CONFIG_KEXEC
void crash_register_spus(struct list_head *list);
#else
static inline void crash_register_spus(struct list_head *list)
{
}
#endif
extern void spu_invalidate_slbs(struct spu *spu);
extern void spu_associate_mm(struct spu *spu, struct mm_struct *mm);
int spu_64k_pages_available(void);

View File

@ -64,6 +64,12 @@ _GLOBAL(__setup_cpu_e500v2)
bl __e500_icache_setup
bl __e500_dcache_setup
bl __setup_e500_ivors
#ifdef CONFIG_RAPIDIO
/* Ensure that RFXE is set */
mfspr r3,SPRN_HID1
oris r3,r3,HID1_RFXE@h
mtspr SPRN_HID1,r3
#endif
mtlr r4
blr
_GLOBAL(__setup_cpu_e500mc)

View File

@ -116,7 +116,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.pmc_type = PPC_PMC_IBM,
.oprofile_cpu_type = "ppc64/power3",
.oprofile_type = PPC_OPROFILE_RS64,
.machine_check = machine_check_generic,
.platform = "power3",
},
{ /* Power3+ */
@ -132,7 +131,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.pmc_type = PPC_PMC_IBM,
.oprofile_cpu_type = "ppc64/power3",
.oprofile_type = PPC_OPROFILE_RS64,
.machine_check = machine_check_generic,
.platform = "power3",
},
{ /* Northstar */
@ -148,7 +146,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.pmc_type = PPC_PMC_IBM,
.oprofile_cpu_type = "ppc64/rs64",
.oprofile_type = PPC_OPROFILE_RS64,
.machine_check = machine_check_generic,
.platform = "rs64",
},
{ /* Pulsar */
@ -164,7 +161,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.pmc_type = PPC_PMC_IBM,
.oprofile_cpu_type = "ppc64/rs64",
.oprofile_type = PPC_OPROFILE_RS64,
.machine_check = machine_check_generic,
.platform = "rs64",
},
{ /* I-star */
@ -180,7 +176,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.pmc_type = PPC_PMC_IBM,
.oprofile_cpu_type = "ppc64/rs64",
.oprofile_type = PPC_OPROFILE_RS64,
.machine_check = machine_check_generic,
.platform = "rs64",
},
{ /* S-star */
@ -196,7 +191,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.pmc_type = PPC_PMC_IBM,
.oprofile_cpu_type = "ppc64/rs64",
.oprofile_type = PPC_OPROFILE_RS64,
.machine_check = machine_check_generic,
.platform = "rs64",
},
{ /* Power4 */
@ -212,7 +206,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.pmc_type = PPC_PMC_IBM,
.oprofile_cpu_type = "ppc64/power4",
.oprofile_type = PPC_OPROFILE_POWER4,
.machine_check = machine_check_generic,
.platform = "power4",
},
{ /* Power4+ */
@ -228,7 +221,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.pmc_type = PPC_PMC_IBM,
.oprofile_cpu_type = "ppc64/power4",
.oprofile_type = PPC_OPROFILE_POWER4,
.machine_check = machine_check_generic,
.platform = "power4",
},
{ /* PPC970 */
@ -247,7 +239,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_restore = __restore_cpu_ppc970,
.oprofile_cpu_type = "ppc64/970",
.oprofile_type = PPC_OPROFILE_POWER4,
.machine_check = machine_check_generic,
.platform = "ppc970",
},
{ /* PPC970FX */
@ -266,7 +257,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_restore = __restore_cpu_ppc970,
.oprofile_cpu_type = "ppc64/970",
.oprofile_type = PPC_OPROFILE_POWER4,
.machine_check = machine_check_generic,
.platform = "ppc970",
},
{ /* PPC970MP DD1.0 - no DEEPNAP, use regular 970 init */
@ -285,7 +275,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_restore = __restore_cpu_ppc970,
.oprofile_cpu_type = "ppc64/970MP",
.oprofile_type = PPC_OPROFILE_POWER4,
.machine_check = machine_check_generic,
.platform = "ppc970",
},
{ /* PPC970MP */
@ -304,7 +293,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_restore = __restore_cpu_ppc970,
.oprofile_cpu_type = "ppc64/970MP",
.oprofile_type = PPC_OPROFILE_POWER4,
.machine_check = machine_check_generic,
.platform = "ppc970",
},
{ /* PPC970GX */
@ -322,7 +310,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_setup = __setup_cpu_ppc970,
.oprofile_cpu_type = "ppc64/970",
.oprofile_type = PPC_OPROFILE_POWER4,
.machine_check = machine_check_generic,
.platform = "ppc970",
},
{ /* Power5 GR */
@ -343,7 +330,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
*/
.oprofile_mmcra_sihv = MMCRA_SIHV,
.oprofile_mmcra_sipr = MMCRA_SIPR,
.machine_check = machine_check_generic,
.platform = "power5",
},
{ /* Power5++ */
@ -360,7 +346,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.oprofile_type = PPC_OPROFILE_POWER4,
.oprofile_mmcra_sihv = MMCRA_SIHV,
.oprofile_mmcra_sipr = MMCRA_SIPR,
.machine_check = machine_check_generic,
.platform = "power5+",
},
{ /* Power5 GS */
@ -378,7 +363,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.oprofile_type = PPC_OPROFILE_POWER4,
.oprofile_mmcra_sihv = MMCRA_SIHV,
.oprofile_mmcra_sipr = MMCRA_SIPR,
.machine_check = machine_check_generic,
.platform = "power5+",
},
{ /* POWER6 in P5+ mode; 2.04-compliant processor */
@ -390,7 +374,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 128,
.dcache_bsize = 128,
.machine_check = machine_check_generic,
.oprofile_cpu_type = "ppc64/ibm-compat-v1",
.oprofile_type = PPC_OPROFILE_POWER4,
.platform = "power5+",
@ -413,7 +396,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.oprofile_mmcra_sipr = POWER6_MMCRA_SIPR,
.oprofile_mmcra_clear = POWER6_MMCRA_THRM |
POWER6_MMCRA_OTHER,
.machine_check = machine_check_generic,
.platform = "power6x",
},
{ /* 2.05-compliant processor, i.e. Power6 "architected" mode */
@ -425,7 +407,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.mmu_features = MMU_FTR_HPTE_TABLE,
.icache_bsize = 128,
.dcache_bsize = 128,
.machine_check = machine_check_generic,
.oprofile_cpu_type = "ppc64/ibm-compat-v1",
.oprofile_type = PPC_OPROFILE_POWER4,
.platform = "power6",
@ -440,7 +421,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
MMU_FTR_TLBIE_206,
.icache_bsize = 128,
.dcache_bsize = 128,
.machine_check = machine_check_generic,
.oprofile_type = PPC_OPROFILE_POWER4,
.oprofile_cpu_type = "ppc64/ibm-compat-v1",
.platform = "power7",
@ -492,7 +472,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.pmc_type = PPC_PMC_IBM,
.oprofile_cpu_type = "ppc64/cell-be",
.oprofile_type = PPC_OPROFILE_CELL,
.machine_check = machine_check_generic,
.platform = "ppc-cell-be",
},
{ /* PA Semi PA6T */
@ -510,7 +489,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_restore = __restore_cpu_pa6t,
.oprofile_cpu_type = "ppc64/pa6t",
.oprofile_type = PPC_OPROFILE_PA6T,
.machine_check = machine_check_generic,
.platform = "pa6t",
},
{ /* default match */
@ -524,7 +502,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
.dcache_bsize = 128,
.num_pmcs = 6,
.pmc_type = PPC_PMC_IBM,
.machine_check = machine_check_generic,
.platform = "power4",
}
#endif /* CONFIG_PPC_BOOK3S_64 */

View File

@ -48,7 +48,7 @@ int crashing_cpu = -1;
static cpumask_t cpus_in_crash = CPU_MASK_NONE;
cpumask_t cpus_in_sr = CPU_MASK_NONE;
#define CRASH_HANDLER_MAX 2
#define CRASH_HANDLER_MAX 3
/* NULL terminated list of shutdown handles */
static crash_shutdown_t crash_shutdown_handles[CRASH_HANDLER_MAX+1];
static DEFINE_SPINLOCK(crash_handlers_lock);
@ -125,7 +125,7 @@ static void crash_kexec_prepare_cpus(int cpu)
smp_wmb();
/*
* FIXME: Until we will have the way to stop other CPUSs reliabally,
* FIXME: Until we will have the way to stop other CPUs reliably,
* the crash CPU will send an IPI and wait for other CPUs to
* respond.
* Delay of at least 10 seconds.
@ -254,72 +254,6 @@ void crash_kexec_secondary(struct pt_regs *regs)
cpus_in_sr = CPU_MASK_NONE;
}
#endif
#ifdef CONFIG_SPU_BASE
#include <asm/spu.h>
#include <asm/spu_priv1.h>
struct crash_spu_info {
struct spu *spu;
u32 saved_spu_runcntl_RW;
u32 saved_spu_status_R;
u32 saved_spu_npc_RW;
u64 saved_mfc_sr1_RW;
u64 saved_mfc_dar;
u64 saved_mfc_dsisr;
};
#define CRASH_NUM_SPUS 16 /* Enough for current hardware */
static struct crash_spu_info crash_spu_info[CRASH_NUM_SPUS];
static void crash_kexec_stop_spus(void)
{
struct spu *spu;
int i;
u64 tmp;
for (i = 0; i < CRASH_NUM_SPUS; i++) {
if (!crash_spu_info[i].spu)
continue;
spu = crash_spu_info[i].spu;
crash_spu_info[i].saved_spu_runcntl_RW =
in_be32(&spu->problem->spu_runcntl_RW);
crash_spu_info[i].saved_spu_status_R =
in_be32(&spu->problem->spu_status_R);
crash_spu_info[i].saved_spu_npc_RW =
in_be32(&spu->problem->spu_npc_RW);
crash_spu_info[i].saved_mfc_dar = spu_mfc_dar_get(spu);
crash_spu_info[i].saved_mfc_dsisr = spu_mfc_dsisr_get(spu);
tmp = spu_mfc_sr1_get(spu);
crash_spu_info[i].saved_mfc_sr1_RW = tmp;
tmp &= ~MFC_STATE1_MASTER_RUN_CONTROL_MASK;
spu_mfc_sr1_set(spu, tmp);
__delay(200);
}
}
void crash_register_spus(struct list_head *list)
{
struct spu *spu;
list_for_each_entry(spu, list, full_list) {
if (WARN_ON(spu->number >= CRASH_NUM_SPUS))
continue;
crash_spu_info[spu->number].spu = spu;
}
}
#else
static inline void crash_kexec_stop_spus(void)
{
}
#endif /* CONFIG_SPU_BASE */
/*
* Register a function to be called on shutdown. Only use this if you
@ -439,8 +373,6 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
crash_shutdown_cpu = -1;
__debugger_fault_handler = old_handler;
crash_kexec_stop_spus();
if (ppc_md.kexec_cpu_down)
ppc_md.kexec_cpu_down(1, 0);
}

View File

@ -880,7 +880,18 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
*/
andi. r10,r9,MSR_EE
beq 1f
/*
* Since the ftrace irqsoff latency trace checks CALLER_ADDR1,
* which is the stack frame here, we need to force a stack frame
* in case we came from user space.
*/
stwu r1,-32(r1)
mflr r0
stw r0,4(r1)
stwu r1,-32(r1)
bl trace_hardirqs_on
lwz r1,0(r1)
lwz r1,0(r1)
lwz r9,_MSR(r1)
1:
#endif /* CONFIG_TRACE_IRQFLAGS */

View File

@ -15,6 +15,7 @@
#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/irq.h>
#include <linux/ftrace.h>
#include <asm/machdep.h>
#include <asm/prom.h>
@ -44,10 +45,7 @@ void machine_kexec_mask_interrupts(void) {
void machine_crash_shutdown(struct pt_regs *regs)
{
if (ppc_md.machine_crash_shutdown)
ppc_md.machine_crash_shutdown(regs);
else
default_machine_crash_shutdown(regs);
default_machine_crash_shutdown(regs);
}
/*
@ -65,8 +63,6 @@ int machine_kexec_prepare(struct kimage *image)
void machine_kexec_cleanup(struct kimage *image)
{
if (ppc_md.machine_kexec_cleanup)
ppc_md.machine_kexec_cleanup(image);
}
void arch_crash_save_vmcoreinfo(void)
@ -87,10 +83,13 @@ void arch_crash_save_vmcoreinfo(void)
*/
void machine_kexec(struct kimage *image)
{
if (ppc_md.machine_kexec)
ppc_md.machine_kexec(image);
else
default_machine_kexec(image);
int save_ftrace_enabled;
save_ftrace_enabled = __ftrace_enabled_save();
default_machine_kexec(image);
__ftrace_enabled_restore(save_ftrace_enabled);
/* Fall back to normal restart if we're still alive. */
machine_restart(NULL);
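The bracketing above uses the __ftrace_enabled_save()/__ftrace_enabled_restore() helpers from <linux/ftrace.h>, which the hunk itself calls. A minimal sketch of the same pattern around an arbitrary callback (the wrapper name is illustrative, not from the tree):

#include <linux/ftrace.h>

static void example_run_untraced(void (*fn)(void))
{
	/* Remember whether the function tracer was enabled, then turn it off. */
	int saved = __ftrace_enabled_save();

	fn();

	/* Only reached if fn() returns; put the tracer back as it was. */
	__ftrace_enabled_restore(saved);
}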

View File

@ -631,7 +631,7 @@ void show_regs(struct pt_regs * regs)
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
printk("DEAR: "REG", ESR: "REG"\n", regs->dar, regs->dsisr);
#else
printk("DAR: "REG", DSISR: "REG"\n", regs->dar, regs->dsisr);
printk("DAR: "REG", DSISR: %08lx\n", regs->dar, regs->dsisr);
#endif
printk("TASK = %p[%d] '%s' THREAD: %p",
current, task_pid_nr(current), current->comm, task_thread_info(current));

View File

@ -256,31 +256,16 @@ static ssize_t rtas_flash_read(struct file *file, char __user *buf,
struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode);
struct rtas_update_flash_t *uf;
char msg[RTAS_MSG_MAXLEN];
int msglen;
uf = (struct rtas_update_flash_t *) dp->data;
uf = dp->data;
if (!strcmp(dp->name, FIRMWARE_FLASH_NAME)) {
get_flash_status_msg(uf->status, msg);
} else { /* FIRMWARE_UPDATE_NAME */
sprintf(msg, "%d\n", uf->status);
}
msglen = strlen(msg);
if (msglen > count)
msglen = count;
if (ppos && *ppos != 0)
return 0; /* be cheap */
if (!access_ok(VERIFY_WRITE, buf, msglen))
return -EINVAL;
if (copy_to_user(buf, msg, msglen))
return -EFAULT;
if (ppos)
*ppos = msglen;
return msglen;
return simple_read_from_buffer(buf, count, ppos, msg, strlen(msg));
}
/* constructor for flash_block_cache */
@ -394,26 +379,13 @@ static ssize_t manage_flash_read(struct file *file, char __user *buf,
char msg[RTAS_MSG_MAXLEN];
int msglen;
args_buf = (struct rtas_manage_flash_t *) dp->data;
args_buf = dp->data;
if (args_buf == NULL)
return 0;
msglen = sprintf(msg, "%d\n", args_buf->status);
if (msglen > count)
msglen = count;
if (ppos && *ppos != 0)
return 0; /* be cheap */
if (!access_ok(VERIFY_WRITE, buf, msglen))
return -EINVAL;
if (copy_to_user(buf, msg, msglen))
return -EFAULT;
if (ppos)
*ppos = msglen;
return msglen;
return simple_read_from_buffer(buf, count, ppos, msg, msglen);
}
static ssize_t manage_flash_write(struct file *file, const char __user *buf,
@ -495,24 +467,11 @@ static ssize_t validate_flash_read(struct file *file, char __user *buf,
char msg[RTAS_MSG_MAXLEN];
int msglen;
args_buf = (struct rtas_validate_flash_t *) dp->data;
args_buf = dp->data;
if (ppos && *ppos != 0)
return 0; /* be cheap */
msglen = get_validate_flash_msg(args_buf, msg);
if (msglen > count)
msglen = count;
if (!access_ok(VERIFY_WRITE, buf, msglen))
return -EINVAL;
if (copy_to_user(buf, msg, msglen))
return -EFAULT;
if (ppos)
*ppos = msglen;
return msglen;
return simple_read_from_buffer(buf, count, ppos, msg, msglen);
}
static ssize_t validate_flash_write(struct file *file, const char __user *buf,
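These conversions replace the open-coded *ppos/copy_to_user() bookkeeping with simple_read_from_buffer() from <linux/fs.h>, which clamps the length, copies to userspace and advances *ppos itself. A minimal sketch of a read handler built on it (handler and message names are hypothetical):

#include <linux/fs.h>
#include <linux/string.h>

static const char example_msg[] = "0\n";

static ssize_t example_status_read(struct file *file, char __user *buf,
				   size_t count, loff_t *ppos)
{
	/* Returns the number of bytes copied (0 at EOF) or a negative errno. */
	return simple_read_from_buffer(buf, count, ppos, example_msg,
				       strlen(example_msg));
}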

View File

@ -160,7 +160,7 @@ static int log_rtas_len(char * buf)
/* rtas fixed header */
len = 8;
err = (struct rtas_error_log *)buf;
if (err->extended_log_length) {
if (err->extended && err->extended_log_length) {
/* extended header */
len += err->extended_log_length;

View File

@ -265,11 +265,26 @@ void accumulate_stolen_time(void)
{
u64 sst, ust;
sst = scan_dispatch_log(get_paca()->starttime_user);
ust = scan_dispatch_log(get_paca()->starttime);
get_paca()->system_time -= sst;
get_paca()->user_time -= ust;
get_paca()->stolen_time += ust + sst;
u8 save_soft_enabled = local_paca->soft_enabled;
u8 save_hard_enabled = local_paca->hard_enabled;
/* We are called early in the exception entry, before
* soft/hard_enabled are sync'ed to the expected state
* for the exception. We are hard disabled but the PACA
* needs to reflect that so various debug stuff doesn't
* complain
*/
local_paca->soft_enabled = 0;
local_paca->hard_enabled = 0;
sst = scan_dispatch_log(local_paca->starttime_user);
ust = scan_dispatch_log(local_paca->starttime);
local_paca->system_time -= sst;
local_paca->user_time -= ust;
local_paca->stolen_time += ust + sst;
local_paca->soft_enabled = save_soft_enabled;
local_paca->hard_enabled = save_hard_enabled;
}
static inline u64 calculate_stolen_time(u64 stop_tb)

View File

@ -626,12 +626,6 @@ void machine_check_exception(struct pt_regs *regs)
if (recover > 0)
return;
if (user_mode(regs)) {
regs->msr |= MSR_RI;
_exception(SIGBUS, regs, BUS_ADRERR, regs->nip);
return;
}
#if defined(CONFIG_8xx) && defined(CONFIG_PCI)
/* the qspan pci read routines can cause machine checks -- Cort
*
@ -643,16 +637,12 @@ void machine_check_exception(struct pt_regs *regs)
return;
#endif
if (debugger_fault_handler(regs)) {
regs->msr |= MSR_RI;
if (debugger_fault_handler(regs))
return;
}
if (check_io_access(regs))
return;
if (debugger_fault_handler(regs))
return;
die("Machine check", regs, SIGBUS);
/* Must die if the interrupt is not recoverable */

View File

@ -172,6 +172,25 @@ globl(ftr_fixup_test6_expected)
3: or 3,3,3
#if 0
/* Test that if we have a larger else case the assembler spots it and
* reports an error. #if 0'ed so as not to break the build normally.
*/
ftr_fixup_test7:
or 1,1,1
BEGIN_FTR_SECTION
or 2,2,2
or 2,2,2
or 2,2,2
FTR_SECTION_ELSE
or 3,3,3
or 3,3,3
or 3,3,3
or 3,3,3
ALT_FTR_SECTION_END(0, 1)
or 1,1,1
#endif
#define MAKE_MACRO_TEST(TYPE) \
globl(ftr_fixup_test_ ##TYPE##_macros) \
or 1,1,1; \

View File

@ -35,6 +35,8 @@
/* system i/o configuration register high */
#define MPC83XX_SICRH_OFFS 0x118
#define MPC8308_SICRH_USB_MASK 0x000c0000
#define MPC8308_SICRH_USB_ULPI 0x00040000
#define MPC834X_SICRH_USB_UTMI 0x00020000
#define MPC831X_SICRH_USB_MASK 0x000000e0
#define MPC831X_SICRH_USB_ULPI 0x000000a0

View File

@ -127,7 +127,8 @@ int mpc831x_usb_cfg(void)
/* Configure clock */
immr_node = of_get_parent(np);
if (immr_node && of_device_is_compatible(immr_node, "fsl,mpc8315-immr"))
if (immr_node && (of_device_is_compatible(immr_node, "fsl,mpc8315-immr") ||
of_device_is_compatible(immr_node, "fsl,mpc8308-immr")))
clrsetbits_be32(immap + MPC83XX_SCCR_OFFS,
MPC8315_SCCR_USB_MASK,
MPC8315_SCCR_USB_DRCM_01);
@ -138,7 +139,11 @@ int mpc831x_usb_cfg(void)
/* Configure pin mux for ULPI. There is no pin mux for UTMI */
if (prop && !strcmp(prop, "ulpi")) {
if (of_device_is_compatible(immr_node, "fsl,mpc8315-immr")) {
if (of_device_is_compatible(immr_node, "fsl,mpc8308-immr")) {
clrsetbits_be32(immap + MPC83XX_SICRH_OFFS,
MPC8308_SICRH_USB_MASK,
MPC8308_SICRH_USB_ULPI);
} else if (of_device_is_compatible(immr_node, "fsl,mpc8315-immr")) {
clrsetbits_be32(immap + MPC83XX_SICRL_OFFS,
MPC8315_SICRL_USB_MASK,
MPC8315_SICRL_USB_ULPI);
@ -173,6 +178,9 @@ int mpc831x_usb_cfg(void)
!strcmp(prop, "utmi"))) {
u32 refsel;
if (of_device_is_compatible(immr_node, "fsl,mpc8308-immr"))
goto out;
if (of_device_is_compatible(immr_node, "fsl,mpc8315-immr"))
refsel = CONTROL_REFSEL_24MHZ;
else
@ -186,9 +194,11 @@ int mpc831x_usb_cfg(void)
temp = CONTROL_PHY_CLK_SEL_ULPI;
#ifdef CONFIG_USB_OTG
/* Set OTG_PORT */
dr_mode = of_get_property(np, "dr_mode", NULL);
if (dr_mode && !strcmp(dr_mode, "otg"))
temp |= CONTROL_OTG_PORT;
if (!of_device_is_compatible(immr_node, "fsl,mpc8308-immr")) {
dr_mode = of_get_property(np, "dr_mode", NULL);
if (dr_mode && !strcmp(dr_mode, "otg"))
temp |= CONTROL_OTG_PORT;
}
#endif /* CONFIG_USB_OTG */
out_be32(usb_regs + FSL_USB2_CONTROL_OFFS, temp);
} else {
@ -196,6 +206,7 @@ int mpc831x_usb_cfg(void)
ret = -EINVAL;
}
out:
iounmap(usb_regs);
of_node_put(np);
return ret;

View File

@ -39,8 +39,6 @@ struct spu_gov_info_struct {
};
static DEFINE_PER_CPU(struct spu_gov_info_struct, spu_gov_info);
static struct workqueue_struct *kspugov_wq;
static int calc_freq(struct spu_gov_info_struct *info)
{
int cpu;
@ -71,14 +69,14 @@ static void spu_gov_work(struct work_struct *work)
__cpufreq_driver_target(info->policy, target_freq, CPUFREQ_RELATION_H);
delay = usecs_to_jiffies(info->poll_int);
queue_delayed_work_on(info->policy->cpu, kspugov_wq, &info->work, delay);
schedule_delayed_work_on(info->policy->cpu, &info->work, delay);
}
static void spu_gov_init_work(struct spu_gov_info_struct *info)
{
int delay = usecs_to_jiffies(info->poll_int);
INIT_DELAYED_WORK_DEFERRABLE(&info->work, spu_gov_work);
queue_delayed_work_on(info->policy->cpu, kspugov_wq, &info->work, delay);
schedule_delayed_work_on(info->policy->cpu, &info->work, delay);
}
static void spu_gov_cancel_work(struct spu_gov_info_struct *info)
@ -152,27 +150,15 @@ static int __init spu_gov_init(void)
{
int ret;
kspugov_wq = create_workqueue("kspugov");
if (!kspugov_wq) {
printk(KERN_ERR "creation of kspugov failed\n");
ret = -EFAULT;
goto out;
}
ret = cpufreq_register_governor(&spu_governor);
if (ret) {
if (ret)
printk(KERN_ERR "registration of governor failed\n");
destroy_workqueue(kspugov_wq);
goto out;
}
out:
return ret;
}
static void __exit spu_gov_exit(void)
{
cpufreq_unregister_governor(&spu_governor);
destroy_workqueue(kspugov_wq);
}

View File

@ -145,9 +145,4 @@ define_machine(qpace) {
.calibrate_decr = generic_calibrate_decr,
.progress = qpace_progress,
.init_IRQ = iic_init_IRQ,
#ifdef CONFIG_KEXEC
.machine_kexec = default_machine_kexec,
.machine_kexec_prepare = default_machine_kexec_prepare,
.machine_crash_shutdown = default_machine_crash_shutdown,
#endif
};

View File

@ -37,6 +37,7 @@
#include <asm/spu_csa.h>
#include <asm/xmon.h>
#include <asm/prom.h>
#include <asm/kexec.h>
const struct spu_management_ops *spu_management_ops;
EXPORT_SYMBOL_GPL(spu_management_ops);
@ -727,6 +728,75 @@ static ssize_t spu_stat_show(struct sys_device *sysdev,
static SYSDEV_ATTR(stat, 0644, spu_stat_show, NULL);
#ifdef CONFIG_KEXEC
struct crash_spu_info {
struct spu *spu;
u32 saved_spu_runcntl_RW;
u32 saved_spu_status_R;
u32 saved_spu_npc_RW;
u64 saved_mfc_sr1_RW;
u64 saved_mfc_dar;
u64 saved_mfc_dsisr;
};
#define CRASH_NUM_SPUS 16 /* Enough for current hardware */
static struct crash_spu_info crash_spu_info[CRASH_NUM_SPUS];
static void crash_kexec_stop_spus(void)
{
struct spu *spu;
int i;
u64 tmp;
for (i = 0; i < CRASH_NUM_SPUS; i++) {
if (!crash_spu_info[i].spu)
continue;
spu = crash_spu_info[i].spu;
crash_spu_info[i].saved_spu_runcntl_RW =
in_be32(&spu->problem->spu_runcntl_RW);
crash_spu_info[i].saved_spu_status_R =
in_be32(&spu->problem->spu_status_R);
crash_spu_info[i].saved_spu_npc_RW =
in_be32(&spu->problem->spu_npc_RW);
crash_spu_info[i].saved_mfc_dar = spu_mfc_dar_get(spu);
crash_spu_info[i].saved_mfc_dsisr = spu_mfc_dsisr_get(spu);
tmp = spu_mfc_sr1_get(spu);
crash_spu_info[i].saved_mfc_sr1_RW = tmp;
tmp &= ~MFC_STATE1_MASTER_RUN_CONTROL_MASK;
spu_mfc_sr1_set(spu, tmp);
__delay(200);
}
}
static void crash_register_spus(struct list_head *list)
{
struct spu *spu;
int ret;
list_for_each_entry(spu, list, full_list) {
if (WARN_ON(spu->number >= CRASH_NUM_SPUS))
continue;
crash_spu_info[spu->number].spu = spu;
}
ret = crash_shutdown_register(&crash_kexec_stop_spus);
if (ret)
printk(KERN_ERR "Could not register SPU crash handler");
}
#else
static inline void crash_register_spus(struct list_head *list)
{
}
#endif
static int __init init_spu_base(void)
{
int i, ret = 0;
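crash_shutdown_register() is the generic hook list in arch/powerpc/kernel/crash.c that default_machine_crash_shutdown() walks; CRASH_HANDLER_MAX is bumped to 3 earlier in this merge to leave room for the SPU entry. A minimal sketch of registering such a handler from built-in code, assuming CONFIG_KEXEC=y (handler and initcall names are illustrative):

#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/kexec.h>

/* Called from default_machine_crash_shutdown() before the kdump kernel runs. */
static void example_crash_quiesce(void)
{
	/* Quiesce hardware state so the capture kernel finds it consistent. */
}

static int __init example_crash_hook_init(void)
{
	return crash_shutdown_register(&example_crash_quiesce);
}
arch_initcall(example_crash_hook_init);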

View File

@ -219,24 +219,17 @@ spufs_mem_write(struct file *file, const char __user *buffer,
loff_t pos = *ppos;
int ret;
if (pos < 0)
return -EINVAL;
if (pos > LS_SIZE)
return -EFBIG;
if (size > LS_SIZE - pos)
size = LS_SIZE - pos;
ret = spu_acquire(ctx);
if (ret)
return ret;
local_store = ctx->ops->get_ls(ctx);
ret = copy_from_user(local_store + pos, buffer, size);
size = simple_write_to_buffer(local_store, LS_SIZE, ppos, buffer, size);
spu_release(ctx);
if (ret)
return -EFAULT;
*ppos = pos + size;
return size;
}
@ -574,18 +567,15 @@ spufs_regs_write(struct file *file, const char __user *buffer,
if (*pos >= sizeof(lscsa->gprs))
return -EFBIG;
size = min_t(ssize_t, sizeof(lscsa->gprs) - *pos, size);
*pos += size;
ret = spu_acquire_saved(ctx);
if (ret)
return ret;
ret = copy_from_user((char *)lscsa->gprs + *pos - size,
buffer, size) ? -EFAULT : size;
size = simple_write_to_buffer(lscsa->gprs, sizeof(lscsa->gprs), pos,
buffer, size);
spu_release_saved(ctx);
return ret;
return size;
}
static const struct file_operations spufs_regs_fops = {
@ -630,18 +620,15 @@ spufs_fpcr_write(struct file *file, const char __user * buffer,
if (*pos >= sizeof(lscsa->fpcr))
return -EFBIG;
size = min_t(ssize_t, sizeof(lscsa->fpcr) - *pos, size);
ret = spu_acquire_saved(ctx);
if (ret)
return ret;
*pos += size;
ret = copy_from_user((char *)&lscsa->fpcr + *pos - size,
buffer, size) ? -EFAULT : size;
size = simple_write_to_buffer(&lscsa->fpcr, sizeof(lscsa->fpcr), pos,
buffer, size);
spu_release_saved(ctx);
return ret;
return size;
}
static const struct file_operations spufs_fpcr_fops = {
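The write-side counterpart used here, simple_write_to_buffer() (also from <linux/fs.h>), does the mirror-image job: it bounds the copy by the destination size, copies from userspace and advances *ppos. A minimal sketch (buffer and handler names are hypothetical):

#include <linux/fs.h>

static char example_buf[64];

static ssize_t example_write(struct file *file, const char __user *buf,
			     size_t count, loff_t *ppos)
{
	/* Returns the number of bytes accepted or a negative errno. */
	return simple_write_to_buffer(example_buf, sizeof(example_buf),
				      ppos, buf, count);
}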

View File

@ -75,14 +75,6 @@ static void gamecube_shutdown(void)
flipper_quiesce();
}
#ifdef CONFIG_KEXEC
static int gamecube_kexec_prepare(struct kimage *image)
{
return 0;
}
#endif /* CONFIG_KEXEC */
define_machine(gamecube) {
.name = "gamecube",
.probe = gamecube_probe,
@ -95,9 +87,6 @@ define_machine(gamecube) {
.calibrate_decr = generic_calibrate_decr,
.progress = udbg_progress,
.machine_shutdown = gamecube_shutdown,
#ifdef CONFIG_KEXEC
.machine_kexec_prepare = gamecube_kexec_prepare,
#endif
};

View File

@ -18,7 +18,6 @@
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/kexec.h>
#include <linux/of_platform.h>
#include <linux/memblock.h>
#include <mm/mmu_decl.h>
@ -226,13 +225,6 @@ static void wii_shutdown(void)
flipper_quiesce();
}
#ifdef CONFIG_KEXEC
static int wii_machine_kexec_prepare(struct kimage *image)
{
return 0;
}
#endif /* CONFIG_KEXEC */
define_machine(wii) {
.name = "wii",
.probe = wii_probe,
@ -246,9 +238,6 @@ define_machine(wii) {
.calibrate_decr = generic_calibrate_decr,
.progress = udbg_progress,
.machine_shutdown = wii_shutdown,
#ifdef CONFIG_KEXEC
.machine_kexec_prepare = wii_machine_kexec_prepare,
#endif
};
static struct of_device_id wii_of_bus[] = {

View File

@ -61,13 +61,3 @@ void __init setup_kexec_cpu_down_xics(void)
{
ppc_md.kexec_cpu_down = pseries_kexec_cpu_down_xics;
}
static int __init pseries_kexec_setup(void)
{
ppc_md.machine_kexec = default_machine_kexec;
ppc_md.machine_kexec_prepare = default_machine_kexec_prepare;
ppc_md.machine_crash_shutdown = default_machine_crash_shutdown;
return 0;
}
machine_device_initcall(pseries, pseries_kexec_setup);

View File

@ -54,7 +54,8 @@
static unsigned char ras_log_buf[RTAS_ERROR_LOG_MAX];
static DEFINE_SPINLOCK(ras_log_buf_lock);
static char mce_data_buf[RTAS_ERROR_LOG_MAX];
static char global_mce_data_buf[RTAS_ERROR_LOG_MAX];
static DEFINE_PER_CPU(__u64, mce_data_buf);
static int ras_get_sensor_state_token;
static int ras_check_exception_token;
@ -196,12 +197,24 @@ static irqreturn_t ras_error_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
/* Get the error information for errors coming through the
/*
* Some versions of FWNMI place the buffer inside the 4kB page starting at
* 0x7000. Other versions place it inside the rtas buffer. We check both.
*/
#define VALID_FWNMI_BUFFER(A) \
((((A) >= 0x7000) && ((A) < 0x7ff0)) || \
(((A) >= rtas.base) && ((A) < (rtas.base + rtas.size - 16))))
/*
* Get the error information for errors coming through the
* FWNMI vectors. The pt_regs' r3 will be updated to reflect
* the actual r3 if possible, and a ptr to the error log entry
* will be returned if found.
*
* The mce_data_buf does not have any locks or protection around it,
* If the RTAS error is not of the extended type, then we put it in a per
* cpu 64bit buffer. If it is the extended type we use global_mce_data_buf.
*
* The global_mce_data_buf does not have any locks or protection around it,
* if a second machine check comes in, or a system reset is done
* before we have logged the error, then we will get corruption in the
* error log. This is preferable over holding off on calling
@ -210,20 +223,31 @@ static irqreturn_t ras_error_interrupt(int irq, void *dev_id)
*/
static struct rtas_error_log *fwnmi_get_errinfo(struct pt_regs *regs)
{
unsigned long errdata = regs->gpr[3];
struct rtas_error_log *errhdr = NULL;
unsigned long *savep;
struct rtas_error_log *h, *errhdr = NULL;
if ((errdata >= 0x7000 && errdata < 0x7fff0) ||
(errdata >= rtas.base && errdata < rtas.base + rtas.size - 16)) {
savep = __va(errdata);
regs->gpr[3] = savep[0]; /* restore original r3 */
memset(mce_data_buf, 0, RTAS_ERROR_LOG_MAX);
memcpy(mce_data_buf, (char *)(savep + 1), RTAS_ERROR_LOG_MAX);
errhdr = (struct rtas_error_log *)mce_data_buf;
} else {
printk("FWNMI: corrupt r3\n");
if (!VALID_FWNMI_BUFFER(regs->gpr[3])) {
printk(KERN_ERR "FWNMI: corrupt r3\n");
return NULL;
}
savep = __va(regs->gpr[3]);
regs->gpr[3] = savep[0]; /* restore original r3 */
/* If it isn't an extended log we can use the per cpu 64bit buffer */
h = (struct rtas_error_log *)&savep[1];
if (!h->extended) {
memcpy(&__get_cpu_var(mce_data_buf), h, sizeof(__u64));
errhdr = (struct rtas_error_log *)&__get_cpu_var(mce_data_buf);
} else {
int len;
len = max_t(int, 8+h->extended_log_length, RTAS_ERROR_LOG_MAX);
memset(global_mce_data_buf, 0, RTAS_ERROR_LOG_MAX);
memcpy(global_mce_data_buf, h, len);
errhdr = (struct rtas_error_log *)global_mce_data_buf;
}
return errhdr;
}
@ -235,7 +259,7 @@ static void fwnmi_release_errinfo(void)
{
int ret = rtas_call(rtas_token("ibm,nmi-interlock"), 0, 1, NULL);
if (ret != 0)
printk("FWNMI: nmi-interlock failed: %d\n", ret);
printk(KERN_ERR "FWNMI: nmi-interlock failed: %d\n", ret);
}
int pSeries_system_reset_exception(struct pt_regs *regs)
@ -259,31 +283,43 @@ int pSeries_system_reset_exception(struct pt_regs *regs)
* Return 1 if corrected (or delivered a signal).
* Return 0 if there is nothing we can do.
*/
static int recover_mce(struct pt_regs *regs, struct rtas_error_log * err)
static int recover_mce(struct pt_regs *regs, struct rtas_error_log *err)
{
int nonfatal = 0;
int recovered = 0;
if (err->disposition == RTAS_DISP_FULLY_RECOVERED) {
if (!(regs->msr & MSR_RI)) {
/* If MSR_RI isn't set, we cannot recover */
recovered = 0;
} else if (err->disposition == RTAS_DISP_FULLY_RECOVERED) {
/* Platform corrected itself */
nonfatal = 1;
} else if ((regs->msr & MSR_RI) &&
user_mode(regs) &&
err->severity == RTAS_SEVERITY_ERROR_SYNC &&
err->disposition == RTAS_DISP_NOT_RECOVERED &&
err->target == RTAS_TARGET_MEMORY &&
err->type == RTAS_TYPE_ECC_UNCORR &&
!(current->pid == 0 || is_global_init(current))) {
/* Kill off a user process with an ECC error */
printk(KERN_ERR "MCE: uncorrectable ecc error for pid %d\n",
current->pid);
/* XXX something better for ECC error? */
_exception(SIGBUS, regs, BUS_ADRERR, regs->nip);
nonfatal = 1;
recovered = 1;
} else if (err->disposition == RTAS_DISP_LIMITED_RECOVERY) {
/* Platform corrected itself but could be degraded */
printk(KERN_ERR "MCE: limited recovery, system may "
"be degraded\n");
recovered = 1;
} else if (user_mode(regs) && !is_global_init(current) &&
err->severity == RTAS_SEVERITY_ERROR_SYNC) {
/*
* If we received a synchronous error when in userspace
* kill the task. Firmware may report details of the fail
* asynchronously, so we can't rely on the target and type
* fields being valid here.
*/
printk(KERN_ERR "MCE: uncorrectable error, killing task "
"%s:%d\n", current->comm, current->pid);
_exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip);
recovered = 1;
}
log_error((char *)err, ERR_TYPE_RTAS_LOG, !nonfatal);
log_error((char *)err, ERR_TYPE_RTAS_LOG, 0);
return nonfatal;
return recovered;
}
/*

View File

@ -1555,8 +1555,6 @@ int fsl_rio_setup(struct platform_device *dev)
saved_mcheck_exception = ppc_md.machine_check_exception;
ppc_md.machine_check_exception = fsl_rio_mcheck_exception;
#endif
/* Ensure that RFXE is set */
mtspr(SPRN_HID1, (mfspr(SPRN_HID1) | 0x20000));
return 0;
err:

View File

@ -674,7 +674,8 @@ void mpic_unmask_irq(unsigned int irq)
/* make sure mask gets to controller before we return to user */
do {
if (!loops--) {
printk(KERN_ERR "mpic_enable_irq timeout\n");
printk(KERN_ERR "%s: timeout on hwirq %u\n",
__func__, src);
break;
}
} while(mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) & MPIC_VECPRI_MASK);
@ -695,7 +696,8 @@ void mpic_mask_irq(unsigned int irq)
/* make sure mask gets to controller before we return to user */
do {
if (!loops--) {
printk(KERN_ERR "mpic_enable_irq timeout\n");
printk(KERN_ERR "%s: timeout on hwirq %u\n",
__func__, src);
break;
}
} while(!(mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) & MPIC_VECPRI_MASK));

View File

@ -443,7 +443,7 @@ static int fan_read_reg(int reg, unsigned char *buf, int nb)
tries = 0;
for (;;) {
nr = i2c_master_recv(fcu, buf, nb);
if (nr > 0 || (nr < 0 && nr != ENODEV) || tries >= 100)
if (nr > 0 || (nr < 0 && nr != -ENODEV) || tries >= 100)
break;
msleep(10);
++tries;
@ -464,7 +464,7 @@ static int fan_write_reg(int reg, const unsigned char *ptr, int nb)
tries = 0;
for (;;) {
nw = i2c_master_send(fcu, buf, nb);
if (nw > 0 || (nw < 0 && nw != EIO) || tries >= 100)
if (nw > 0 || (nw < 0 && nw != -EIO) || tries >= 100)
break;
msleep(10);
++tries;