Merge branch 'depends/rmk/devel-stable' into next/boards

Olof Johansson 2011-12-16 14:50:01 -08:00
commit b94ee062b5
402 changed files with 3099 additions and 3598 deletions

View File

@ -51,15 +51,14 @@ ffc00000 ffefffff DMA memory mapping region. Memory returned
ff000000 ffbfffff Reserved for future expansion of DMA
mapping region.
VMALLOC_END feffffff Free for platform use, recommended.
VMALLOC_END must be aligned to a 2MB
boundary.
VMALLOC_START VMALLOC_END-1 vmalloc() / ioremap() space.
Memory returned by vmalloc/ioremap will
be dynamically placed in this region.
VMALLOC_START may be based upon the value
of the high_memory variable.
Machine specific static mappings are also
located here through iotable_init().
VMALLOC_START is based upon the value
of the high_memory variable, and VMALLOC_END
is equal to 0xff000000.
PAGE_OFFSET high_memory-1 Kernel direct-mapped RAM region.
This maps the platform's RAM, and typically

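The machine-specific static mappings mentioned above are installed through iotable_init(). As a rough illustration only (device name, virtual address and physical base are hypothetical, not taken from this commit), a platform map could look like:

#include <linux/init.h>
#include <asm/mach/map.h>

/* Hypothetical static device mapping placed in the vmalloc region. */
static struct map_desc example_io_desc[] __initdata = {
	{
		.virtual	= 0xfee00000,			/* hypothetical virtual address */
		.pfn		= __phys_to_pfn(0x10000000),	/* hypothetical physical base */
		.length		= 0x00100000,			/* 1MB */
		.type		= MT_DEVICE,
	},
};

static void __init example_map_io(void)
{
	iotable_init(example_io_desc, ARRAY_SIZE(example_io_desc));
}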
View File

@ -42,6 +42,10 @@ Optional
- interrupts : Interrupt source of the parent interrupt controller. Only
present on secondary GICs.
- cpu-offset : per-cpu offset within the distributor and cpu interface
regions, used when the GIC doesn't have banked registers. The offset is
cpu-offset * cpu-nr.
Example:
intc: interrupt-controller@fff11000 {

View File

@ -0,0 +1,29 @@
* ARM Vectored Interrupt Controller
One or more Vectored Interrupt Controllers (VICs) can be connected in an ARM
system for interrupt routing. For multiple controllers they can either be
nested or have the outputs wire-OR'd together.
Required properties:
- compatible : should be one of
"arm,pl190-vic"
"arm,pl192-vic"
- interrupt-controller : Identifies the node as an interrupt controller
- #interrupt-cells : The number of cells to define the interrupts. Must be 1 as
the VIC has no configuration options for interrupt sources. The cell is a u32
and defines the interrupt number.
- reg : The register bank for the VIC.
Optional properties:
- interrupts : Interrupt source for parent controllers if the VIC is nested.
Example:
vic0: interrupt-controller@60000 {
compatible = "arm,pl192-vic";
interrupt-controller;
#interrupt-cells = <1>;
reg = <0x60000 0x1000>;
};

View File

@ -1971,7 +1971,7 @@ endchoice
config XIP_KERNEL
bool "Kernel Execute-In-Place from ROM"
depends on !ZBOOT_ROM
depends on !ZBOOT_ROM && !ARM_LPAE
help
Execute-In-Place allows the kernel to run from non-volatile storage
directly addressable by the CPU, such as NOR flash. This saves RAM
@ -2001,7 +2001,7 @@ config XIP_PHYS_ADDR
config KEXEC
bool "Kexec system call (EXPERIMENTAL)"
depends on EXPERIMENTAL
depends on EXPERIMENTAL && (!SMP || HOTPLUG_CPU)
help
kexec is a system call that implements the ability to shutdown your
current kernel, and to start another kernel. It is like a reboot

View File

@ -659,6 +659,7 @@ __armv7_mmu_cache_on:
mcrne p15, 0, r3, c2, c0, 0 @ load page table pointer
mcrne p15, 0, r1, c3, c0, 0 @ load domain access control
#endif
mcr p15, 0, r0, c7, c5, 4 @ ISB
mcr p15, 0, r0, c1, c0, 0 @ load control register
mrc p15, 0, r0, c1, c0, 0 @ and read it back
mov r0, #0

View File

@ -1,8 +1,14 @@
config ARM_GIC
select IRQ_DOMAIN
select MULTI_IRQ_HANDLER
bool
config GIC_NON_BANKED
bool
config ARM_VIC
select IRQ_DOMAIN
select MULTI_IRQ_HANDLER
bool
config ARM_VIC_NR

View File

@ -40,13 +40,36 @@
#include <linux/slab.h>
#include <asm/irq.h>
#include <asm/exception.h>
#include <asm/mach/irq.h>
#include <asm/hardware/gic.h>
static DEFINE_RAW_SPINLOCK(irq_controller_lock);
union gic_base {
void __iomem *common_base;
void __percpu __iomem **percpu_base;
};
/* Address of GIC 0 CPU interface */
void __iomem *gic_cpu_base_addr __read_mostly;
struct gic_chip_data {
unsigned int irq_offset;
union gic_base dist_base;
union gic_base cpu_base;
#ifdef CONFIG_CPU_PM
u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)];
u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)];
u32 saved_spi_target[DIV_ROUND_UP(1020, 4)];
u32 __percpu *saved_ppi_enable;
u32 __percpu *saved_ppi_conf;
#endif
#ifdef CONFIG_IRQ_DOMAIN
struct irq_domain domain;
#endif
unsigned int gic_irqs;
#ifdef CONFIG_GIC_NON_BANKED
void __iomem *(*get_base)(union gic_base *);
#endif
};
static DEFINE_RAW_SPINLOCK(irq_controller_lock);
/*
* Supported arch specific GIC irq extension.
@ -67,16 +90,48 @@ struct irq_chip gic_arch_extn = {
static struct gic_chip_data gic_data[MAX_GIC_NR] __read_mostly;
#ifdef CONFIG_GIC_NON_BANKED
static void __iomem *gic_get_percpu_base(union gic_base *base)
{
return *__this_cpu_ptr(base->percpu_base);
}
static void __iomem *gic_get_common_base(union gic_base *base)
{
return base->common_base;
}
static inline void __iomem *gic_data_dist_base(struct gic_chip_data *data)
{
return data->get_base(&data->dist_base);
}
static inline void __iomem *gic_data_cpu_base(struct gic_chip_data *data)
{
return data->get_base(&data->cpu_base);
}
static inline void gic_set_base_accessor(struct gic_chip_data *data,
void __iomem *(*f)(union gic_base *))
{
data->get_base = f;
}
#else
#define gic_data_dist_base(d) ((d)->dist_base.common_base)
#define gic_data_cpu_base(d) ((d)->cpu_base.common_base)
#define gic_set_base_accessor(d,f)
#endif
static inline void __iomem *gic_dist_base(struct irq_data *d)
{
struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
return gic_data->dist_base;
return gic_data_dist_base(gic_data);
}
static inline void __iomem *gic_cpu_base(struct irq_data *d)
{
struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
return gic_data->cpu_base;
return gic_data_cpu_base(gic_data);
}
static inline unsigned int gic_irq(struct irq_data *d)
@ -215,6 +270,32 @@ static int gic_set_wake(struct irq_data *d, unsigned int on)
#define gic_set_wake NULL
#endif
asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
u32 irqstat, irqnr;
struct gic_chip_data *gic = &gic_data[0];
void __iomem *cpu_base = gic_data_cpu_base(gic);
do {
irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK);
irqnr = irqstat & ~0x1c00;
if (likely(irqnr > 15 && irqnr < 1021)) {
irqnr = irq_domain_to_irq(&gic->domain, irqnr);
handle_IRQ(irqnr, regs);
continue;
}
if (irqnr < 16) {
writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
#ifdef CONFIG_SMP
handle_IPI(irqnr, regs);
#endif
continue;
}
break;
} while (1);
}
static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
{
struct gic_chip_data *chip_data = irq_get_handler_data(irq);
@ -225,7 +306,7 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
chained_irq_enter(chip, desc);
raw_spin_lock(&irq_controller_lock);
status = readl_relaxed(chip_data->cpu_base + GIC_CPU_INTACK);
status = readl_relaxed(gic_data_cpu_base(chip_data) + GIC_CPU_INTACK);
raw_spin_unlock(&irq_controller_lock);
gic_irq = (status & 0x3ff);
@ -270,7 +351,7 @@ static void __init gic_dist_init(struct gic_chip_data *gic)
u32 cpumask;
unsigned int gic_irqs = gic->gic_irqs;
struct irq_domain *domain = &gic->domain;
void __iomem *base = gic->dist_base;
void __iomem *base = gic_data_dist_base(gic);
u32 cpu = 0;
#ifdef CONFIG_SMP
@ -330,8 +411,8 @@ static void __init gic_dist_init(struct gic_chip_data *gic)
static void __cpuinit gic_cpu_init(struct gic_chip_data *gic)
{
void __iomem *dist_base = gic->dist_base;
void __iomem *base = gic->cpu_base;
void __iomem *dist_base = gic_data_dist_base(gic);
void __iomem *base = gic_data_cpu_base(gic);
int i;
/*
@ -368,7 +449,7 @@ static void gic_dist_save(unsigned int gic_nr)
BUG();
gic_irqs = gic_data[gic_nr].gic_irqs;
dist_base = gic_data[gic_nr].dist_base;
dist_base = gic_data_dist_base(&gic_data[gic_nr]);
if (!dist_base)
return;
@ -403,7 +484,7 @@ static void gic_dist_restore(unsigned int gic_nr)
BUG();
gic_irqs = gic_data[gic_nr].gic_irqs;
dist_base = gic_data[gic_nr].dist_base;
dist_base = gic_data_dist_base(&gic_data[gic_nr]);
if (!dist_base)
return;
@ -439,8 +520,8 @@ static void gic_cpu_save(unsigned int gic_nr)
if (gic_nr >= MAX_GIC_NR)
BUG();
dist_base = gic_data[gic_nr].dist_base;
cpu_base = gic_data[gic_nr].cpu_base;
dist_base = gic_data_dist_base(&gic_data[gic_nr]);
cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);
if (!dist_base || !cpu_base)
return;
@ -465,8 +546,8 @@ static void gic_cpu_restore(unsigned int gic_nr)
if (gic_nr >= MAX_GIC_NR)
BUG();
dist_base = gic_data[gic_nr].dist_base;
cpu_base = gic_data[gic_nr].cpu_base;
dist_base = gic_data_dist_base(&gic_data[gic_nr]);
cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);
if (!dist_base || !cpu_base)
return;
@ -491,6 +572,11 @@ static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v)
int i;
for (i = 0; i < MAX_GIC_NR; i++) {
#ifdef CONFIG_GIC_NON_BANKED
/* Skip over unused GICs */
if (!gic_data[i].get_base)
continue;
#endif
switch (cmd) {
case CPU_PM_ENTER:
gic_cpu_save(i);
@ -564,8 +650,9 @@ const struct irq_domain_ops gic_irq_domain_ops = {
#endif
};
void __init gic_init(unsigned int gic_nr, int irq_start,
void __iomem *dist_base, void __iomem *cpu_base)
void __init gic_init_bases(unsigned int gic_nr, int irq_start,
void __iomem *dist_base, void __iomem *cpu_base,
u32 percpu_offset)
{
struct gic_chip_data *gic;
struct irq_domain *domain;
@ -575,8 +662,36 @@ void __init gic_init(unsigned int gic_nr, int irq_start,
gic = &gic_data[gic_nr];
domain = &gic->domain;
gic->dist_base = dist_base;
gic->cpu_base = cpu_base;
#ifdef CONFIG_GIC_NON_BANKED
if (percpu_offset) { /* Franken-GIC without banked registers... */
unsigned int cpu;
gic->dist_base.percpu_base = alloc_percpu(void __iomem *);
gic->cpu_base.percpu_base = alloc_percpu(void __iomem *);
if (WARN_ON(!gic->dist_base.percpu_base ||
!gic->cpu_base.percpu_base)) {
free_percpu(gic->dist_base.percpu_base);
free_percpu(gic->cpu_base.percpu_base);
return;
}
for_each_possible_cpu(cpu) {
unsigned long offset = percpu_offset * cpu_logical_map(cpu);
*per_cpu_ptr(gic->dist_base.percpu_base, cpu) = dist_base + offset;
*per_cpu_ptr(gic->cpu_base.percpu_base, cpu) = cpu_base + offset;
}
gic_set_base_accessor(gic, gic_get_percpu_base);
} else
#endif
{ /* Normal, sane GIC... */
WARN(percpu_offset,
"GIC_NON_BANKED not enabled, ignoring %08x offset!",
percpu_offset);
gic->dist_base.common_base = dist_base;
gic->cpu_base.common_base = cpu_base;
gic_set_base_accessor(gic, gic_get_common_base);
}
/*
* For primary GICs, skip over SGIs.
@ -584,8 +699,6 @@ void __init gic_init(unsigned int gic_nr, int irq_start,
*/
domain->hwirq_base = 32;
if (gic_nr == 0) {
gic_cpu_base_addr = cpu_base;
if ((irq_start & 31) > 0) {
domain->hwirq_base = 16;
if (irq_start != -1)
@ -597,7 +710,7 @@ void __init gic_init(unsigned int gic_nr, int irq_start,
* Find out how many interrupts are supported.
* The GIC only supports up to 1020 interrupt sources.
*/
gic_irqs = readl_relaxed(dist_base + GIC_DIST_CTR) & 0x1f;
gic_irqs = readl_relaxed(gic_data_dist_base(gic) + GIC_DIST_CTR) & 0x1f;
gic_irqs = (gic_irqs + 1) * 32;
if (gic_irqs > 1020)
gic_irqs = 1020;
@ -645,7 +758,7 @@ void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
dsb();
/* this always happens on GIC0 */
writel_relaxed(map << 16 | irq, gic_data[0].dist_base + GIC_DIST_SOFTINT);
writel_relaxed(map << 16 | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
}
#endif
@ -656,6 +769,7 @@ int __init gic_of_init(struct device_node *node, struct device_node *parent)
{
void __iomem *cpu_base;
void __iomem *dist_base;
u32 percpu_offset;
int irq;
struct irq_domain *domain = &gic_data[gic_cnt].domain;
@ -668,9 +782,12 @@ int __init gic_of_init(struct device_node *node, struct device_node *parent)
cpu_base = of_iomap(node, 1);
WARN(!cpu_base, "unable to map gic cpu registers\n");
if (of_property_read_u32(node, "cpu-offset", &percpu_offset))
percpu_offset = 0;
domain->of_node = of_node_get(node);
gic_init(gic_cnt, -1, dist_base, cpu_base);
gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset);
if (parent) {
irq = irq_of_parse_and_map(node, 0);

View File

@ -19,17 +19,22 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/export.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/syscore_ops.h>
#include <linux/device.h>
#include <linux/amba/bus.h>
#include <asm/exception.h>
#include <asm/mach/irq.h>
#include <asm/hardware/vic.h>
#ifdef CONFIG_PM
/**
* struct vic_device - VIC PM device
* @irq: The IRQ number for the base of the VIC.
@ -40,6 +45,7 @@
* @int_enable: Save for VIC_INT_ENABLE.
* @soft_int: Save for VIC_INT_SOFT.
* @protect: Save for VIC_PROTECT.
* @domain: The IRQ domain for the VIC.
*/
struct vic_device {
void __iomem *base;
@ -50,13 +56,13 @@ struct vic_device {
u32 int_enable;
u32 soft_int;
u32 protect;
struct irq_domain domain;
};
/* we cannot allocate memory when VICs are initially registered */
static struct vic_device vic_devices[CONFIG_ARM_VIC_NR];
static int vic_id;
#endif /* CONFIG_PM */
/**
* vic_init2 - common initialisation code
@ -156,39 +162,50 @@ static int __init vic_pm_init(void)
return 0;
}
late_initcall(vic_pm_init);
#endif /* CONFIG_PM */
/**
* vic_pm_register - Register a VIC for later power management control
* vic_register() - Register a VIC.
* @base: The base address of the VIC.
* @irq: The base IRQ for the VIC.
* @resume_sources: bitmask of interrupts allowed for resume sources.
* @node: The device tree node associated with the VIC.
*
* Register the VIC with the system device tree so that it can be notified
* of suspend and resume requests and ensure that the correct actions are
* taken to re-instate the settings on resume.
*
* This also configures the IRQ domain for the VIC.
*/
static void __init vic_pm_register(void __iomem *base, unsigned int irq, u32 resume_sources)
static void __init vic_register(void __iomem *base, unsigned int irq,
u32 resume_sources, struct device_node *node)
{
struct vic_device *v;
if (vic_id >= ARRAY_SIZE(vic_devices))
if (vic_id >= ARRAY_SIZE(vic_devices)) {
printk(KERN_ERR "%s: too few VICs, increase CONFIG_ARM_VIC_NR\n", __func__);
else {
v = &vic_devices[vic_id];
v->base = base;
v->resume_sources = resume_sources;
v->irq = irq;
vic_id++;
return;
}
v = &vic_devices[vic_id];
v->base = base;
v->resume_sources = resume_sources;
v->irq = irq;
vic_id++;
v->domain.irq_base = irq;
v->domain.nr_irq = 32;
#ifdef CONFIG_OF_IRQ
v->domain.of_node = of_node_get(node);
v->domain.ops = &irq_domain_simple_ops;
#endif /* CONFIG_OF */
irq_domain_add(&v->domain);
}
#else
static inline void vic_pm_register(void __iomem *base, unsigned int irq, u32 arg1) { }
#endif /* CONFIG_PM */
static void vic_ack_irq(struct irq_data *d)
{
void __iomem *base = irq_data_get_irq_chip_data(d);
unsigned int irq = d->irq & 31;
unsigned int irq = d->hwirq;
writel(1 << irq, base + VIC_INT_ENABLE_CLEAR);
/* moreover, clear the soft-triggered, in case it was the reason */
writel(1 << irq, base + VIC_INT_SOFT_CLEAR);
@ -197,14 +214,14 @@ static void vic_ack_irq(struct irq_data *d)
static void vic_mask_irq(struct irq_data *d)
{
void __iomem *base = irq_data_get_irq_chip_data(d);
unsigned int irq = d->irq & 31;
unsigned int irq = d->hwirq;
writel(1 << irq, base + VIC_INT_ENABLE_CLEAR);
}
static void vic_unmask_irq(struct irq_data *d)
{
void __iomem *base = irq_data_get_irq_chip_data(d);
unsigned int irq = d->irq & 31;
unsigned int irq = d->hwirq;
writel(1 << irq, base + VIC_INT_ENABLE);
}
@ -226,7 +243,7 @@ static struct vic_device *vic_from_irq(unsigned int irq)
static int vic_set_wake(struct irq_data *d, unsigned int on)
{
struct vic_device *v = vic_from_irq(d->irq);
unsigned int off = d->irq & 31;
unsigned int off = d->hwirq;
u32 bit = 1 << off;
if (!v)
@ -301,7 +318,7 @@ static void __init vic_set_irq_sources(void __iomem *base,
* and 020 within the page. We call this "second block".
*/
static void __init vic_init_st(void __iomem *base, unsigned int irq_start,
u32 vic_sources)
u32 vic_sources, struct device_node *node)
{
unsigned int i;
int vic_2nd_block = ((unsigned long)base & ~PAGE_MASK) != 0;
@ -328,17 +345,12 @@ static void __init vic_init_st(void __iomem *base, unsigned int irq_start,
}
vic_set_irq_sources(base, irq_start, vic_sources);
vic_register(base, irq_start, 0, node);
}
/**
* vic_init - initialise a vectored interrupt controller
* @base: iomem base address
* @irq_start: starting interrupt number, must be a multiple of 32
* @vic_sources: bitmask of interrupt sources to allow
* @resume_sources: bitmask of interrupt sources to allow for resume
*/
void __init vic_init(void __iomem *base, unsigned int irq_start,
u32 vic_sources, u32 resume_sources)
static void __init __vic_init(void __iomem *base, unsigned int irq_start,
u32 vic_sources, u32 resume_sources,
struct device_node *node)
{
unsigned int i;
u32 cellid = 0;
@ -356,7 +368,7 @@ void __init vic_init(void __iomem *base, unsigned int irq_start,
switch(vendor) {
case AMBA_VENDOR_ST:
vic_init_st(base, irq_start, vic_sources);
vic_init_st(base, irq_start, vic_sources, node);
return;
default:
printk(KERN_WARNING "VIC: unknown vendor, continuing anyways\n");
@ -375,5 +387,81 @@ void __init vic_init(void __iomem *base, unsigned int irq_start,
vic_set_irq_sources(base, irq_start, vic_sources);
vic_pm_register(base, irq_start, resume_sources);
vic_register(base, irq_start, resume_sources, node);
}
/**
* vic_init() - initialise a vectored interrupt controller
* @base: iomem base address
* @irq_start: starting interrupt number, must be a multiple of 32
* @vic_sources: bitmask of interrupt sources to allow
* @resume_sources: bitmask of interrupt sources to allow for resume
*/
void __init vic_init(void __iomem *base, unsigned int irq_start,
u32 vic_sources, u32 resume_sources)
{
__vic_init(base, irq_start, vic_sources, resume_sources, NULL);
}
#ifdef CONFIG_OF
int __init vic_of_init(struct device_node *node, struct device_node *parent)
{
void __iomem *regs;
int irq_base;
if (WARN(parent, "non-root VICs are not supported"))
return -EINVAL;
regs = of_iomap(node, 0);
if (WARN_ON(!regs))
return -EIO;
irq_base = irq_alloc_descs(-1, 0, 32, numa_node_id());
if (WARN_ON(irq_base < 0))
goto out_unmap;
__vic_init(regs, irq_base, ~0, ~0, node);
return 0;
out_unmap:
iounmap(regs);
return -EIO;
}
#endif /* CONFIG_OF */
/*
* Handle each interrupt in a single VIC. Returns non-zero if we've
* handled at least one interrupt. This does a single read of the
* status register and handles all interrupts in order from LSB first.
*/
static int handle_one_vic(struct vic_device *vic, struct pt_regs *regs)
{
u32 stat, irq;
int handled = 0;
stat = readl_relaxed(vic->base + VIC_IRQ_STATUS);
while (stat) {
irq = ffs(stat) - 1;
handle_IRQ(irq_domain_to_irq(&vic->domain, irq), regs);
stat &= ~(1 << irq);
handled = 1;
}
return handled;
}
/*
* Keep iterating over all registered VICs until there are no pending
* interrupts.
*/
asmlinkage void __exception_irq_entry vic_handle_irq(struct pt_regs *regs)
{
int i, handled;
do {
for (i = 0, handled = 0; i < vic_id; ++i)
handled |= handle_one_vic(&vic_devices[i], regs);
} while (handled);
}

View File

@ -186,6 +186,17 @@
#define ALT_UP_B(label) b label
#endif
/*
* Instruction barrier
*/
.macro instr_sync
#if __LINUX_ARM_ARCH__ >= 7
isb
#elif __LINUX_ARM_ARCH__ == 6
mcr p15, 0, r0, c7, c5, 4
#endif
.endm
/*
* SMP data memory barrier
*/

arch/arm/include/asm/cti.h Normal file
View File

@ -0,0 +1,179 @@
#ifndef __ASMARM_CTI_H
#define __ASMARM_CTI_H
#include <asm/io.h>
/* The registers' definition is from section 3.2 of
* Embedded Cross Trigger Revision: r0p0
*/
#define CTICONTROL 0x000
#define CTISTATUS 0x004
#define CTILOCK 0x008
#define CTIPROTECTION 0x00C
#define CTIINTACK 0x010
#define CTIAPPSET 0x014
#define CTIAPPCLEAR 0x018
#define CTIAPPPULSE 0x01c
#define CTIINEN 0x020
#define CTIOUTEN 0x0A0
#define CTITRIGINSTATUS 0x130
#define CTITRIGOUTSTATUS 0x134
#define CTICHINSTATUS 0x138
#define CTICHOUTSTATUS 0x13c
#define CTIPERIPHID0 0xFE0
#define CTIPERIPHID1 0xFE4
#define CTIPERIPHID2 0xFE8
#define CTIPERIPHID3 0xFEC
#define CTIPCELLID0 0xFF0
#define CTIPCELLID1 0xFF4
#define CTIPCELLID2 0xFF8
#define CTIPCELLID3 0xFFC
/* The below are from section 3.6.4 of
* CoreSight v1.0 Architecture Specification
*/
#define LOCKACCESS 0xFB0
#define LOCKSTATUS 0xFB4
/* writing this value to LOCKACCESS unlocks the module; writing any
* other value locks it
*/
#define LOCKCODE 0xC5ACCE55
/**
* struct cti - cross trigger interface struct
* @base: mapped virtual address for the cti base
* @irq: irq number for the cti
* @trig_out_for_irq: trigger output number which will cause
* the @irq to be raised
*
* cti struct used to operate cti registers.
*/
struct cti {
void __iomem *base;
int irq;
int trig_out_for_irq;
};
/**
* cti_init - initialize the cti instance
* @cti: cti instance
* @base: mapped virtual address for the cti base
* @irq: irq number for the cti
* @trig_out: trigger output number which will cause
* the @irq to be raised
*
* called by machine code to pass the board dependent
* @base, @irq and @trig_out to cti.
*/
static inline void cti_init(struct cti *cti,
void __iomem *base, int irq, int trig_out)
{
cti->base = base;
cti->irq = irq;
cti->trig_out_for_irq = trig_out;
}
/**
* cti_map_trigger - use the @chan to map @trig_in to @trig_out
* @cti: cti instance
* @trig_in: trigger in number
* @trig_out: trigger out number
* @chan: channel number
*
* This function maps trigger input @trig_in to trigger output
* @trig_out using channel @chan.
*/
static inline void cti_map_trigger(struct cti *cti,
int trig_in, int trig_out, int chan)
{
void __iomem *base = cti->base;
unsigned long val;
val = __raw_readl(base + CTIINEN + trig_in * 4);
val |= BIT(chan);
__raw_writel(val, base + CTIINEN + trig_in * 4);
val = __raw_readl(base + CTIOUTEN + trig_out * 4);
val |= BIT(chan);
__raw_writel(val, base + CTIOUTEN + trig_out * 4);
}
/**
* cti_enable - enable the cti module
* @cti: cti instance
*
* enable the cti module
*/
static inline void cti_enable(struct cti *cti)
{
__raw_writel(0x1, cti->base + CTICONTROL);
}
/**
* cti_disable - disable the cti module
* @cti: cti instance
*
* disable the cti module
*/
static inline void cti_disable(struct cti *cti)
{
__raw_writel(0, cti->base + CTICONTROL);
}
/**
* cti_irq_ack - clear the cti irq
* @cti: cti instance
*
* clear the cti irq
*/
static inline void cti_irq_ack(struct cti *cti)
{
void __iomem *base = cti->base;
unsigned long val;
val = __raw_readl(base + CTIINTACK);
val |= BIT(cti->trig_out_for_irq);
__raw_writel(val, base + CTIINTACK);
}
/**
* cti_unlock - unlock cti module
* @cti: cti instance
*
* unlock the cti module; until it is unlocked, any writes to the
* cti module are not allowed.
*/
static inline void cti_unlock(struct cti *cti)
{
void __iomem *base = cti->base;
unsigned long val;
val = __raw_readl(base + LOCKSTATUS);
if (val & 1) {
val = LOCKCODE;
__raw_writel(val, base + LOCKACCESS);
}
}
/**
* cti_lock - lock cti module
* @cti: cti instance
*
* lock the cti module, so any writes to the cti
* module will not be allowed.
*/
static inline void cti_lock(struct cti *cti)
{
void __iomem *base = cti->base;
unsigned long val;
val = __raw_readl(base + LOCKSTATUS);
if (!(val & 1)) {
val = ~LOCKCODE;
__raw_writel(val, base + LOCKACCESS);
}
}
#endif
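Taken together, board code would typically unlock the CTI, route a trigger input to the output that raises its IRQ, and then enable the block; the handler later clears it with cti_irq_ack(). A minimal usage sketch using only the helpers above (the base address, IRQ number and trigger/channel numbers are hypothetical):

static struct cti example_cti;

static void __init example_cti_setup(void __iomem *base)
{
	/* hypothetical board: trigger output 6 raises IRQ 98 */
	cti_init(&example_cti, base, 98, 6);
	cti_unlock(&example_cti);
	/* route trigger input 2 to trigger output 6 over channel 0 */
	cti_map_trigger(&example_cti, 2, 6, 0);
	cti_enable(&example_cti);
}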

View File

@ -1,57 +0,0 @@
/* arch/arm/include/asm/entry-macro-vic2.S
*
* Originally arch/arm/mach-s3c6400/include/mach/entry-macro.S
*
* Copyright 2008 Openmoko, Inc.
* Copyright 2008 Simtec Electronics
* http://armlinux.simtec.co.uk/
* Ben Dooks <ben@simtec.co.uk>
*
* Low-level IRQ helper macros for a device with two VICs
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
/* This should be included from <mach/entry-macro.S> with the necessary
* defines for virtual addresses and IRQ bases for the two vics.
*
* The code needs the following defined:
* IRQ_VIC0_BASE IRQ number of VIC0's first IRQ
* IRQ_VIC1_BASE IRQ number of VIC1's first IRQ
* VA_VIC0 Virtual address of VIC0
* VA_VIC1 Virtual address of VIC1
*
* Note, code assumes VIC0's virtual address is an ARM immediate constant
* away from VIC1.
*/
#include <asm/hardware/vic.h>
.macro disable_fiq
.endm
.macro get_irqnr_preamble, base, tmp
ldr \base, =VA_VIC0
.endm
.macro arch_ret_to_user, tmp1, tmp2
.endm
.macro get_irqnr_and_base, irqnr, irqstat, base, tmp
@ check the vic0
mov \irqnr, #IRQ_VIC0_BASE + 31
ldr \irqstat, [ \base, # VIC_IRQ_STATUS ]
teq \irqstat, #0
@ otherwise try vic1
addeq \tmp, \base, #(VA_VIC1 - VA_VIC0)
addeq \irqnr, \irqnr, #(IRQ_VIC1_BASE - IRQ_VIC0_BASE)
ldreq \irqstat, [ \tmp, # VIC_IRQ_STATUS ]
teqeq \irqstat, #0
clzne \irqstat, \irqstat
subne \irqnr, \irqnr, \irqstat
.endm

View File

@ -1,60 +0,0 @@
/*
* arch/arm/include/asm/hardware/entry-macro-gic.S
*
* Low-level IRQ helper macros for GIC
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#include <asm/hardware/gic.h>
#ifndef HAVE_GET_IRQNR_PREAMBLE
.macro get_irqnr_preamble, base, tmp
ldr \base, =gic_cpu_base_addr
ldr \base, [\base]
.endm
#endif
/*
* The interrupt numbering scheme is defined in the
* interrupt controller spec. To wit:
*
* Interrupts 0-15 are IPI
* 16-31 are local. We allow 30 to be used for the watchdog.
* 32-1020 are global
* 1021-1022 are reserved
* 1023 is "spurious" (no interrupt)
*
* A simple read from the controller will tell us the number of the highest
* priority enabled interrupt. We then just need to check whether it is in the
* valid range for an IRQ (30-1020 inclusive).
*/
.macro get_irqnr_and_base, irqnr, irqstat, base, tmp
ldr \irqstat, [\base, #GIC_CPU_INTACK]
/* bits 12-10 = src CPU, 9-0 = int # */
ldr \tmp, =1021
bic \irqnr, \irqstat, #0x1c00
cmp \irqnr, #15
cmpcc \irqnr, \irqnr
cmpne \irqnr, \tmp
cmpcs \irqnr, \irqnr
.endm
/* We assume that irqstat (the raw value of the IRQ acknowledge
* register) is preserved from the macro above.
* If there is an IPI, we immediately signal end of interrupt on the
* controller, since this requires the original irqstat value which
* we won't easily be able to recreate later.
*/
.macro test_for_ipi, irqnr, irqstat, base, tmp
bic \irqnr, \irqstat, #0x1c00
cmp \irqnr, #16
strcc \irqstat, [\base, #GIC_CPU_EOI]
cmpcs \irqnr, \irqnr
.endm

View File

@ -36,30 +36,22 @@
#include <linux/irqdomain.h>
struct device_node;
extern void __iomem *gic_cpu_base_addr;
extern struct irq_chip gic_arch_extn;
void gic_init(unsigned int, int, void __iomem *, void __iomem *);
void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
u32 offset);
int gic_of_init(struct device_node *node, struct device_node *parent);
void gic_secondary_init(unsigned int);
void gic_handle_irq(struct pt_regs *regs);
void gic_cascade_irq(unsigned int gic_nr, unsigned int irq);
void gic_raise_softirq(const struct cpumask *mask, unsigned int irq);
struct gic_chip_data {
void __iomem *dist_base;
void __iomem *cpu_base;
#ifdef CONFIG_CPU_PM
u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)];
u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)];
u32 saved_spi_target[DIV_ROUND_UP(1020, 4)];
u32 __percpu *saved_ppi_enable;
u32 __percpu *saved_ppi_conf;
#endif
#ifdef CONFIG_IRQ_DOMAIN
struct irq_domain domain;
#endif
unsigned int gic_irqs;
};
static inline void gic_init(unsigned int nr, int start,
void __iomem *dist , void __iomem *cpu)
{
gic_init_bases(nr, start, dist, cpu, 0);
}
#endif
#endif

View File

@ -41,7 +41,15 @@
#define VIC_PL192_VECT_ADDR 0xF00
#ifndef __ASSEMBLY__
void vic_init(void __iomem *base, unsigned int irq_start, u32 vic_sources, u32 resume_sources);
#endif
#include <linux/compiler.h>
#include <linux/types.h>
struct device_node;
struct pt_regs;
void vic_init(void __iomem *base, unsigned int irq_start, u32 vic_sources, u32 resume_sources);
int vic_of_init(struct device_node *node, struct device_node *parent);
void vic_handle_irq(struct pt_regs *regs);
#endif /* __ASSEMBLY__ */
#endif

View File

@ -0,0 +1,14 @@
#ifndef __ASM_IDMAP_H
#define __ASM_IDMAP_H
#include <linux/compiler.h>
#include <asm/pgtable.h>
/* Tag a function as requiring to be executed via an identity mapping. */
#define __idmap __section(.idmap.text) noinline notrace
extern pgd_t *idmap_pgd;
void setup_mm_for_reboot(void);
#endif /* __ASM_IDMAP_H */

View File

@ -31,10 +31,10 @@ struct machine_desc {
unsigned int video_start; /* start of video RAM */
unsigned int video_end; /* end of video RAM */
unsigned int reserve_lp0 :1; /* never has lp0 */
unsigned int reserve_lp1 :1; /* never has lp1 */
unsigned int reserve_lp2 :1; /* never has lp2 */
unsigned int soft_reboot :1; /* soft reboot */
unsigned char reserve_lp0 :1; /* never has lp0 */
unsigned char reserve_lp1 :1; /* never has lp1 */
unsigned char reserve_lp2 :1; /* never has lp2 */
char restart_mode; /* default restart mode */
void (*fixup)(struct tag *, char **,
struct meminfo *);
void (*reserve)(void);/* reserve mem blocks */
@ -46,6 +46,7 @@ struct machine_desc {
#ifdef CONFIG_MULTI_IRQ_HANDLER
void (*handle_irq)(struct pt_regs *);
#endif
void (*restart)(char, const char *);
};
/*

View File

@ -151,7 +151,11 @@ extern void __cpu_copy_user_highpage(struct page *to, struct page *from,
#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
extern void copy_page(void *to, const void *from);
#ifdef CONFIG_ARM_LPAE
#include <asm/pgtable-3level-types.h>
#else
#include <asm/pgtable-2level-types.h>
#endif
#endif /* CONFIG_MMU */

View File

@ -32,7 +32,4 @@ enum arm_perf_pmu_ids {
extern enum arm_perf_pmu_ids
armpmu_get_pmu_id(void);
extern int
armpmu_get_max_events(void);
#endif /* __ARM_PERF_EVENT_H__ */

View File

@ -25,12 +25,34 @@
#define _PAGE_USER_TABLE (PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_USER))
#define _PAGE_KERNEL_TABLE (PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_KERNEL))
#ifdef CONFIG_ARM_LPAE
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
return (pmd_t *)get_zeroed_page(GFP_KERNEL | __GFP_REPEAT);
}
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
free_page((unsigned long)pmd);
}
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
}
#else /* !CONFIG_ARM_LPAE */
/*
* Since we have only two-level page tables, these are trivial
*/
#define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
#define pmd_free(mm, pmd) do { } while (0)
#define pgd_populate(mm,pmd,pte) BUG()
#define pud_populate(mm,pmd,pte) BUG()
#endif /* CONFIG_ARM_LPAE */
extern pgd_t *pgd_alloc(struct mm_struct *mm);
extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
@ -109,7 +131,9 @@ static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
{
pmdval_t pmdval = (pte + PTE_HWTABLE_OFF) | prot;
pmdp[0] = __pmd(pmdval);
#ifndef CONFIG_ARM_LPAE
pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t));
#endif
flush_pmd_entry(pmdp);
}

View File

@ -140,4 +140,45 @@
#define L_PTE_MT_DEV_CACHED (_AT(pteval_t, 0x0b) << 2) /* 1011 */
#define L_PTE_MT_MASK (_AT(pteval_t, 0x0f) << 2)
#ifndef __ASSEMBLY__
/*
* The "pud_xxx()" functions here are trivial when the pmd is folded into
* the pud: the pud entry is never bad, always exists, and can't be set or
* cleared.
*/
#define pud_none(pud) (0)
#define pud_bad(pud) (0)
#define pud_present(pud) (1)
#define pud_clear(pudp) do { } while (0)
#define set_pud(pud,pudp) do { } while (0)
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
{
return (pmd_t *)pud;
}
#define pmd_bad(pmd) (pmd_val(pmd) & 2)
#define copy_pmd(pmdpd,pmdps) \
do { \
pmdpd[0] = pmdps[0]; \
pmdpd[1] = pmdps[1]; \
flush_pmd_entry(pmdpd); \
} while (0)
#define pmd_clear(pmdp) \
do { \
pmdp[0] = __pmd(0); \
pmdp[1] = __pmd(0); \
clean_pmd_entry(pmdp); \
} while (0)
/* we don't need complex calculations here as the pmd is folded into the pgd */
#define pmd_addr_end(addr,end) (end)
#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext)
#endif /* __ASSEMBLY__ */
#endif /* _ASM_PGTABLE_2LEVEL_H */

View File

@ -0,0 +1,77 @@
/*
* arch/arm/include/asm/pgtable-3level-hwdef.h
*
* Copyright (C) 2011 ARM Ltd.
* Author: Catalin Marinas <catalin.marinas@arm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _ASM_PGTABLE_3LEVEL_HWDEF_H
#define _ASM_PGTABLE_3LEVEL_HWDEF_H
/*
* Hardware page table definitions.
*
* + Level 1/2 descriptor
* - common
*/
#define PMD_TYPE_MASK (_AT(pmdval_t, 3) << 0)
#define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0)
#define PMD_TYPE_TABLE (_AT(pmdval_t, 3) << 0)
#define PMD_TYPE_SECT (_AT(pmdval_t, 1) << 0)
#define PMD_BIT4 (_AT(pmdval_t, 0))
#define PMD_DOMAIN(x) (_AT(pmdval_t, 0))
/*
* - section
*/
#define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
#define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
#define PMD_SECT_S (_AT(pmdval_t, 3) << 8)
#define PMD_SECT_AF (_AT(pmdval_t, 1) << 10)
#define PMD_SECT_nG (_AT(pmdval_t, 1) << 11)
#define PMD_SECT_XN (_AT(pmdval_t, 1) << 54)
#define PMD_SECT_AP_WRITE (_AT(pmdval_t, 0))
#define PMD_SECT_AP_READ (_AT(pmdval_t, 0))
#define PMD_SECT_TEX(x) (_AT(pmdval_t, 0))
/*
* AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).
*/
#define PMD_SECT_UNCACHED (_AT(pmdval_t, 0) << 2) /* strongly ordered */
#define PMD_SECT_BUFFERED (_AT(pmdval_t, 1) << 2) /* normal non-cacheable */
#define PMD_SECT_WT (_AT(pmdval_t, 2) << 2) /* normal inner write-through */
#define PMD_SECT_WB (_AT(pmdval_t, 3) << 2) /* normal inner write-back */
#define PMD_SECT_WBWA (_AT(pmdval_t, 7) << 2) /* normal inner write-alloc */
/*
* + Level 3 descriptor (PTE)
*/
#define PTE_TYPE_MASK (_AT(pteval_t, 3) << 0)
#define PTE_TYPE_FAULT (_AT(pteval_t, 0) << 0)
#define PTE_TYPE_PAGE (_AT(pteval_t, 3) << 0)
#define PTE_BUFFERABLE (_AT(pteval_t, 1) << 2) /* AttrIndx[0] */
#define PTE_CACHEABLE (_AT(pteval_t, 1) << 3) /* AttrIndx[1] */
#define PTE_EXT_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
#define PTE_EXT_AF (_AT(pteval_t, 1) << 10) /* Access Flag */
#define PTE_EXT_NG (_AT(pteval_t, 1) << 11) /* nG */
#define PTE_EXT_XN (_AT(pteval_t, 1) << 54) /* XN */
/*
* 40-bit physical address supported.
*/
#define PHYS_MASK_SHIFT (40)
#define PHYS_MASK ((1ULL << PHYS_MASK_SHIFT) - 1)
#endif

View File

@ -0,0 +1,70 @@
/*
* arch/arm/include/asm/pgtable-3level-types.h
*
* Copyright (C) 2011 ARM Ltd.
* Author: Catalin Marinas <catalin.marinas@arm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _ASM_PGTABLE_3LEVEL_TYPES_H
#define _ASM_PGTABLE_3LEVEL_TYPES_H
#include <asm/types.h>
typedef u64 pteval_t;
typedef u64 pmdval_t;
typedef u64 pgdval_t;
#undef STRICT_MM_TYPECHECKS
#ifdef STRICT_MM_TYPECHECKS
/*
* These are used to make use of C type-checking..
*/
typedef struct { pteval_t pte; } pte_t;
typedef struct { pmdval_t pmd; } pmd_t;
typedef struct { pgdval_t pgd; } pgd_t;
typedef struct { pteval_t pgprot; } pgprot_t;
#define pte_val(x) ((x).pte)
#define pmd_val(x) ((x).pmd)
#define pgd_val(x) ((x).pgd)
#define pgprot_val(x) ((x).pgprot)
#define __pte(x) ((pte_t) { (x) } )
#define __pmd(x) ((pmd_t) { (x) } )
#define __pgd(x) ((pgd_t) { (x) } )
#define __pgprot(x) ((pgprot_t) { (x) } )
#else /* !STRICT_MM_TYPECHECKS */
typedef pteval_t pte_t;
typedef pmdval_t pmd_t;
typedef pgdval_t pgd_t;
typedef pteval_t pgprot_t;
#define pte_val(x) (x)
#define pmd_val(x) (x)
#define pgd_val(x) (x)
#define pgprot_val(x) (x)
#define __pte(x) (x)
#define __pmd(x) (x)
#define __pgd(x) (x)
#define __pgprot(x) (x)
#endif /* STRICT_MM_TYPECHECKS */
#endif /* _ASM_PGTABLE_3LEVEL_TYPES_H */

View File

@ -0,0 +1,155 @@
/*
* arch/arm/include/asm/pgtable-3level.h
*
* Copyright (C) 2011 ARM Ltd.
* Author: Catalin Marinas <catalin.marinas@arm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _ASM_PGTABLE_3LEVEL_H
#define _ASM_PGTABLE_3LEVEL_H
/*
* With LPAE, there are 3 levels of page tables. Each level has 512 entries of
* 8 bytes each, occupying a 4K page. The first level table covers a range of
* 512GB, each entry representing 1GB. Since we are limited to 4GB input
* address range, only 4 entries in the PGD are used.
*
* There are enough spare bits in a page table entry for the kernel specific
* state.
*/
#define PTRS_PER_PTE 512
#define PTRS_PER_PMD 512
#define PTRS_PER_PGD 4
#define PTE_HWTABLE_PTRS (PTRS_PER_PTE)
#define PTE_HWTABLE_OFF (0)
#define PTE_HWTABLE_SIZE (PTRS_PER_PTE * sizeof(u64))
/*
* PGDIR_SHIFT determines the size a top-level page table entry can map.
*/
#define PGDIR_SHIFT 30
/*
* PMD_SHIFT determines the size a middle-level page table entry can map.
*/
#define PMD_SHIFT 21
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
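To make the level arithmetic in the comment above concrete, a small sketch (illustration only, assuming the usual 4KB pages with PAGE_SHIFT = 12) of how a 32-bit virtual address splits across the three levels under these definitions:

/*
 * With PGDIR_SHIFT = 30 and PMD_SHIFT = 21: bits [31:30] index the
 * PGD (4 entries, 1GB each), bits [29:21] index the PMD (512 entries,
 * 2MB each) and bits [20:12] index the PTE (512 entries, 4KB each).
 */
static inline void lpae_index_example(unsigned long addr,
				      unsigned int *pgd_idx,
				      unsigned int *pmd_idx,
				      unsigned int *pte_idx)
{
	*pgd_idx = (addr >> 30) & 3;	/* addr >> PGDIR_SHIFT */
	*pmd_idx = (addr >> 21) & 511;	/* (addr >> PMD_SHIFT) & (PTRS_PER_PMD - 1) */
	*pte_idx = (addr >> 12) & 511;	/* (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1) */
}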
/*
* section address mask and size definitions.
*/
#define SECTION_SHIFT 21
#define SECTION_SIZE (1UL << SECTION_SHIFT)
#define SECTION_MASK (~(SECTION_SIZE-1))
#define USER_PTRS_PER_PGD (PAGE_OFFSET / PGDIR_SIZE)
/*
* "Linux" PTE definitions for LPAE.
*
* These bits overlap with the hardware bits but the naming is preserved for
* consistency with the classic page table format.
*/
#define L_PTE_PRESENT (_AT(pteval_t, 3) << 0) /* Valid */
#define L_PTE_FILE (_AT(pteval_t, 1) << 2) /* only when !PRESENT */
#define L_PTE_BUFFERABLE (_AT(pteval_t, 1) << 2) /* AttrIndx[0] */
#define L_PTE_CACHEABLE (_AT(pteval_t, 1) << 3) /* AttrIndx[1] */
#define L_PTE_USER (_AT(pteval_t, 1) << 6) /* AP[1] */
#define L_PTE_RDONLY (_AT(pteval_t, 1) << 7) /* AP[2] */
#define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
#define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */
#define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
#define L_PTE_DIRTY (_AT(pteval_t, 1) << 55) /* unused */
#define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56) /* unused */
/*
* To be used in assembly code with the upper page attributes.
*/
#define L_PTE_XN_HIGH (1 << (54 - 32))
#define L_PTE_DIRTY_HIGH (1 << (55 - 32))
/*
* AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).
*/
#define L_PTE_MT_UNCACHED (_AT(pteval_t, 0) << 2) /* strongly ordered */
#define L_PTE_MT_BUFFERABLE (_AT(pteval_t, 1) << 2) /* normal non-cacheable */
#define L_PTE_MT_WRITETHROUGH (_AT(pteval_t, 2) << 2) /* normal inner write-through */
#define L_PTE_MT_WRITEBACK (_AT(pteval_t, 3) << 2) /* normal inner write-back */
#define L_PTE_MT_WRITEALLOC (_AT(pteval_t, 7) << 2) /* normal inner write-alloc */
#define L_PTE_MT_DEV_SHARED (_AT(pteval_t, 4) << 2) /* device */
#define L_PTE_MT_DEV_NONSHARED (_AT(pteval_t, 4) << 2) /* device */
#define L_PTE_MT_DEV_WC (_AT(pteval_t, 1) << 2) /* normal non-cacheable */
#define L_PTE_MT_DEV_CACHED (_AT(pteval_t, 3) << 2) /* normal inner write-back */
#define L_PTE_MT_MASK (_AT(pteval_t, 7) << 2)
/*
* Software PGD flags.
*/
#define L_PGD_SWAPPER (_AT(pgdval_t, 1) << 55) /* swapper_pg_dir entry */
#ifndef __ASSEMBLY__
#define pud_none(pud) (!pud_val(pud))
#define pud_bad(pud) (!(pud_val(pud) & 2))
#define pud_present(pud) (pud_val(pud))
#define pud_clear(pudp) \
do { \
*pudp = __pud(0); \
clean_pmd_entry(pudp); \
} while (0)
#define set_pud(pudp, pud) \
do { \
*pudp = pud; \
flush_pmd_entry(pudp); \
} while (0)
static inline pmd_t *pud_page_vaddr(pud_t pud)
{
return __va(pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK);
}
/* Find an entry in the second-level page table.. */
#define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
{
return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(addr);
}
#define pmd_bad(pmd) (!(pmd_val(pmd) & 2))
#define copy_pmd(pmdpd,pmdps) \
do { \
*pmdpd = *pmdps; \
flush_pmd_entry(pmdpd); \
} while (0)
#define pmd_clear(pmdp) \
do { \
*pmdp = __pmd(0); \
clean_pmd_entry(pmdp); \
} while (0)
#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,__pte(pte_val(pte)|(ext)))
#endif /* __ASSEMBLY__ */
#endif /* _ASM_PGTABLE_3LEVEL_H */

View File

@ -10,6 +10,10 @@
#ifndef _ASMARM_PGTABLE_HWDEF_H
#define _ASMARM_PGTABLE_HWDEF_H
#ifdef CONFIG_ARM_LPAE
#include <asm/pgtable-3level-hwdef.h>
#else
#include <asm/pgtable-2level-hwdef.h>
#endif
#endif

View File

@ -11,20 +11,24 @@
#define _ASMARM_PGTABLE_H
#include <linux/const.h>
#include <asm-generic/4level-fixup.h>
#include <asm/proc-fns.h>
#ifndef CONFIG_MMU
#include <asm-generic/4level-fixup.h>
#include "pgtable-nommu.h"
#else
#include <asm-generic/pgtable-nopud.h>
#include <asm/memory.h>
#include <mach/vmalloc.h>
#include <asm/pgtable-hwdef.h>
#ifdef CONFIG_ARM_LPAE
#include <asm/pgtable-3level.h>
#else
#include <asm/pgtable-2level.h>
#endif
/*
* Just any arbitrary offset to the start of the vmalloc VM area: the
@ -33,14 +37,16 @@
* any out-of-bounds memory accesses will hopefully be caught.
* The vmalloc() routines leaves a hole of 4kB between each vmalloced
* area for the same reason. ;)
*
* Note that platforms may override VMALLOC_START, but they must provide
* VMALLOC_END. VMALLOC_END defines the (exclusive) limit of this space,
* which may not overlap IO space.
*/
#ifndef VMALLOC_START
#define VMALLOC_OFFSET (8*1024*1024)
#define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#define VMALLOC_END 0xff000000UL
/* This is a temporary hack until shmobile's DMA area size is sorted out */
#ifdef CONFIG_ARCH_SHMOBILE
#warning "SH-Mobile's consistent DMA size conflicts with VMALLOC_END by 144MB"
#undef VMALLOC_END
#define VMALLOC_END 0xF6000000UL
#endif
#define LIBRARY_TEXT_START 0x0c000000
@ -163,39 +169,8 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr) pgd_offset(&init_mm, addr)
/*
* The "pgd_xxx()" functions here are trivial for a folded two-level
* setup: the pgd is never bad, and a pmd always exists (as it's folded
* into the pgd entry)
*/
#define pgd_none(pgd) (0)
#define pgd_bad(pgd) (0)
#define pgd_present(pgd) (1)
#define pgd_clear(pgdp) do { } while (0)
#define set_pgd(pgd,pgdp) do { } while (0)
#define set_pud(pud,pudp) do { } while (0)
/* Find an entry in the second-level page table.. */
#define pmd_offset(dir, addr) ((pmd_t *)(dir))
#define pmd_none(pmd) (!pmd_val(pmd))
#define pmd_present(pmd) (pmd_val(pmd))
#define pmd_bad(pmd) (pmd_val(pmd) & 2)
#define copy_pmd(pmdpd,pmdps) \
do { \
pmdpd[0] = pmdps[0]; \
pmdpd[1] = pmdps[1]; \
flush_pmd_entry(pmdpd); \
} while (0)
#define pmd_clear(pmdp) \
do { \
pmdp[0] = __pmd(0); \
pmdp[1] = __pmd(0); \
clean_pmd_entry(pmdp); \
} while (0)
static inline pte_t *pmd_page_vaddr(pmd_t pmd)
{
@ -204,10 +179,6 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
#define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))
/* we don't need complex calculations here as the pmd is folded into the pgd */
#define pmd_addr_end(addr,end) (end)
#ifndef CONFIG_HIGHPTE
#define __pte_map(pmd) pmd_page_vaddr(*(pmd))
#define __pte_unmap(pte) do { } while (0)
@ -229,7 +200,6 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
#define pte_page(pte) pfn_to_page(pte_pfn(pte))
#define mk_pte(page,prot) pfn_pte(page_to_pfn(page), prot)
#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext)
#define pte_clear(mm,addr,ptep) set_pte_ext(ptep, __pte(0), 0)
#if __LINUX_ARM_ARCH__ < 6
@ -346,9 +316,6 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
#define pgtable_cache_init() do { } while (0)
void identity_mapping_add(pgd_t *, unsigned long, unsigned long);
void identity_mapping_del(pgd_t *, unsigned long, unsigned long);
#endif /* !__ASSEMBLY__ */
#endif /* CONFIG_MMU */

View File

@ -27,13 +27,22 @@ enum arm_pmu_type {
/*
* struct arm_pmu_platdata - ARM PMU platform data
*
* @handle_irq: an optional handler which will be called from the interrupt and
* passed the address of the low level handler, and can be used to implement
* any platform specific handling before or after calling it.
* @handle_irq: an optional handler which will be called from the
* interrupt and passed the address of the low level handler,
* and can be used to implement any platform specific handling
* before or after calling it.
* @enable_irq: an optional handler which will be called after
* request_irq and be used to handle some platform specific
* irq enablement
* @disable_irq: an optional handler which will be called before
* free_irq and be used to handle some platform specific
* irq disablement
*/
struct arm_pmu_platdata {
irqreturn_t (*handle_irq)(int irq, void *dev,
irq_handler_t pmu_handler);
void (*enable_irq)(int irq);
void (*disable_irq)(int irq);
};
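A board that needs extra work around the PMU interrupt can supply these hooks through the PMU platform device's platform_data; a minimal sketch (the function names and their bodies are hypothetical, not part of this commit):

#include <asm/pmu.h>

static void example_pmu_enable_irq(int irq)
{
	/* e.g. route a cross-trigger output to this IRQ after request_irq */
}

static void example_pmu_disable_irq(int irq)
{
	/* e.g. mask the cross-trigger output again before free_irq */
}

static struct arm_pmu_platdata example_pmu_platdata = {
	.enable_irq	= example_pmu_enable_irq,
	.disable_irq	= example_pmu_disable_irq,
};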
#ifdef CONFIG_CPU_HAS_PMU

View File

@ -65,7 +65,11 @@ extern struct processor {
* Set a possibly extended PTE. Non-extended PTEs should
* ignore 'ext'.
*/
#ifdef CONFIG_ARM_LPAE
void (*set_pte_ext)(pte_t *ptep, pte_t pte);
#else
void (*set_pte_ext)(pte_t *ptep, pte_t pte, unsigned int ext);
#endif
/* Suspend/resume */
unsigned int suspend_size;
@ -79,7 +83,11 @@ extern void cpu_proc_fin(void);
extern int cpu_do_idle(void);
extern void cpu_dcache_clean_area(void *, int);
extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm);
#ifdef CONFIG_ARM_LPAE
extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte);
#else
extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
#endif
extern void cpu_reset(unsigned long addr) __attribute__((noreturn));
/* These three are private to arch/arm/kernel/suspend.c */
@ -107,6 +115,18 @@ extern void cpu_resume(void);
#define cpu_switch_mm(pgd,mm) cpu_do_switch_mm(virt_to_phys(pgd),mm)
#ifdef CONFIG_ARM_LPAE
#define cpu_get_pgd() \
({ \
unsigned long pg, pg2; \
__asm__("mrrc p15, 0, %0, %1, c2" \
: "=r" (pg), "=r" (pg2) \
: \
: "cc"); \
pg &= ~(PTRS_PER_PGD*sizeof(pgd_t)-1); \
(pgd_t *)phys_to_virt(pg); \
})
#else
#define cpu_get_pgd() \
({ \
unsigned long pg; \
@ -115,6 +135,7 @@ extern void cpu_resume(void);
pg &= ~0x3fff; \
(pgd_t *)phys_to_virt(pg); \
})
#endif
#endif

View File

@ -80,6 +80,14 @@ struct siginfo;
void arm_notify_die(const char *str, struct pt_regs *regs, struct siginfo *info,
unsigned long err, unsigned long trap);
#ifdef CONFIG_ARM_LPAE
#define FAULT_CODE_ALIGNMENT 33
#define FAULT_CODE_DEBUG 34
#else
#define FAULT_CODE_ALIGNMENT 1
#define FAULT_CODE_DEBUG 2
#endif
void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
struct pt_regs *),
int sig, int code, const char *name);
@ -101,6 +109,7 @@ extern int __pure cpu_architecture(void);
extern void cpu_init(void);
void arm_machine_restart(char mode, const char *cmd);
void soft_restart(unsigned long);
extern void (*arm_pm_restart)(char str, const char *cmd);
#define UDBG_UNDEFINED (1 << 0)

View File

@ -202,8 +202,18 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
tlb_remove_page(tlb, pte);
}
static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
unsigned long addr)
{
#ifdef CONFIG_ARM_LPAE
tlb_add_flush(tlb, addr);
tlb_remove_page(tlb, virt_to_page(pmdp));
#endif
}
#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr)
#define pmd_free_tlb(tlb, pmdp, addr) pmd_free((tlb)->mm, pmdp)
#define pmd_free_tlb(tlb, pmdp, addr) __pmd_free_tlb(tlb, pmdp, addr)
#define pud_free_tlb(tlb, pudp, addr) pud_free((tlb)->mm, pudp)
#define tlb_migrate_finish(mm) do { } while (0)

View File

@ -36,12 +36,11 @@
#ifdef CONFIG_MULTI_IRQ_HANDLER
ldr r1, =handle_arch_irq
mov r0, sp
ldr r1, [r1]
adr lr, BSYM(9997f)
teq r1, #0
movne pc, r1
#endif
ldr pc, [r1]
#else
arch_irq_handler_default
#endif
9997:
.endm

View File

@ -39,8 +39,14 @@
#error KERNEL_RAM_VADDR must start at 0xXXXX8000
#endif
#ifdef CONFIG_ARM_LPAE
/* LPAE requires an additional page for the PGD */
#define PG_DIR_SIZE 0x5000
#define PMD_ORDER 3
#else
#define PG_DIR_SIZE 0x4000
#define PMD_ORDER 2
#endif
.globl swapper_pg_dir
.equ swapper_pg_dir, KERNEL_RAM_VADDR - PG_DIR_SIZE
@ -164,17 +170,36 @@ __create_page_tables:
teq r0, r6
bne 1b
#ifdef CONFIG_ARM_LPAE
/*
* Build the PGD table (first level) to point to the PMD table. A PGD
* entry is 64-bit wide.
*/
mov r0, r4
add r3, r4, #0x1000 @ first PMD table address
orr r3, r3, #3 @ PGD block type
mov r6, #4 @ PTRS_PER_PGD
mov r7, #1 << (55 - 32) @ L_PGD_SWAPPER
1: str r3, [r0], #4 @ set bottom PGD entry bits
str r7, [r0], #4 @ set top PGD entry bits
add r3, r3, #0x1000 @ next PMD table
subs r6, r6, #1
bne 1b
add r4, r4, #0x1000 @ point to the PMD tables
#endif
ldr r7, [r10, #PROCINFO_MM_MMUFLAGS] @ mm_mmuflags
/*
* Create identity mapping to cater for __enable_mmu.
* This identity mapping will be removed by paging_init().
*/
adr r0, __enable_mmu_loc
adr r0, __turn_mmu_on_loc
ldmia r0, {r3, r5, r6}
sub r0, r0, r3 @ virt->phys offset
add r5, r5, r0 @ phys __enable_mmu
add r6, r6, r0 @ phys __enable_mmu_end
add r5, r5, r0 @ phys __turn_mmu_on
add r6, r6, r0 @ phys __turn_mmu_on_end
mov r5, r5, lsr #SECTION_SHIFT
mov r6, r6, lsr #SECTION_SHIFT
@ -219,8 +244,8 @@ __create_page_tables:
#endif
/*
* Then map boot params address in r2 or
* the first 1MB of ram if boot params address is not specified.
* Then map boot params address in r2 or the first 1MB (2MB with LPAE)
* of ram if boot params address is not specified.
*/
mov r0, r2, lsr #SECTION_SHIFT
movs r0, r0, lsl #SECTION_SHIFT
@ -251,7 +276,15 @@ __create_page_tables:
mov r3, r7, lsr #SECTION_SHIFT
ldr r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags
orr r3, r7, r3, lsl #SECTION_SHIFT
#ifdef CONFIG_ARM_LPAE
mov r7, #1 << (54 - 32) @ XN
#else
orr r3, r3, #PMD_SECT_XN
#endif
1: str r3, [r0], #4
#ifdef CONFIG_ARM_LPAE
str r7, [r0], #4
#endif
add r3, r3, #1 << SECTION_SHIFT
cmp r0, r6
blo 1b
@ -282,15 +315,18 @@ __create_page_tables:
add r0, r4, #0xd8000000 >> (SECTION_SHIFT - PMD_ORDER)
str r3, [r0]
#endif
#endif
#ifdef CONFIG_ARM_LPAE
sub r4, r4, #0x1000 @ point to the PGD table
#endif
mov pc, lr
ENDPROC(__create_page_tables)
.ltorg
.align
__enable_mmu_loc:
__turn_mmu_on_loc:
.long .
.long __enable_mmu
.long __enable_mmu_end
.long __turn_mmu_on
.long __turn_mmu_on_end
#if defined(CONFIG_SMP)
__CPUINIT
@ -374,12 +410,17 @@ __enable_mmu:
#ifdef CONFIG_CPU_ICACHE_DISABLE
bic r0, r0, #CR_I
#endif
#ifdef CONFIG_ARM_LPAE
mov r5, #0
mcrr p15, 0, r4, r5, c2 @ load TTBR0
#else
mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
domain_val(DOMAIN_IO, DOMAIN_CLIENT))
mcr p15, 0, r5, c3, c0, 0 @ load domain access register
mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
#endif
b __turn_mmu_on
ENDPROC(__enable_mmu)
@ -398,15 +439,19 @@ ENDPROC(__enable_mmu)
* other registers depend on the function called upon completion
*/
.align 5
__turn_mmu_on:
.pushsection .idmap.text, "ax"
ENTRY(__turn_mmu_on)
mov r0, r0
instr_sync
mcr p15, 0, r0, c1, c0, 0 @ write control reg
mrc p15, 0, r3, c0, c0, 0 @ read id reg
instr_sync
mov r3, r3
mov r3, r13
mov pc, r3
__enable_mmu_end:
__turn_mmu_on_end:
ENDPROC(__turn_mmu_on)
.popsection
#ifdef CONFIG_SMP_ON_UP

View File

@ -1016,10 +1016,10 @@ static int __init arch_hw_breakpoint_init(void)
}
/* Register debug fault handler. */
hook_fault_code(2, hw_breakpoint_pending, SIGTRAP, TRAP_HWBKPT,
"watchpoint debug exception");
hook_ifault_code(2, hw_breakpoint_pending, SIGTRAP, TRAP_HWBKPT,
"breakpoint debug exception");
hook_fault_code(FAULT_CODE_DEBUG, hw_breakpoint_pending, SIGTRAP,
TRAP_HWBKPT, "watchpoint debug exception");
hook_ifault_code(FAULT_CODE_DEBUG, hw_breakpoint_pending, SIGTRAP,
TRAP_HWBKPT, "breakpoint debug exception");
/* Register hotplug notifier. */
register_cpu_notifier(&dbg_reset_nb);

View File

@ -12,12 +12,11 @@
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/mach-types.h>
#include <asm/system.h>
extern const unsigned char relocate_new_kernel[];
extern const unsigned int relocate_new_kernel_size;
extern void setup_mm_for_reboot(char mode);
extern unsigned long kexec_start_address;
extern unsigned long kexec_indirection_page;
extern unsigned long kexec_mach_type;
@ -111,14 +110,6 @@ void machine_kexec(struct kimage *image)
if (kexec_reinit)
kexec_reinit();
local_irq_disable();
local_fiq_disable();
setup_mm_for_reboot(0); /* mode is not used, so just pass 0*/
flush_cache_all();
outer_flush_all();
outer_disable();
cpu_proc_fin();
outer_inv_all();
flush_cache_all();
cpu_reset(reboot_code_buffer_phys);
soft_restart(reboot_code_buffer_phys);
}

View File

@ -59,8 +59,7 @@ armpmu_get_pmu_id(void)
}
EXPORT_SYMBOL_GPL(armpmu_get_pmu_id);
int
armpmu_get_max_events(void)
int perf_num_counters(void)
{
int max_events = 0;
@ -69,12 +68,6 @@ armpmu_get_max_events(void)
return max_events;
}
EXPORT_SYMBOL_GPL(armpmu_get_max_events);
int perf_num_counters(void)
{
return armpmu_get_max_events();
}
EXPORT_SYMBOL_GPL(perf_num_counters);
#define HW_OP_UNSUPPORTED 0xFFFF
@ -380,6 +373,8 @@ armpmu_release_hardware(struct arm_pmu *armpmu)
{
int i, irq, irqs;
struct platform_device *pmu_device = armpmu->plat_device;
struct arm_pmu_platdata *plat =
dev_get_platdata(&pmu_device->dev);
irqs = min(pmu_device->num_resources, num_possible_cpus());
@ -387,8 +382,11 @@ armpmu_release_hardware(struct arm_pmu *armpmu)
if (!cpumask_test_and_clear_cpu(i, &armpmu->active_irqs))
continue;
irq = platform_get_irq(pmu_device, i);
if (irq >= 0)
if (irq >= 0) {
if (plat && plat->disable_irq)
plat->disable_irq(irq);
free_irq(irq, armpmu);
}
}
release_pmu(armpmu->type);
@ -448,7 +446,8 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu)
irq);
armpmu_release_hardware(armpmu);
return err;
}
} else if (plat && plat->enable_irq)
plat->enable_irq(irq);
cpumask_set_cpu(i, &armpmu->active_irqs);
}

View File

@ -65,13 +65,15 @@ enum armv6_counters {
* accesses/misses in hardware.
*/
static const unsigned armv6_perf_map[PERF_COUNT_HW_MAX] = {
[PERF_COUNT_HW_CPU_CYCLES] = ARMV6_PERFCTR_CPU_CYCLES,
[PERF_COUNT_HW_INSTRUCTIONS] = ARMV6_PERFCTR_INSTR_EXEC,
[PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
[PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6_PERFCTR_BR_EXEC,
[PERF_COUNT_HW_BRANCH_MISSES] = ARMV6_PERFCTR_BR_MISPREDICT,
[PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
[PERF_COUNT_HW_CPU_CYCLES] = ARMV6_PERFCTR_CPU_CYCLES,
[PERF_COUNT_HW_INSTRUCTIONS] = ARMV6_PERFCTR_INSTR_EXEC,
[PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
[PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6_PERFCTR_BR_EXEC,
[PERF_COUNT_HW_BRANCH_MISSES] = ARMV6_PERFCTR_BR_MISPREDICT,
[PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV6_PERFCTR_IBUF_STALL,
[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = ARMV6_PERFCTR_LSU_FULL_STALL,
};
static const unsigned armv6_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
@ -218,13 +220,15 @@ enum armv6mpcore_perf_types {
* accesses/misses in hardware.
*/
static const unsigned armv6mpcore_perf_map[PERF_COUNT_HW_MAX] = {
[PERF_COUNT_HW_CPU_CYCLES] = ARMV6MPCORE_PERFCTR_CPU_CYCLES,
[PERF_COUNT_HW_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_INSTR_EXEC,
[PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
[PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_BR_EXEC,
[PERF_COUNT_HW_BRANCH_MISSES] = ARMV6MPCORE_PERFCTR_BR_MISPREDICT,
[PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
[PERF_COUNT_HW_CPU_CYCLES] = ARMV6MPCORE_PERFCTR_CPU_CYCLES,
[PERF_COUNT_HW_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_INSTR_EXEC,
[PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
[PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_BR_EXEC,
[PERF_COUNT_HW_BRANCH_MISSES] = ARMV6MPCORE_PERFCTR_BR_MISPREDICT,
[PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV6MPCORE_PERFCTR_IBUF_STALL,
[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = ARMV6MPCORE_PERFCTR_LSU_FULL_STALL,
};
static const unsigned armv6mpcore_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]

View File

@ -28,165 +28,87 @@ static struct arm_pmu armv7pmu;
* they are not available.
*/
enum armv7_perf_types {
ARMV7_PERFCTR_PMNC_SW_INCR = 0x00,
ARMV7_PERFCTR_IFETCH_MISS = 0x01,
ARMV7_PERFCTR_ITLB_MISS = 0x02,
ARMV7_PERFCTR_DCACHE_REFILL = 0x03, /* L1 */
ARMV7_PERFCTR_DCACHE_ACCESS = 0x04, /* L1 */
ARMV7_PERFCTR_DTLB_REFILL = 0x05,
ARMV7_PERFCTR_DREAD = 0x06,
ARMV7_PERFCTR_DWRITE = 0x07,
ARMV7_PERFCTR_INSTR_EXECUTED = 0x08,
ARMV7_PERFCTR_EXC_TAKEN = 0x09,
ARMV7_PERFCTR_EXC_EXECUTED = 0x0A,
ARMV7_PERFCTR_CID_WRITE = 0x0B,
/* ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS.
ARMV7_PERFCTR_PMNC_SW_INCR = 0x00,
ARMV7_PERFCTR_L1_ICACHE_REFILL = 0x01,
ARMV7_PERFCTR_ITLB_REFILL = 0x02,
ARMV7_PERFCTR_L1_DCACHE_REFILL = 0x03,
ARMV7_PERFCTR_L1_DCACHE_ACCESS = 0x04,
ARMV7_PERFCTR_DTLB_REFILL = 0x05,
ARMV7_PERFCTR_MEM_READ = 0x06,
ARMV7_PERFCTR_MEM_WRITE = 0x07,
ARMV7_PERFCTR_INSTR_EXECUTED = 0x08,
ARMV7_PERFCTR_EXC_TAKEN = 0x09,
ARMV7_PERFCTR_EXC_EXECUTED = 0x0A,
ARMV7_PERFCTR_CID_WRITE = 0x0B,
/*
* ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS.
* It counts:
* - all branch instructions,
* - all (taken) branch instructions,
* - instructions that explicitly write the PC,
* - exception generating instructions.
*/
ARMV7_PERFCTR_PC_WRITE = 0x0C,
ARMV7_PERFCTR_PC_IMM_BRANCH = 0x0D,
ARMV7_PERFCTR_PC_PROC_RETURN = 0x0E,
ARMV7_PERFCTR_UNALIGNED_ACCESS = 0x0F,
ARMV7_PERFCTR_PC_WRITE = 0x0C,
ARMV7_PERFCTR_PC_IMM_BRANCH = 0x0D,
ARMV7_PERFCTR_PC_PROC_RETURN = 0x0E,
ARMV7_PERFCTR_MEM_UNALIGNED_ACCESS = 0x0F,
ARMV7_PERFCTR_PC_BRANCH_MIS_PRED = 0x10,
ARMV7_PERFCTR_CLOCK_CYCLES = 0x11,
ARMV7_PERFCTR_PC_BRANCH_PRED = 0x12,
/* These events are defined by the PMUv2 supplement (ARM DDI 0457A). */
ARMV7_PERFCTR_PC_BRANCH_MIS_PRED = 0x10,
ARMV7_PERFCTR_CLOCK_CYCLES = 0x11,
ARMV7_PERFCTR_PC_BRANCH_PRED = 0x12,
ARMV7_PERFCTR_MEM_ACCESS = 0x13,
ARMV7_PERFCTR_L1_ICACHE_ACCESS = 0x14,
ARMV7_PERFCTR_L1_DCACHE_WB = 0x15,
ARMV7_PERFCTR_L2_DCACHE_ACCESS = 0x16,
ARMV7_PERFCTR_L2_DCACHE_REFILL = 0x17,
ARMV7_PERFCTR_L2_DCACHE_WB = 0x18,
ARMV7_PERFCTR_BUS_ACCESS = 0x19,
ARMV7_PERFCTR_MEMORY_ERROR = 0x1A,
ARMV7_PERFCTR_INSTR_SPEC = 0x1B,
ARMV7_PERFCTR_TTBR_WRITE = 0x1C,
ARMV7_PERFCTR_BUS_CYCLES = 0x1D,
ARMV7_PERFCTR_MEM_ACCESS = 0x13,
ARMV7_PERFCTR_L1_ICACHE_ACCESS = 0x14,
ARMV7_PERFCTR_L1_DCACHE_WB = 0x15,
ARMV7_PERFCTR_L2_CACHE_ACCESS = 0x16,
ARMV7_PERFCTR_L2_CACHE_REFILL = 0x17,
ARMV7_PERFCTR_L2_CACHE_WB = 0x18,
ARMV7_PERFCTR_BUS_ACCESS = 0x19,
ARMV7_PERFCTR_MEM_ERROR = 0x1A,
ARMV7_PERFCTR_INSTR_SPEC = 0x1B,
ARMV7_PERFCTR_TTBR_WRITE = 0x1C,
ARMV7_PERFCTR_BUS_CYCLES = 0x1D,
ARMV7_PERFCTR_CPU_CYCLES = 0xFF
ARMV7_PERFCTR_CPU_CYCLES = 0xFF
};
/* ARMv7 Cortex-A8 specific event types */
enum armv7_a8_perf_types {
ARMV7_PERFCTR_WRITE_BUFFER_FULL = 0x40,
ARMV7_PERFCTR_L2_STORE_MERGED = 0x41,
ARMV7_PERFCTR_L2_STORE_BUFF = 0x42,
ARMV7_PERFCTR_L2_ACCESS = 0x43,
ARMV7_PERFCTR_L2_CACH_MISS = 0x44,
ARMV7_PERFCTR_AXI_READ_CYCLES = 0x45,
ARMV7_PERFCTR_AXI_WRITE_CYCLES = 0x46,
ARMV7_PERFCTR_MEMORY_REPLAY = 0x47,
ARMV7_PERFCTR_UNALIGNED_ACCESS_REPLAY = 0x48,
ARMV7_PERFCTR_L1_DATA_MISS = 0x49,
ARMV7_PERFCTR_L1_INST_MISS = 0x4A,
ARMV7_PERFCTR_L1_DATA_COLORING = 0x4B,
ARMV7_PERFCTR_L1_NEON_DATA = 0x4C,
ARMV7_PERFCTR_L1_NEON_CACH_DATA = 0x4D,
ARMV7_PERFCTR_L2_NEON = 0x4E,
ARMV7_PERFCTR_L2_NEON_HIT = 0x4F,
ARMV7_PERFCTR_L1_INST = 0x50,
ARMV7_PERFCTR_PC_RETURN_MIS_PRED = 0x51,
ARMV7_PERFCTR_PC_BRANCH_FAILED = 0x52,
ARMV7_PERFCTR_PC_BRANCH_TAKEN = 0x53,
ARMV7_PERFCTR_PC_BRANCH_EXECUTED = 0x54,
ARMV7_PERFCTR_OP_EXECUTED = 0x55,
ARMV7_PERFCTR_CYCLES_INST_STALL = 0x56,
ARMV7_PERFCTR_CYCLES_INST = 0x57,
ARMV7_PERFCTR_CYCLES_NEON_DATA_STALL = 0x58,
ARMV7_PERFCTR_CYCLES_NEON_INST_STALL = 0x59,
ARMV7_PERFCTR_NEON_CYCLES = 0x5A,
ARMV7_PERFCTR_PMU0_EVENTS = 0x70,
ARMV7_PERFCTR_PMU1_EVENTS = 0x71,
ARMV7_PERFCTR_PMU_EVENTS = 0x72,
ARMV7_A8_PERFCTR_L2_CACHE_ACCESS = 0x43,
ARMV7_A8_PERFCTR_L2_CACHE_REFILL = 0x44,
ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS = 0x50,
ARMV7_A8_PERFCTR_STALL_ISIDE = 0x56,
};
/* ARMv7 Cortex-A9 specific event types */
enum armv7_a9_perf_types {
ARMV7_PERFCTR_JAVA_HW_BYTECODE_EXEC = 0x40,
ARMV7_PERFCTR_JAVA_SW_BYTECODE_EXEC = 0x41,
ARMV7_PERFCTR_JAZELLE_BRANCH_EXEC = 0x42,
ARMV7_PERFCTR_COHERENT_LINE_MISS = 0x50,
ARMV7_PERFCTR_COHERENT_LINE_HIT = 0x51,
ARMV7_PERFCTR_ICACHE_DEP_STALL_CYCLES = 0x60,
ARMV7_PERFCTR_DCACHE_DEP_STALL_CYCLES = 0x61,
ARMV7_PERFCTR_TLB_MISS_DEP_STALL_CYCLES = 0x62,
ARMV7_PERFCTR_STREX_EXECUTED_PASSED = 0x63,
ARMV7_PERFCTR_STREX_EXECUTED_FAILED = 0x64,
ARMV7_PERFCTR_DATA_EVICTION = 0x65,
ARMV7_PERFCTR_ISSUE_STAGE_NO_INST = 0x66,
ARMV7_PERFCTR_ISSUE_STAGE_EMPTY = 0x67,
ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE = 0x68,
ARMV7_PERFCTR_PREDICTABLE_FUNCT_RETURNS = 0x6E,
ARMV7_PERFCTR_MAIN_UNIT_EXECUTED_INST = 0x70,
ARMV7_PERFCTR_SECOND_UNIT_EXECUTED_INST = 0x71,
ARMV7_PERFCTR_LD_ST_UNIT_EXECUTED_INST = 0x72,
ARMV7_PERFCTR_FP_EXECUTED_INST = 0x73,
ARMV7_PERFCTR_NEON_EXECUTED_INST = 0x74,
ARMV7_PERFCTR_PLD_FULL_DEP_STALL_CYCLES = 0x80,
ARMV7_PERFCTR_DATA_WR_DEP_STALL_CYCLES = 0x81,
ARMV7_PERFCTR_ITLB_MISS_DEP_STALL_CYCLES = 0x82,
ARMV7_PERFCTR_DTLB_MISS_DEP_STALL_CYCLES = 0x83,
ARMV7_PERFCTR_MICRO_ITLB_MISS_DEP_STALL_CYCLES = 0x84,
ARMV7_PERFCTR_MICRO_DTLB_MISS_DEP_STALL_CYCLES = 0x85,
ARMV7_PERFCTR_DMB_DEP_STALL_CYCLES = 0x86,
ARMV7_PERFCTR_INTGR_CLK_ENABLED_CYCLES = 0x8A,
ARMV7_PERFCTR_DATA_ENGINE_CLK_EN_CYCLES = 0x8B,
ARMV7_PERFCTR_ISB_INST = 0x90,
ARMV7_PERFCTR_DSB_INST = 0x91,
ARMV7_PERFCTR_DMB_INST = 0x92,
ARMV7_PERFCTR_EXT_INTERRUPTS = 0x93,
ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_COMPLETED = 0xA0,
ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_SKIPPED = 0xA1,
ARMV7_PERFCTR_PLE_FIFO_FLUSH = 0xA2,
ARMV7_PERFCTR_PLE_RQST_COMPLETED = 0xA3,
ARMV7_PERFCTR_PLE_FIFO_OVERFLOW = 0xA4,
ARMV7_PERFCTR_PLE_RQST_PROG = 0xA5
ARMV7_A9_PERFCTR_INSTR_CORE_RENAME = 0x68,
ARMV7_A9_PERFCTR_STALL_ICACHE = 0x60,
ARMV7_A9_PERFCTR_STALL_DISPATCH = 0x66,
};
/* ARMv7 Cortex-A5 specific event types */
enum armv7_a5_perf_types {
ARMV7_PERFCTR_IRQ_TAKEN = 0x86,
ARMV7_PERFCTR_FIQ_TAKEN = 0x87,
ARMV7_PERFCTR_EXT_MEM_RQST = 0xc0,
ARMV7_PERFCTR_NC_EXT_MEM_RQST = 0xc1,
ARMV7_PERFCTR_PREFETCH_LINEFILL = 0xc2,
ARMV7_PERFCTR_PREFETCH_LINEFILL_DROP = 0xc3,
ARMV7_PERFCTR_ENTER_READ_ALLOC = 0xc4,
ARMV7_PERFCTR_READ_ALLOC = 0xc5,
ARMV7_PERFCTR_STALL_SB_FULL = 0xc9,
ARMV7_A5_PERFCTR_PREFETCH_LINEFILL = 0xc2,
ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP = 0xc3,
};
/* ARMv7 Cortex-A15 specific event types */
enum armv7_a15_perf_types {
ARMV7_PERFCTR_L1_DCACHE_READ_ACCESS = 0x40,
ARMV7_PERFCTR_L1_DCACHE_WRITE_ACCESS = 0x41,
ARMV7_PERFCTR_L1_DCACHE_READ_REFILL = 0x42,
ARMV7_PERFCTR_L1_DCACHE_WRITE_REFILL = 0x43,
ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ = 0x40,
ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE = 0x41,
ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ = 0x42,
ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE = 0x43,
ARMV7_PERFCTR_L1_DTLB_READ_REFILL = 0x4C,
ARMV7_PERFCTR_L1_DTLB_WRITE_REFILL = 0x4D,
ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ = 0x4C,
ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE = 0x4D,
ARMV7_PERFCTR_L2_DCACHE_READ_ACCESS = 0x50,
ARMV7_PERFCTR_L2_DCACHE_WRITE_ACCESS = 0x51,
ARMV7_PERFCTR_L2_DCACHE_READ_REFILL = 0x52,
ARMV7_PERFCTR_L2_DCACHE_WRITE_REFILL = 0x53,
ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ = 0x50,
ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE = 0x51,
ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ = 0x52,
ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE = 0x53,
ARMV7_PERFCTR_SPEC_PC_WRITE = 0x76,
ARMV7_A15_PERFCTR_PC_WRITE_SPEC = 0x76,
};
/*
@ -197,13 +119,15 @@ enum armv7_a15_perf_types {
* accesses/misses in hardware.
*/
static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = {
[PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
[PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
[PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
[PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
[PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
[PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES,
[PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
[PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
[PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
[PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
[PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
[PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV7_A8_PERFCTR_STALL_ISIDE,
[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED,
};
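
The map above is what the generic hardware events resolve to on this core. As a rough illustration (a userspace sketch, not code from this change), asking for PERF_COUNT_HW_BRANCH_INSTRUCTIONS on a Cortex-A8 ends up programming the ARMV7_PERFCTR_PC_WRITE event described earlier:

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <string.h>
#include <unistd.h>

static int open_branch_counter(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_HARDWARE;
	attr.size = sizeof(attr);
	attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;

	/* pid 0 = this task, cpu -1 = any CPU, no group, no flags */
	return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}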
static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
@ -217,12 +141,12 @@ static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
* combined.
*/
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS,
[C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL,
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS,
[C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL,
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
@ -231,12 +155,12 @@ static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
},
[C(L1I)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_INST,
[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_INST_MISS,
[C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS,
[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_INST,
[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_INST_MISS,
[C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS,
[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
@ -245,12 +169,12 @@ static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
},
[C(LL)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_ACCESS,
[C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACH_MISS,
[C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
[C(RESULT_MISS)] = ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_ACCESS,
[C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACH_MISS,
[C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
[C(RESULT_MISS)] = ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
@ -274,11 +198,11 @@ static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[C(ITLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
@ -287,14 +211,12 @@ static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
},
[C(BPU)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE,
[C(RESULT_MISS)]
= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE,
[C(RESULT_MISS)]
= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
@ -321,14 +243,15 @@ static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
* Cortex-A9 HW events mapping
*/
static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
[PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
[PERF_COUNT_HW_INSTRUCTIONS] =
ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE,
[PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_DCACHE_ACCESS,
[PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_DCACHE_REFILL,
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
[PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
[PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES,
[PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
[PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_A9_PERFCTR_INSTR_CORE_RENAME,
[PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
[PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
[PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
[PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV7_A9_PERFCTR_STALL_ICACHE,
[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = ARMV7_A9_PERFCTR_STALL_DISPATCH,
};
static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
@ -342,12 +265,12 @@ static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
* combined.
*/
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS,
[C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL,
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS,
[C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL,
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
@ -357,11 +280,11 @@ static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[C(L1I)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS,
[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS,
[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
@ -399,11 +322,11 @@ static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[C(ITLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
@ -412,14 +335,12 @@ static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
},
[C(BPU)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE,
[C(RESULT_MISS)]
= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE,
[C(RESULT_MISS)]
= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
@ -446,13 +367,15 @@ static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
* Cortex-A5 HW events mapping
*/
static const unsigned armv7_a5_perf_map[PERF_COUNT_HW_MAX] = {
[PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
[PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
[PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
[PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
[PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
[PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
[PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
[PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
[PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
[PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
[PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
[PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = HW_OP_UNSUPPORTED,
[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED,
};
static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
@ -460,42 +383,34 @@ static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)]
= ARMV7_PERFCTR_DCACHE_ACCESS,
[C(RESULT_MISS)]
= ARMV7_PERFCTR_DCACHE_REFILL,
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)]
= ARMV7_PERFCTR_DCACHE_ACCESS,
[C(RESULT_MISS)]
= ARMV7_PERFCTR_DCACHE_REFILL,
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)]
= ARMV7_PERFCTR_PREFETCH_LINEFILL,
[C(RESULT_MISS)]
= ARMV7_PERFCTR_PREFETCH_LINEFILL_DROP,
[C(RESULT_ACCESS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
[C(RESULT_MISS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,
},
},
[C(L1I)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
[C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS,
[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
[C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS,
[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
},
/*
* The prefetch counters don't differentiate between the I
* side and the D side.
*/
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)]
= ARMV7_PERFCTR_PREFETCH_LINEFILL,
[C(RESULT_MISS)]
= ARMV7_PERFCTR_PREFETCH_LINEFILL_DROP,
[C(RESULT_ACCESS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
[C(RESULT_MISS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,
},
},
[C(LL)] = {
@ -529,11 +444,11 @@ static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[C(ITLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
@ -543,13 +458,11 @@ static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[C(BPU)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
[C(RESULT_MISS)]
= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
[C(RESULT_MISS)]
= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
@ -562,13 +475,15 @@ static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
* Cortex-A15 HW events mapping
*/
static const unsigned armv7_a15_perf_map[PERF_COUNT_HW_MAX] = {
[PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
[PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
[PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
[PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_SPEC_PC_WRITE,
[PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
[PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_BUS_CYCLES,
[PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
[PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
[PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
[PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_A15_PERFCTR_PC_WRITE_SPEC,
[PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
[PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_BUS_CYCLES,
[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = HW_OP_UNSUPPORTED,
[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED,
};
static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
@ -576,16 +491,12 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)]
= ARMV7_PERFCTR_L1_DCACHE_READ_ACCESS,
[C(RESULT_MISS)]
= ARMV7_PERFCTR_L1_DCACHE_READ_REFILL,
[C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ,
[C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)]
= ARMV7_PERFCTR_L1_DCACHE_WRITE_ACCESS,
[C(RESULT_MISS)]
= ARMV7_PERFCTR_L1_DCACHE_WRITE_REFILL,
[C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE,
[C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
@ -601,11 +512,11 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
*/
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
[C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS,
[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
[C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS,
[C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
@ -614,16 +525,12 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
},
[C(LL)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)]
= ARMV7_PERFCTR_L2_DCACHE_READ_ACCESS,
[C(RESULT_MISS)]
= ARMV7_PERFCTR_L2_DCACHE_READ_REFILL,
[C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ,
[C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)]
= ARMV7_PERFCTR_L2_DCACHE_WRITE_ACCESS,
[C(RESULT_MISS)]
= ARMV7_PERFCTR_L2_DCACHE_WRITE_REFILL,
[C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE,
[C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
@ -633,13 +540,11 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[C(DTLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)]
= ARMV7_PERFCTR_L1_DTLB_READ_REFILL,
[C(RESULT_MISS)] = ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)]
= ARMV7_PERFCTR_L1_DTLB_WRITE_REFILL,
[C(RESULT_MISS)] = ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
@ -649,11 +554,11 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[C(ITLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS,
[C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
@ -663,13 +568,11 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
[C(BPU)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
[C(RESULT_MISS)]
= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
[C(RESULT_MISS)]
= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
[C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
},
[C(OP_PREFETCH)] = {
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,

View File

@ -48,13 +48,15 @@ enum xscale_counters {
};
static const unsigned xscale_perf_map[PERF_COUNT_HW_MAX] = {
[PERF_COUNT_HW_CPU_CYCLES] = XSCALE_PERFCTR_CCNT,
[PERF_COUNT_HW_INSTRUCTIONS] = XSCALE_PERFCTR_INSTRUCTION,
[PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
[PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = XSCALE_PERFCTR_BRANCH,
[PERF_COUNT_HW_BRANCH_MISSES] = XSCALE_PERFCTR_BRANCH_MISS,
[PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
[PERF_COUNT_HW_CPU_CYCLES] = XSCALE_PERFCTR_CCNT,
[PERF_COUNT_HW_INSTRUCTIONS] = XSCALE_PERFCTR_INSTRUCTION,
[PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
[PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = XSCALE_PERFCTR_BRANCH,
[PERF_COUNT_HW_BRANCH_MISSES] = XSCALE_PERFCTR_BRANCH_MISS,
[PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = XSCALE_PERFCTR_ICACHE_NO_DELIVER,
[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED,
};
static const unsigned xscale_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]

View File

@ -57,7 +57,7 @@ static const char *isa_modes[] = {
"ARM" , "Thumb" , "Jazelle", "ThumbEE"
};
extern void setup_mm_for_reboot(char mode);
extern void setup_mm_for_reboot(void);
static volatile int hlt_counter;
@ -92,18 +92,24 @@ static int __init hlt_setup(char *__unused)
__setup("nohlt", nohlt_setup);
__setup("hlt", hlt_setup);
void arm_machine_restart(char mode, const char *cmd)
{
/* Disable interrupts first */
local_irq_disable();
local_fiq_disable();
extern void call_with_stack(void (*fn)(void *), void *arg, void *sp);
typedef void (*phys_reset_t)(unsigned long);
/*
* Tell the mm system that we are going to reboot -
* we may need it to insert some 1:1 mappings so that
* soft boot works.
*/
setup_mm_for_reboot(mode);
/*
* A temporary stack to use for CPU reset. This is static so that we
* don't clobber it with the identity mapping. When running with this
* stack, any references to the current task *will not work* so you
* should really do as little as possible before jumping to your reset
* code.
*/
static u64 soft_restart_stack[16];
static void __soft_restart(void *addr)
{
phys_reset_t phys_reset;
/* Take out a flat memory mapping. */
setup_mm_for_reboot();
/* Clean and invalidate caches */
flush_cache_all();
@ -114,18 +120,41 @@ void arm_machine_restart(char mode, const char *cmd)
/* Push out any further dirty data, and ensure cache is empty */
flush_cache_all();
/*
* Now call the architecture specific reboot code.
*/
arch_reset(mode, cmd);
/* Switch to the identity mapping. */
phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
phys_reset((unsigned long)addr);
/*
* Whoops - the architecture was unable to reboot.
* Tell the user!
*/
mdelay(1000);
printk("Reboot failed -- System halted\n");
while (1);
/* Should never get here. */
BUG();
}
void soft_restart(unsigned long addr)
{
u64 *stack = soft_restart_stack + ARRAY_SIZE(soft_restart_stack);
/* Disable interrupts first */
local_irq_disable();
local_fiq_disable();
/* Disable the L2 if we're the last man standing. */
if (num_online_cpus() == 1)
outer_disable();
/* Change to the new stack and continue with the reset. */
call_with_stack(__soft_restart, (void *)addr, (void *)stack);
/* Should never get here. */
BUG();
}
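
Platforms that previously open-coded a jump through cpu_reset() can now call this helper from their restart path; the mach-clps711x and mach-ebsa110 hunks further down do exactly that. A minimal sketch (the target address is board specific, 0 is just the common case):

static inline void arch_reset(char mode, const char *cmd)
{
	/* flat-map the kernel and branch to physical address 0 */
	soft_restart(0);
}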
void arm_machine_restart(char mode, const char *cmd)
{
/* Disable interrupts first */
local_irq_disable();
local_fiq_disable();
/* Call the architecture specific reboot code. */
arch_reset(mode, cmd);
}
/*
@ -253,7 +282,15 @@ void machine_power_off(void)
void machine_restart(char *cmd)
{
machine_shutdown();
arm_pm_restart(reboot_mode, cmd);
/* Give a grace period for failure to restart of 1s */
mdelay(1000);
/* Whoops - the platform was unable to reboot. Tell the user! */
printk("Reboot failed -- System halted\n");
while (1);
}
void __show_regs(struct pt_regs *regs)

View File

@ -31,6 +31,7 @@
#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/sort.h>
#include <asm/unified.h>
#include <asm/cpu.h>
@ -890,6 +891,12 @@ static struct machine_desc * __init setup_machine_tags(unsigned int nr)
return mdesc;
}
static int __init meminfo_cmp(const void *_a, const void *_b)
{
const struct membank *a = _a, *b = _b;
long cmp = bank_pfn_start(a) - bank_pfn_start(b);
return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
}
void __init setup_arch(char **cmdline_p)
{
@ -904,8 +911,8 @@ void __init setup_arch(char **cmdline_p)
machine_desc = mdesc;
machine_name = mdesc->name;
if (mdesc->soft_reboot)
reboot_setup("s");
if (mdesc->restart_mode)
reboot_setup(&mdesc->restart_mode);
init_mm.start_code = (unsigned long) _text;
init_mm.end_code = (unsigned long) _etext;
@ -918,12 +925,16 @@ void __init setup_arch(char **cmdline_p)
parse_early_param();
sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
sanity_check_meminfo();
arm_memblock_init(&meminfo, mdesc);
paging_init(mdesc);
request_standard_resources(mdesc);
if (mdesc->restart)
arm_pm_restart = mdesc->restart;
unflatten_device_tree();
#ifdef CONFIG_SMP

View File

@ -54,14 +54,18 @@ ENDPROC(cpu_suspend_abort)
* r0 = control register value
*/
.align 5
.pushsection .idmap.text,"ax"
ENTRY(cpu_resume_mmu)
ldr r3, =cpu_resume_after_mmu
instr_sync
mcr p15, 0, r0, c1, c0, 0 @ turn on MMU, I-cache, etc
mrc p15, 0, r0, c0, c0, 0 @ read id reg
instr_sync
mov r0, r0
mov r0, r0
mov pc, r3 @ jump to virtual address
ENDPROC(cpu_resume_mmu)
.popsection
cpu_resume_after_mmu:
bl cpu_init @ restore the und/abt/irq banked regs
mov r0, #0 @ return zero on success

View File

@ -31,6 +31,7 @@
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/idmap.h>
#include <asm/topology.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
@ -61,7 +62,6 @@ int __cpuinit __cpu_up(unsigned int cpu)
{
struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu);
struct task_struct *idle = ci->idle;
pgd_t *pgd;
int ret;
/*
@ -83,30 +83,12 @@ int __cpuinit __cpu_up(unsigned int cpu)
init_idle(idle, cpu);
}
/*
* Allocate initial page tables to allow the new CPU to
* enable the MMU safely. This essentially means a set
* of our "standard" page tables, with the addition of
* a 1:1 mapping for the physical address of the kernel.
*/
pgd = pgd_alloc(&init_mm);
if (!pgd)
return -ENOMEM;
if (PHYS_OFFSET != PAGE_OFFSET) {
#ifndef CONFIG_HOTPLUG_CPU
identity_mapping_add(pgd, __pa(__init_begin), __pa(__init_end));
#endif
identity_mapping_add(pgd, __pa(_stext), __pa(_etext));
identity_mapping_add(pgd, __pa(_sdata), __pa(_edata));
}
/*
* We need to tell the secondary core where to find
* its stack and the page tables.
*/
secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
secondary_data.pgdir = virt_to_phys(pgd);
secondary_data.pgdir = virt_to_phys(idmap_pgd);
secondary_data.swapper_pg_dir = virt_to_phys(swapper_pg_dir);
__cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data));
outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1));
@ -142,16 +124,6 @@ int __cpuinit __cpu_up(unsigned int cpu)
secondary_data.stack = NULL;
secondary_data.pgdir = 0;
if (PHYS_OFFSET != PAGE_OFFSET) {
#ifndef CONFIG_HOTPLUG_CPU
identity_mapping_del(pgd, __pa(__init_begin), __pa(__init_end));
#endif
identity_mapping_del(pgd, __pa(_stext), __pa(_etext));
identity_mapping_del(pgd, __pa(_sdata), __pa(_edata));
}
pgd_free(&init_mm, pgd);
return ret;
}
@ -550,6 +522,10 @@ static void ipi_cpu_stop(unsigned int cpu)
local_fiq_disable();
local_irq_disable();
#ifdef CONFIG_HOTPLUG_CPU
platform_cpu_kill(cpu);
#endif
while (1)
cpu_relax();
}

View File

@ -1,13 +1,12 @@
#include <linux/init.h>
#include <asm/idmap.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/memory.h>
#include <asm/suspend.h>
#include <asm/tlbflush.h>
static pgd_t *suspend_pgd;
extern int __cpu_suspend(unsigned long, int (*)(unsigned long));
extern void cpu_resume_mmu(void);
@ -21,7 +20,7 @@ void __cpu_suspend_save(u32 *ptr, u32 ptrsz, u32 sp, u32 *save_ptr)
*save_ptr = virt_to_phys(ptr);
/* This must correspond to the LDM in cpu_resume() assembly */
*ptr++ = virt_to_phys(suspend_pgd);
*ptr++ = virt_to_phys(idmap_pgd);
*ptr++ = sp;
*ptr++ = virt_to_phys(cpu_do_resume);
@ -42,7 +41,7 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
struct mm_struct *mm = current->active_mm;
int ret;
if (!suspend_pgd)
if (!idmap_pgd)
return -EINVAL;
/*
@ -59,14 +58,3 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
return ret;
}
static int __init cpu_suspend_init(void)
{
suspend_pgd = pgd_alloc(&init_mm);
if (suspend_pgd) {
unsigned long addr = virt_to_phys(cpu_resume_mmu);
identity_mapping_add(suspend_pgd, addr, addr + SECTION_SIZE);
}
return suspend_pgd ? 0 : -ENOMEM;
}
core_initcall(cpu_suspend_init);

View File

@ -13,6 +13,12 @@
*(.proc.info.init) \
VMLINUX_SYMBOL(__proc_info_end) = .;
#define IDMAP_TEXT \
ALIGN_FUNCTION(); \
VMLINUX_SYMBOL(__idmap_text_start) = .; \
*(.idmap.text) \
VMLINUX_SYMBOL(__idmap_text_end) = .;
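
The __idmap_text_start/__idmap_text_end symbols emitted here can also be consulted from C, for example to check whether an address lies in the identity-mapped text. A small sketch (illustrative only, not code from this commit):

#include <linux/types.h>

extern char __idmap_text_start[], __idmap_text_end[];

static bool in_idmap_text(unsigned long addr)
{
	return addr >= (unsigned long)__idmap_text_start &&
	       addr <  (unsigned long)__idmap_text_end;
}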
#ifdef CONFIG_HOTPLUG_CPU
#define ARM_CPU_DISCARD(x)
#define ARM_CPU_KEEP(x) x
@ -92,6 +98,7 @@ SECTIONS
SCHED_TEXT
LOCK_TEXT
KPROBES_TEXT
IDMAP_TEXT
#ifdef CONFIG_MMU
*(.fixup)
#endif

View File

@ -13,7 +13,8 @@ lib-y := backtrace.o changebit.o csumipv6.o csumpartial.o \
testchangebit.o testclearbit.o testsetbit.o \
ashldi3.o ashrdi3.o lshrdi3.o muldi3.o \
ucmpdi2.o lib1funcs.o div64.o \
io-readsb.o io-writesb.o io-readsl.o io-writesl.o
io-readsb.o io-writesb.o io-readsl.o io-writesl.o \
call_with_stack.o
mmu-y := clear_user.o copy_page.o getuser.o putuser.o

View File

@ -0,0 +1,44 @@
/*
* arch/arm/lib/call_with_stack.S
*
* Copyright (C) 2011 ARM Ltd.
* Written by Will Deacon <will.deacon@arm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
/*
* void call_with_stack(void (*fn)(void *), void *arg, void *sp)
*
* Change the stack to that pointed at by sp, then invoke fn(arg) with
* the new stack.
*/
ENTRY(call_with_stack)
str sp, [r2, #-4]!
str lr, [r2, #-4]!
mov sp, r2
mov r2, r0
mov r0, r1
adr lr, BSYM(1f)
mov pc, r2
1: ldr lr, [sp]
ldr sp, [sp, #4]
mov pc, lr
ENDPROC(call_with_stack)
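
From C the helper is used as sketched below (illustrative only; the extern declaration matches the one added to process.c above). The end of the buffer is passed because the stores above pre-decrement, saving the old sp/lr just below the supplied stack top, so the worker may return normally and execution resumes on the original stack.

#include <linux/kernel.h>
#include <linux/types.h>

extern void call_with_stack(void (*fn)(void *), void *arg, void *sp);

static u64 scratch_stack[32];

static void worker(void *arg)
{
	/* runs with sp inside scratch_stack[] */
}

static void run_on_scratch_stack(void)
{
	call_with_stack(worker, NULL,
			scratch_stack + ARRAY_SIZE(scratch_stack));
}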

View File

@ -30,14 +30,6 @@
#ifndef __ASSEMBLY__
#ifndef CONFIG_ARCH_AT91X40
#define __arch_ioremap at91_ioremap
#define __arch_iounmap at91_iounmap
#endif
void __iomem *at91_ioremap(unsigned long phys, size_t size, unsigned int type);
void at91_iounmap(volatile void __iomem *addr);
static inline unsigned int at91_sys_read(unsigned int reg_offset)
{
void __iomem *addr = (void __iomem *)AT91_VA_BASE_SYS;

View File

@ -1,28 +0,0 @@
/*
* arch/arm/mach-at91/include/mach/vmalloc.h
*
* Copyright (C) 2003 SAN People
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __ASM_ARCH_VMALLOC_H
#define __ASM_ARCH_VMALLOC_H
#include <mach/hardware.h>
#define VMALLOC_END (AT91_VIRT_BASE & PGDIR_MASK)
#endif

View File

@ -73,24 +73,6 @@ static struct map_desc at91_io_desc __initdata = {
.type = MT_DEVICE,
};
void __iomem *at91_ioremap(unsigned long p, size_t size, unsigned int type)
{
if (p >= AT91_BASE_SYS && p <= (AT91_BASE_SYS + SZ_16K - 1))
return (void __iomem *)AT91_IO_P2V(p);
return __arm_ioremap_caller(p, size, type, __builtin_return_address(0));
}
EXPORT_SYMBOL(at91_ioremap);
void at91_iounmap(volatile void __iomem *addr)
{
unsigned long virt = (unsigned long)addr;
if (virt >= VMALLOC_START && virt < VMALLOC_END)
__iounmap(addr);
}
EXPORT_SYMBOL(at91_iounmap);
#define AT91_DBGU0 0xfffff200
#define AT91_DBGU1 0xffffee00

View File

@ -1615,7 +1615,7 @@ DMA_MemType_t dma_mem_type(void *addr)
{
unsigned long addrVal = (unsigned long)addr;
if (addrVal >= VMALLOC_END) {
if (addrVal >= CONSISTENT_BASE) {
/* NOTE: DMA virtual memory space starts at 0xFFxxxxxx */
/* dma_alloc_xxx pages are physically and virtually contiguous */

View File

@ -1,25 +0,0 @@
/*
*
* Copyright (C) 2000 Russell King.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*
* Move VMALLOC_END to 0xf0000000 so that the vm space can range from
* 0xe0000000 to 0xefffffff. This gives us 256 MB of vm space and handles
* larger physical memory designs better.
*/
#define VMALLOC_END 0xf0000000UL

View File

@ -4,7 +4,7 @@
# Object file lists.
obj-y := irq.o mm.o time.o
obj-y := common.o
obj-m :=
obj-n :=
obj- :=

View File

@ -1,7 +1,9 @@
/*
* linux/arch/arm/mach-clps711x/irq.c
* linux/arch/arm/mach-clps711x/core.c
*
* Copyright (C) 2000 Deep Blue Solutions Ltd.
* Core support for the CLPS711x-based machines.
*
* Copyright (C) 2001,2011 Deep Blue Solutions Ltd
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@ -17,16 +19,42 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/sched.h>
#include <linux/timex.h>
#include <asm/mach/irq.h>
#include <asm/sizes.h>
#include <mach/hardware.h>
#include <asm/irq.h>
#include <asm/leds.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/mach/map.h>
#include <asm/mach/time.h>
#include <asm/hardware/clps7111.h>
/*
* This maps the generic CLPS711x registers
*/
static struct map_desc clps711x_io_desc[] __initdata = {
{
.virtual = CLPS7111_VIRT_BASE,
.pfn = __phys_to_pfn(CLPS7111_PHYS_BASE),
.length = SZ_1M,
.type = MT_DEVICE
}
};
void __init clps711x_map_io(void)
{
iotable_init(clps711x_io_desc, ARRAY_SIZE(clps711x_io_desc));
}
static void int1_mask(struct irq_data *d)
{
u32 intmr1;
@ -112,15 +140,15 @@ void __init clps711x_init_irq(void)
for (i = 0; i < NR_IRQS; i++) {
if (INT1_IRQS & (1 << i)) {
irq_set_chip_and_handler(i, &int1_chip,
irq_set_chip_and_handler(i, &int1_chip,
handle_level_irq);
set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
}
if (INT2_IRQS & (1 << i)) {
irq_set_chip_and_handler(i, &int2_chip,
handle_level_irq);
set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
}
}
}
/*
@ -141,3 +169,54 @@ void __init clps711x_init_irq(void)
clps_writel(0, SYNCIO);
clps_writel(0, KBDEOI);
}
/*
* gettimeoffset() returns time since last timer tick, in usecs.
*
* 'LATCH' is hwclock ticks (see CLOCK_TICK_RATE in timex.h) per jiffy.
* 'tick' is usecs per jiffy.
*/
static unsigned long clps711x_gettimeoffset(void)
{
unsigned long hwticks;
hwticks = LATCH - (clps_readl(TC2D) & 0xffff); /* since last underflow */
return (hwticks * (tick_nsec / 1000)) / LATCH;
}
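
As a rough worked example (assuming the 512 kHz timer clock and HZ=100 programmed below, so LATCH = 512000 / 100 = 5120 ticks per jiffy and tick_nsec / 1000 = 10000 usec per jiffy): 2560 elapsed hardware ticks, i.e. half a period, give 2560 * 10000 / 5120 = 5000 usec since the last tick.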
/*
* IRQ handler for the timer
*/
static irqreturn_t p720t_timer_interrupt(int irq, void *dev_id)
{
timer_tick();
return IRQ_HANDLED;
}
static struct irqaction clps711x_timer_irq = {
.name = "CLPS711x Timer Tick",
.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
.handler = p720t_timer_interrupt,
};
static void __init clps711x_timer_init(void)
{
struct timespec tv;
unsigned int syscon;
syscon = clps_readl(SYSCON1);
syscon |= SYSCON1_TC2S | SYSCON1_TC2M;
clps_writel(syscon, SYSCON1);
clps_writel(LATCH-1, TC2D); /* 512kHz / 100Hz - 1 */
setup_irq(IRQ_TC2OI, &clps711x_timer_irq);
tv.tv_nsec = 0;
tv.tv_sec = clps_readl(RTCDR);
do_settimeofday(&tv);
}
struct sys_timer clps711x_timer = {
.init = clps711x_timer_init,
.offset = clps711x_gettimeoffset,
};

View File

@ -34,7 +34,7 @@ static inline void arch_idle(void)
static inline void arch_reset(char mode, const char *cmd)
{
cpu_reset(0);
soft_restart(0);
}
#endif

View File

@ -1,20 +0,0 @@
/*
* arch/arm/mach-clps711x/include/mach/vmalloc.h
*
* Copyright (C) 2000 Deep Blue Solutions Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#define VMALLOC_END 0xd0000000UL

View File

@ -1,48 +0,0 @@
/*
* linux/arch/arm/mach-clps711x/mm.c
*
* Generic MM setup for the CLPS711x-based machines.
*
* Copyright (C) 2001 Deep Blue Solutions Ltd
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <asm/sizes.h>
#include <mach/hardware.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/mach/map.h>
#include <asm/hardware/clps7111.h>
/*
* This maps the generic CLPS711x registers
*/
static struct map_desc clps711x_io_desc[] __initdata = {
{
.virtual = CLPS7111_VIRT_BASE,
.pfn = __phys_to_pfn(CLPS7111_PHYS_BASE),
.length = SZ_1M,
.type = MT_DEVICE
}
};
void __init clps711x_map_io(void)
{
iotable_init(clps711x_io_desc, ARRAY_SIZE(clps711x_io_desc));
}

View File

@ -1,84 +0,0 @@
/*
* linux/arch/arm/mach-clps711x/time.c
*
* Copyright (C) 2001 Deep Blue Solutions Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/timex.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/sched.h>
#include <linux/io.h>
#include <mach/hardware.h>
#include <asm/irq.h>
#include <asm/leds.h>
#include <asm/hardware/clps7111.h>
#include <asm/mach/time.h>
/*
* gettimeoffset() returns time since last timer tick, in usecs.
*
* 'LATCH' is hwclock ticks (see CLOCK_TICK_RATE in timex.h) per jiffy.
* 'tick' is usecs per jiffy.
*/
static unsigned long clps711x_gettimeoffset(void)
{
unsigned long hwticks;
hwticks = LATCH - (clps_readl(TC2D) & 0xffff); /* since last underflow */
return (hwticks * (tick_nsec / 1000)) / LATCH;
}
/*
* IRQ handler for the timer
*/
static irqreturn_t
p720t_timer_interrupt(int irq, void *dev_id)
{
timer_tick();
return IRQ_HANDLED;
}
static struct irqaction clps711x_timer_irq = {
.name = "CLPS711x Timer Tick",
.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
.handler = p720t_timer_interrupt,
};
static void __init clps711x_timer_init(void)
{
struct timespec tv;
unsigned int syscon;
syscon = clps_readl(SYSCON1);
syscon |= SYSCON1_TC2S | SYSCON1_TC2M;
clps_writel(syscon, SYSCON1);
clps_writel(LATCH-1, TC2D); /* 512kHz / 100Hz - 1 */
setup_irq(IRQ_TC2OI, &clps711x_timer_irq);
tv.tv_nsec = 0;
tv.tv_sec = clps_readl(RTCDR);
do_settimeofday(&tv);
}
struct sys_timer clps711x_timer = {
.init = clps711x_timer_init,
.offset = clps711x_gettimeoffset,
};

View File

@ -26,6 +26,7 @@
#include <linux/mtd/partitions.h>
#include <asm/setup.h>
#include <asm/mach-types.h>
#include <asm/hardware/gic.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/time.h>
@ -201,5 +202,6 @@ MACHINE_START(CNS3420VB, "Cavium Networks CNS3420 Validation Board")
.map_io = cns3420_map_io,
.init_irq = cns3xxx_init_irq,
.timer = &cns3xxx_timer,
.handle_irq = gic_handle_irq,
.init_machine = cns3420_init,
MACHINE_END

View File

@ -8,8 +8,6 @@
* published by the Free Software Foundation.
*/
#include <asm/hardware/entry-macro-gic.S>
.macro disable_fiq
.endm

View File

@ -1,11 +0,0 @@
/*
* Copyright 2000 Russell King.
* Copyright 2003 ARM Limited
* Copyright 2008 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*/
#define VMALLOC_END 0xd8000000UL

View File

@ -4,7 +4,7 @@
#
# Common objects
obj-y := time.o clock.o serial.o io.o psc.o \
obj-y := time.o clock.o serial.o psc.o \
dma.o usb.o common.o sram.o aemif.o
obj-$(CONFIG_DAVINCI_MUX) += mux.o

View File

@ -21,12 +21,4 @@
#define __mem_pci(a) (a)
#define __mem_isa(a) (a)
#ifndef __ASSEMBLER__
#define __arch_ioremap davinci_ioremap
#define __arch_iounmap davinci_iounmap
void __iomem *davinci_ioremap(unsigned long phys, size_t size,
unsigned int type);
void davinci_iounmap(volatile void __iomem *addr);
#endif
#endif /* __ASM_ARCH_IO_H */

View File

@ -1,14 +0,0 @@
/*
* DaVinci vmalloc definitions
*
* Author: Kevin Hilman, MontaVista Software, Inc. <source@mvista.com>
*
* 2007 (c) MontaVista Software, Inc. This file is licensed under
* the terms of the GNU General Public License version 2. This program
* is licensed "as is" without any warranty of any kind, whether express
* or implied.
*/
#include <mach/hardware.h>
/* Allow vmalloc range until the IO virtual range minus a 2M "hole" */
#define VMALLOC_END (IO_VIRT - (2<<20))

View File

@ -1,48 +0,0 @@
/*
* DaVinci I/O mapping code
*
* Copyright (C) 2005-2006 Texas Instruments
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/io.h>
#include <asm/tlb.h>
#include <asm/mach/map.h>
#include <mach/common.h>
/*
* Intercept ioremap() requests for addresses in our fixed mapping regions.
*/
void __iomem *davinci_ioremap(unsigned long p, size_t size, unsigned int type)
{
struct map_desc *desc = davinci_soc_info.io_desc;
int desc_num = davinci_soc_info.io_desc_num;
int i;
for (i = 0; i < desc_num; i++, desc++) {
unsigned long iophys = __pfn_to_phys(desc->pfn);
unsigned long iosize = desc->length;
if (p >= iophys && (p + size) <= (iophys + iosize))
return __io(desc->virtual + p - iophys);
}
return __arm_ioremap_caller(p, size, type,
__builtin_return_address(0));
}
EXPORT_SYMBOL(davinci_ioremap);
void davinci_iounmap(volatile void __iomem *addr)
{
unsigned long virt = (unsigned long)addr;
if (virt >= VMALLOC_START && virt < VMALLOC_END)
__iounmap(addr);
}
EXPORT_SYMBOL(davinci_iounmap);

View File

@ -11,8 +11,6 @@
#ifndef __ASM_ARCH_DOVE_H
#define __ASM_ARCH_DOVE_H
#include <mach/vmalloc.h>
/*
* Marvell Dove address maps.
*

View File

@ -1,5 +0,0 @@
/*
* arch/arm/mach-dove/include/mach/vmalloc.h
*/
#define VMALLOC_END 0xfd800000UL

View File

@ -283,7 +283,7 @@ MACHINE_START(EBSA110, "EBSA110")
.atag_offset = 0x400,
.reserve_lp0 = 1,
.reserve_lp2 = 1,
.soft_reboot = 1,
.restart_mode = 's',
.map_io = ebsa110_map_io,
.init_irq = ebsa110_init_irq,
.timer = &ebsa110_timer,

View File

@ -34,6 +34,6 @@ static inline void arch_idle(void)
asm volatile ("mcr p15, 0, ip, c15, c1, 2" : : : "cc");
}
#define arch_reset(mode, cmd) cpu_reset(0x80000000)
#define arch_reset(mode, cmd) soft_restart(0x80000000)
#endif

View File

@ -1,10 +0,0 @@
/*
* arch/arm/mach-ebsa110/include/mach/vmalloc.h
*
* Copyright (C) 1998 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#define VMALLOC_END 0xdf000000UL

View File

@ -16,6 +16,7 @@
#include <mach/hardware.h>
#include <asm/hardware/vic.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@ -36,6 +37,7 @@ MACHINE_START(ADSSPHERE, "ADS Sphere board")
.atag_offset = 0x100,
.map_io = ep93xx_map_io,
.init_irq = ep93xx_init_irq,
.handle_irq = vic_handle_irq,
.timer = &ep93xx_timer,
.init_machine = adssphere_init_machine,
MACHINE_END

View File

@ -39,6 +39,7 @@
#include <mach/ep93xx_spi.h>
#include <mach/gpio-ep93xx.h>
#include <asm/hardware/vic.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@ -250,6 +251,7 @@ MACHINE_START(EDB9301, "Cirrus Logic EDB9301 Evaluation Board")
.atag_offset = 0x100,
.map_io = ep93xx_map_io,
.init_irq = ep93xx_init_irq,
.handle_irq = vic_handle_irq,
.timer = &ep93xx_timer,
.init_machine = edb93xx_init_machine,
MACHINE_END
@ -261,6 +263,7 @@ MACHINE_START(EDB9302, "Cirrus Logic EDB9302 Evaluation Board")
.atag_offset = 0x100,
.map_io = ep93xx_map_io,
.init_irq = ep93xx_init_irq,
.handle_irq = vic_handle_irq,
.timer = &ep93xx_timer,
.init_machine = edb93xx_init_machine,
MACHINE_END
@ -272,6 +275,7 @@ MACHINE_START(EDB9302A, "Cirrus Logic EDB9302A Evaluation Board")
.atag_offset = 0x100,
.map_io = ep93xx_map_io,
.init_irq = ep93xx_init_irq,
.handle_irq = vic_handle_irq,
.timer = &ep93xx_timer,
.init_machine = edb93xx_init_machine,
MACHINE_END
@ -283,6 +287,7 @@ MACHINE_START(EDB9307, "Cirrus Logic EDB9307 Evaluation Board")
.atag_offset = 0x100,
.map_io = ep93xx_map_io,
.init_irq = ep93xx_init_irq,
.handle_irq = vic_handle_irq,
.timer = &ep93xx_timer,
.init_machine = edb93xx_init_machine,
MACHINE_END
@ -294,6 +299,7 @@ MACHINE_START(EDB9307A, "Cirrus Logic EDB9307A Evaluation Board")
.atag_offset = 0x100,
.map_io = ep93xx_map_io,
.init_irq = ep93xx_init_irq,
.handle_irq = vic_handle_irq,
.timer = &ep93xx_timer,
.init_machine = edb93xx_init_machine,
MACHINE_END
@ -305,6 +311,7 @@ MACHINE_START(EDB9312, "Cirrus Logic EDB9312 Evaluation Board")
.atag_offset = 0x100,
.map_io = ep93xx_map_io,
.init_irq = ep93xx_init_irq,
.handle_irq = vic_handle_irq,
.timer = &ep93xx_timer,
.init_machine = edb93xx_init_machine,
MACHINE_END
@ -316,6 +323,7 @@ MACHINE_START(EDB9315, "Cirrus Logic EDB9315 Evaluation Board")
.atag_offset = 0x100,
.map_io = ep93xx_map_io,
.init_irq = ep93xx_init_irq,
.handle_irq = vic_handle_irq,
.timer = &ep93xx_timer,
.init_machine = edb93xx_init_machine,
MACHINE_END
@ -327,6 +335,7 @@ MACHINE_START(EDB9315A, "Cirrus Logic EDB9315A Evaluation Board")
.atag_offset = 0x100,
.map_io = ep93xx_map_io,
.init_irq = ep93xx_init_irq,
.handle_irq = vic_handle_irq,
.timer = &ep93xx_timer,
.init_machine = edb93xx_init_machine,
MACHINE_END

View File

@ -16,6 +16,7 @@
#include <mach/hardware.h>
#include <asm/hardware/vic.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@ -36,6 +37,7 @@ MACHINE_START(GESBC9312, "Glomation GESBC-9312-sx")
.atag_offset = 0x100,
.map_io = ep93xx_map_io,
.init_irq = ep93xx_init_irq,
.handle_irq = vic_handle_irq,
.timer = &ep93xx_timer,
.init_machine = gesbc9312_init_machine,
MACHINE_END

View File

@ -9,51 +9,9 @@
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*/
#include <mach/ep93xx-regs.h>
.macro disable_fiq
.endm
.macro get_irqnr_preamble, base, tmp
.endm
.macro arch_ret_to_user, tmp1, tmp2
.endm
.macro get_irqnr_and_base, irqnr, irqstat, base, tmp
ldr \base, =(EP93XX_AHB_VIRT_BASE)
orr \base, \base, #0x000b0000
mov \irqnr, #0
ldr \irqstat, [\base] @ lower 32 interrupts
cmp \irqstat, #0
bne 1001f
eor \base, \base, #0x00070000
ldr \irqstat, [\base] @ upper 32 interrupts
cmp \irqstat, #0
beq 1002f
mov \irqnr, #0x20
1001:
movs \tmp, \irqstat, lsl #16
movne \irqstat, \tmp
addeq \irqnr, \irqnr, #16
movs \tmp, \irqstat, lsl #8
movne \irqstat, \tmp
addeq \irqnr, \irqnr, #8
movs \tmp, \irqstat, lsl #4
movne \irqstat, \tmp
addeq \irqnr, \irqnr, #4
movs \tmp, \irqstat, lsl #2
movne \irqstat, \tmp
addeq \irqnr, \irqnr, #2
movs \tmp, \irqstat, lsl #1
addeq \irqnr, \irqnr, #1
orrs \base, \base, #1
1002:
.endm

View File

@ -11,8 +11,6 @@ static inline void arch_idle(void)
static inline void arch_reset(char mode, const char *cmd)
{
local_irq_disable();
/*
* Set then clear the SWRST bit to initiate a software reset
*/

View File

@ -1,5 +0,0 @@
/*
* arch/arm/mach-ep93xx/include/mach/vmalloc.h
*/
#define VMALLOC_END 0xfe800000UL

View File

@ -18,6 +18,7 @@
#include <mach/hardware.h>
#include <asm/hardware/vic.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@ -80,6 +81,7 @@ MACHINE_START(MICRO9, "Contec Micro9-High")
.atag_offset = 0x100,
.map_io = ep93xx_map_io,
.init_irq = ep93xx_init_irq,
.handle_irq = vic_handle_irq,
.timer = &ep93xx_timer,
.init_machine = micro9_init_machine,
MACHINE_END
@ -91,6 +93,7 @@ MACHINE_START(MICRO9M, "Contec Micro9-Mid")
.atag_offset = 0x100,
.map_io = ep93xx_map_io,
.init_irq = ep93xx_init_irq,
.handle_irq = vic_handle_irq,
.timer = &ep93xx_timer,
.init_machine = micro9_init_machine,
MACHINE_END
@ -102,6 +105,7 @@ MACHINE_START(MICRO9L, "Contec Micro9-Lite")
.atag_offset = 0x100,
.map_io = ep93xx_map_io,
.init_irq = ep93xx_init_irq,
.handle_irq = vic_handle_irq,
.timer = &ep93xx_timer,
.init_machine = micro9_init_machine,
MACHINE_END
@ -113,6 +117,7 @@ MACHINE_START(MICRO9S, "Contec Micro9-Slim")
.atag_offset = 0x100,
.map_io = ep93xx_map_io,
.init_irq = ep93xx_init_irq,
.handle_irq = vic_handle_irq,
.timer = &ep93xx_timer,
.init_machine = micro9_init_machine,
MACHINE_END

View File

@ -25,6 +25,7 @@
#include <mach/fb.h>
#include <mach/gpio-ep93xx.h>
#include <asm/hardware/vic.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@ -80,6 +81,7 @@ MACHINE_START(SIM_ONE, "Simplemachines Sim.One Board")
.atag_offset = 0x100,
.map_io = ep93xx_map_io,
.init_irq = ep93xx_init_irq,
.handle_irq = vic_handle_irq,
.timer = &ep93xx_timer,
.init_machine = simone_init_machine,
MACHINE_END

View File

@ -31,6 +31,7 @@
#include <mach/fb.h>
#include <mach/gpio-ep93xx.h>
#include <asm/hardware/vic.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
@ -177,6 +178,7 @@ MACHINE_START(SNAPPER_CL15, "Bluewater Systems Snapper CL15")
.atag_offset = 0x100,
.map_io = ep93xx_map_io,
.init_irq = ep93xx_init_irq,
.handle_irq = vic_handle_irq,
.timer = &ep93xx_timer,
.init_machine = snappercl15_init_machine,
MACHINE_END

View File

@ -23,6 +23,7 @@
#include <mach/hardware.h>
#include <mach/ts72xx.h>
#include <asm/hardware/vic.h>
#include <asm/mach-types.h>
#include <asm/mach/map.h>
#include <asm/mach/arch.h>
@ -247,6 +248,7 @@ MACHINE_START(TS72XX, "Technologic Systems TS-72xx SBC")
.atag_offset = 0x100,
.map_io = ts72xx_map_io,
.init_irq = ep93xx_init_irq,
.handle_irq = vic_handle_irq,
.timer = &ep93xx_timer,
.init_machine = ts72xx_init_machine,
MACHINE_END

View File

@ -15,6 +15,7 @@
#include <asm/mach/irq.h>
#include <asm/proc-fns.h>
#include <asm/exception.h>
#include <asm/hardware/cache-l2x0.h>
#include <asm/hardware/gic.h>
@ -33,8 +34,6 @@
#include <mach/regs-irq.h>
#include <mach/regs-pmu.h>
unsigned int gic_bank_offset __read_mostly;
extern int combiner_init(unsigned int combiner_nr, void __iomem *base,
unsigned int irq_start);
extern void combiner_cascade_irq(unsigned int combiner_nr, unsigned int irq);
@ -207,27 +206,14 @@ void __init exynos4_init_clocks(int xtal)
exynos4_setup_clocks();
}
static void exynos4_gic_irq_fix_base(struct irq_data *d)
{
struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
gic_data->cpu_base = S5P_VA_GIC_CPU +
(gic_bank_offset * smp_processor_id());
gic_data->dist_base = S5P_VA_GIC_DIST +
(gic_bank_offset * smp_processor_id());
}
void __init exynos4_init_irq(void)
{
int irq;
unsigned int gic_bank_offset;
gic_bank_offset = soc_is_exynos4412() ? 0x4000 : 0x8000;
gic_init(0, IRQ_PPI(0), S5P_VA_GIC_DIST, S5P_VA_GIC_CPU);
gic_arch_extn.irq_eoi = exynos4_gic_irq_fix_base;
gic_arch_extn.irq_unmask = exynos4_gic_irq_fix_base;
gic_arch_extn.irq_mask = exynos4_gic_irq_fix_base;
gic_init_bases(0, IRQ_PPI(0), S5P_VA_GIC_DIST, S5P_VA_GIC_CPU, gic_bank_offset);
for (irq = 0; irq < MAX_COMBINER_NR; irq++) {

View File

@ -9,83 +9,8 @@
* warranty of any kind, whether express or implied.
*/
#include <mach/hardware.h>
#include <mach/map.h>
#include <asm/hardware/gic.h>
.macro disable_fiq
.endm
.macro get_irqnr_preamble, base, tmp
mov \tmp, #0
mrc p15, 0, \base, c0, c0, 5
and \base, \base, #3
cmp \base, #0
beq 1f
ldr \tmp, =gic_bank_offset
ldr \tmp, [\tmp]
cmp \base, #1
beq 1f
cmp \base, #2
addeq \tmp, \tmp, \tmp
addne \tmp, \tmp, \tmp, LSL #1
1: ldr \base, =gic_cpu_base_addr
ldr \base, [\base]
add \base, \base, \tmp
.endm
.macro arch_ret_to_user, tmp1, tmp2
.endm
/*
* The interrupt numbering scheme is defined in the
* interrupt controller spec. To wit:
*
* Interrupts 0-15 are IPI
* 16-28 are reserved
* 29-31 are local. We allow 30 to be used for the watchdog.
* 32-1020 are global
* 1021-1022 are reserved
* 1023 is "spurious" (no interrupt)
*
* For now, we ignore all local interrupts so only return an interrupt if it's
* between 30 and 1020. The test_for_ipi routine below will pick up on IPIs.
*
* A simple read from the controller will tell us the number of the highest
* priority enabled interrupt. We then just need to check whether it is in the
* valid range for an IRQ (30-1020 inclusive).
*/
.macro get_irqnr_and_base, irqnr, irqstat, base, tmp
ldr \irqstat, [\base, #GIC_CPU_INTACK] /* bits 12-10 = src CPU, 9-0 = int # */
ldr \tmp, =1021
bic \irqnr, \irqstat, #0x1c00
cmp \irqnr, #15
cmpcc \irqnr, \irqnr
cmpne \irqnr, \tmp
cmpcs \irqnr, \irqnr
addne \irqnr, \irqnr, #32
.endm
/* We assume that irqstat (the raw value of the IRQ acknowledge
* register) is preserved from the macro above.
* If there is an IPI, we immediately signal end of interrupt on the
* controller, since this requires the original irqstat value which
* we won't easily be able to recreate later.
*/
.macro test_for_ipi, irqnr, irqstat, base, tmp
bic \irqnr, \irqstat, #0x1c00
cmp \irqnr, #16
strcc \irqstat, [\base, #GIC_CPU_EOI]
cmpcs \irqnr, \irqnr
.endm
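
For readers following the numbering-scheme comment above, a rough user-space model of what the two macros do may help. This sketch is hypothetical and not part of this commit: the function names are invented, and the two register offsets are assumed from the generic GIC CPU-interface layout.

#include <stdint.h>
#include <stdbool.h>

#define GIC_CPU_INTACK	0x0c	/* assumed acknowledge register offset */
#define GIC_CPU_EOI	0x10	/* assumed end-of-interrupt register offset */

static uint32_t gic_read(volatile uint8_t *cpu_base, unsigned int off)
{
	return *(volatile uint32_t *)(cpu_base + off);
}

static void gic_write(volatile uint8_t *cpu_base, unsigned int off, uint32_t val)
{
	*(volatile uint32_t *)(cpu_base + off) = val;
}

/* One acknowledge cycle: returns true and fills *irqnr for an ordinary
 * interrupt in the 30-1020 window the comment describes; IPIs (IDs 0-15)
 * are EOI'd immediately with the raw acknowledge value, mirroring
 * test_for_ipi. */
static bool gic_poll_one(volatile uint8_t *cpu_base, unsigned int *irqnr)
{
	uint32_t irqstat = gic_read(cpu_base, GIC_CPU_INTACK);
	uint32_t nr = irqstat & 0x3ff;		/* bits 9-0 = interrupt ID */

	if (nr >= 30 && nr <= 1020) {
		*irqnr = nr;
		return true;
	}
	if (nr < 16)
		gic_write(cpu_base, GIC_CPU_EOI, irqstat);
	return false;
}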

View File

@ -1,22 +0,0 @@
/* linux/arch/arm/mach-exynos4/include/mach/vmalloc.h
*
* Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* Copyright 2010 Ben Dooks <ben-linux@fluff.org>
*
* Based on arch/arm/mach-s5p6440/include/mach/vmalloc.h
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* EXYNOS4 vmalloc definition
*/
#ifndef __ASM_ARCH_VMALLOC_H
#define __ASM_ARCH_VMALLOC_H __FILE__
#define VMALLOC_END 0xF6000000UL
#endif /* __ASM_ARCH_VMALLOC_H */

View File

@ -16,6 +16,7 @@
#include <linux/smsc911x.h>
#include <asm/mach/arch.h>
#include <asm/hardware/gic.h>
#include <asm/mach-types.h>
#include <plat/cpu.h>
@ -210,6 +211,7 @@ MACHINE_START(ARMLEX4210, "ARMLEX4210")
.atag_offset = 0x100,
.init_irq = exynos4_init_irq,
.map_io = armlex4210_map_io,
.handle_irq = gic_handle_irq,
.init_machine = armlex4210_machine_init,
.timer = &exynos4_timer,
MACHINE_END

View File

@ -32,6 +32,7 @@
#include <media/v4l2-mediabus.h>
#include <asm/mach/arch.h>
#include <asm/hardware/gic.h>
#include <asm/mach-types.h>
#include <plat/adc.h>
@ -1333,6 +1334,7 @@ MACHINE_START(NURI, "NURI")
.atag_offset = 0x100,
.init_irq = exynos4_init_irq,
.map_io = nuri_map_io,
.handle_irq = gic_handle_irq,
.init_machine = nuri_machine_init,
.timer = &exynos4_timer,
.reserve = &nuri_reserve,

View File

@ -22,6 +22,7 @@
#include <linux/lcd.h>
#include <asm/mach/arch.h>
#include <asm/hardware/gic.h>
#include <asm/mach-types.h>
#include <video/platform_lcd.h>
@ -694,6 +695,7 @@ MACHINE_START(ORIGEN, "ORIGEN")
.atag_offset = 0x100,
.init_irq = exynos4_init_irq,
.map_io = origen_map_io,
.handle_irq = gic_handle_irq,
.init_machine = origen_machine_init,
.timer = &exynos4_timer,
.reserve = &origen_reserve,

View File

@ -21,6 +21,7 @@
#include <linux/serial_core.h>
#include <asm/mach/arch.h>
#include <asm/hardware/gic.h>
#include <asm/mach-types.h>
#include <plat/backlight.h>
@ -287,6 +288,7 @@ MACHINE_START(SMDK4212, "SMDK4212")
.atag_offset = 0x100,
.init_irq = exynos4_init_irq,
.map_io = smdk4x12_map_io,
.handle_irq = gic_handle_irq,
.init_machine = smdk4x12_machine_init,
.timer = &exynos4_timer,
MACHINE_END
@ -297,6 +299,7 @@ MACHINE_START(SMDK4412, "SMDK4412")
.atag_offset = 0x100,
.init_irq = exynos4_init_irq,
.map_io = smdk4x12_map_io,
.handle_irq = gic_handle_irq,
.init_machine = smdk4x12_machine_init,
.timer = &exynos4_timer,
MACHINE_END

View File

@ -21,6 +21,7 @@
#include <linux/pwm_backlight.h>
#include <asm/mach/arch.h>
#include <asm/hardware/gic.h>
#include <asm/mach-types.h>
#include <video/platform_lcd.h>
@ -375,6 +376,7 @@ MACHINE_START(SMDKV310, "SMDKV310")
.atag_offset = 0x100,
.init_irq = exynos4_init_irq,
.map_io = smdkv310_map_io,
.handle_irq = gic_handle_irq,
.init_machine = smdkv310_machine_init,
.timer = &exynos4_timer,
.reserve = &smdkv310_reserve,
@ -385,6 +387,7 @@ MACHINE_START(SMDKC210, "SMDKC210")
.atag_offset = 0x100,
.init_irq = exynos4_init_irq,
.map_io = smdkv310_map_io,
.handle_irq = gic_handle_irq,
.init_machine = smdkv310_machine_init,
.timer = &exynos4_timer,
MACHINE_END

View File

@ -24,6 +24,7 @@
#include <linux/i2c/atmel_mxt_ts.h>
#include <asm/mach/arch.h>
#include <asm/hardware/gic.h>
#include <asm/mach-types.h>
#include <plat/regs-serial.h>
@ -1058,6 +1059,7 @@ MACHINE_START(UNIVERSAL_C210, "UNIVERSAL_C210")
.atag_offset = 0x100,
.init_irq = exynos4_init_irq,
.map_io = universal_map_io,
.handle_irq = gic_handle_irq,
.init_machine = universal_machine_init,
.timer = &exynos4_timer,
.reserve = &universal_reserve,

View File

@ -32,7 +32,6 @@
#include <plat/cpu.h>
extern unsigned int gic_bank_offset;
extern void exynos4_secondary_startup(void);
#define CPU1_BOOT_REG (samsung_rev() == EXYNOS4210_REV_1_1 ? \
@ -65,31 +64,6 @@ static void __iomem *scu_base_addr(void)
static DEFINE_SPINLOCK(boot_lock);
static void __cpuinit exynos4_gic_secondary_init(void)
{
void __iomem *dist_base = S5P_VA_GIC_DIST +
(gic_bank_offset * smp_processor_id());
void __iomem *cpu_base = S5P_VA_GIC_CPU +
(gic_bank_offset * smp_processor_id());
int i;
/*
* Deal with the banked PPI and SGI interrupts - disable all
* PPI interrupts, ensure all SGI interrupts are enabled.
*/
__raw_writel(0xffff0000, dist_base + GIC_DIST_ENABLE_CLEAR);
__raw_writel(0x0000ffff, dist_base + GIC_DIST_ENABLE_SET);
/*
* Set priority on PPI and SGI interrupts
*/
for (i = 0; i < 32; i += 4)
__raw_writel(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4 / 4);
__raw_writel(0xf0, cpu_base + GIC_CPU_PRIMASK);
__raw_writel(1, cpu_base + GIC_CPU_CTRL);
}
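
The 16/16 split in those two enable writes is easy to miss. Below is a minimal, stand-alone C sketch of what the deleted helper performed, assuming the generic GIC distributor/CPU-interface register layout; the constant and function names are invented for this illustration rather than taken from the kernel headers.

#include <stdint.h>

#define GIC_DIST_ENABLE_SET	0x100
#define GIC_DIST_ENABLE_CLEAR	0x180
#define GIC_DIST_PRI		0x400
#define GIC_CPU_PRIMASK		0x04
#define GIC_CPU_CTRL		0x00

static void reg_write(volatile uint8_t *base, unsigned int off, uint32_t val)
{
	*(volatile uint32_t *)(base + off) = val;
}

/* Per-CPU GIC bring-up: mask the banked PPIs (enable bits 16-31), keep
 * the SGIs (bits 0-15) enabled, give all 32 banked IDs priority 0xa0,
 * then open the priority mask and switch the CPU interface on. */
static void gic_cpu_bringup(volatile uint8_t *dist, volatile uint8_t *cpu)
{
	unsigned int i;

	reg_write(dist, GIC_DIST_ENABLE_CLEAR, 0xffff0000);
	reg_write(dist, GIC_DIST_ENABLE_SET, 0x0000ffff);

	for (i = 0; i < 32; i += 4)		/* 4 priority bytes per word */
		reg_write(dist, GIC_DIST_PRI + i, 0xa0a0a0a0);

	reg_write(cpu, GIC_CPU_PRIMASK, 0xf0);	/* pass priorities below 0xf0 */
	reg_write(cpu, GIC_CPU_CTRL, 1);	/* enable this CPU interface */
}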
void __cpuinit platform_secondary_init(unsigned int cpu)
{
/*
@ -97,7 +71,7 @@ void __cpuinit platform_secondary_init(unsigned int cpu)
* core (e.g. timer irq), then they will not have been enabled
* for us: do so
*/
exynos4_gic_secondary_init();
gic_secondary_init(0);
/*
* let the primary processor know we're out of the

View File

@ -86,7 +86,7 @@ fixup_cats(struct tag *tags, char **cmdline, struct meminfo *mi)
MACHINE_START(CATS, "Chalice-CATS")
/* Maintainer: Philip Blundell */
.atag_offset = 0x100,
.soft_reboot = 1,
.restart_mode = 's',
.fixup = fixup_cats,
.map_io = footbridge_map_io,
.init_irq = footbridge_init_irq,

View File

@ -24,7 +24,7 @@ static inline void arch_reset(char mode, const char *cmd)
/*
* Jump into the ROM
*/
cpu_reset(0x41000000);
soft_restart(0x41000000);
} else {
if (machine_is_netwinder()) {
/* open up the SuperIO chip

View File

@ -1,10 +0,0 @@
/*
* arch/arm/mach-footbridge/include/mach/vmalloc.h
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#define VMALLOC_END 0xf0000000UL

View File

@ -1,10 +0,0 @@
/*
* Copyright (C) 2008-2009 Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#define VMALLOC_END 0xf0000000UL

View File

@ -1,10 +0,0 @@
/*
* arch/arm/mach-h720x/include/mach/vmalloc.h
*/
#ifndef __ARCH_ARM_VMALLOC_H
#define __ARCH_ARM_VMALLOC_H
#define VMALLOC_END 0xd0000000UL
#endif

View File

@ -144,6 +144,7 @@ DT_MACHINE_START(HIGHBANK, "Highbank")
.map_io = highbank_map_io,
.init_irq = highbank_init_irq,
.timer = &highbank_timer,
.handle_irq = gic_handle_irq,
.init_machine = highbank_init,
.dt_compat = highbank_match,
MACHINE_END

View File

@ -1,5 +1,3 @@
#include <asm/hardware/entry-macro-gic.S>
.macro disable_fiq
.endm

View File

@ -1 +0,0 @@
#define VMALLOC_END 0xFEE00000UL

View File

@ -1,20 +0,0 @@
/*
* arch/arm/mach-integrator/include/mach/vmalloc.h
*
* Copyright (C) 2000 Russell King.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#define VMALLOC_END 0xd0000000UL

View File

@ -1,4 +0,0 @@
#ifndef _VMALLOC_H_
#define _VMALLOC_H_
#define VMALLOC_END 0xfa000000UL
#endif

View File

@ -13,15 +13,8 @@
#include <asm/hardware/iop3xx.h>
extern void __iomem *__iop3xx_ioremap(unsigned long cookie, size_t size,
unsigned int mtype);
extern void __iop3xx_iounmap(void __iomem *addr);
#define IO_SPACE_LIMIT 0xffffffff
#define __io(p) ((void __iomem *)IOP3XX_PCI_IO_PHYS_TO_VIRT(p))
#define __mem_pci(a) (a)
#define __arch_ioremap __iop3xx_ioremap
#define __arch_iounmap __iop3xx_iounmap
#endif

View File

@ -18,8 +18,6 @@ static inline void arch_idle(void)
static inline void arch_reset(char mode, const char *cmd)
{
local_irq_disable();
if (machine_is_n2100()) {
gpio_line_set(N2100_HARDWARE_RESET, GPIO_LOW);
gpio_line_config(N2100_HARDWARE_RESET, GPIO_OUT);
@ -30,5 +28,5 @@ static inline void arch_reset(char mode, const char *cmd)
*IOP3XX_PCSR = 0x30;
/* Jump into ROM at address 0 */
cpu_reset(0);
soft_restart(0);
}

Some files were not shown because too many files have changed in this diff.