Merge with /pub/scm/linux/kernel/git/torvalds/linux-2.6.git

Steve French 2005-11-15 16:49:04 -08:00
commit c12489ad40
184 changed files with 2731 additions and 13079 deletions


@ -386,7 +386,7 @@ X!Edrivers/pnp/system.c
<chapter id="blkdev">
<title>Block Devices</title>
!Edrivers/block/ll_rw_blk.c
!Eblock/ll_rw_blk.c
</chapter>
<chapter id="miscdev">


@ -1063,8 +1063,8 @@ Aside:
4.4 I/O contexts
I/O contexts provide a dynamically allocated per-process data area. They may
be used in I/O schedulers, and in the block layer (could be used for IO stats,
priorities for example). See *io_context in drivers/block/ll_rw_blk.c, and
as-iosched.c for an example of usage in an i/o scheduler.
priorities for example). See *io_context in block/ll_rw_blk.c, and as-iosched.c
for an example of usage in an i/o scheduler.
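As a hedged illustration of the interface described above (the function names
follow the 2.6-era code in block/ll_rw_blk.c, but treat the exact signatures
as assumptions), an I/O scheduler can reach the per-process area roughly so:

	#include <linux/blkdev.h>

	/* Sketch: take a reference on current's io_context and touch the
	 * per-process state an elevator might keep there. */
	static void example_touch_io_context(void)
	{
		struct io_context *ioc = get_io_context(GFP_ATOMIC);

		if (!ioc)
			return;			/* allocation failed */
		/* ... inspect or update scheduler-private fields here ... */
		put_io_context(ioc);		/* drop the reference */
	}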
5. Scalability related changes


@ -140,3 +140,12 @@ What: EXPORT_SYMBOL(lookup_hash)
When: January 2006
Why: Too low-level interface. Use lookup_one_len or lookup_create instead.
Who: Christoph Hellwig <hch@lst.de>
---------------------------
What: START_ARRAY ioctl for md
When: July 2006
Files: drivers/md/md.c
Why: Not reliable by design - can fail when most needed.
Alternatives exist
Who: NeilBrown <neilb@suse.de>


@ -32,7 +32,10 @@ the disk is not available then you have three options :-
has restarted. Messy but it is the only option if you have not
planned for a crash. Alternatively, you can take a picture of
the screen with a digital camera - not nice, but better than
nothing.
nothing. If the messages scroll off the top of the console, you
may find that booting with a higher resolution (e.g., vga=791)
will allow you to read more of the text. (Caveat: This needs vesafb,
so won't help for 'early' oopses)
(2) Boot with a serial console (see Documentation/serial-console.txt),
run a null modem to a second machine and capture the output there
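As a hedged illustration of the vga= suggestion in option (1), a GRUB 1 menu
entry might look as follows (kernel path and root device are invented; vga=791
requests a 1024x768, 16bpp vesafb mode):

	title Linux 2.6.15
	kernel /boot/vmlinuz-2.6.15 root=/dev/hda1 ro vga=791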


@ -7,10 +7,12 @@ Machine check
mce=off disable machine check
mce=bootlog Enable logging of machine checks left over from booting.
Disabled by default because some BIOS leave bogus ones.
Disabled by default on AMD because some BIOS leave bogus ones.
If your BIOS doesn't do that, it's a good idea to enable it anyway
to make sure you log even machine check events that result
in a reboot.
in a reboot. On Intel systems it is enabled by default.
mce=nobootlog
Disable boot machine check logging.
mce=tolerancelevel (number)
0: always panic, 1: panic if deadlock possible,
2: try to avoid panic, 3: never panic or exit (for testing)
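Putting these together, a hypothetical command line that keeps boot-time
machine check logging enabled and selects the most lenient tolerance level
could read as below; the bare-number form for the tolerance level is an
assumption here, so check the mce= parsing in arch/x86_64/kernel/mce.c:

	kernel /boot/vmlinuz root=/dev/sda2 ro mce=bootlog mce=3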
@ -122,6 +124,9 @@ SMP
cpumask=MASK only use cpus with bits set in mask
additional_cpus=NUM Allow NUM more CPUs for hotplug
(defaults are specified by the BIOS or half the available CPUs)
NUMA
numa=off Only set up a single NUMA node spanning all memory.
@ -188,6 +193,9 @@ Debugging
kstack=N Print that many words from the kernel stack in oops dumps.
pagefaulttrace Dump all page faults. Only useful for extreme debugging
and will create a lot of output.
Misc
noreplacement Don't replace instructions with more appropriate ones


@ -6,7 +6,7 @@ Virtual memory map with 4 level page tables:
0000000000000000 - 00007fffffffffff (=47bits) user space, different per mm
hole caused by [48:63] sign extension
ffff800000000000 - ffff80ffffffffff (=40bits) guard hole
ffff810000000000 - ffffc0ffffffffff (=46bits) direct mapping of phys. memory
ffff810000000000 - ffffc0ffffffffff (=46bits) direct mapping of all phys. memory
ffffc10000000000 - ffffc1ffffffffff (=40bits) hole
ffffc20000000000 - ffffe1ffffffffff (=45bits) vmalloc/ioremap space
... unused hole ...
@ -14,6 +14,10 @@ ffffffff80000000 - ffffffff82800000 (=40MB) kernel text mapping, from phys 0
... unused hole ...
ffffffff88000000 - fffffffffff00000 (=1919MB) module mapping space
The direct mapping covers all memory in the system up to the highest
memory address (this means in some cases it can also include PCI memory
holes)
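Since the direct mapping is one constant offset, converting between physical
and virtual addresses in that region is plain addition; a minimal sketch of
the arithmetic implied by the table above (helper names are illustrative, not
the kernel's own):

	#include <stdint.h>

	#define DIRECT_MAP_BASE 0xffff810000000000ULL	/* from the table above */

	/* Illustrative equivalents of __va()/__pa() under this layout. */
	static inline uint64_t virt_of_phys(uint64_t phys)
	{
		return phys + DIRECT_MAP_BASE;
	}

	static inline uint64_t phys_of_virt(uint64_t virt)
	{
		return virt - DIRECT_MAP_BASE;
	}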
vmalloc space is lazily synchronized into the different PML4 pages of
the processes using the page fault handler, with init_level4_pgt as
reference.


@ -652,25 +652,11 @@ endmenu
menu "Power management options"
config PM
bool "Power Management support"
---help---
"Power Management" means that parts of your computer are shut
off or put into a power conserving "sleep" mode if they are not
being used. There are two competing standards for doing this: APM
and ACPI. If you want to use either one, say Y here and then also
to the requisite support below.
Power Management is most important for battery powered laptop
computers; if you have a laptop, check out the Linux Laptop home
page on the WWW at <http://www.linux-on-laptops.com/> or
Tuxmobil - Linux on Mobile Computers at <http://www.tuxmobil.org/>
and the Battery Powered Linux mini-HOWTO, available from
<http://www.tldp.org/docs.html#howto>.
source "kernel/power/Kconfig"
config APM
tristate "Advanced Power Management Emulation"
depends on PM
depends on PM_LEGACY
---help---
APM is a BIOS specification for saving power using several different
techniques. This is mostly useful for battery powered laptops with


@ -1266,7 +1266,7 @@ static void __exit sa1111_exit(void)
bus_unregister(&sa1111_bus_type);
}
module_init(sa1111_init);
subsys_initcall(sa1111_init);
module_exit(sa1111_exit);
MODULE_DESCRIPTION("Intel Corporation SA1111 core driver");


@ -39,17 +39,14 @@
#ifdef CONFIG_X86_64
static inline void acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
}
extern void __init clustered_apic_check(void);
static inline int ioapic_setup_disabled(void)
{
return 0;
}
extern int gsi_irq_sharing(int gsi);
#include <asm/proto.h>
static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id) { return 0; }
#else /* X86 */
#ifdef CONFIG_X86_LOCAL_APIC
@ -57,6 +54,8 @@ static inline int ioapic_setup_disabled(void)
#include <mach_mpparse.h>
#endif /* CONFIG_X86_LOCAL_APIC */
static inline int gsi_irq_sharing(int gsi) { return gsi; }
#endif /* X86 */
#define BAD_MADT_ENTRY(entry, end) ( \
@ -459,7 +458,7 @@ int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
*irq = IO_APIC_VECTOR(gsi);
else
#endif
*irq = gsi;
*irq = gsi_irq_sharing(gsi);
return 0;
}
@ -543,7 +542,7 @@ acpi_scan_rsdp(unsigned long start, unsigned long length)
* RSDP signature.
*/
for (offset = 0; offset < length; offset += 16) {
if (strncmp((char *)(start + offset), "RSD PTR ", sig_len))
if (strncmp((char *)(phys_to_virt(start) + offset), "RSD PTR ", sig_len))
continue;
return (start + offset);
}
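A hedged note on the one-line fix above:

	/* start/length describe a physical range, so the scan has to map each
	 * candidate through phys_to_virt() before dereferencing it; on a
	 * signature match the function still returns the physical address
	 * (start + offset), not the virtual one. */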


@ -206,9 +206,9 @@ static void __init init_amd(struct cpuinfo_x86 *c)
display_cacheinfo(c);
if (cpuid_eax(0x80000000) >= 0x80000008) {
c->x86_num_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
if (c->x86_num_cores & (c->x86_num_cores - 1))
c->x86_num_cores = 1;
c->x86_max_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
if (c->x86_max_cores & (c->x86_max_cores - 1))
c->x86_max_cores = 1;
}
#ifdef CONFIG_X86_HT
@ -217,15 +217,15 @@ static void __init init_amd(struct cpuinfo_x86 *c)
* distinguish the cores. Assumes number of cores is a power
* of two.
*/
if (c->x86_num_cores > 1) {
if (c->x86_max_cores > 1) {
int cpu = smp_processor_id();
unsigned bits = 0;
while ((1 << bits) < c->x86_num_cores)
while ((1 << bits) < c->x86_max_cores)
bits++;
cpu_core_id[cpu] = phys_proc_id[cpu] & ((1<<bits)-1);
phys_proc_id[cpu] >>= bits;
printk(KERN_INFO "CPU %d(%d) -> Core %d\n",
cpu, c->x86_num_cores, cpu_core_id[cpu]);
cpu, c->x86_max_cores, cpu_core_id[cpu]);
}
#endif
}
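To make the bit arithmetic concrete, a worked example with invented values
for a dual-core package:

	/* Illustration only: x86_max_cores = 2, so bits = 1.
	 * For an initial APIC id of 3 (binary 11):
	 *   cpu_core_id[cpu]  = 3 & ((1 << 1) - 1) = 1  (low bit picks the core)
	 *   phys_proc_id[cpu] = 3 >> 1             = 1  (the rest names the package)
	 */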


@ -231,10 +231,10 @@ static void __init early_cpu_detect(void)
cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
c->x86 = (tfms >> 8) & 15;
c->x86_model = (tfms >> 4) & 15;
if (c->x86 == 0xf) {
if (c->x86 == 0xf)
c->x86 += (tfms >> 20) & 0xff;
if (c->x86 >= 0x6)
c->x86_model += ((tfms >> 16) & 0xF) << 4;
}
c->x86_mask = tfms & 15;
if (cap0 & (1<<19))
c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8;
@ -333,7 +333,7 @@ void __devinit identify_cpu(struct cpuinfo_x86 *c)
c->x86_model = c->x86_mask = 0; /* So far unknown... */
c->x86_vendor_id[0] = '\0'; /* Unset */
c->x86_model_id[0] = '\0'; /* Unset */
c->x86_num_cores = 1;
c->x86_max_cores = 1;
memset(&c->x86_capability, 0, sizeof c->x86_capability);
if (!have_cpuid_p()) {
@ -443,52 +443,44 @@ void __devinit identify_cpu(struct cpuinfo_x86 *c)
void __devinit detect_ht(struct cpuinfo_x86 *c)
{
u32 eax, ebx, ecx, edx;
int index_msb, tmp;
int index_msb, core_bits;
int cpu = smp_processor_id();
cpuid(1, &eax, &ebx, &ecx, &edx);
c->apicid = phys_pkg_id((ebx >> 24) & 0xFF, 0);
if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
return;
cpuid(1, &eax, &ebx, &ecx, &edx);
smp_num_siblings = (ebx & 0xff0000) >> 16;
if (smp_num_siblings == 1) {
printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
} else if (smp_num_siblings > 1 ) {
index_msb = 31;
if (smp_num_siblings > NR_CPUS) {
printk(KERN_WARNING "CPU: Unsupported number of the siblings %d", smp_num_siblings);
smp_num_siblings = 1;
return;
}
tmp = smp_num_siblings;
while ((tmp & 0x80000000 ) == 0) {
tmp <<=1 ;
index_msb--;
}
if (smp_num_siblings & (smp_num_siblings - 1))
index_msb++;
index_msb = get_count_order(smp_num_siblings);
phys_proc_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb);
printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
phys_proc_id[cpu]);
smp_num_siblings = smp_num_siblings / c->x86_num_cores;
smp_num_siblings = smp_num_siblings / c->x86_max_cores;
tmp = smp_num_siblings;
index_msb = 31;
while ((tmp & 0x80000000) == 0) {
tmp <<=1 ;
index_msb--;
}
index_msb = get_count_order(smp_num_siblings) ;
if (smp_num_siblings & (smp_num_siblings - 1))
index_msb++;
core_bits = get_count_order(c->x86_max_cores);
cpu_core_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb);
cpu_core_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb) &
((1 << core_bits) - 1);
if (c->x86_num_cores > 1)
if (c->x86_max_cores > 1)
printk(KERN_INFO "CPU: Processor Core ID: %d\n",
cpu_core_id[cpu]);
}
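get_count_order(n) returns the smallest order with (1 << order) >= n, which
is exactly what the removed open-coded loops computed; a worked example with
invented values:

	/* Illustration: one package with 2 cores x 2 HT threads, so CPUID
	 * reports smp_num_siblings = 4.
	 *   index_msb = get_count_order(4) = 2   -> phys_proc_id = apicid >> 2
	 *   smp_num_siblings /= x86_max_cores    -> 4 / 2 = 2 threads per core
	 *   index_msb = get_count_order(2) = 1
	 *   core_bits = get_count_order(2) = 1
	 *   cpu_core_id = (apicid >> 1) & ((1 << 1) - 1)
	 */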


@ -158,7 +158,7 @@ static void __devinit init_intel(struct cpuinfo_x86 *c)
if ( p )
strcpy(c->x86_model_id, p);
c->x86_num_cores = num_cpu_cores(c);
c->x86_max_cores = num_cpu_cores(c);
detect_ht(c);


@ -293,29 +293,45 @@ static struct _cpuid4_info *cpuid4_info[NR_CPUS];
#ifdef CONFIG_SMP
static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
{
struct _cpuid4_info *this_leaf;
struct _cpuid4_info *this_leaf, *sibling_leaf;
unsigned long num_threads_sharing;
#ifdef CONFIG_X86_HT
struct cpuinfo_x86 *c = cpu_data + cpu;
#endif
int index_msb, i;
struct cpuinfo_x86 *c = cpu_data;
this_leaf = CPUID4_INFO_IDX(cpu, index);
num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;
if (num_threads_sharing == 1)
cpu_set(cpu, this_leaf->shared_cpu_map);
#ifdef CONFIG_X86_HT
else if (num_threads_sharing == smp_num_siblings)
this_leaf->shared_cpu_map = cpu_sibling_map[cpu];
else if (num_threads_sharing == (c->x86_num_cores * smp_num_siblings))
this_leaf->shared_cpu_map = cpu_core_map[cpu];
else
printk(KERN_DEBUG "Number of CPUs sharing cache didn't match "
"any known set of CPUs\n");
#endif
else {
index_msb = get_count_order(num_threads_sharing);
for_each_online_cpu(i) {
if (c[i].apicid >> index_msb ==
c[cpu].apicid >> index_msb) {
cpu_set(i, this_leaf->shared_cpu_map);
if (i != cpu && cpuid4_info[i]) {
sibling_leaf = CPUID4_INFO_IDX(i, index);
cpu_set(cpu, sibling_leaf->shared_cpu_map);
}
}
}
}
}
static void __devinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
{
struct _cpuid4_info *this_leaf, *sibling_leaf;
int sibling;
this_leaf = CPUID4_INFO_IDX(cpu, index);
for_each_cpu_mask(sibling, this_leaf->shared_cpu_map) {
sibling_leaf = CPUID4_INFO_IDX(sibling, index);
cpu_clear(cpu, sibling_leaf->shared_cpu_map);
}
}
#else
static void __init cache_shared_cpu_map_setup(unsigned int cpu, int index) {}
static void __init cache_remove_shared_cpu_map(unsigned int cpu, int index) {}
#endif
static void free_cache_attributes(unsigned int cpu)
@ -574,8 +590,10 @@ static void __cpuexit cache_remove_dev(struct sys_device * sys_dev)
unsigned int cpu = sys_dev->id;
unsigned long i;
for (i = 0; i < num_cache_leaves; i++)
for (i = 0; i < num_cache_leaves; i++) {
cache_remove_shared_cpu_map(cpu, i);
kobject_unregister(&(INDEX_KOBJECT_PTR(cpu,i)->kobj));
}
kobject_unregister(cache_kobject[cpu]);
cpuid4_cache_sysfs_exit(cpu);
return;
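The sharing test above keys off APIC-id prefixes; a worked example with
invented values:

	/* Illustration: a cache level reports num_threads_sharing = 2, so
	 * index_msb = get_count_order(2) = 1. CPUs with APIC ids 4 and 5
	 * satisfy (4 >> 1) == (5 >> 1), so each is set in the other's
	 * shared_cpu_map for this index, and cache_remove_shared_cpu_map()
	 * clears exactly those bits again on CPU removal. */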


@ -626,6 +626,14 @@ void __init mtrr_bp_init(void)
if (cpuid_eax(0x80000000) >= 0x80000008) {
u32 phys_addr;
phys_addr = cpuid_eax(0x80000008) & 0xff;
/* CPUID workaround for Intel 0F33/0F34 CPU */
if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
boot_cpu_data.x86 == 0xF &&
boot_cpu_data.x86_model == 0x3 &&
(boot_cpu_data.x86_mask == 0x3 ||
boot_cpu_data.x86_mask == 0x4))
phys_addr = 36;
size_or_mask = ~((1 << (phys_addr - PAGE_SHIFT)) - 1);
size_and_mask = ~size_or_mask & 0xfff00000;
} else if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR &&
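For the workaround value the masks work out as follows (PAGE_SHIFT assumed
to be 12):

	/* phys_addr = 36:
	 *   size_or_mask  = ~((1 << (36 - 12)) - 1)    = 0xff000000
	 *   size_and_mask = ~size_or_mask & 0xfff00000 = 0x00f00000
	 */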


@ -94,12 +94,11 @@ static int show_cpuinfo(struct seq_file *m, void *v)
if (c->x86_cache_size >= 0)
seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
#ifdef CONFIG_X86_HT
if (c->x86_num_cores * smp_num_siblings > 1) {
if (c->x86_max_cores * smp_num_siblings > 1) {
seq_printf(m, "physical id\t: %d\n", phys_proc_id[n]);
seq_printf(m, "siblings\t: %d\n",
c->x86_num_cores * smp_num_siblings);
seq_printf(m, "siblings\t: %d\n", cpus_weight(cpu_core_map[n]));
seq_printf(m, "core id\t\t: %d\n", cpu_core_id[n]);
seq_printf(m, "cpu cores\t: %d\n", c->x86_num_cores);
seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
}
#endif


@ -21,6 +21,7 @@
#include <asm/hardirq.h>
#include <asm/nmi.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <mach_ipi.h>
@ -147,6 +148,7 @@ static int crash_nmi_callback(struct pt_regs *regs, int cpu)
regs = &fixed_regs;
}
crash_save_this_cpu(regs, cpu);
disable_local_APIC();
atomic_dec(&waiting_for_crash_ipi);
/* Assume hlt works */
halt();
@ -186,6 +188,7 @@ static void nmi_shootdown_cpus(void)
}
/* Leave the nmi callback set */
disable_local_APIC();
}
#else
static void nmi_shootdown_cpus(void)
@ -210,5 +213,9 @@ void machine_crash_shutdown(struct pt_regs *regs)
/* Make a note of crashing cpu. Will be used in NMI callback.*/
crashing_cpu = smp_processor_id();
nmi_shootdown_cpus();
lapic_shutdown();
#if defined(CONFIG_X86_IO_APIC)
disable_IO_APIC();
#endif
crash_save_self(regs);
}


@ -72,9 +72,11 @@ int phys_proc_id[NR_CPUS] __read_mostly = {[0 ... NR_CPUS-1] = BAD_APICID};
/* Core ID of each logical CPU */
int cpu_core_id[NR_CPUS] __read_mostly = {[0 ... NR_CPUS-1] = BAD_APICID};
/* representing HT siblings of each logical CPU */
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);
/* representing HT and core siblings of each logical CPU */
cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_core_map);
@ -442,35 +444,60 @@ static void __devinit smp_callin(void)
static int cpucount;
/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;
static inline void
set_cpu_sibling_map(int cpu)
{
int i;
struct cpuinfo_x86 *c = cpu_data;
cpu_set(cpu, cpu_sibling_setup_map);
if (smp_num_siblings > 1) {
for (i = 0; i < NR_CPUS; i++) {
if (!cpu_isset(i, cpu_callout_map))
continue;
if (cpu_core_id[cpu] == cpu_core_id[i]) {
for_each_cpu_mask(i, cpu_sibling_setup_map) {
if (phys_proc_id[cpu] == phys_proc_id[i] &&
cpu_core_id[cpu] == cpu_core_id[i]) {
cpu_set(i, cpu_sibling_map[cpu]);
cpu_set(cpu, cpu_sibling_map[i]);
cpu_set(i, cpu_core_map[cpu]);
cpu_set(cpu, cpu_core_map[i]);
}
}
} else {
cpu_set(cpu, cpu_sibling_map[cpu]);
}
if (current_cpu_data.x86_num_cores > 1) {
for (i = 0; i < NR_CPUS; i++) {
if (!cpu_isset(i, cpu_callout_map))
continue;
if (phys_proc_id[cpu] == phys_proc_id[i]) {
cpu_set(i, cpu_core_map[cpu]);
cpu_set(cpu, cpu_core_map[i]);
}
}
} else {
if (current_cpu_data.x86_max_cores == 1) {
cpu_core_map[cpu] = cpu_sibling_map[cpu];
c[cpu].booted_cores = 1;
return;
}
for_each_cpu_mask(i, cpu_sibling_setup_map) {
if (phys_proc_id[cpu] == phys_proc_id[i]) {
cpu_set(i, cpu_core_map[cpu]);
cpu_set(cpu, cpu_core_map[i]);
/*
* Does this new cpu bringup a new core?
*/
if (cpus_weight(cpu_sibling_map[cpu]) == 1) {
/*
* for each core in package, increment
* the booted_cores for this new cpu
*/
if (first_cpu(cpu_sibling_map[i]) == i)
c[cpu].booted_cores++;
/*
* increment the core count for all
* the other cpus in this package
*/
if (i != cpu)
c[i].booted_cores++;
} else if (i != cpu && !c[cpu].booted_cores)
c[cpu].booted_cores = c[i].booted_cores;
}
}
}
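A worked walk-through of the booted_cores accounting, with invented values
for a 2-core/2-thread package whose cpus come up in order 0, 1, 2 (threads
0 and 1 share core 0; cpu 2 is the first thread of core 1):

	/* cpu 0: its sibling map holds only itself (weight 1) and it is the
	 *        first cpu of that map, so c[0].booted_cores becomes 1.
	 * cpu 1: joins cpu 0's sibling map (weight 2), takes the else branch
	 *        and inherits c[1].booted_cores = c[0].booted_cores = 1.
	 * cpu 2: its sibling map has weight 1 (a new core), so it counts one
	 *        core per sibling-map leader in the package (cpu 0 and itself,
	 *        giving 2) and bumps c[0] and c[1] to 2 as well.
	 */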
@ -1095,11 +1122,8 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
current_thread_info()->cpu = 0;
smp_tune_scheduling();
cpus_clear(cpu_sibling_map[0]);
cpu_set(0, cpu_sibling_map[0]);
cpus_clear(cpu_core_map[0]);
cpu_set(0, cpu_core_map[0]);
set_cpu_sibling_map(0);
/*
* If we couldn't find an SMP configuration at boot time,
@ -1278,15 +1302,24 @@ static void
remove_siblinginfo(int cpu)
{
int sibling;
struct cpuinfo_x86 *c = cpu_data;
for_each_cpu_mask(sibling, cpu_core_map[cpu]) {
cpu_clear(cpu, cpu_core_map[sibling]);
/*
* last thread sibling in this cpu core going down
*/
if (cpus_weight(cpu_sibling_map[cpu]) == 1)
c[sibling].booted_cores--;
}
for_each_cpu_mask(sibling, cpu_sibling_map[cpu])
cpu_clear(cpu, cpu_sibling_map[sibling]);
for_each_cpu_mask(sibling, cpu_core_map[cpu])
cpu_clear(cpu, cpu_core_map[sibling]);
cpus_clear(cpu_sibling_map[cpu]);
cpus_clear(cpu_core_map[cpu]);
phys_proc_id[cpu] = BAD_APICID;
cpu_core_id[cpu] = BAD_APICID;
cpu_clear(cpu, cpu_sibling_setup_map);
}
int __cpu_disable(void)


@ -137,8 +137,8 @@ static void __init parse_memory_affinity_structure (char *sratp)
"enabled and removable" : "enabled" ) );
}
#if MAX_NR_ZONES != 3
#error "MAX_NR_ZONES != 3, chunk_to_zone requires review"
#if MAX_NR_ZONES != 4
#error "MAX_NR_ZONES != 4, chunk_to_zone requires review"
#endif
/* Take a chunk of pages from page frame cstart to cend and count the number
* of pages in each zone, returned via zones[].


@ -58,6 +58,10 @@ config IA64_UNCACHED_ALLOCATOR
bool
select GENERIC_ALLOCATOR
config ZONE_DMA_IS_DMA32
bool
default y
choice
prompt "System type"
default IA64_GENERIC


@ -202,12 +202,9 @@ default_idle (void)
{
local_irq_enable();
while (!need_resched()) {
if (can_do_pal_halt) {
local_irq_disable();
if (!need_resched())
safe_halt();
local_irq_enable();
} else
if (can_do_pal_halt)
safe_halt();
else
cpu_relax();
}
}
@ -272,10 +269,14 @@ cpu_idle (void)
{
void (*mark_idle)(int) = ia64_mark_idle;
int cpu = smp_processor_id();
set_thread_flag(TIF_POLLING_NRFLAG);
/* endless idle loop with no priority at all */
while (1) {
if (can_do_pal_halt)
clear_thread_flag(TIF_POLLING_NRFLAG);
else
set_thread_flag(TIF_POLLING_NRFLAG);
if (!need_resched()) {
void (*idle)(void);
#ifdef CONFIG_SMP


@ -261,7 +261,7 @@ config PPC_ISERIES
config EMBEDDED6xx
bool "Embedded 6xx/7xx/7xxx-based board"
depends on PPC32
depends on PPC32 && BROKEN
config APUS
bool "Amiga-APUS"
@ -305,7 +305,7 @@ config PPC_PMAC64
config PPC_PREP
bool " PowerPC Reference Platform (PReP) based machines"
depends on PPC_MULTIPLATFORM && PPC32
depends on PPC_MULTIPLATFORM && PPC32 && BROKEN
select PPC_I8259
select PPC_INDIRECT_PCI
default y
@ -932,6 +932,7 @@ source "arch/powerpc/oprofile/Kconfig"
config KPROBES
bool "Kprobes (EXPERIMENTAL)"
depends on PPC64
help
Kprobes allows you to trap at almost any kernel address and
execute a callback function. register_kprobe() establishes


@ -187,7 +187,7 @@ archprepare: checkbin
# Temporary hack until we have migrated to asm-powerpc
include/asm: arch/$(ARCH)/include/asm
arch/$(ARCH)/include/asm:
arch/$(ARCH)/include/asm: FORCE
$(Q)if [ ! -d arch/$(ARCH)/include ]; then mkdir -p arch/$(ARCH)/include; fi
$(Q)ln -fsn $(srctree)/include/asm-$(OLDARCH) arch/$(ARCH)/include/asm


@ -1,18 +1,33 @@
#
# Automatically generated make config: don't edit
# Linux kernel version: 2.6.14-rc4
# Thu Oct 20 08:32:17 2005
# Linux kernel version: 2.6.15-rc1
# Mon Nov 14 15:27:00 2005
#
CONFIG_PPC64=y
CONFIG_64BIT=y
CONFIG_PPC_MERGE=y
CONFIG_MMU=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_RWSEM_XCHGADD_ALGORITHM=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
CONFIG_GENERIC_ISA_DMA=y
CONFIG_PPC=y
CONFIG_EARLY_PRINTK=y
CONFIG_COMPAT=y
CONFIG_SYSVIPC_COMPAT=y
CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
CONFIG_ARCH_MAY_HAVE_PC_FDC=y
CONFIG_FORCE_MAX_ZONEORDER=13
#
# Processor support
#
# CONFIG_POWER4_ONLY is not set
CONFIG_POWER3=y
CONFIG_POWER4=y
CONFIG_PPC_FPU=y
CONFIG_ALTIVEC=y
CONFIG_PPC_STD_MMU=y
CONFIG_SMP=y
CONFIG_NR_CPUS=128
#
# Code maturity level options
@ -68,75 +83,103 @@ CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
CONFIG_KMOD=y
CONFIG_STOP_MACHINE=y
CONFIG_SYSVIPC_COMPAT=y
#
# Block layer
#
#
# IO Schedulers
#
CONFIG_IOSCHED_NOOP=y
CONFIG_IOSCHED_AS=y
CONFIG_IOSCHED_DEADLINE=y
CONFIG_IOSCHED_CFQ=y
CONFIG_DEFAULT_AS=y
# CONFIG_DEFAULT_DEADLINE is not set
# CONFIG_DEFAULT_CFQ is not set
# CONFIG_DEFAULT_NOOP is not set
CONFIG_DEFAULT_IOSCHED="anticipatory"
#
# Platform support
#
# CONFIG_PPC_ISERIES is not set
CONFIG_PPC_MULTIPLATFORM=y
# CONFIG_PPC_ISERIES is not set
# CONFIG_EMBEDDED6xx is not set
# CONFIG_APUS is not set
CONFIG_PPC_PSERIES=y
# CONFIG_PPC_BPA is not set
# CONFIG_PPC_PMAC is not set
# CONFIG_PPC_MAPLE is not set
CONFIG_PPC=y
CONFIG_PPC64=y
# CONFIG_PPC_CELL is not set
CONFIG_PPC_OF=y
CONFIG_XICS=y
CONFIG_MPIC=y
CONFIG_ALTIVEC=y
CONFIG_PPC_SPLPAR=y
CONFIG_KEXEC=y
CONFIG_IBMVIO=y
# CONFIG_U3_DART is not set
# CONFIG_BOOTX_TEXT is not set
# CONFIG_POWER4_ONLY is not set
CONFIG_IOMMU_VMERGE=y
CONFIG_SMP=y
CONFIG_NR_CPUS=128
CONFIG_ARCH_SELECT_MEMORY_MODEL=y
CONFIG_ARCH_FLATMEM_ENABLE=y
CONFIG_ARCH_DISCONTIGMEM_ENABLE=y
CONFIG_ARCH_DISCONTIGMEM_DEFAULT=y
CONFIG_ARCH_SPARSEMEM_ENABLE=y
CONFIG_SELECT_MEMORY_MODEL=y
# CONFIG_FLATMEM_MANUAL is not set
CONFIG_DISCONTIGMEM_MANUAL=y
# CONFIG_SPARSEMEM_MANUAL is not set
CONFIG_DISCONTIGMEM=y
CONFIG_FLAT_NODE_MEM_MAP=y
CONFIG_NEED_MULTIPLE_NODES=y
# CONFIG_SPARSEMEM_STATIC is not set
CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID=y
CONFIG_NODES_SPAN_OTHER_NODES=y
CONFIG_NUMA=y
CONFIG_SCHED_SMT=y
CONFIG_PREEMPT_NONE=y
# CONFIG_PREEMPT_VOLUNTARY is not set
# CONFIG_PREEMPT is not set
# CONFIG_PREEMPT_BKL is not set
CONFIG_MPIC=y
CONFIG_PPC_RTAS=y
CONFIG_RTAS_ERROR_LOGGING=y
CONFIG_RTAS_PROC=y
CONFIG_RTAS_FLASH=m
# CONFIG_MMIO_NVRAM is not set
CONFIG_IBMVIO=y
# CONFIG_PPC_MPC106 is not set
# CONFIG_GENERIC_TBSYNC is not set
# CONFIG_CPU_FREQ is not set
# CONFIG_WANT_EARLY_SERIAL is not set
#
# Kernel options
#
# CONFIG_HZ_100 is not set
CONFIG_HZ_250=y
# CONFIG_HZ_1000 is not set
CONFIG_HZ=250
CONFIG_EEH=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_PPC_RTAS=y
CONFIG_RTAS_PROC=y
CONFIG_RTAS_FLASH=m
CONFIG_SCANLOG=m
CONFIG_LPARCFG=y
CONFIG_SECCOMP=y
CONFIG_PREEMPT_NONE=y
# CONFIG_PREEMPT_VOLUNTARY is not set
# CONFIG_PREEMPT is not set
# CONFIG_PREEMPT_BKL is not set
CONFIG_BINFMT_ELF=y
# CONFIG_BINFMT_MISC is not set
CONFIG_FORCE_MAX_ZONEORDER=13
CONFIG_IOMMU_VMERGE=y
CONFIG_HOTPLUG_CPU=y
CONFIG_KEXEC=y
# CONFIG_IRQ_ALL_CPUS is not set
CONFIG_PPC_SPLPAR=y
CONFIG_EEH=y
CONFIG_SCANLOG=m
CONFIG_LPARCFG=y
CONFIG_NUMA=y
CONFIG_ARCH_SELECT_MEMORY_MODEL=y
CONFIG_ARCH_SPARSEMEM_ENABLE=y
CONFIG_ARCH_SPARSEMEM_DEFAULT=y
CONFIG_SELECT_MEMORY_MODEL=y
# CONFIG_FLATMEM_MANUAL is not set
# CONFIG_DISCONTIGMEM_MANUAL is not set
CONFIG_SPARSEMEM_MANUAL=y
CONFIG_SPARSEMEM=y
CONFIG_NEED_MULTIPLE_NODES=y
CONFIG_HAVE_MEMORY_PRESENT=y
# CONFIG_SPARSEMEM_STATIC is not set
CONFIG_SPARSEMEM_EXTREME=y
# CONFIG_MEMORY_HOTPLUG is not set
CONFIG_SPLIT_PTLOCK_CPUS=4096
CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID=y
CONFIG_NODES_SPAN_OTHER_NODES=y
# CONFIG_PPC_64K_PAGES is not set
CONFIG_SCHED_SMT=y
CONFIG_PROC_DEVICETREE=y
# CONFIG_CMDLINE_BOOL is not set
# CONFIG_PM is not set
CONFIG_SECCOMP=y
CONFIG_ISA_DMA_API=y
#
# Bus Options
# Bus options
#
CONFIG_GENERIC_ISA_DMA=y
CONFIG_PPC_I8259=y
# CONFIG_PPC_INDIRECT_PCI is not set
CONFIG_PCI=y
CONFIG_PCI_DOMAINS=y
CONFIG_PCI_LEGACY_PROC=y
@ -156,6 +199,7 @@ CONFIG_HOTPLUG_PCI=m
# CONFIG_HOTPLUG_PCI_SHPC is not set
CONFIG_HOTPLUG_PCI_RPA=m
CONFIG_HOTPLUG_PCI_RPA_DLPAR=m
CONFIG_KERNEL_START=0xc000000000000000
#
# Networking
@ -197,6 +241,10 @@ CONFIG_TCP_CONG_BIC=y
# CONFIG_IPV6 is not set
CONFIG_NETFILTER=y
# CONFIG_NETFILTER_DEBUG is not set
#
# Core Netfilter Configuration
#
CONFIG_NETFILTER_NETLINK=y
CONFIG_NETFILTER_NETLINK_QUEUE=m
CONFIG_NETFILTER_NETLINK_LOG=m
@ -299,6 +347,10 @@ CONFIG_LLC=y
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
#
# QoS and/or fair queueing
#
# CONFIG_NET_SCHED is not set
CONFIG_NET_CLS_ROUTE=y
@ -368,14 +420,6 @@ CONFIG_BLK_DEV_RAM_COUNT=16
CONFIG_BLK_DEV_RAM_SIZE=65536
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CDROM_PKTCDVD is not set
#
# IO Schedulers
#
CONFIG_IOSCHED_NOOP=y
CONFIG_IOSCHED_AS=y
CONFIG_IOSCHED_DEADLINE=y
CONFIG_IOSCHED_CFQ=y
# CONFIG_ATA_OVER_ETH is not set
#
@ -473,6 +517,7 @@ CONFIG_SCSI_ISCSI_ATTRS=m
#
# SCSI low-level drivers
#
# CONFIG_ISCSI_TCP is not set
# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
# CONFIG_SCSI_3W_9XXX is not set
# CONFIG_SCSI_ACARD is not set
@ -559,6 +604,7 @@ CONFIG_DM_MULTIPATH_EMC=m
#
# Macintosh device drivers
#
# CONFIG_WINDFARM is not set
#
# Network device support
@ -645,7 +691,6 @@ CONFIG_IXGB=m
# CONFIG_IXGB_NAPI is not set
CONFIG_S2IO=m
# CONFIG_S2IO_NAPI is not set
# CONFIG_2BUFF_MODE is not set
#
# Token Ring devices
@ -674,6 +719,7 @@ CONFIG_PPP_ASYNC=m
CONFIG_PPP_SYNC_TTY=m
CONFIG_PPP_DEFLATE=m
CONFIG_PPP_BSDCOMP=m
# CONFIG_PPP_MPPE is not set
CONFIG_PPPOE=m
# CONFIG_SLIP is not set
# CONFIG_NET_FC is not set
@ -784,6 +830,8 @@ CONFIG_HVCS=m
#
# CONFIG_WATCHDOG is not set
# CONFIG_RTC is not set
CONFIG_GEN_RTC=y
# CONFIG_GEN_RTC_X is not set
# CONFIG_DTLK is not set
# CONFIG_R3964 is not set
# CONFIG_APPLICOM is not set
@ -801,6 +849,7 @@ CONFIG_MAX_RAW_DEVS=1024
# TPM devices
#
# CONFIG_TCG_TPM is not set
# CONFIG_TELCLOCK is not set
#
# I2C support
@ -852,6 +901,7 @@ CONFIG_I2C_ALGOBIT=y
# CONFIG_SENSORS_PCF8591 is not set
# CONFIG_SENSORS_RTC8564 is not set
# CONFIG_SENSORS_MAX6875 is not set
# CONFIG_RTC_X1205_I2C is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
# CONFIG_I2C_DEBUG_BUS is not set
@ -893,7 +943,6 @@ CONFIG_FB=y
CONFIG_FB_CFB_FILLRECT=y
CONFIG_FB_CFB_COPYAREA=y
CONFIG_FB_CFB_IMAGEBLIT=y
CONFIG_FB_SOFT_CURSOR=y
CONFIG_FB_MACMODES=y
CONFIG_FB_MODE_HELPERS=y
CONFIG_FB_TILEBLITTING=y
@ -905,6 +954,7 @@ CONFIG_FB_OF=y
# CONFIG_FB_ASILIANT is not set
# CONFIG_FB_IMSTT is not set
# CONFIG_FB_VGA16 is not set
# CONFIG_FB_S1D13XXX is not set
# CONFIG_FB_NVIDIA is not set
# CONFIG_FB_RIVA is not set
CONFIG_FB_MATROX=y
@ -927,7 +977,6 @@ CONFIG_FB_RADEON_I2C=y
# CONFIG_FB_VOODOO1 is not set
# CONFIG_FB_CYBLA is not set
# CONFIG_FB_TRIDENT is not set
# CONFIG_FB_S1D13XXX is not set
# CONFIG_FB_VIRTUAL is not set
#
@ -936,6 +985,7 @@ CONFIG_FB_RADEON_I2C=y
# CONFIG_VGA_CONSOLE is not set
CONFIG_DUMMY_CONSOLE=y
CONFIG_FRAMEBUFFER_CONSOLE=y
# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
# CONFIG_FONTS is not set
CONFIG_FONT_8x8=y
CONFIG_FONT_8x16=y
@ -990,12 +1040,15 @@ CONFIG_USB_OHCI_LITTLE_ENDIAN=y
#
# USB Device Class drivers
#
# CONFIG_USB_BLUETOOTH_TTY is not set
# CONFIG_USB_ACM is not set
# CONFIG_USB_PRINTER is not set
#
# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' may also be needed; see USB_STORAGE Help for more information
# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
#
#
# may also be needed; see USB_STORAGE Help for more information
#
CONFIG_USB_STORAGE=y
# CONFIG_USB_STORAGE_DEBUG is not set
@ -1106,6 +1159,7 @@ CONFIG_INFINIBAND_MTHCA=m
# CONFIG_INFINIBAND_MTHCA_DEBUG is not set
CONFIG_INFINIBAND_IPOIB=m
# CONFIG_INFINIBAND_IPOIB_DEBUG is not set
# CONFIG_INFINIBAND_SRP is not set
#
# SN Devices
@ -1288,10 +1342,25 @@ CONFIG_NLS_ISO8859_1=y
# CONFIG_NLS_UTF8 is not set
#
# Profiling support
# Library routines
#
CONFIG_CRC_CCITT=m
# CONFIG_CRC16 is not set
CONFIG_CRC32=y
CONFIG_LIBCRC32C=m
CONFIG_ZLIB_INFLATE=y
CONFIG_ZLIB_DEFLATE=m
CONFIG_TEXTSEARCH=y
CONFIG_TEXTSEARCH_KMP=m
CONFIG_TEXTSEARCH_BM=m
CONFIG_TEXTSEARCH_FSM=m
#
# Instrumentation Support
#
CONFIG_PROFILING=y
CONFIG_OPROFILE=y
# CONFIG_KPROBES is not set
#
# Kernel hacking
@ -1308,14 +1377,15 @@ CONFIG_DETECT_SOFTLOCKUP=y
# CONFIG_DEBUG_KOBJECT is not set
# CONFIG_DEBUG_INFO is not set
CONFIG_DEBUG_FS=y
# CONFIG_DEBUG_VM is not set
# CONFIG_RCU_TORTURE_TEST is not set
CONFIG_DEBUG_STACKOVERFLOW=y
# CONFIG_KPROBES is not set
CONFIG_DEBUG_STACK_USAGE=y
CONFIG_DEBUGGER=y
CONFIG_XMON=y
CONFIG_XMON_DEFAULT=y
# CONFIG_PPCDBG is not set
CONFIG_IRQSTACKS=y
# CONFIG_BOOTX_TEXT is not set
#
# Security options
@ -1355,17 +1425,3 @@ CONFIG_CRYPTO_TEST=m
#
# Hardware crypto devices
#
#
# Library routines
#
CONFIG_CRC_CCITT=m
# CONFIG_CRC16 is not set
CONFIG_CRC32=y
CONFIG_LIBCRC32C=m
CONFIG_ZLIB_INFLATE=y
CONFIG_ZLIB_DEFLATE=m
CONFIG_TEXTSEARCH=y
CONFIG_TEXTSEARCH_KMP=m
CONFIG_TEXTSEARCH_BM=m
CONFIG_TEXTSEARCH_FSM=m


@ -25,7 +25,7 @@ obj-$(CONFIG_PPC_OF) += of_device.o
procfs-$(CONFIG_PPC64) := proc_ppc64.o
obj-$(CONFIG_PROC_FS) += $(procfs-y)
rtaspci-$(CONFIG_PPC64) := rtas_pci.o
obj-$(CONFIG_PPC_RTAS) += rtas.o $(rtaspci-y)
obj-$(CONFIG_PPC_RTAS) += rtas.o rtas-rtc.o $(rtaspci-y)
obj-$(CONFIG_RTAS_FLASH) += rtas_flash.o
obj-$(CONFIG_RTAS_PROC) += rtas-proc.o
obj-$(CONFIG_LPARCFG) += lparcfg.o
@ -49,12 +49,23 @@ extra-y += vmlinux.lds
obj-y += process.o init_task.o time.o \
prom.o traps.o setup-common.o
obj-$(CONFIG_PPC32) += entry_32.o setup_32.o misc_32.o systbl.o
obj-$(CONFIG_PPC64) += misc_64.o
obj-$(CONFIG_PPC64) += misc_64.o dma_64.o iommu.o
obj-$(CONFIG_PPC_OF) += prom_init.o
obj-$(CONFIG_MODULES) += ppc_ksyms.o
obj-$(CONFIG_BOOTX_TEXT) += btext.o
obj-$(CONFIG_6xx) += idle_6xx.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_KPROBES) += kprobes.o
module-$(CONFIG_PPC64) += module_64.o
obj-$(CONFIG_MODULES) += $(module-y)
pci64-$(CONFIG_PPC64) += pci_64.o pci_dn.o pci_iommu.o \
pci_direct_iommu.o iomap.o
obj-$(CONFIG_PCI) += $(pci64-y)
kexec64-$(CONFIG_PPC64) += machine_kexec_64.o
obj-$(CONFIG_KEXEC) += $(kexec64-y)
ifeq ($(CONFIG_PPC_ISERIES),y)
$(obj)/head_64.o: $(obj)/lparmap.s
@ -62,11 +73,8 @@ AFLAGS_head_64.o += -I$(obj)
endif
else
# stuff used from here for ARCH=ppc or ARCH=ppc64
# stuff used from here for ARCH=ppc
smpobj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_PPC64) += traps.o process.o init_task.o time.o \
setup-common.o $(smpobj-y)
endif


@ -270,13 +270,15 @@ int main(void)
DEFINE(TVAL64_TV_USEC, offsetof(struct timeval, tv_usec));
DEFINE(TVAL32_TV_SEC, offsetof(struct compat_timeval, tv_sec));
DEFINE(TVAL32_TV_USEC, offsetof(struct compat_timeval, tv_usec));
DEFINE(TSPC64_TV_SEC, offsetof(struct timespec, tv_sec));
DEFINE(TSPC64_TV_NSEC, offsetof(struct timespec, tv_nsec));
DEFINE(TSPC32_TV_SEC, offsetof(struct compat_timespec, tv_sec));
DEFINE(TSPC32_TV_NSEC, offsetof(struct compat_timespec, tv_nsec));
#else
DEFINE(TVAL32_TV_SEC, offsetof(struct timeval, tv_sec));
DEFINE(TVAL32_TV_USEC, offsetof(struct timeval, tv_usec));
DEFINE(TSPEC32_TV_SEC, offsetof(struct timespec, tv_sec));
DEFINE(TSPEC32_TV_NSEC, offsetof(struct timespec, tv_nsec));
DEFINE(TSPC32_TV_SEC, offsetof(struct timespec, tv_sec));
DEFINE(TSPC32_TV_NSEC, offsetof(struct timespec, tv_nsec));
#endif
/* timeval/timezone offsets for use by vdso */
DEFINE(TZONE_TZ_MINWEST, offsetof(struct timezone, tz_minuteswest));


@ -71,6 +71,11 @@
#include <asm/paca.h>
#endif
int __irq_offset_value;
#ifdef CONFIG_PPC32
EXPORT_SYMBOL(__irq_offset_value);
#endif
static int ppc_spurious_interrupts;
#if defined(CONFIG_PPC_ISERIES) && defined(CONFIG_SMP)
@ -98,7 +103,6 @@ extern atomic_t ipi_sent;
EXPORT_SYMBOL(irq_desc);
int distribute_irqs = 1;
int __irq_offset_value;
u64 ppc64_interrupt_controller;
#endif /* CONFIG_PPC64 */
@ -311,7 +315,6 @@ void __init init_IRQ(void)
}
#ifdef CONFIG_PPC64
#ifndef CONFIG_PPC_ISERIES
/*
* Virtual IRQ mapping code, used on systems with XICS interrupt controllers.
*/
@ -420,8 +423,6 @@ unsigned int real_irq_to_virt_slowpath(unsigned int real_irq)
}
#endif /* CONFIG_PPC_ISERIES */
#ifdef CONFIG_IRQSTACKS
struct thread_info *softirq_ctx[NR_CPUS];
struct thread_info *hardirq_ctx[NR_CPUS];


@ -42,32 +42,6 @@
/* #define LPARCFG_DEBUG */
/* find a better place for this function... */
static void log_plpar_hcall_return(unsigned long rc, char *tag)
{
if (rc == 0) /* success, return */
return;
/* check for null tag ? */
if (rc == H_Hardware)
printk(KERN_INFO
"plpar-hcall (%s) failed with hardware fault\n", tag);
else if (rc == H_Function)
printk(KERN_INFO
"plpar-hcall (%s) failed; function not allowed\n", tag);
else if (rc == H_Authority)
printk(KERN_INFO
"plpar-hcall (%s) failed; not authorized to this function\n",
tag);
else if (rc == H_Parameter)
printk(KERN_INFO "plpar-hcall (%s) failed; Bad parameter(s)\n",
tag);
else
printk(KERN_INFO
"plpar-hcall (%s) failed with unexpected rc(0x%lx)\n",
tag, rc);
}
static struct proc_dir_entry *proc_ppc64_lparcfg;
#define LPARCFG_BUFF_SIZE 4096
@ -172,6 +146,31 @@ static int lparcfg_data(struct seq_file *m, void *v)
/*
* Methods used to fetch LPAR data when running on a pSeries platform.
*/
/* find a better place for this function... */
static void log_plpar_hcall_return(unsigned long rc, char *tag)
{
if (rc == 0) /* success, return */
return;
/* check for null tag ? */
if (rc == H_Hardware)
printk(KERN_INFO
"plpar-hcall (%s) failed with hardware fault\n", tag);
else if (rc == H_Function)
printk(KERN_INFO
"plpar-hcall (%s) failed; function not allowed\n", tag);
else if (rc == H_Authority)
printk(KERN_INFO
"plpar-hcall (%s) failed; not authorized to this function\n",
tag);
else if (rc == H_Parameter)
printk(KERN_INFO "plpar-hcall (%s) failed; Bad parameter(s)\n",
tag);
else
printk(KERN_INFO
"plpar-hcall (%s) failed with unexpected rc(0x%lx)\n",
tag, rc);
}
/*
* H_GET_PPP hcall returns info in 4 parms.


@ -185,8 +185,8 @@ void kexec_copy_flush(struct kimage *image)
*/
void kexec_smp_down(void *arg)
{
if (ppc_md.cpu_irq_down)
ppc_md.cpu_irq_down(1);
if (ppc_md.kexec_cpu_down)
ppc_md.kexec_cpu_down(0, 1);
local_irq_disable();
kexec_smp_wait();
@ -233,8 +233,8 @@ static void kexec_prepare_cpus(void)
}
/* after we tell the others to go down */
if (ppc_md.cpu_irq_down)
ppc_md.cpu_irq_down(0);
if (ppc_md.kexec_cpu_down)
ppc_md.kexec_cpu_down(0, 0);
put_cpu();
@ -255,8 +255,8 @@ static void kexec_prepare_cpus(void)
* UP to an SMP kernel.
*/
smp_release_cpus();
if (ppc_md.cpu_irq_down)
ppc_md.cpu_irq_down(0);
if (ppc_md.kexec_cpu_down)
ppc_md.kexec_cpu_down(0, 0);
local_irq_disable();
}
@ -305,3 +305,54 @@ void machine_kexec(struct kimage *image)
ppc_md.hpte_clear_all);
/* NOTREACHED */
}
/* Values we need to export to the second kernel via the device tree. */
static unsigned long htab_base, htab_size, kernel_end;
static struct property htab_base_prop = {
.name = "linux,htab-base",
.length = sizeof(unsigned long),
.value = (unsigned char *)&htab_base,
};
static struct property htab_size_prop = {
.name = "linux,htab-size",
.length = sizeof(unsigned long),
.value = (unsigned char *)&htab_size,
};
static struct property kernel_end_prop = {
.name = "linux,kernel-end",
.length = sizeof(unsigned long),
.value = (unsigned char *)&kernel_end,
};
static void __init export_htab_values(void)
{
struct device_node *node;
node = of_find_node_by_path("/chosen");
if (!node)
return;
kernel_end = __pa(_end);
prom_add_property(node, &kernel_end_prop);
/* On machines with no htab htab_address is NULL */
if (NULL == htab_address)
goto out;
htab_base = __pa(htab_address);
prom_add_property(node, &htab_base_prop);
htab_size = 1UL << ppc64_pft_size;
prom_add_property(node, &htab_size_prop);
out:
of_node_put(node);
}
void __init kexec_setup(void)
{
export_htab_values();
}


@ -105,6 +105,13 @@ EXPORT_SYMBOL(__clear_user);
EXPORT_SYMBOL(__strncpy_from_user);
EXPORT_SYMBOL(__strnlen_user);
#ifndef __powerpc64__
EXPORT_SYMBOL(__ide_mm_insl);
EXPORT_SYMBOL(__ide_mm_outsw);
EXPORT_SYMBOL(__ide_mm_insw);
EXPORT_SYMBOL(__ide_mm_outsl);
#endif
EXPORT_SYMBOL(_insb);
EXPORT_SYMBOL(_outsb);
EXPORT_SYMBOL(_insw);


@ -1368,6 +1368,7 @@ prom_n_addr_cells(struct device_node* np)
/* No #address-cells property for the root node, default to 1 */
return 1;
}
EXPORT_SYMBOL(prom_n_addr_cells);
int
prom_n_size_cells(struct device_node* np)
@ -1383,6 +1384,7 @@ prom_n_size_cells(struct device_node* np)
/* No #size-cells property for the root node, default to 1 */
return 1;
}
EXPORT_SYMBOL(prom_n_size_cells);
/**
* Work out the sense (active-low level / active-high edge)


@ -0,0 +1,105 @@
#include <linux/kernel.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/rtc.h>
#include <linux/delay.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/time.h>
#define MAX_RTC_WAIT 5000 /* 5 sec */
#define RTAS_CLOCK_BUSY (-2)
unsigned long __init rtas_get_boot_time(void)
{
int ret[8];
int error, wait_time;
unsigned long max_wait_tb;
max_wait_tb = get_tb() + tb_ticks_per_usec * 1000 * MAX_RTC_WAIT;
do {
error = rtas_call(rtas_token("get-time-of-day"), 0, 8, ret);
if (error == RTAS_CLOCK_BUSY || rtas_is_extended_busy(error)) {
wait_time = rtas_extended_busy_delay_time(error);
/* This is boot time so we spin. */
udelay(wait_time*1000);
error = RTAS_CLOCK_BUSY;
}
} while (error == RTAS_CLOCK_BUSY && (get_tb() < max_wait_tb));
if (error != 0 && printk_ratelimit()) {
printk(KERN_WARNING "error: reading the clock failed (%d)\n",
error);
return 0;
}
return mktime(ret[0], ret[1], ret[2], ret[3], ret[4], ret[5]);
}
/* NOTE: get_rtc_time will get an error if executed in interrupt context
* and if a delay is needed to read the clock. In this case we just
* silently return without updating rtc_tm.
*/
void rtas_get_rtc_time(struct rtc_time *rtc_tm)
{
int ret[8];
int error, wait_time;
unsigned long max_wait_tb;
max_wait_tb = get_tb() + tb_ticks_per_usec * 1000 * MAX_RTC_WAIT;
do {
error = rtas_call(rtas_token("get-time-of-day"), 0, 8, ret);
if (error == RTAS_CLOCK_BUSY || rtas_is_extended_busy(error)) {
if (in_interrupt() && printk_ratelimit()) {
memset(rtc_tm, 0, sizeof(struct rtc_time));
printk(KERN_WARNING "error: reading clock"
" would delay interrupt\n");
return; /* delay not allowed */
}
wait_time = rtas_extended_busy_delay_time(error);
msleep(wait_time);
error = RTAS_CLOCK_BUSY;
}
} while (error == RTAS_CLOCK_BUSY && (get_tb() < max_wait_tb));
if (error != 0 && printk_ratelimit()) {
printk(KERN_WARNING "error: reading the clock failed (%d)\n",
error);
return;
}
rtc_tm->tm_sec = ret[5];
rtc_tm->tm_min = ret[4];
rtc_tm->tm_hour = ret[3];
rtc_tm->tm_mday = ret[2];
rtc_tm->tm_mon = ret[1] - 1;
rtc_tm->tm_year = ret[0] - 1900;
}
int rtas_set_rtc_time(struct rtc_time *tm)
{
int error, wait_time;
unsigned long max_wait_tb;
max_wait_tb = get_tb() + tb_ticks_per_usec * 1000 * MAX_RTC_WAIT;
do {
error = rtas_call(rtas_token("set-time-of-day"), 7, 1, NULL,
tm->tm_year + 1900, tm->tm_mon + 1,
tm->tm_mday, tm->tm_hour, tm->tm_min,
tm->tm_sec, 0);
if (error == RTAS_CLOCK_BUSY || rtas_is_extended_busy(error)) {
if (in_interrupt())
return 1; /* probably decrementer */
wait_time = rtas_extended_busy_delay_time(error);
msleep(wait_time);
error = RTAS_CLOCK_BUSY;
}
} while (error == RTAS_CLOCK_BUSY && (get_tb() < max_wait_tb));
if (error != 0 && printk_ratelimit())
printk(KERN_WARNING "error: setting the clock failed (%d)\n",
error);
return 0;
}


@ -57,10 +57,6 @@ extern void power4_idle(void);
boot_infos_t *boot_infos;
struct ide_machdep_calls ppc_ide_md;
/* XXX should go elsewhere */
int __irq_offset_value;
EXPORT_SYMBOL(__irq_offset_value);
int boot_cpuid;
EXPORT_SYMBOL_GPL(boot_cpuid);
int boot_cpuid_phys;


@ -59,6 +59,7 @@
#include <asm/firmware.h>
#include <asm/xmon.h>
#include <asm/udbg.h>
#include <asm/kexec.h>
#include "setup.h"
@ -415,6 +416,10 @@ void __init setup_system(void)
*/
unflatten_device_tree();
#ifdef CONFIG_KEXEC
kexec_setup(); /* requires unflattened device tree. */
#endif
/*
* Fill the ppc64_caches & systemcfg structures with information
* retrieved from the device-tree. Needs to be called before


@ -403,8 +403,6 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
ELF_NFPREG * sizeof(double)))
return 1;
current->thread.fpscr.val = 0; /* turn off all fp exceptions */
#ifdef CONFIG_ALTIVEC
/* save altivec registers */
if (current->thread.used_vr) {
@ -818,6 +816,9 @@ static int handle_rt_signal(unsigned long sig, struct k_sigaction *ka,
goto badframe;
regs->link = (unsigned long) frame->tramp;
}
current->thread.fpscr.val = 0; /* turn off all fp exceptions */
if (put_user(regs->gpr[1], (u32 __user *)newsp))
goto badframe;
regs->gpr[1] = newsp;
@ -1097,6 +1098,8 @@ static int handle_signal(unsigned long sig, struct k_sigaction *ka,
regs->link = (unsigned long) frame->mctx.tramp;
}
current->thread.fpscr.val = 0; /* turn off all fp exceptions */
if (put_user(regs->gpr[1], (u32 __user *)newsp))
goto badframe;
regs->gpr[1] = newsp;


@ -131,9 +131,6 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
flush_fp_to_thread(current);
/* Make sure signal doesn't get spurious FP exceptions */
current->thread.fpscr.val = 0;
#ifdef CONFIG_ALTIVEC
err |= __put_user(v_regs, &sc->v_regs);
@ -423,6 +420,9 @@ static int setup_rt_frame(int signr, struct k_sigaction *ka, siginfo_t *info,
if (err)
goto badframe;
/* Make sure signal handler doesn't get spurious FP exceptions */
current->thread.fpscr.val = 0;
/* Set up to return from userspace. */
if (vdso64_rt_sigtramp && current->thread.vdso_base) {
regs->link = current->thread.vdso_base + vdso64_rt_sigtramp;


@ -77,8 +77,9 @@ V_FUNCTION_BEGIN(__kernel_get_tbfreq)
mflr r12
.cfi_register lr,r12
bl __get_datapage@local
lwz r3,CFG_TB_TICKS_PER_SEC(r3)
lwz r4,(CFG_TB_TICKS_PER_SEC + 4)(r3)
lwz r3,CFG_TB_TICKS_PER_SEC(r3)
mtlr r12
blr
.cfi_endproc
V_FUNCTION_END(__kernel_get_tbfreq)


@ -83,7 +83,7 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
/* Check for supported clock IDs */
cmpli cr0,r3,CLOCK_REALTIME
cmpli cr1,r3,CLOCK_MONOTONIC
cror cr0,cr0,cr1
cror cr0*4+eq,cr0*4+eq,cr1*4+eq
bne cr0,99f
mflr r12 /* r12 saves lr */
@ -91,7 +91,7 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
mr r10,r3 /* r10 saves id */
mr r11,r4 /* r11 saves tp */
bl __get_datapage@local /* get data page */
mr r9, r3 /* datapage ptr in r9 */
mr r9,r3 /* datapage ptr in r9 */
beq cr1,50f /* if monotonic -> jump there */
/*
@ -173,10 +173,14 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
add r4,r4,r7
lis r5,NSEC_PER_SEC@h
ori r5,r5,NSEC_PER_SEC@l
cmpli cr0,r4,r5
cmpl cr0,r4,r5
cmpli cr1,r4,0
blt 1f
subf r4,r5,r4
addi r3,r3,1
1: bge cr1,1f
addi r3,r3,-1
add r4,r4,r5
1: stw r3,TSPC32_TV_SEC(r11)
stw r4,TSPC32_TV_NSEC(r11)
@ -210,7 +214,7 @@ V_FUNCTION_BEGIN(__kernel_clock_getres)
/* Check for supported clock IDs */
cmpwi cr0,r3,CLOCK_REALTIME
cmpwi cr1,r3,CLOCK_MONOTONIC
cror cr0,cr0,cr1
cror cr0*4+eq,cr0*4+eq,cr1*4+eq
bne cr0,99f
li r3,0
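For readers not fluent in PowerPC condition-register encoding, a hedged note
on why the cror operands changed in these routines:

	/* cror takes CR bit numbers (crN*4 + lt/gt/eq/so), not field numbers.
	 * "cror cr0,cr0,cr1" therefore ORed bit 0 (cr0.lt) with bit 1 (cr0.gt)
	 * and never touched cr0.eq, so the following "bne cr0,99f" rejected
	 * CLOCK_MONOTONIC; "cror cr0*4+eq,cr0*4+eq,cr1*4+eq" ORs the two EQ
	 * bits, which is what the branch actually tests. */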


@ -80,5 +80,6 @@ V_FUNCTION_BEGIN(__kernel_get_tbfreq)
bl V_LOCAL_FUNC(__get_datapage)
ld r3,CFG_TB_TICKS_PER_SEC(r3)
mtlr r12
blr
.cfi_endproc
V_FUNCTION_END(__kernel_get_tbfreq)


@ -1,4 +1,5 @@
/*
/*
* Userland implementation of gettimeofday() for 64 bits processes in a
* ppc64 kernel for use in the vDSO
*
@ -68,7 +69,7 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
/* Check for supported clock IDs */
cmpwi cr0,r3,CLOCK_REALTIME
cmpwi cr1,r3,CLOCK_MONOTONIC
cror cr0,cr0,cr1
cror cr0*4+eq,cr0*4+eq,cr1*4+eq
bne cr0,99f
mflr r12 /* r12 saves lr */
@ -84,16 +85,17 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
bl V_LOCAL_FUNC(__do_get_xsec) /* get xsec from tb & kernel */
lis r7,0x3b9a /* r7 = 1000000000 = NSEC_PER_SEC */
ori r7,r7,0xca00
lis r7,15 /* r7 = 1000000 = USEC_PER_SEC */
ori r7,r7,16960
rldicl r5,r4,44,20 /* r5 = sec = xsec / XSEC_PER_SEC */
rldicr r6,r5,20,43 /* r6 = sec * XSEC_PER_SEC */
std r5,TSPC64_TV_SEC(r11) /* store sec in tv */
subf r0,r6,r4 /* r0 = xsec = (xsec - r6) */
mulld r0,r0,r7 /* nsec = (xsec * NSEC_PER_SEC) /
mulld r0,r0,r7 /* usec = (xsec * USEC_PER_SEC) /
* XSEC_PER_SEC
*/
rldicl r0,r0,44,20
mulli r0,r0,1000 /* nsec = usec * 1000 */
std r0,TSPC64_TV_NSEC(r11) /* store nsec in tp */
mtlr r12
@ -106,15 +108,16 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
50: bl V_LOCAL_FUNC(__do_get_xsec) /* get xsec from tb & kernel */
lis r7,0x3b9a /* r7 = 1000000000 = NSEC_PER_SEC */
ori r7,r7,0xca00
lis r7,15 /* r7 = 1000000 = USEC_PER_SEC */
ori r7,r7,16960
rldicl r5,r4,44,20 /* r5 = sec = xsec / XSEC_PER_SEC */
rldicr r6,r5,20,43 /* r6 = sec * XSEC_PER_SEC */
subf r0,r6,r4 /* r0 = xsec = (xsec - r6) */
mulld r0,r0,r7 /* nsec = (xsec * NSEC_PER_SEC) /
mulld r0,r0,r7 /* usec = (xsec * USEC_PER_SEC) /
* XSEC_PER_SEC
*/
rldicl r6,r0,44,20
mulli r6,r6,1000 /* nsec = usec * 1000 */
/* now we must fixup using wall to monotonic. We need to snapshot
* that value and do the counter trick again. Fortunately, we still
@ -123,8 +126,8 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
* can be used
*/
lwz r4,WTOM_CLOCK_SEC(r9)
lwz r7,WTOM_CLOCK_NSEC(r9)
lwa r4,WTOM_CLOCK_SEC(r3)
lwa r7,WTOM_CLOCK_NSEC(r3)
/* We now have our result in r4,r7. We create a fake dependency
* on that result and re-check the counter
@ -144,10 +147,14 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
add r7,r7,r6
lis r9,NSEC_PER_SEC@h
ori r9,r9,NSEC_PER_SEC@l
cmpli cr0,r7,r9
cmpl cr0,r7,r9
cmpli cr1,r7,0
blt 1f
subf r7,r9,r7
addi r4,r4,1
1: bge cr1,1f
addi r4,r4,-1
add r7,r7,r9
1: std r4,TSPC64_TV_SEC(r11)
std r7,TSPC64_TV_NSEC(r11)
@ -181,7 +188,7 @@ V_FUNCTION_BEGIN(__kernel_clock_getres)
/* Check for supported clock IDs */
cmpwi cr0,r3,CLOCK_REALTIME
cmpwi cr1,r3,CLOCK_MONOTONIC
cror cr0,cr0,cr1
cror cr0*4+eq,cr0*4+eq,cr1*4+eq
bne cr0,99f
li r3,0


@ -42,13 +42,6 @@
#include "irq.h"
#include "call_pci.h"
/* This maps virtual irq numbers to real irqs */
unsigned int virt_irq_to_real_map[NR_IRQS];
/* The next available virtual irq number */
/* Note: the pcnet32 driver assumes irq numbers < 2 aren't valid. :( */
static int next_virtual_irq = 2;
static long Pci_Interrupt_Count;
static long Pci_Event_Count;
@ -350,26 +343,14 @@ static hw_irq_controller iSeries_IRQ_handler = {
int __init iSeries_allocate_IRQ(HvBusNumber busNumber,
HvSubBusNumber subBusNumber, HvAgentId deviceId)
{
unsigned int realirq, virtirq;
int virtirq;
unsigned int realirq;
u8 idsel = (deviceId >> 4);
u8 function = deviceId & 7;
virtirq = next_virtual_irq++;
realirq = ((busNumber - 1) << 6) + ((idsel - 1) << 3) + function;
virt_irq_to_real_map[virtirq] = realirq;
virtirq = virt_irq_create_mapping(realirq);
irq_desc[virtirq].handler = &iSeries_IRQ_handler;
return virtirq;
}
int virt_irq_create_mapping(unsigned int real_irq)
{
BUG(); /* Don't call this on iSeries, yet */
return 0;
}
void virt_irq_init(void)
{
return;
}


@ -39,7 +39,6 @@
#include <asm/sections.h>
#include <asm/iommu.h>
#include <asm/firmware.h>
#include <asm/systemcfg.h>
#include <asm/system.h>
#include <asm/time.h>
#include <asm/paca.h>
@ -548,8 +547,6 @@ static unsigned long __init build_iSeries_Memory_Map(void)
*/
static void __init iSeries_setup_arch(void)
{
unsigned procIx = get_paca()->lppaca.dyn_hv_phys_proc_index;
if (get_paca()->lppaca.shared_proc) {
ppc_md.idle_loop = iseries_shared_idle;
printk(KERN_INFO "Using shared processor idle loop\n");
@ -565,9 +562,6 @@ static void __init iSeries_setup_arch(void)
itVpdAreas.xSlicMaxLogicalProcs);
printk("Max physical processors = %d\n",
itVpdAreas.xSlicMaxPhysicalProcs);
_systemcfg->processor = xIoHriProcessorVpd[procIx].xPVR;
printk("Processor version = %x\n", _systemcfg->processor);
}
static void iSeries_show_cpuinfo(struct seq_file *m)


@ -102,7 +102,7 @@ static unsigned long from_rtc_time(struct rtc_time *tm)
static unsigned long cuda_get_time(void)
{
struct adb_request req;
unsigned long now;
unsigned int now;
if (cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_GET_TIME) < 0)
return 0;
@ -113,7 +113,7 @@ static unsigned long cuda_get_time(void)
req.reply_len);
now = (req.reply[3] << 24) + (req.reply[4] << 16)
+ (req.reply[5] << 8) + req.reply[6];
return now - RTC_OFFSET;
return ((unsigned long)now) - RTC_OFFSET;
}
#define cuda_get_rtc_time(tm) to_rtc_time(cuda_get_time(), (tm))
@ -146,7 +146,7 @@ static int cuda_set_rtc_time(struct rtc_time *tm)
static unsigned long pmu_get_time(void)
{
struct adb_request req;
unsigned long now;
unsigned int now;
if (pmu_request(&req, NULL, 1, PMU_READ_RTC) < 0)
return 0;
@ -156,7 +156,7 @@ static unsigned long pmu_get_time(void)
req.reply_len);
now = (req.reply[0] << 24) + (req.reply[1] << 16)
+ (req.reply[2] << 8) + req.reply[3];
return now - RTC_OFFSET;
return ((unsigned long)now) - RTC_OFFSET;
}
#define pmu_get_rtc_time(tm) to_rtc_time(pmu_get_time(), (tm))
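A hedged note on the switch to an unsigned int intermediate in both helpers
above:

	/* req.reply[n] << 24 is evaluated as a (signed) int, so a timestamp
	 * with the top bit set would sign-extend when stored straight into an
	 * unsigned long on a 64-bit kernel; assembling the value in a 32-bit
	 * unsigned int and casting afterwards keeps all 32 bits intact before
	 * RTC_OFFSET is subtracted. */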
@ -199,6 +199,7 @@ static unsigned long smu_get_time(void)
#define smu_set_rtc_time(tm, spin) 0
#endif
/* Can't be __init, it's called when suspending and resuming */
unsigned long pmac_get_boot_time(void)
{
/* Get the time from the RTC, used only at boot time */


@ -4,4 +4,7 @@ obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_IBMVIO) += vio.o
obj-$(CONFIG_XICS) += xics.o
obj-$(CONFIG_SCANLOG) += scanlog.o
obj-$(CONFIG_EEH) += eeh.o eeh_event.o
obj-$(CONFIG_EEH) += eeh.o eeh_event.o
obj-$(CONFIG_HVC_CONSOLE) += hvconsole.o
obj-$(CONFIG_HVCS) += hvcserver.o


@ -200,14 +200,12 @@ static void __init pSeries_setup_arch(void)
if (ppc64_interrupt_controller == IC_OPEN_PIC) {
ppc_md.init_IRQ = pSeries_init_mpic;
ppc_md.get_irq = mpic_get_irq;
ppc_md.cpu_irq_down = mpic_teardown_this_cpu;
/* Allocate the mpic now, so that find_and_init_phbs() can
* fill the ISUs */
pSeries_setup_mpic();
} else {
ppc_md.init_IRQ = xics_init_IRQ;
ppc_md.get_irq = xics_get_irq;
ppc_md.cpu_irq_down = xics_teardown_cpu;
}
#ifdef CONFIG_SMP
@ -595,6 +593,27 @@ static int pSeries_pci_probe_mode(struct pci_bus *bus)
return PCI_PROBE_NORMAL;
}
#ifdef CONFIG_KEXEC
static void pseries_kexec_cpu_down(int crash_shutdown, int secondary)
{
/* Don't risk a hypervisor call if we're crashing */
if (!crash_shutdown) {
unsigned long vpa = __pa(&get_paca()->lppaca);
if (unregister_vpa(hard_smp_processor_id(), vpa)) {
printk("VPA deregistration of cpu %u (hw_cpu_id %d) "
"failed\n", smp_processor_id(),
hard_smp_processor_id());
}
}
if (ppc64_interrupt_controller == IC_OPEN_PIC)
mpic_teardown_this_cpu(secondary);
else
xics_teardown_cpu(secondary);
}
#endif
struct machdep_calls __initdata pSeries_md = {
.probe = pSeries_probe,
.setup_arch = pSeries_setup_arch,
@ -617,4 +636,7 @@ struct machdep_calls __initdata pSeries_md = {
.check_legacy_ioport = pSeries_check_legacy_ioport,
.system_reset_exception = pSeries_system_reset_exception,
.machine_check_exception = pSeries_machine_check_exception,
#ifdef CONFIG_KEXEC
.kexec_cpu_down = pseries_kexec_cpu_down,
#endif
};


@ -1,520 +0,0 @@
#
# For a description of the syntax of this configuration file,
# see Documentation/kbuild/kconfig-language.txt.
#
config 64BIT
def_bool y
config MMU
bool
default y
config PPC_STD_MMU
def_bool y
config UID16
bool
config RWSEM_GENERIC_SPINLOCK
bool
config RWSEM_XCHGADD_ALGORITHM
bool
default y
config GENERIC_CALIBRATE_DELAY
bool
default y
config GENERIC_ISA_DMA
bool
default y
config EARLY_PRINTK
bool
default y
config COMPAT
bool
default y
config SCHED_NO_NO_OMIT_FRAME_POINTER
bool
default y
config ARCH_MAY_HAVE_PC_FDC
bool
default y
config PPC_STD_MMU
bool
default y
# We optimistically allocate largepages from the VM, so make the limit
# large enough (16MB). This badly named config option is actually
# max order + 1
config FORCE_MAX_ZONEORDER
int
default "9" if PPC_64K_PAGES
default "13"
source "init/Kconfig"
config SYSVIPC_COMPAT
bool
depends on COMPAT && SYSVIPC
default y
menu "Platform support"
choice
prompt "Platform Type"
default PPC_MULTIPLATFORM
config PPC_ISERIES
bool "IBM Legacy iSeries"
config PPC_MULTIPLATFORM
bool "Generic"
endchoice
config PPC_PSERIES
depends on PPC_MULTIPLATFORM
bool " IBM pSeries & new iSeries"
default y
config PPC_BPA
bool " Broadband Processor Architecture"
depends on PPC_MULTIPLATFORM
config PPC_PMAC
depends on PPC_MULTIPLATFORM
bool " Apple G5 based machines"
default y
select U3_DART
select GENERIC_TBSYNC
config PPC_MAPLE
depends on PPC_MULTIPLATFORM
bool " Maple 970FX Evaluation Board"
select U3_DART
select MPIC_BROKEN_U3
select GENERIC_TBSYNC
default n
help
This option enables support for the Maple 970FX Evaluation Board.
For more informations, refer to <http://www.970eval.com>
config PPC
bool
default y
config PPC64
bool
default y
config PPC_OF
depends on PPC_MULTIPLATFORM
bool
default y
config XICS
depends on PPC_PSERIES
bool
default y
config MPIC
depends on PPC_PSERIES || PPC_PMAC || PPC_MAPLE
bool
default y
config PPC_I8259
depends on PPC_PSERIES
bool
default y
config BPA_IIC
depends on PPC_BPA
bool
default y
# VMX is pSeries only for now until somebody writes the iSeries
# exception vectors for it
config ALTIVEC
bool "Support for VMX (Altivec) vector unit"
depends on PPC_MULTIPLATFORM
default y
config PPC_SPLPAR
depends on PPC_PSERIES
bool "Support for shared-processor logical partitions"
default n
help
Enabling this option will make the kernel run more efficiently
on logically-partitioned pSeries systems which use shared
processors, that is, which share physical processors between
two or more partitions.
config KEXEC
bool "kexec system call (EXPERIMENTAL)"
depends on PPC_MULTIPLATFORM && EXPERIMENTAL
help
kexec is a system call that implements the ability to shutdown your
current kernel, and to start another kernel. It is like a reboot
but it is indepedent of the system firmware. And like a reboot
you can start any kernel with it, not just Linux.
The name comes from the similiarity to the exec system call.
It is an ongoing process to be certain the hardware in a machine
is properly shutdown, so do not be surprised if this code does not
initially work for you. It may help to enable device hotplugging
support. As of this writing the exact hardware interface is
strongly in flux, so no good recommendation can be made.
source "drivers/cpufreq/Kconfig"
config CPU_FREQ_PMAC64
bool "Support for some Apple G5s"
depends on CPU_FREQ && PMAC_SMU && PPC64
select CPU_FREQ_TABLE
help
This adds support for frequency switching on Apple iMac G5,
and some of the more recent desktop G5 machines as well.
config IBMVIO
depends on PPC_PSERIES || PPC_ISERIES
bool
default y
config U3_DART
bool
depends on PPC_MULTIPLATFORM
default n
config MPIC_BROKEN_U3
bool
depends on PPC_MAPLE
default y
config GENERIC_TBSYNC
def_bool n
config PPC_PMAC64
bool
depends on PPC_PMAC
default y
config BOOTX_TEXT
bool "Support for early boot text console"
depends PPC_OF
help
Say Y here to see progress messages from the boot firmware in text
mode. Requires an Open Firmware compatible video card.
config POWER4
def_bool y
config PPC_FPU
def_bool y
config POWER4_ONLY
bool "Optimize for POWER4"
default n
---help---
Cause the compiler to optimize for POWER4 processors. The resulting
binary will not work on POWER3 or RS64 processors when compiled with
binutils 2.15 or later.
config IOMMU_VMERGE
bool "Enable IOMMU virtual merging (EXPERIMENTAL)"
depends on EXPERIMENTAL
default n
help
Cause IO segments sent to a device for DMA to be merged virtually
by the IOMMU when they happen to have been allocated contiguously.
This doesn't add pressure to the IOMMU allocator. However, some
drivers don't support getting large merged segments coming back
from *_map_sg(). Say Y if you know the drivers you are using are
properly handling this case.
config SMP
bool "Symmetric multi-processing support"
---help---
This enables support for systems with more than one CPU. If you have
a system with only one CPU, say N. If you have a system with more
than one CPU, say Y.
If you say N here, the kernel will run on single and multiprocessor
machines, but will use only one CPU of a multiprocessor machine. If
you say Y here, the kernel will run on single-processor machines.
On a single-processor machine, the kernel will run faster if you say
N here.
If you don't know what to do here, say Y.
config NR_CPUS
int "Maximum number of CPUs (2-128)"
range 2 128
depends on SMP
default "32"
config HMT
bool "Hardware multithreading"
depends on SMP && PPC_PSERIES && BROKEN
help
This option enables hardware multithreading on RS64 cpus.
pSeries systems p620 and p660 have such a cpu type.
config NUMA
bool "NUMA support"
default y if SMP && PPC_PSERIES
config ARCH_SELECT_MEMORY_MODEL
def_bool y
config ARCH_FLATMEM_ENABLE
def_bool y
depends on !NUMA
config ARCH_SPARSEMEM_ENABLE
def_bool y
config ARCH_SPARSEMEM_DEFAULT
def_bool y
depends on NUMA
source "mm/Kconfig"
config HAVE_ARCH_EARLY_PFN_TO_NID
def_bool y
depends on NEED_MULTIPLE_NODES
config ARCH_MEMORY_PROBE
def_bool y
depends on MEMORY_HOTPLUG
# Some NUMA nodes have memory ranges that span
# other nodes. Even though a pfn is valid and
# between a node's start and end pfns, it may not
# reside on that node.
#
# This is a relatively temporary hack that should
# be able to go away when sparsemem is fully in
# place
config NODES_SPAN_OTHER_NODES
def_bool y
depends on NEED_MULTIPLE_NODES
config PPC_64K_PAGES
bool "64k page size"
help
This option changes the kernel logical page size to 64k. On machines
without processor support for 64k pages, the kernel will simulate
them by loading each individual 4k page on demand transparently,
while on hardware with such support, it will be used to map
normal application pages.
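For concreteness, the change amounts to the following page geometry (an editorial illustration of the option's effect, not code from this tree):

	/* 4k pages:  PAGE_SHIFT = 12, PAGE_SIZE = 4096 bytes  */
	/* 64k pages: PAGE_SHIFT = 16, PAGE_SIZE = 65536 bytes */
	#define PAGE_SHIFT	16
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)
	#define PAGE_MASK	(~(PAGE_SIZE - 1))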
config SCHED_SMT
bool "SMT (Hyperthreading) scheduler support"
depends on SMP
default n
help
SMT scheduler support improves the CPU scheduler's decision making
when dealing with POWER5 cpus at a cost of slightly increased
overhead in some places. If unsure say N here.
source "kernel/Kconfig.preempt"
source "kernel/Kconfig.hz"
config EEH
bool "PCI Extended Error Handling (EEH)" if EMBEDDED
depends on PPC_PSERIES
default y if !EMBEDDED
#
# Use the generic interrupt handling code in kernel/irq/:
#
config GENERIC_HARDIRQS
bool
default y
config PPC_RTAS
bool
depends on PPC_PSERIES || PPC_BPA
default y
config RTAS_ERROR_LOGGING
bool
depends on PPC_RTAS
default y
config RTAS_PROC
bool "Proc interface to RTAS"
depends on PPC_RTAS
default y
config RTAS_FLASH
tristate "Firmware flash interface"
depends on RTAS_PROC
config SCANLOG
tristate "Scanlog dump interface"
depends on RTAS_PROC && PPC_PSERIES
config LPARCFG
tristate "LPAR Configuration Data"
depends on PPC_PSERIES || PPC_ISERIES
help
Provide system capacity information via human readable
<key word>=<value> pairs through a /proc/ppc64/lparcfg interface.
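A minimal user-space consumer of that interface could look like this (a hedged sketch; the actual key names depend on the firmware and are not specified here):

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char line[128];
		FILE *f = fopen("/proc/ppc64/lparcfg", "r");

		if (!f)
			return 1;
		while (fgets(line, sizeof(line), f)) {
			char *eq = strchr(line, '=');

			if (eq)	/* each line is <key word>=<value> */
				printf("key=%.*s value=%s",
				       (int)(eq - line), line, eq + 1);
		}
		fclose(f);
		return 0;
	}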
config SECCOMP
bool "Enable seccomp to safely compute untrusted bytecode"
depends on PROC_FS
default y
help
This kernel feature is useful for number crunching applications
that may need to compute untrusted bytecode during their
execution. By using pipes or other transports made available to
the process as file descriptors supporting the read/write
syscalls, it's possible to isolate those applications in
their own address space using seccomp. Once seccomp is
enabled via /proc/<pid>/seccomp, it cannot be disabled
and the task is only allowed to execute a few safe syscalls
defined by each seccomp mode.
If unsure, say Y. Only embedded systems should say N here.
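A hedged sketch of the /proc interface described above (strict mode as this help text presents it; note that fclose() would issue a now-forbidden close() once the mode is enabled, so the sketch flushes instead):

	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		FILE *f = fopen("/proc/self/seccomp", "w");

		if (!f || fputs("1\n", f) == EOF)
			return 1;
		fflush(f);			/* this write enables strict seccomp */
		write(1, "sandboxed\n", 10);	/* read/write remain permitted */
		_exit(0);			/* exit is permitted; most else kills us */
	}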
source "fs/Kconfig.binfmt"
config HOTPLUG_CPU
bool "Support for hot-pluggable CPUs"
depends on SMP && EXPERIMENTAL && (PPC_PSERIES || PPC_PMAC)
select HOTPLUG
---help---
Say Y here to be able to turn CPUs off and on.
Say N if you are unsure.
config PROC_DEVICETREE
bool "Support for Open Firmware device tree in /proc"
help
This option adds a device-tree directory under /proc which contains
an image of the device tree that the kernel copies from Open
Firmware. If unsure, say Y here.
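Properties appear as ordinary files under that directory; for example (a sketch assuming a "model" property exists, as it does on most Open Firmware machines):

	#include <stdio.h>

	int main(void)
	{
		char model[64] = "unknown";
		FILE *f = fopen("/proc/device-tree/model", "r");

		if (f) {
			fgets(model, sizeof(model), f);	/* string property */
			fclose(f);
		}
		printf("model: %s\n", model);
		return 0;
	}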
config CMDLINE_BOOL
bool "Default bootloader kernel arguments"
depends on !PPC_ISERIES
config CMDLINE
string "Initial kernel command string"
depends on CMDLINE_BOOL
default "console=ttyS0,9600 console=tty0 root=/dev/sda2"
help
On some platforms, there is currently no way for the boot loader to
pass arguments to the kernel. For these platforms, you can supply
some command-line options at build time by entering them here. In
most cases you will need to specify the root device here.
endmenu
config ISA_DMA_API
bool
default y
menu "Bus Options"
config ISA
bool
help
Find out whether you have ISA slots on your motherboard. ISA is the
name of a bus system, i.e. the way the CPU talks to the other stuff
inside your box. If you have an Apple machine, say N here; if you
have an IBM RS/6000 or pSeries machine or a PReP machine, say Y. If
you have an embedded board, consult your board documentation.
config SBUS
bool
config MCA
bool
config EISA
bool
config PCI
bool "support for PCI devices" if (EMBEDDED && PPC_ISERIES)
default y
help
Find out whether your system includes a PCI bus. PCI is the name of
a bus system, i.e. the way the CPU talks to the other stuff inside
your box. If you say Y here, the kernel will include drivers and
infrastructure code to support PCI bus devices.
config PCI_DOMAINS
bool
default PCI
source "drivers/pci/Kconfig"
source "drivers/pcmcia/Kconfig"
source "drivers/pci/hotplug/Kconfig"
endmenu
source "net/Kconfig"
source "drivers/Kconfig"
source "fs/Kconfig"
menu "iSeries device drivers"
depends on PPC_ISERIES
config VIOCONS
tristate "iSeries Virtual Console Support"
config VIODASD
tristate "iSeries Virtual I/O disk support"
help
If you are running on an iSeries system and you want to use
virtual disks created and managed by OS/400, say Y.
config VIOCD
tristate "iSeries Virtual I/O CD support"
help
If you are running Linux on an IBM iSeries system and you want to
read a CD drive owned by OS/400, say Y here.
config VIOTAPE
tristate "iSeries Virtual Tape Support"
help
If you are running Linux on an iSeries system and you want Linux
to read and/or write a tape drive owned by OS/400, say Y here.
endmenu
config VIOPATH
bool
depends on VIOCONS || VIODASD || VIOCD || VIOTAPE || VETH
default y
source "arch/powerpc/oprofile/Kconfig"
source "arch/ppc64/Kconfig.debug"
source "security/Kconfig"
config KEYS_COMPAT
bool
depends on COMPAT && KEYS
default y
source "crypto/Kconfig"
source "lib/Kconfig"


@@ -2,45 +2,6 @@
# Makefile for the linux ppc64 kernel.
#
ifneq ($(CONFIG_PPC_MERGE),y)
EXTRA_CFLAGS += -mno-minimal-toc
extra-y := head.o vmlinux.lds
obj-y := misc.o prom.o
endif
obj-y += idle.o dma.o \
	align.o \
	rtc.o \
	iommu.o
pci-obj-$(CONFIG_PPC_MULTIPLATFORM) += pci_dn.o pci_direct_iommu.o
obj-$(CONFIG_PCI) += pci.o pci_iommu.o iomap.o $(pci-obj-y)
obj-y += idle.o align.o
obj-$(CONFIG_PPC_MULTIPLATFORM) += nvram.o
ifneq ($(CONFIG_PPC_MERGE),y)
obj-$(CONFIG_PPC_MULTIPLATFORM) += prom_init.o
endif
obj-$(CONFIG_KEXEC) += machine_kexec.o
obj-$(CONFIG_MODULES) += module.o
ifneq ($(CONFIG_PPC_MERGE),y)
obj-$(CONFIG_MODULES) += ppc_ksyms.o
endif
obj-$(CONFIG_HVC_CONSOLE) += hvconsole.o
ifneq ($(CONFIG_PPC_MERGE),y)
obj-$(CONFIG_BOOTX_TEXT) += btext.o
endif
obj-$(CONFIG_HVCS) += hvcserver.o
obj-$(CONFIG_KPROBES) += kprobes.o
ifneq ($(CONFIG_PPC_MERGE),y)
ifeq ($(CONFIG_PPC_ISERIES),y)
arch/ppc64/kernel/head.o: arch/powerpc/kernel/lparmap.s
AFLAGS_head.o += -Iarch/powerpc/kernel
endif
endif


@@ -1,195 +0,0 @@
/*
* This program is used to generate definitions needed by
* assembly language modules.
*
* We use the technique used in the OSF Mach kernel code:
* generate asm statements containing #defines,
* compile this file to assembler, and then extract the
* #defines from the assembly-language output.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/time.h>
#include <linux/hardirq.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/paca.h>
#include <asm/lppaca.h>
#include <asm/iseries/hv_lp_event.h>
#include <asm/rtas.h>
#include <asm/cputable.h>
#include <asm/cache.h>
#include <asm/systemcfg.h>
#include <asm/compat.h>
#define DEFINE(sym, val) \
asm volatile("\n->" #sym " %0 " #val : : "i" (val))
#define BLANK() asm volatile("\n->" : : )
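/*
 * Editorial illustration (not in the original file): compiled with -S, a
 * line such as DEFINE(TI_FLAGS, offsetof(struct thread_info, flags))
 * leaves a marker in the assembly output of the form
 *
 *	->TI_FLAGS <N> offsetof(struct thread_info, flags)
 *
 * where <N> is the constant the compiler substituted for %0; the build
 * scripts pick out these "->" markers and emit "#define TI_FLAGS <N>"
 * into a generated header for use by assembly code.
 */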
int main(void)
{
/* thread struct on stack */
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
DEFINE(TI_SC_NOERR, offsetof(struct thread_info, syscall_noerror));
/* task_struct->thread */
DEFINE(THREAD, offsetof(struct task_struct, thread));
DEFINE(PT_REGS, offsetof(struct thread_struct, regs));
DEFINE(THREAD_FPEXC_MODE, offsetof(struct thread_struct, fpexc_mode));
DEFINE(THREAD_FPR0, offsetof(struct thread_struct, fpr[0]));
DEFINE(THREAD_FPSCR, offsetof(struct thread_struct, fpscr));
DEFINE(KSP, offsetof(struct thread_struct, ksp));
DEFINE(KSP_VSID, offsetof(struct thread_struct, ksp_vsid));
#ifdef CONFIG_ALTIVEC
DEFINE(THREAD_VR0, offsetof(struct thread_struct, vr[0]));
DEFINE(THREAD_VRSAVE, offsetof(struct thread_struct, vrsave));
DEFINE(THREAD_VSCR, offsetof(struct thread_struct, vscr));
DEFINE(THREAD_USED_VR, offsetof(struct thread_struct, used_vr));
#endif /* CONFIG_ALTIVEC */
DEFINE(MM, offsetof(struct task_struct, mm));
DEFINE(AUDITCONTEXT, offsetof(struct task_struct, audit_context));
DEFINE(DCACHEL1LINESIZE, offsetof(struct ppc64_caches, dline_size));
DEFINE(DCACHEL1LOGLINESIZE, offsetof(struct ppc64_caches, log_dline_size));
DEFINE(DCACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, dlines_per_page));
DEFINE(ICACHEL1LINESIZE, offsetof(struct ppc64_caches, iline_size));
DEFINE(ICACHEL1LOGLINESIZE, offsetof(struct ppc64_caches, log_iline_size));
DEFINE(ICACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, ilines_per_page));
DEFINE(PLATFORM_LPAR, PLATFORM_LPAR);
/* paca */
DEFINE(PACA_SIZE, sizeof(struct paca_struct));
DEFINE(PACAPACAINDEX, offsetof(struct paca_struct, paca_index));
DEFINE(PACAPROCSTART, offsetof(struct paca_struct, cpu_start));
DEFINE(PACAKSAVE, offsetof(struct paca_struct, kstack));
DEFINE(PACACURRENT, offsetof(struct paca_struct, __current));
DEFINE(PACASAVEDMSR, offsetof(struct paca_struct, saved_msr));
DEFINE(PACASTABREAL, offsetof(struct paca_struct, stab_real));
DEFINE(PACASTABVIRT, offsetof(struct paca_struct, stab_addr));
DEFINE(PACASTABRR, offsetof(struct paca_struct, stab_rr));
DEFINE(PACAR1, offsetof(struct paca_struct, saved_r1));
DEFINE(PACATOC, offsetof(struct paca_struct, kernel_toc));
DEFINE(PACAPROCENABLED, offsetof(struct paca_struct, proc_enabled));
DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache));
DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr));
DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
#ifdef CONFIG_PPC_64K_PAGES
DEFINE(PACAPGDIR, offsetof(struct paca_struct, pgdir));
#endif
#ifdef CONFIG_HUGETLB_PAGE
DEFINE(PACALOWHTLBAREAS, offsetof(struct paca_struct, context.low_htlb_areas));
DEFINE(PACAHIGHHTLBAREAS, offsetof(struct paca_struct, context.high_htlb_areas));
#endif /* CONFIG_HUGETLB_PAGE */
DEFINE(PACADEFAULTDECR, offsetof(struct paca_struct, default_decr));
DEFINE(PACA_EXGEN, offsetof(struct paca_struct, exgen));
DEFINE(PACA_EXMC, offsetof(struct paca_struct, exmc));
DEFINE(PACA_EXSLB, offsetof(struct paca_struct, exslb));
DEFINE(PACA_EXDSI, offsetof(struct paca_struct, exdsi));
DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp));
DEFINE(PACALPPACA, offsetof(struct paca_struct, lppaca));
DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id));
DEFINE(LPPACASRR0, offsetof(struct lppaca, saved_srr0));
DEFINE(LPPACASRR1, offsetof(struct lppaca, saved_srr1));
DEFINE(LPPACAANYINT, offsetof(struct lppaca, int_dword.any_int));
DEFINE(LPPACADECRINT, offsetof(struct lppaca, int_dword.fields.decr_int));
/* RTAS */
DEFINE(RTASBASE, offsetof(struct rtas_t, base));
DEFINE(RTASENTRY, offsetof(struct rtas_t, entry));
/* Interrupt register frame */
DEFINE(STACK_FRAME_OVERHEAD, STACK_FRAME_OVERHEAD);
DEFINE(SWITCH_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs));
/* 288 = # of volatile regs, int & fp, for leaf routines */
/* which do not stack a frame. See the PPC64 ABI. */
DEFINE(INT_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 288);
/* Create extra stack space for SRR0 and SRR1 when calling prom/rtas. */
DEFINE(PROM_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16);
DEFINE(RTAS_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16);
DEFINE(GPR0, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[0]));
DEFINE(GPR1, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[1]));
DEFINE(GPR2, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[2]));
DEFINE(GPR3, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[3]));
DEFINE(GPR4, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[4]));
DEFINE(GPR5, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[5]));
DEFINE(GPR6, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[6]));
DEFINE(GPR7, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[7]));
DEFINE(GPR8, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[8]));
DEFINE(GPR9, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[9]));
DEFINE(GPR10, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[10]));
DEFINE(GPR11, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[11]));
DEFINE(GPR12, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[12]));
DEFINE(GPR13, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[13]));
/*
* Note: these symbols include _ because they overlap with special
* register names
*/
DEFINE(_NIP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, nip));
DEFINE(_MSR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, msr));
DEFINE(_CTR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, ctr));
DEFINE(_LINK, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, link));
DEFINE(_CCR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, ccr));
DEFINE(_XER, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, xer));
DEFINE(_DAR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dar));
DEFINE(_DSISR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dsisr));
DEFINE(ORIG_GPR3, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, orig_gpr3));
DEFINE(RESULT, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, result));
DEFINE(_TRAP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, trap));
DEFINE(SOFTE, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, softe));
/* These _only_ to be used with {PROM,RTAS}_FRAME_SIZE!!! */
DEFINE(_SRR0, STACK_FRAME_OVERHEAD+sizeof(struct pt_regs));
DEFINE(_SRR1, STACK_FRAME_OVERHEAD+sizeof(struct pt_regs)+8);
DEFINE(CLONE_VM, CLONE_VM);
DEFINE(CLONE_UNTRACED, CLONE_UNTRACED);
/* About the CPU features table */
DEFINE(CPU_SPEC_ENTRY_SIZE, sizeof(struct cpu_spec));
DEFINE(CPU_SPEC_PVR_MASK, offsetof(struct cpu_spec, pvr_mask));
DEFINE(CPU_SPEC_PVR_VALUE, offsetof(struct cpu_spec, pvr_value));
DEFINE(CPU_SPEC_FEATURES, offsetof(struct cpu_spec, cpu_features));
DEFINE(CPU_SPEC_SETUP, offsetof(struct cpu_spec, cpu_setup));
/* systemcfg offsets for use by vdso */
DEFINE(CFG_TB_ORIG_STAMP, offsetof(struct systemcfg, tb_orig_stamp));
DEFINE(CFG_TB_TICKS_PER_SEC, offsetof(struct systemcfg, tb_ticks_per_sec));
DEFINE(CFG_TB_TO_XS, offsetof(struct systemcfg, tb_to_xs));
DEFINE(CFG_STAMP_XSEC, offsetof(struct systemcfg, stamp_xsec));
DEFINE(CFG_TB_UPDATE_COUNT, offsetof(struct systemcfg, tb_update_count));
DEFINE(CFG_TZ_MINUTEWEST, offsetof(struct systemcfg, tz_minuteswest));
DEFINE(CFG_TZ_DSTTIME, offsetof(struct systemcfg, tz_dsttime));
DEFINE(CFG_SYSCALL_MAP32, offsetof(struct systemcfg, syscall_map_32));
DEFINE(CFG_SYSCALL_MAP64, offsetof(struct systemcfg, syscall_map_64));
/* timeval/timezone offsets for use by vdso */
DEFINE(TVAL64_TV_SEC, offsetof(struct timeval, tv_sec));
DEFINE(TVAL64_TV_USEC, offsetof(struct timeval, tv_usec));
DEFINE(TVAL32_TV_SEC, offsetof(struct compat_timeval, tv_sec));
DEFINE(TVAL32_TV_USEC, offsetof(struct compat_timeval, tv_usec));
DEFINE(TZONE_TZ_MINWEST, offsetof(struct timezone, tz_minuteswest));
DEFINE(TZONE_TZ_DSTTIME, offsetof(struct timezone, tz_dsttime));
return 0;
}


@@ -1,792 +0,0 @@
/*
* Procedures for drawing on the screen early on in the boot process.
*
* Benjamin Herrenschmidt <benh@kernel.crashing.org>
*/
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/init.h>
#include <asm/sections.h>
#include <asm/prom.h>
#include <asm/btext.h>
#include <asm/prom.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/lmb.h>
#include <asm/processor.h>
#include <asm/udbg.h>
#undef NO_SCROLL
#ifndef NO_SCROLL
static void scrollscreen(void);
#endif
static void draw_byte(unsigned char c, long locX, long locY);
static void draw_byte_32(unsigned char *bits, unsigned int *base, int rb);
static void draw_byte_16(unsigned char *bits, unsigned int *base, int rb);
static void draw_byte_8(unsigned char *bits, unsigned int *base, int rb);
static int g_loc_X;
static int g_loc_Y;
static int g_max_loc_X;
static int g_max_loc_Y;
static int dispDeviceRowBytes;
static int dispDeviceDepth;
static int dispDeviceRect[4];
static unsigned char *dispDeviceBase, *logicalDisplayBase;
unsigned long disp_BAT[2] __initdata = {0, 0};
#define cmapsz (16*256)
static unsigned char vga_font[cmapsz];
int boot_text_mapped;
int force_printk_to_btext = 0;
/* Here's a small text engine to use during early boot
* or for debugging purposes
*
* todo:
*
* - build some kind of vgacon with it to enable early printk
* - move to a separate file
* - add a few video driver hooks to keep in sync with display
* changes.
*/
void map_boot_text(void)
{
unsigned long base, offset, size;
unsigned char *vbase;
/* By default, we are no longer mapped */
boot_text_mapped = 0;
if (dispDeviceBase == 0)
return;
base = ((unsigned long) dispDeviceBase) & 0xFFFFF000UL;
offset = ((unsigned long) dispDeviceBase) - base;
size = dispDeviceRowBytes * dispDeviceRect[3] + offset
+ dispDeviceRect[0];
vbase = __ioremap(base, size, _PAGE_NO_CACHE);
if (vbase == 0)
return;
logicalDisplayBase = vbase + offset;
boot_text_mapped = 1;
}
int btext_initialize(struct device_node *np)
{
unsigned int width, height, depth, pitch;
unsigned long address = 0;
u32 *prop;
prop = (u32 *)get_property(np, "width", NULL);
if (prop == NULL)
return -EINVAL;
width = *prop;
prop = (u32 *)get_property(np, "height", NULL);
if (prop == NULL)
return -EINVAL;
height = *prop;
prop = (u32 *)get_property(np, "depth", NULL);
if (prop == NULL)
return -EINVAL;
depth = *prop;
pitch = width * ((depth + 7) / 8);
prop = (u32 *)get_property(np, "linebytes", NULL);
if (prop)
pitch = *prop;
if (pitch == 1)
pitch = 0x1000;
prop = (u32 *)get_property(np, "address", NULL);
if (prop)
address = *prop;
/* FIXME: Add support for PCI reg properties */
if (address == 0)
return -EINVAL;
g_loc_X = 0;
g_loc_Y = 0;
g_max_loc_X = width / 8;
g_max_loc_Y = height / 16;
logicalDisplayBase = (unsigned char *)address;
dispDeviceBase = (unsigned char *)address;
dispDeviceRowBytes = pitch;
dispDeviceDepth = depth;
dispDeviceRect[0] = dispDeviceRect[1] = 0;
dispDeviceRect[2] = width;
dispDeviceRect[3] = height;
map_boot_text();
return 0;
}
static void btext_putc(unsigned char c)
{
btext_drawchar(c);
}
void __init init_boot_display(void)
{
char *name;
struct device_node *np = NULL;
int rc = -ENODEV;
printk("trying to initialize btext ...\n");
name = (char *)get_property(of_chosen, "linux,stdout-path", NULL);
if (name != NULL) {
np = of_find_node_by_path(name);
if (np != NULL) {
if (strcmp(np->type, "display") != 0) {
printk("boot stdout isn't a display !\n");
of_node_put(np);
np = NULL;
}
}
}
if (np)
rc = btext_initialize(np);
if (rc) {
for (np = NULL; (np = of_find_node_by_type(np, "display"));) {
if (get_property(np, "linux,opened", NULL)) {
printk("trying %s ...\n", np->full_name);
rc = btext_initialize(np);
printk("result: %d\n", rc);
}
if (rc == 0)
break;
}
}
if (rc == 0 && udbg_putc == NULL)
udbg_putc = btext_putc;
}
/* Calc the base address of a given point (x,y) */
static unsigned char * calc_base(int x, int y)
{
unsigned char *base;
base = logicalDisplayBase;
if (base == 0)
base = dispDeviceBase;
base += (x + dispDeviceRect[0]) * (dispDeviceDepth >> 3);
base += (y + dispDeviceRect[1]) * dispDeviceRowBytes;
return base;
}
/* Adjust the display to a new resolution */
void btext_update_display(unsigned long phys, int width, int height,
int depth, int pitch)
{
if (dispDeviceBase == 0)
return;
/* check it's the same frame buffer (within 256MB) */
if ((phys ^ (unsigned long)dispDeviceBase) & 0xf0000000)
return;
dispDeviceBase = (__u8 *) phys;
dispDeviceRect[0] = 0;
dispDeviceRect[1] = 0;
dispDeviceRect[2] = width;
dispDeviceRect[3] = height;
dispDeviceDepth = depth;
dispDeviceRowBytes = pitch;
if (boot_text_mapped) {
iounmap(logicalDisplayBase);
boot_text_mapped = 0;
}
map_boot_text();
g_loc_X = 0;
g_loc_Y = 0;
g_max_loc_X = width / 8;
g_max_loc_Y = height / 16;
}
void btext_clearscreen(void)
{
unsigned long *base = (unsigned long *)calc_base(0, 0);
unsigned long width = ((dispDeviceRect[2] - dispDeviceRect[0]) *
(dispDeviceDepth >> 3)) >> 3;
int i,j;
for (i=0; i<(dispDeviceRect[3] - dispDeviceRect[1]); i++)
{
unsigned long *ptr = base;
for(j=width; j; --j)
*(ptr++) = 0;
base += (dispDeviceRowBytes >> 3);
}
}
#ifndef NO_SCROLL
static void scrollscreen(void)
{
unsigned long *src = (unsigned long *)calc_base(0,16);
unsigned long *dst = (unsigned long *)calc_base(0,0);
unsigned long width = ((dispDeviceRect[2] - dispDeviceRect[0]) *
(dispDeviceDepth >> 3)) >> 3;
int i,j;
for (i=0; i<(dispDeviceRect[3] - dispDeviceRect[1] - 16); i++)
{
unsigned long *src_ptr = src;
unsigned long *dst_ptr = dst;
for(j=width; j; --j)
*(dst_ptr++) = *(src_ptr++);
src += (dispDeviceRowBytes >> 3);
dst += (dispDeviceRowBytes >> 3);
}
for (i=0; i<16; i++)
{
unsigned long *dst_ptr = dst;
for(j=width; j; --j)
*(dst_ptr++) = 0;
dst += (dispDeviceRowBytes >> 3);
}
}
#endif /* ndef NO_SCROLL */
void btext_drawchar(char c)
{
int cline = 0;
#ifdef NO_SCROLL
int x;
#endif
if (!boot_text_mapped)
return;
switch (c) {
case '\b':
if (g_loc_X > 0)
--g_loc_X;
break;
case '\t':
g_loc_X = (g_loc_X & -8) + 8;
break;
case '\r':
g_loc_X = 0;
break;
case '\n':
g_loc_X = 0;
g_loc_Y++;
cline = 1;
break;
default:
draw_byte(c, g_loc_X++, g_loc_Y);
}
if (g_loc_X >= g_max_loc_X) {
g_loc_X = 0;
g_loc_Y++;
cline = 1;
}
#ifndef NO_SCROLL
while (g_loc_Y >= g_max_loc_Y) {
scrollscreen();
g_loc_Y--;
}
#else
/* wrap around from bottom to top of screen so we don't
waste time scrolling each line. -- paulus. */
if (g_loc_Y >= g_max_loc_Y)
g_loc_Y = 0;
if (cline) {
for (x = 0; x < g_max_loc_X; ++x)
draw_byte(' ', x, g_loc_Y);
}
#endif
}
void btext_drawstring(const char *c)
{
if (!boot_text_mapped)
return;
while (*c)
btext_drawchar(*c++);
}
void btext_drawhex(unsigned long v)
{
char *hex_table = "0123456789abcdef";
if (!boot_text_mapped)
return;
btext_drawchar(hex_table[(v >> 60) & 0x0000000FUL]);
btext_drawchar(hex_table[(v >> 56) & 0x0000000FUL]);
btext_drawchar(hex_table[(v >> 52) & 0x0000000FUL]);
btext_drawchar(hex_table[(v >> 48) & 0x0000000FUL]);
btext_drawchar(hex_table[(v >> 44) & 0x0000000FUL]);
btext_drawchar(hex_table[(v >> 40) & 0x0000000FUL]);
btext_drawchar(hex_table[(v >> 36) & 0x0000000FUL]);
btext_drawchar(hex_table[(v >> 32) & 0x0000000FUL]);
btext_drawchar(hex_table[(v >> 28) & 0x0000000FUL]);
btext_drawchar(hex_table[(v >> 24) & 0x0000000FUL]);
btext_drawchar(hex_table[(v >> 20) & 0x0000000FUL]);
btext_drawchar(hex_table[(v >> 16) & 0x0000000FUL]);
btext_drawchar(hex_table[(v >> 12) & 0x0000000FUL]);
btext_drawchar(hex_table[(v >> 8) & 0x0000000FUL]);
btext_drawchar(hex_table[(v >> 4) & 0x0000000FUL]);
btext_drawchar(hex_table[(v >> 0) & 0x0000000FUL]);
btext_drawchar(' ');
}
static void draw_byte(unsigned char c, long locX, long locY)
{
unsigned char *base = calc_base(locX << 3, locY << 4);
unsigned char *font = &vga_font[((unsigned int)c) * 16];
int rb = dispDeviceRowBytes;
switch(dispDeviceDepth) {
case 24:
case 32:
draw_byte_32(font, (unsigned int *)base, rb);
break;
case 15:
case 16:
draw_byte_16(font, (unsigned int *)base, rb);
break;
case 8:
draw_byte_8(font, (unsigned int *)base, rb);
break;
}
}
static unsigned int expand_bits_8[16] = {
0x00000000,
0x000000ff,
0x0000ff00,
0x0000ffff,
0x00ff0000,
0x00ff00ff,
0x00ffff00,
0x00ffffff,
0xff000000,
0xff0000ff,
0xff00ff00,
0xff00ffff,
0xffff0000,
0xffff00ff,
0xffffff00,
0xffffffff
};
static unsigned int expand_bits_16[4] = {
0x00000000,
0x0000ffff,
0xffff0000,
0xffffffff
};
static void draw_byte_32(unsigned char *font, unsigned int *base, int rb)
{
int l, bits;
int fg = 0xFFFFFFFFUL;
int bg = 0x00000000UL;
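/* Note: -(bit) expands each font bit into an all-zeros or all-ones mask,
 * so every output pixel becomes fg or bg (bg is 0 here) without branches. */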
for (l = 0; l < 16; ++l)
{
bits = *font++;
base[0] = (-(bits >> 7) & fg) ^ bg;
base[1] = (-((bits >> 6) & 1) & fg) ^ bg;
base[2] = (-((bits >> 5) & 1) & fg) ^ bg;
base[3] = (-((bits >> 4) & 1) & fg) ^ bg;
base[4] = (-((bits >> 3) & 1) & fg) ^ bg;
base[5] = (-((bits >> 2) & 1) & fg) ^ bg;
base[6] = (-((bits >> 1) & 1) & fg) ^ bg;
base[7] = (-(bits & 1) & fg) ^ bg;
base = (unsigned int *) ((char *)base + rb);
}
}
static void draw_byte_16(unsigned char *font, unsigned int *base, int rb)
{
int l, bits;
int fg = 0xFFFFFFFFUL;
int bg = 0x00000000UL;
unsigned int *eb = (int *)expand_bits_16;
for (l = 0; l < 16; ++l)
{
bits = *font++;
base[0] = (eb[bits >> 6] & fg) ^ bg;
base[1] = (eb[(bits >> 4) & 3] & fg) ^ bg;
base[2] = (eb[(bits >> 2) & 3] & fg) ^ bg;
base[3] = (eb[bits & 3] & fg) ^ bg;
base = (unsigned int *) ((char *)base + rb);
}
}
static void draw_byte_8(unsigned char *font, unsigned int *base, int rb)
{
int l, bits;
int fg = 0x0F0F0F0FUL;
int bg = 0x00000000UL;
unsigned int *eb = (int *)expand_bits_8;
for (l = 0; l < 16; ++l)
{
bits = *font++;
base[0] = (eb[bits >> 4] & fg) ^ bg;
base[1] = (eb[bits & 0xf] & fg) ^ bg;
base = (unsigned int *) ((char *)base + rb);
}
}
static unsigned char vga_font[cmapsz] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x81, 0xa5, 0x81, 0x81, 0xbd,
0x99, 0x81, 0x81, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0xff,
0xdb, 0xff, 0xff, 0xc3, 0xe7, 0xff, 0xff, 0x7e, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x6c, 0xfe, 0xfe, 0xfe, 0xfe, 0x7c, 0x38, 0x10,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x7c, 0xfe,
0x7c, 0x38, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18,
0x3c, 0x3c, 0xe7, 0xe7, 0xe7, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x18, 0x3c, 0x7e, 0xff, 0xff, 0x7e, 0x18, 0x18, 0x3c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x3c,
0x3c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xe7, 0xc3, 0xc3, 0xe7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66, 0x42, 0x42, 0x66, 0x3c, 0x00,
0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc3, 0x99, 0xbd,
0xbd, 0x99, 0xc3, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x1e, 0x0e,
0x1a, 0x32, 0x78, 0xcc, 0xcc, 0xcc, 0xcc, 0x78, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x3c, 0x66, 0x66, 0x66, 0x66, 0x3c, 0x18, 0x7e, 0x18, 0x18,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x33, 0x3f, 0x30, 0x30, 0x30,
0x30, 0x70, 0xf0, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0x63,
0x7f, 0x63, 0x63, 0x63, 0x63, 0x67, 0xe7, 0xe6, 0xc0, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x18, 0x18, 0xdb, 0x3c, 0xe7, 0x3c, 0xdb, 0x18, 0x18,
0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfe, 0xf8,
0xf0, 0xe0, 0xc0, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x06, 0x0e,
0x1e, 0x3e, 0xfe, 0x3e, 0x1e, 0x0e, 0x06, 0x02, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x18, 0x3c, 0x7e, 0x18, 0x18, 0x18, 0x7e, 0x3c, 0x18, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66,
0x66, 0x00, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xdb,
0xdb, 0xdb, 0x7b, 0x1b, 0x1b, 0x1b, 0x1b, 0x1b, 0x00, 0x00, 0x00, 0x00,
0x00, 0x7c, 0xc6, 0x60, 0x38, 0x6c, 0xc6, 0xc6, 0x6c, 0x38, 0x0c, 0xc6,
0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xfe, 0xfe, 0xfe, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x3c,
0x7e, 0x18, 0x18, 0x18, 0x7e, 0x3c, 0x18, 0x7e, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x18, 0x3c, 0x7e, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x7e, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x18, 0x0c, 0xfe, 0x0c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x60, 0xfe, 0x60, 0x30, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0, 0xc0,
0xc0, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x24, 0x66, 0xff, 0x66, 0x24, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x38, 0x7c, 0x7c, 0xfe, 0xfe, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xfe, 0x7c, 0x7c,
0x38, 0x38, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x18, 0x3c, 0x3c, 0x3c, 0x18, 0x18, 0x18, 0x00, 0x18, 0x18,
0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x66, 0x66, 0x24, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6c,
0x6c, 0xfe, 0x6c, 0x6c, 0x6c, 0xfe, 0x6c, 0x6c, 0x00, 0x00, 0x00, 0x00,
0x18, 0x18, 0x7c, 0xc6, 0xc2, 0xc0, 0x7c, 0x06, 0x06, 0x86, 0xc6, 0x7c,
0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc2, 0xc6, 0x0c, 0x18,
0x30, 0x60, 0xc6, 0x86, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c,
0x6c, 0x38, 0x76, 0xdc, 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00,
0x00, 0x30, 0x30, 0x30, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x18, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x18, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x18,
0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x18, 0x30, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x3c, 0xff, 0x3c, 0x66, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x7e,
0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x30, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x02, 0x06, 0x0c, 0x18, 0x30, 0x60, 0xc0, 0x80, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xce, 0xde, 0xf6, 0xe6, 0xc6, 0xc6, 0x7c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x38, 0x78, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6,
0x06, 0x0c, 0x18, 0x30, 0x60, 0xc0, 0xc6, 0xfe, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x7c, 0xc6, 0x06, 0x06, 0x3c, 0x06, 0x06, 0x06, 0xc6, 0x7c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x1c, 0x3c, 0x6c, 0xcc, 0xfe,
0x0c, 0x0c, 0x0c, 0x1e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc0,
0xc0, 0xc0, 0xfc, 0x06, 0x06, 0x06, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x38, 0x60, 0xc0, 0xc0, 0xfc, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc6, 0x06, 0x06, 0x0c, 0x18,
0x30, 0x30, 0x30, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6,
0xc6, 0xc6, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0x7e, 0x06, 0x06, 0x06, 0x0c, 0x78,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x00,
0x00, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x18, 0x18, 0x00, 0x00, 0x00, 0x18, 0x18, 0x30, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x06, 0x0c, 0x18, 0x30, 0x60, 0x30, 0x18, 0x0c, 0x06,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x00, 0x00,
0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x60,
0x30, 0x18, 0x0c, 0x06, 0x0c, 0x18, 0x30, 0x60, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x7c, 0xc6, 0xc6, 0x0c, 0x18, 0x18, 0x18, 0x00, 0x18, 0x18,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xde, 0xde,
0xde, 0xdc, 0xc0, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38,
0x6c, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xfc, 0x66, 0x66, 0x66, 0x7c, 0x66, 0x66, 0x66, 0x66, 0xfc,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66, 0xc2, 0xc0, 0xc0, 0xc0,
0xc0, 0xc2, 0x66, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0x6c,
0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x6c, 0xf8, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xfe, 0x66, 0x62, 0x68, 0x78, 0x68, 0x60, 0x62, 0x66, 0xfe,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x66, 0x62, 0x68, 0x78, 0x68,
0x60, 0x60, 0x60, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66,
0xc2, 0xc0, 0xc0, 0xde, 0xc6, 0xc6, 0x66, 0x3a, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1e, 0x0c,
0x0c, 0x0c, 0x0c, 0x0c, 0xcc, 0xcc, 0xcc, 0x78, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xe6, 0x66, 0x66, 0x6c, 0x78, 0x78, 0x6c, 0x66, 0x66, 0xe6,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x60, 0x60, 0x60, 0x60, 0x60,
0x60, 0x62, 0x66, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xe7,
0xff, 0xff, 0xdb, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xc6, 0xe6, 0xf6, 0xfe, 0xde, 0xce, 0xc6, 0xc6, 0xc6, 0xc6,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, 0x66,
0x66, 0x66, 0x7c, 0x60, 0x60, 0x60, 0x60, 0xf0, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xd6, 0xde, 0x7c,
0x0c, 0x0e, 0x00, 0x00, 0x00, 0x00, 0xfc, 0x66, 0x66, 0x66, 0x7c, 0x6c,
0x66, 0x66, 0x66, 0xe6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6,
0xc6, 0x60, 0x38, 0x0c, 0x06, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xff, 0xdb, 0x99, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3,
0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0x66, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xdb, 0xdb, 0xff, 0x66, 0x66,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3, 0x66, 0x3c, 0x18, 0x18,
0x3c, 0x66, 0xc3, 0xc3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3,
0xc3, 0x66, 0x3c, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xff, 0xc3, 0x86, 0x0c, 0x18, 0x30, 0x60, 0xc1, 0xc3, 0xff,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
0xc0, 0xe0, 0x70, 0x38, 0x1c, 0x0e, 0x06, 0x02, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x3c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x3c,
0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c, 0xc6, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0x00,
0x30, 0x30, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x78, 0x0c, 0x7c,
0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe0, 0x60,
0x60, 0x78, 0x6c, 0x66, 0x66, 0x66, 0x66, 0x7c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc0, 0xc0, 0xc0, 0xc6, 0x7c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x0c, 0x0c, 0x3c, 0x6c, 0xcc,
0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x7c, 0xc6, 0xfe, 0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x38, 0x6c, 0x64, 0x60, 0xf0, 0x60, 0x60, 0x60, 0x60, 0xf0,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0xcc, 0xcc,
0xcc, 0xcc, 0xcc, 0x7c, 0x0c, 0xcc, 0x78, 0x00, 0x00, 0x00, 0xe0, 0x60,
0x60, 0x6c, 0x76, 0x66, 0x66, 0x66, 0x66, 0xe6, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x18, 0x18, 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x06, 0x00, 0x0e, 0x06, 0x06,
0x06, 0x06, 0x06, 0x06, 0x66, 0x66, 0x3c, 0x00, 0x00, 0x00, 0xe0, 0x60,
0x60, 0x66, 0x6c, 0x78, 0x78, 0x6c, 0x66, 0xe6, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe6, 0xff, 0xdb,
0xdb, 0xdb, 0xdb, 0xdb, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xdc, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xdc, 0x66, 0x66,
0x66, 0x66, 0x66, 0x7c, 0x60, 0x60, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x76, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x7c, 0x0c, 0x0c, 0x1e, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xdc, 0x76, 0x66, 0x60, 0x60, 0x60, 0xf0,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0x60,
0x38, 0x0c, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x30,
0x30, 0xfc, 0x30, 0x30, 0x30, 0x30, 0x36, 0x1c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x76,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3, 0xc3,
0xc3, 0x66, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xc3, 0xc3, 0xc3, 0xdb, 0xdb, 0xff, 0x66, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0x66, 0x3c, 0x18, 0x3c, 0x66, 0xc3,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0xc6, 0xc6,
0xc6, 0xc6, 0xc6, 0x7e, 0x06, 0x0c, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xfe, 0xcc, 0x18, 0x30, 0x60, 0xc6, 0xfe, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x0e, 0x18, 0x18, 0x18, 0x70, 0x18, 0x18, 0x18, 0x18, 0x0e,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x00, 0x18,
0x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x18,
0x18, 0x18, 0x0e, 0x18, 0x18, 0x18, 0x18, 0x70, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x76, 0xdc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c, 0xc6,
0xc6, 0xc6, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66,
0xc2, 0xc0, 0xc0, 0xc0, 0xc2, 0x66, 0x3c, 0x0c, 0x06, 0x7c, 0x00, 0x00,
0x00, 0x00, 0xcc, 0x00, 0x00, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x76,
0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x18, 0x30, 0x00, 0x7c, 0xc6, 0xfe,
0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c,
0x00, 0x78, 0x0c, 0x7c, 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xcc, 0x00, 0x00, 0x78, 0x0c, 0x7c, 0xcc, 0xcc, 0xcc, 0x76,
0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x30, 0x18, 0x00, 0x78, 0x0c, 0x7c,
0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x38,
0x00, 0x78, 0x0c, 0x7c, 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x3c, 0x66, 0x60, 0x60, 0x66, 0x3c, 0x0c, 0x06,
0x3c, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c, 0x00, 0x7c, 0xc6, 0xfe,
0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00,
0x00, 0x7c, 0xc6, 0xfe, 0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x60, 0x30, 0x18, 0x00, 0x7c, 0xc6, 0xfe, 0xc0, 0xc0, 0xc6, 0x7c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x00, 0x00, 0x38, 0x18, 0x18,
0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x3c, 0x66,
0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x60, 0x30, 0x18, 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c,
0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, 0x10, 0x38, 0x6c, 0xc6, 0xc6,
0xfe, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x38, 0x00,
0x38, 0x6c, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00,
0x18, 0x30, 0x60, 0x00, 0xfe, 0x66, 0x60, 0x7c, 0x60, 0x60, 0x66, 0xfe,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6e, 0x3b, 0x1b,
0x7e, 0xd8, 0xdc, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 0x6c,
0xcc, 0xcc, 0xfe, 0xcc, 0xcc, 0xcc, 0xcc, 0xce, 0x00, 0x00, 0x00, 0x00,
0x00, 0x10, 0x38, 0x6c, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, 0x00, 0x7c, 0xc6, 0xc6,
0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x30, 0x18,
0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x30, 0x78, 0xcc, 0x00, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x76,
0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x30, 0x18, 0x00, 0xcc, 0xcc, 0xcc,
0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00,
0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7e, 0x06, 0x0c, 0x78, 0x00,
0x00, 0xc6, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x7e,
0xc3, 0xc0, 0xc0, 0xc0, 0xc3, 0x7e, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00,
0x00, 0x38, 0x6c, 0x64, 0x60, 0xf0, 0x60, 0x60, 0x60, 0x60, 0xe6, 0xfc,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0x66, 0x3c, 0x18, 0xff, 0x18,
0xff, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, 0x66, 0x66,
0x7c, 0x62, 0x66, 0x6f, 0x66, 0x66, 0x66, 0xf3, 0x00, 0x00, 0x00, 0x00,
0x00, 0x0e, 0x1b, 0x18, 0x18, 0x18, 0x7e, 0x18, 0x18, 0x18, 0x18, 0x18,
0xd8, 0x70, 0x00, 0x00, 0x00, 0x18, 0x30, 0x60, 0x00, 0x78, 0x0c, 0x7c,
0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x18, 0x30,
0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x18, 0x30, 0x60, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x30, 0x60, 0x00, 0xcc, 0xcc, 0xcc,
0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0xdc,
0x00, 0xdc, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00,
0x76, 0xdc, 0x00, 0xc6, 0xe6, 0xf6, 0xfe, 0xde, 0xce, 0xc6, 0xc6, 0xc6,
0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x6c, 0x6c, 0x3e, 0x00, 0x7e, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x6c,
0x38, 0x00, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x30, 0x30, 0x00, 0x30, 0x30, 0x60, 0xc0, 0xc6, 0xc6, 0x7c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc0,
0xc0, 0xc0, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xfe, 0x06, 0x06, 0x06, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xc0, 0xc0, 0xc2, 0xc6, 0xcc, 0x18, 0x30, 0x60, 0xce, 0x9b, 0x06,
0x0c, 0x1f, 0x00, 0x00, 0x00, 0xc0, 0xc0, 0xc2, 0xc6, 0xcc, 0x18, 0x30,
0x66, 0xce, 0x96, 0x3e, 0x06, 0x06, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18,
0x00, 0x18, 0x18, 0x18, 0x3c, 0x3c, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x36, 0x6c, 0xd8, 0x6c, 0x36, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd8, 0x6c, 0x36,
0x6c, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x44, 0x11, 0x44,
0x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44,
0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa,
0x55, 0xaa, 0x55, 0xaa, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77,
0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8, 0x18, 0xf8,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x36, 0x36, 0x36, 0x36,
0x36, 0x36, 0x36, 0xf6, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x36, 0x36, 0x36, 0x36,
0x36, 0x36, 0x36, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0x18, 0xf8,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x36, 0x36, 0x36, 0x36,
0x36, 0xf6, 0x06, 0xf6, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
0x36, 0x36, 0x36, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x06, 0xf6,
0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
0x36, 0xf6, 0x06, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0xfe, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8, 0x18, 0xf8,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xf8, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x1f, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xff,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xff, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x1f, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0xff, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x1f, 0x18, 0x1f, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x37,
0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
0x36, 0x37, 0x30, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x30, 0x37, 0x36, 0x36, 0x36, 0x36,
0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0xf7, 0x00, 0xff,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xff, 0x00, 0xf7, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
0x36, 0x36, 0x36, 0x36, 0x36, 0x37, 0x30, 0x37, 0x36, 0x36, 0x36, 0x36,
0x36, 0x36, 0x36, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0xff,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x36, 0x36, 0x36, 0x36,
0x36, 0xf7, 0x00, 0xf7, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
0x18, 0x18, 0x18, 0x18, 0x18, 0xff, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0xff,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xff, 0x00, 0xff, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x36, 0x36, 0x36, 0x36,
0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x3f,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18,
0x18, 0x1f, 0x18, 0x1f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0x18, 0x1f, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f,
0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
0x36, 0x36, 0x36, 0xff, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
0x18, 0x18, 0x18, 0x18, 0x18, 0xff, 0x18, 0xff, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x1f, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0xf0, 0xf0, 0xf0,
0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0,
0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
0x0f, 0x0f, 0x0f, 0x0f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x76, 0xdc, 0xd8, 0xd8, 0xd8, 0xdc, 0x76, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x78, 0xcc, 0xcc, 0xcc, 0xd8, 0xcc, 0xc6, 0xc6, 0xc6, 0xcc,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc6, 0xc6, 0xc0, 0xc0, 0xc0,
0xc0, 0xc0, 0xc0, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xfe, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xfe, 0xc6, 0x60, 0x30, 0x18, 0x30, 0x60, 0xc6, 0xfe,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0xd8, 0xd8,
0xd8, 0xd8, 0xd8, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x66, 0x66, 0x66, 0x66, 0x66, 0x7c, 0x60, 0x60, 0xc0, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x76, 0xdc, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x18, 0x3c, 0x66, 0x66,
0x66, 0x3c, 0x18, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38,
0x6c, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0x6c, 0x38, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x38, 0x6c, 0xc6, 0xc6, 0xc6, 0x6c, 0x6c, 0x6c, 0x6c, 0xee,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1e, 0x30, 0x18, 0x0c, 0x3e, 0x66,
0x66, 0x66, 0x66, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x7e, 0xdb, 0xdb, 0xdb, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x03, 0x06, 0x7e, 0xdb, 0xdb, 0xf3, 0x7e, 0x60, 0xc0,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x30, 0x60, 0x60, 0x7c, 0x60,
0x60, 0x60, 0x30, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c,
0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0xfe, 0x00, 0x00, 0xfe, 0x00, 0x00, 0xfe, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x7e, 0x18,
0x18, 0x00, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30,
0x18, 0x0c, 0x06, 0x0c, 0x18, 0x30, 0x00, 0x7e, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x0c, 0x18, 0x30, 0x60, 0x30, 0x18, 0x0c, 0x00, 0x7e,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x1b, 0x1b, 0x1b, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0xd8, 0xd8, 0xd8, 0x70, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x7e, 0x00, 0x18, 0x18, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0xdc, 0x00,
0x76, 0xdc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x6c,
0x38, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x0c, 0x0c,
0x0c, 0x0c, 0x0c, 0xec, 0x6c, 0x6c, 0x3c, 0x1c, 0x00, 0x00, 0x00, 0x00,
0x00, 0xd8, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0xd8, 0x30, 0x60, 0xc8, 0xf8, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
};

File diff suppressed because it is too large.

@@ -1,940 +0,0 @@
/*
* arch/ppc/kernel/misc.S
*
*
*
* This file contains miscellaneous low-level functions.
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
*
* Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
* and Paul Mackerras.
* Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com)
* PPC64 updates by Dave Engebretsen (engebret@us.ibm.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
*/
#include <linux/config.h>
#include <linux/sys.h>
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
.text
/*
* Returns (address we were linked at) - (address we are running at)
* for use before the text and data are mapped to KERNELBASE.
*/
_GLOBAL(reloc_offset)
mflr r0
bl 1f
1: mflr r3
LOADADDR(r4,1b)
sub r3,r4,r3
mtlr r0
blr
_GLOBAL(get_msr)
mfmsr r3
blr
_GLOBAL(get_dar)
mfdar r3
blr
_GLOBAL(get_srr0)
mfsrr0 r3
blr
_GLOBAL(get_srr1)
mfsrr1 r3
blr
_GLOBAL(get_sp)
mr r3,r1
blr
#ifdef CONFIG_IRQSTACKS
_GLOBAL(call_do_softirq)
mflr r0
std r0,16(r1)
stdu r1,THREAD_SIZE-112(r3)
mr r1,r3
bl .__do_softirq
ld r1,0(r1)
ld r0,16(r1)
mtlr r0
blr
_GLOBAL(call___do_IRQ)
mflr r0
std r0,16(r1)
stdu r1,THREAD_SIZE-112(r5)
mr r1,r5
bl .__do_IRQ
ld r1,0(r1)
ld r0,16(r1)
mtlr r0
blr
#endif /* CONFIG_IRQSTACKS */
/*
* To be called by C code which needs to do some operations with MMU
* disabled. Note that interrupts have to be disabled by the caller
* prior to calling us. The code called _MUST_ be in the RMO of course
* and part of the linear mapping as we don't attempt to translate the
* stack pointer at all. The function is called with the stack switched
* to this CPU emergency stack
*
* prototype is void *call_with_mmu_off(void *func, void *data);
*
* the called function is expected to be of the form
*
* void *called(void *data);
*/
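/*
 * Editorial usage sketch (not in the original file):
 *
 *	local_irq_disable();
 *	ret = call_with_mmu_off(my_real_mode_fn, data);
 *
 * On ppc64 a C function pointer is the address of a function descriptor;
 * the "ld r3,0(r3)" below pulls the raw entry point out of it.
 */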
_GLOBAL(call_with_mmu_off)
mflr r0 /* get link, save it on stackframe */
std r0,16(r1)
mr r5,r1 /* save old stack ptr */
ld r1,PACAEMERGSP(r13) /* get emerg. stack */
subi r1,r1,STACK_FRAME_OVERHEAD
std r0,16(r1) /* save link on emerg. stack */
std r5,0(r1) /* save old stack ptr in backchain */
ld r3,0(r3) /* get to real function ptr (assume same TOC) */
bl 2f /* we need LR to return, continue at label 2 */
ld r0,16(r1) /* we return here from the call, get LR and */
ld r1,0(r1) /* .. old stack ptr */
mtspr SPRN_SRR0,r0 /* and get back to virtual mode with these */
mfmsr r4
ori r4,r4,MSR_IR|MSR_DR
mtspr SPRN_SRR1,r4
rfid
2: mtspr SPRN_SRR0,r3 /* coming from above, enter real mode */
mr r3,r4 /* get parameter */
mfmsr r0
ori r0,r0,MSR_IR|MSR_DR
xori r0,r0,MSR_IR|MSR_DR
mtspr SPRN_SRR1,r0
rfid
.section ".toc","aw"
PPC64_CACHES:
.tc ppc64_caches[TC],ppc64_caches
.section ".text"
/*
* Write any modified data cache blocks out to memory
* and invalidate the corresponding instruction cache blocks.
*
* flush_icache_range(unsigned long start, unsigned long stop)
*
* flush all bytes from start through stop-1 inclusive
*/
_KPROBE(__flush_icache_range)
/*
* Flush the data cache to memory
*
* Different systems have different cache line sizes
* and in some cases i-cache and d-cache line sizes differ from
* each other.
*/
ld r10,PPC64_CACHES@toc(r2)
lwz r7,DCACHEL1LINESIZE(r10)/* Get cache line size */
addi r5,r7,-1
andc r6,r3,r5 /* round low to line bdy */
subf r8,r6,r4 /* compute length */
add r8,r8,r5 /* ensure we get enough */
lwz r9,DCACHEL1LOGLINESIZE(r10) /* Get log-2 of cache line size */
srw. r8,r8,r9 /* compute line count */
beqlr /* nothing to do? */
mtctr r8
1: dcbst 0,r6
add r6,r6,r7
bdnz 1b
sync
/* Now invalidate the instruction cache */
lwz r7,ICACHEL1LINESIZE(r10) /* Get Icache line size */
addi r5,r7,-1
andc r6,r3,r5 /* round low to line bdy */
subf r8,r6,r4 /* compute length */
add r8,r8,r5
lwz r9,ICACHEL1LOGLINESIZE(r10) /* Get log-2 of Icache line size */
srw. r8,r8,r9 /* compute line count */
beqlr /* nothing to do? */
mtctr r8
2: icbi 0,r6
add r6,r6,r7
bdnz 2b
isync
blr
.text
/*
* Like above, but only do the D-cache.
*
* flush_dcache_range(unsigned long start, unsigned long stop)
*
* flush all bytes from start to stop-1 inclusive
*/
_GLOBAL(flush_dcache_range)
/*
* Flush the data cache to memory
*
* Different systems have different cache line sizes
*/
ld r10,PPC64_CACHES@toc(r2)
lwz r7,DCACHEL1LINESIZE(r10) /* Get dcache line size */
addi r5,r7,-1
andc r6,r3,r5 /* round low to line bdy */
subf r8,r6,r4 /* compute length */
add r8,r8,r5 /* ensure we get enough */
lwz r9,DCACHEL1LOGLINESIZE(r10) /* Get log-2 of dcache line size */
srw. r8,r8,r9 /* compute line count */
beqlr /* nothing to do? */
mtctr r8
0: dcbst 0,r6
add r6,r6,r7
bdnz 0b
sync
blr
/*
* Like above, but works on non-mapped physical addresses.
* Use only for non-LPAR setups ! It also assumes real mode
* is cacheable. Used for flushing out the DART before using
* it as uncacheable memory
*
* flush_dcache_phys_range(unsigned long start, unsigned long stop)
*
* flush all bytes from start to stop-1 inclusive
*/
_GLOBAL(flush_dcache_phys_range)
ld r10,PPC64_CACHES@toc(r2)
lwz r7,DCACHEL1LINESIZE(r10) /* Get dcache line size */
addi r5,r7,-1
andc r6,r3,r5 /* round low to line bdy */
subf r8,r6,r4 /* compute length */
add r8,r8,r5 /* ensure we get enough */
lwz r9,DCACHEL1LOGLINESIZE(r10) /* Get log-2 of dcache line size */
srw. r8,r8,r9 /* compute line count */
beqlr /* nothing to do? */
mfmsr r5 /* Disable MMU Data Relocation */
ori r0,r5,MSR_DR
xori r0,r0,MSR_DR
sync
mtmsr r0
sync
isync
mtctr r8
0: dcbst 0,r6
add r6,r6,r7
bdnz 0b
sync
isync
mtmsr r5 /* Re-enable MMU Data Relocation */
sync
isync
blr
_GLOBAL(flush_inval_dcache_range)
ld r10,PPC64_CACHES@toc(r2)
lwz r7,DCACHEL1LINESIZE(r10) /* Get dcache line size */
addi r5,r7,-1
andc r6,r3,r5 /* round low to line bdy */
subf r8,r6,r4 /* compute length */
add r8,r8,r5 /* ensure we get enough */
lwz r9,DCACHEL1LOGLINESIZE(r10)/* Get log-2 of dcache line size */
srw. r8,r8,r9 /* compute line count */
beqlr /* nothing to do? */
sync
isync
mtctr r8
0: dcbf 0,r6
add r6,r6,r7
bdnz 0b
sync
isync
blr
/*
* Flush a particular page from the data cache to RAM.
* Note: this is necessary because the instruction cache does *not*
* snoop from the data cache.
*
* void __flush_dcache_icache(void *page)
*/
_GLOBAL(__flush_dcache_icache)
/*
* Flush the data cache to memory
*
* Different systems have different cache line sizes
*/
/* Flush the dcache */
ld r7,PPC64_CACHES@toc(r2)
clrrdi r3,r3,PAGE_SHIFT /* Page align */
lwz r4,DCACHEL1LINESPERPAGE(r7) /* Get # dcache lines per page */
lwz r5,DCACHEL1LINESIZE(r7) /* Get dcache line size */
mr r6,r3
mtctr r4
0: dcbst 0,r6
add r6,r6,r5
bdnz 0b
sync
/* Now invalidate the icache */
lwz r4,ICACHEL1LINESPERPAGE(r7) /* Get # icache lines per page */
lwz r5,ICACHEL1LINESIZE(r7) /* Get icache line size */
mtctr r4
1: icbi 0,r3
add r3,r3,r5
bdnz 1b
isync
blr
/*
* I/O string operations
*
* insb(port, buf, len)
* outsb(port, buf, len)
* insw(port, buf, len)
* outsw(port, buf, len)
* insl(port, buf, len)
* outsl(port, buf, len)
* insw_ns(port, buf, len)
* outsw_ns(port, buf, len)
* insl_ns(port, buf, len)
* outsl_ns(port, buf, len)
*
* The *_ns versions don't do byte-swapping.
*/
_GLOBAL(_insb)
cmpwi 0,r5,0
mtctr r5
subi r4,r4,1
blelr-
00: lbz r5,0(r3)
eieio
stbu r5,1(r4)
bdnz 00b
twi 0,r5,0
isync
blr
_GLOBAL(_outsb)
cmpwi 0,r5,0
mtctr r5
subi r4,r4,1
blelr-
00: lbzu r5,1(r4)
stb r5,0(r3)
bdnz 00b
sync
blr
_GLOBAL(_insw)
cmpwi 0,r5,0
mtctr r5
subi r4,r4,2
blelr-
00: lhbrx r5,0,r3
eieio
sthu r5,2(r4)
bdnz 00b
twi 0,r5,0
isync
blr
_GLOBAL(_outsw)
cmpwi 0,r5,0
mtctr r5
subi r4,r4,2
blelr-
00: lhzu r5,2(r4)
sthbrx r5,0,r3
bdnz 00b
sync
blr
_GLOBAL(_insl)
cmpwi 0,r5,0
mtctr r5
subi r4,r4,4
blelr-
00: lwbrx r5,0,r3
eieio
stwu r5,4(r4)
bdnz 00b
twi 0,r5,0
isync
blr
_GLOBAL(_outsl)
cmpwi 0,r5,0
mtctr r5
subi r4,r4,4
blelr-
00: lwzu r5,4(r4)
stwbrx r5,0,r3
bdnz 00b
sync
blr
/* _GLOBAL(ide_insw) now in drivers/ide/ide-iops.c */
_GLOBAL(_insw_ns)
cmpwi 0,r5,0
mtctr r5
subi r4,r4,2
blelr-
00: lhz r5,0(r3)
eieio
sthu r5,2(r4)
bdnz 00b
twi 0,r5,0
isync
blr
/* _GLOBAL(ide_outsw) now in drivers/ide/ide-iops.c */
_GLOBAL(_outsw_ns)
cmpwi 0,r5,0
mtctr r5
subi r4,r4,2
blelr-
00: lhzu r5,2(r4)
sth r5,0(r3)
bdnz 00b
sync
blr
_GLOBAL(_insl_ns)
cmpwi 0,r5,0
mtctr r5
subi r4,r4,4
blelr-
00: lwz r5,0(r3)
eieio
stwu r5,4(r4)
bdnz 00b
twi 0,r5,0
isync
blr
_GLOBAL(_outsl_ns)
cmpwi 0,r5,0
mtctr r5
subi r4,r4,4
blelr-
00: lwzu r5,4(r4)
stw r5,0(r3)
bdnz 00b
sync
blr
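In C terms each of these is a tight load/store loop against a single port address; the plain variants byte-swap (lhbrx/sthbrx do the swap in one instruction), while the _ns variants do not. A minimal sketch of _insw under those assumptions (illustration only; the asm additionally issues eieio between accesses and a trailing twi/isync sequence to order and complete the I/O):

#include <stdint.h>

/* Sketch of _insw(): read count 16-bit values from one device
 * register and store them byte-swapped into buf. */
static void sketch_insw(const volatile uint16_t *port, uint16_t *buf,
			long count)
{
	while (count-- > 0) {
		uint16_t v = *port;	/* raw device register read */
		*buf++ = (uint16_t)((v >> 8) | (v << 8));	/* the swap lhbrx does for free */
	}
}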
/*
* identify_cpu: identify the CPU type and branch to its setup_cpu routine
* In: r3 = base of the cpu_specs array
* r4 = address of cur_cpu_spec
* r5 = relocation offset
*/
_GLOBAL(identify_cpu)
mfpvr r7
1:
lwz r8,CPU_SPEC_PVR_MASK(r3)
and r8,r8,r7
lwz r9,CPU_SPEC_PVR_VALUE(r3)
cmplw 0,r9,r8
beq 1f
addi r3,r3,CPU_SPEC_ENTRY_SIZE
b 1b
1:
add r0,r3,r5
std r0,0(r4)
ld r4,CPU_SPEC_SETUP(r3)
sub r4,r4,r5
ld r4,0(r4)
sub r4,r4,r5
mtctr r4
/* Calling convention for cpu setup is r3=offset, r4=cur_cpu_spec */
mr r4,r3
mr r3,r5
bctr
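The loop above scans the cpu_specs table until (PVR & pvr_mask) == pvr_value, stores the matching entry in cur_cpu_spec, and tail-calls its setup routine. Equivalent table-scan logic in C (a sketch; struct cpu_spec_sketch is a hypothetical stand-in for the real cpu_spec layout):

struct cpu_spec_sketch {		/* hypothetical mirror of cpu_spec */
	unsigned int	pvr_mask;
	unsigned int	pvr_value;
	/* ... setup function, feature bits, name ... */
};

static struct cpu_spec_sketch *
sketch_identify_cpu(struct cpu_spec_sketch *specs, unsigned int pvr)
{
	/* Like the asm, this assumes the table ends with a catch-all
	 * entry, so the scan always terminates. */
	while ((pvr & specs->pvr_mask) != specs->pvr_value)
		specs++;
	return specs;
}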
/*
* do_cpu_ftr_fixups - goes through the list of CPU feature fixups
* and writes nops over sections of code that do not apply to this CPU.
* r3 = data offset (not changed)
*/
_GLOBAL(do_cpu_ftr_fixups)
/* Get CPU 0 features */
LOADADDR(r6,cur_cpu_spec)
sub r6,r6,r3
ld r4,0(r6)
sub r4,r4,r3
ld r4,CPU_SPEC_FEATURES(r4)
/* Get the fixup table */
LOADADDR(r6,__start___ftr_fixup)
sub r6,r6,r3
LOADADDR(r7,__stop___ftr_fixup)
sub r7,r7,r3
/* Do the fixup */
1: cmpld r6,r7
bgelr
addi r6,r6,32
ld r8,-32(r6) /* mask */
and r8,r8,r4
ld r9,-24(r6) /* value */
cmpld r8,r9
beq 1b
ld r8,-16(r6) /* section begin */
ld r9,-8(r6) /* section end */
subf. r9,r8,r9
beq 1b
/* write nops over the section of code */
/* todo: if large section, add a branch at the start of it */
srwi r9,r9,2
mtctr r9
sub r8,r8,r3
lis r0,0x60000000@h /* nop */
3: stw r0,0(r8)
andi. r10,r4,CPU_FTR_SPLIT_ID_CACHE@l
beq 2f
dcbst 0,r8 /* suboptimal, but simpler */
sync
icbi 0,r8
2: addi r8,r8,4
bdnz 3b
sync /* additional sync needed on g4 */
isync
b 1b
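Each fixup record is 32 bytes: a feature mask/value pair followed by the begin/end addresses of the code section it guards; when the running CPU's feature word doesn't match, the section is overwritten with nops. A hedged C sketch of the same walk (struct ftr_fixup_sketch is a hypothetical rendering of the record layout):

struct ftr_fixup_sketch {	/* hypothetical: one 32-byte fixup record */
	unsigned long	mask;
	unsigned long	value;
	unsigned int	*begin;		/* first instruction to patch */
	unsigned int	*end;		/* one past the last instruction */
};

static void sketch_do_ftr_fixups(struct ftr_fixup_sketch *f,
				 struct ftr_fixup_sketch *stop,
				 unsigned long cpu_features)
{
	unsigned int *p;

	for (; f < stop; f++) {
		if ((cpu_features & f->mask) == f->value)
			continue;		/* feature present: leave code alone */
		for (p = f->begin; p < f->end; p++)
			*p = 0x60000000;	/* ppc "nop" opcode */
		/* the asm also writes each patched line back and
		 * invalidates the i-cache (dcbst; sync; icbi) */
	}
}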
#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE)
/*
* Do an IO access in real mode
*/
_GLOBAL(real_readb)
mfmsr r7
ori r0,r7,MSR_DR
xori r0,r0,MSR_DR
sync
mtmsrd r0
sync
isync
mfspr r6,SPRN_HID4
rldicl r5,r6,32,0
ori r5,r5,0x100
rldicl r5,r5,32,0
sync
mtspr SPRN_HID4,r5
isync
slbia
isync
lbz r3,0(r3)
sync
mtspr SPRN_HID4,r6
isync
slbia
isync
mtmsrd r7
sync
isync
blr
/*
* Do an IO access in real mode
*/
_GLOBAL(real_writeb)
mfmsr r7
ori r0,r7,MSR_DR
xori r0,r0,MSR_DR
sync
mtmsrd r0
sync
isync
mfspr r6,SPRN_HID4
rldicl r5,r6,32,0
ori r5,r5,0x100
rldicl r5,r5,32,0
sync
mtspr SPRN_HID4,r5
isync
slbia
isync
stb r3,0(r4)
sync
mtspr SPRN_HID4,r6
isync
slbia
isync
mtmsrd r7
sync
isync
blr
#endif /* defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE) */
/*
* SCOM access functions for 970 (FX only for now)
*
* unsigned long scom970_read(unsigned int address);
* void scom970_write(unsigned int address, unsigned long value);
*
* The address passed in is the 24-bit register address. This code
* is 970 specific and will not check the status bits, so you should
* know what you are doing.
*/
_GLOBAL(scom970_read)
/* interrupts off */
mfmsr r4
ori r0,r4,MSR_EE
xori r0,r0,MSR_EE
mtmsrd r0,1
/* rotate the 24-bit SCOM address 8 bits left and mask out its low 8 bits
* (including parity). On current CPUs they must be 0'd,
* and finally OR in the RW bit
*/
rlwinm r3,r3,8,0,15
ori r3,r3,0x8000
/* do the actual scom read */
sync
mtspr SPRN_SCOMC,r3
isync
mfspr r3,SPRN_SCOMD
isync
mfspr r0,SPRN_SCOMC
isync
/* XXX: fixup result on some buggy 970's (ouch ! we lost a bit, bah
* that's the best we can do). Not implemented yet as we don't use
* the scom on any of the bogus CPUs yet, but may have to be done
* ultimately
*/
/* restore interrupts */
mtmsrd r4,1
blr
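The rlwinm/ori pair builds the SCOMC command word: the 24-bit address is shifted left 8 bits, its low (parity) byte is cleared, and for reads the RW bit is set. In C, roughly (sketch only):

/* Sketch: encode a 24-bit SCOM register address as a SCOMC command. */
static inline unsigned int scom_cmd_sketch(unsigned int addr, int is_read)
{
	unsigned int cmd = (addr << 8) & 0xffff0000;	/* rlwinm r3,r3,8,0,15 */

	if (is_read)
		cmd |= 0x8000;				/* ori r3,r3,0x8000 */
	return cmd;
}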
_GLOBAL(scom970_write)
/* interrupts off */
mfmsr r5
ori r0,r5,MSR_EE
xori r0,r0,MSR_EE
mtmsrd r0,1
/* rotate the 24-bit SCOM address 8 bits left and mask out its low 8 bits
* (including parity). On current CPUs they must be 0'd.
*/
rlwinm r3,r3,8,0,15
sync
mtspr SPRN_SCOMD,r4 /* write data */
isync
mtspr SPRN_SCOMC,r3 /* write command */
isync
mfspr 3,SPRN_SCOMC
isync
/* restore interrupts */
mtmsrd r5,1
blr
/*
* Create a kernel thread
* kernel_thread(fn, arg, flags)
*/
_GLOBAL(kernel_thread)
std r29,-24(r1)
std r30,-16(r1)
stdu r1,-STACK_FRAME_OVERHEAD(r1)
mr r29,r3
mr r30,r4
ori r3,r5,CLONE_VM /* flags */
oris r3,r3,(CLONE_UNTRACED>>16)
li r4,0 /* new sp (unused) */
li r0,__NR_clone
sc
cmpdi 0,r3,0 /* parent or child? */
bne 1f /* return if parent */
li r0,0
stdu r0,-STACK_FRAME_OVERHEAD(r1)
ld r2,8(r29)
ld r29,0(r29)
mtlr r29 /* fn addr in lr */
mr r3,r30 /* load arg and call fn */
blrl
li r0,__NR_exit /* exit after child exits */
li r3,0
sc
1: addi r1,r1,STACK_FRAME_OVERHEAD
ld r29,-24(r1)
ld r30,-16(r1)
blr
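The sequence above is a hand-rolled clone(2): the parent returns the child's pid, while the child loads fn's function descriptor (an ELFv1 ABI detail: entry point plus TOC), calls fn(arg), and exits with its return value. The C shape, as a sketch (raw_clone()/raw_exit() are hypothetical wrappers for the bare syscalls; the real routine stays in assembly so the child never returns through the parent's C frame):

static long sketch_kernel_thread(int (*fn)(void *), void *arg,
				 unsigned long flags)
{
	long pid = raw_clone(flags | CLONE_VM | CLONE_UNTRACED,
			     0 /* new sp unused: parent and child share the VM */);

	if (pid != 0)
		return pid;	/* parent: child's pid, or -errno */

	raw_exit(fn(arg));	/* child: run fn, then exit */
	return 0;		/* not reached */
}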
/*
* disable_kernel_fp()
* Disable the FPU.
*/
_GLOBAL(disable_kernel_fp)
mfmsr r3
rldicl r0,r3,(63-MSR_FP_LG),1
rldicl r3,r0,(MSR_FP_LG+1),0
mtmsrd r3 /* disable use of fpu now */
isync
blr
#ifdef CONFIG_ALTIVEC
#if 0 /* this has no callers for now */
/*
* disable_kernel_altivec()
* Disable the VMX.
*/
_GLOBAL(disable_kernel_altivec)
mfmsr r3
rldicl r0,r3,(63-MSR_VEC_LG),1
rldicl r3,r0,(MSR_VEC_LG+1),0
mtmsrd r3 /* disable use of VMX now */
isync
blr
#endif /* 0 */
/*
* giveup_altivec(tsk)
* Disable VMX for the task given as the argument,
* and save the vector registers in its thread_struct.
* Enables the VMX for use in the kernel on return.
*/
_GLOBAL(giveup_altivec)
mfmsr r5
oris r5,r5,MSR_VEC@h
mtmsrd r5 /* enable use of VMX now */
isync
cmpdi 0,r3,0
beqlr- /* if no previous owner, done */
addi r3,r3,THREAD /* want THREAD of task */
ld r5,PT_REGS(r3)
cmpdi 0,r5,0
SAVE_32VRS(0,r4,r3)
mfvscr vr0
li r4,THREAD_VSCR
stvx vr0,r4,r3
beq 1f
ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
lis r3,MSR_VEC@h
andc r4,r4,r3 /* disable VMX for previous task */
std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#ifndef CONFIG_SMP
li r5,0
ld r4,last_task_used_altivec@got(r2)
std r5,0(r4)
#endif /* CONFIG_SMP */
blr
#endif /* CONFIG_ALTIVEC */
_GLOBAL(__setup_cpu_power3)
blr
_GLOBAL(execve)
li r0,__NR_execve
sc
bnslr
neg r3,r3
blr
/* kexec_wait(phys_cpu)
*
* wait for the flag to change, indicating this kernel is going away but
* the slave code for the next one is at addresses 0 to 100.
*
* This is used by all slaves.
*
* Physical (hardware) cpu id should be in r3.
*/
_GLOBAL(kexec_wait)
bl 1f
1: mflr r5
addi r5,r5,kexec_flag-1b
99: HMT_LOW
#ifdef CONFIG_KEXEC /* use no memory without kexec */
lwz r4,0(r5)
cmpwi 0,r4,0
bnea 0x60
#endif
b 99b
/* this can be in text because we won't change it until we are
* running in real mode anyway
*/
kexec_flag:
.long 0
#ifdef CONFIG_KEXEC
/* kexec_smp_wait(void)
*
* call with interrupts off
* note: this is a terminal routine, it does not save lr
*
* get phys id from paca
* set paca id to -1 to say we got here
* switch to real mode
* join other cpus in kexec_wait(phys_id)
*/
_GLOBAL(kexec_smp_wait)
lhz r3,PACAHWCPUID(r13)
li r4,-1
sth r4,PACAHWCPUID(r13) /* let others know we left */
bl real_mode
b .kexec_wait
/*
* switch to real mode (turn mmu off)
* we use the early kernel trick that the hardware ignores bits
* 0 and 1 (big endian) of the effective address in real mode
*
* don't overwrite r3 here, it is live for kexec_wait above.
*/
real_mode: /* assume normal blr return */
1: li r9,MSR_RI
li r10,MSR_DR|MSR_IR
mflr r11 /* return address to SRR0 */
mfmsr r12
andc r9,r12,r9
andc r10,r12,r10
mtmsrd r9,1
mtspr SPRN_SRR1,r10
mtspr SPRN_SRR0,r11
rfid
/*
* kexec_sequence(newstack, start, image, control, clear_all())
*
* does the grungy work with stack switching and real mode switches
* also does simple calls to other code
*/
_GLOBAL(kexec_sequence)
mflr r0
std r0,16(r1)
/* switch stacks to newstack -- &kexec_stack.stack */
stdu r1,THREAD_SIZE-112(r3)
mr r1,r3
li r0,0
std r0,16(r1)
/* save regs for local vars on new stack.
* yes, we won't go back, but ...
*/
std r31,-8(r1)
std r30,-16(r1)
std r29,-24(r1)
std r28,-32(r1)
std r27,-40(r1)
std r26,-48(r1)
std r25,-56(r1)
stdu r1,-112-64(r1)
/* save args into preserved regs */
mr r31,r3 /* newstack (both) */
mr r30,r4 /* start (real) */
mr r29,r5 /* image (virt) */
mr r28,r6 /* control, unused */
mr r27,r7 /* clear_all() fn desc */
mr r26,r8 /* spare */
lhz r25,PACAHWCPUID(r13) /* get our phys cpu from paca */
/* disable interrupts, we are overwriting kernel data next */
mfmsr r3
rlwinm r3,r3,0,17,15
mtmsrd r3,1
/* copy dest pages, flush whole dest image */
mr r3,r29
bl .kexec_copy_flush /* (image) */
/* turn off mmu */
bl real_mode
/* clear out hardware hash page table and tlb */
ld r5,0(r27) /* deref function descriptor */
mtctr r5
bctrl /* ppc_md.hash_clear_all(void); */
/*
* kexec image calling is:
* the first 0x100 bytes of the entry point are copied to 0
*
* all slaves branch to slave = 0x60 (absolute)
* slave(phys_cpu_id);
*
* master goes to start = entry point
* start(phys_cpu_id, start, 0);
*
*
* a wrapper is needed to call existing kernels, here is an approximate
* description of one method:
*
* v2: (2.6.10)
* start will be near the boot_block (maybe 0x100 bytes before it?)
* it will have a 0x60, which will b to boot_block, where it will wait
* and 0 will store phys into struct boot-block and load r3 from there,
* copy kernel 0-0x100 and tell slaves to back down to 0x60 again
*
* v1: (2.6.9)
* boot block will have all cpus scanning device tree to see if they
* are the boot cpu ?????
* other device tree differences (prop sizes, va vs pa, etc)...
*/
/* copy 0x100 bytes starting at start to 0 */
li r3,0
mr r4,r30
li r5,0x100
li r6,0
bl .copy_and_flush /* (dest, src, copy limit, start offset) */
1: /* assume normal blr return */
/* release other cpus to the new kernel secondary start at 0x60 */
mflr r5
li r6,1
stw r6,kexec_flag-1b(5)
mr r3,r25 # my phys cpu
mr r4,r30 # start, aka phys mem offset
mtlr 4
li r5,0
blr /* image->start(physid, image->start, 0); */
#endif /* CONFIG_KEXEC */

View File

@ -1,76 +0,0 @@
/*
* c 2001 PPC 64 Team, IBM Corp
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/config.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/console.h>
#include <net/checksum.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/system.h>
#include <asm/hw_irq.h>
#include <asm/abs_addr.h>
#include <asm/cacheflush.h>
EXPORT_SYMBOL(strcpy);
EXPORT_SYMBOL(strncpy);
EXPORT_SYMBOL(strcat);
EXPORT_SYMBOL(strncat);
EXPORT_SYMBOL(strchr);
EXPORT_SYMBOL(strrchr);
EXPORT_SYMBOL(strpbrk);
EXPORT_SYMBOL(strstr);
EXPORT_SYMBOL(strlen);
EXPORT_SYMBOL(strnlen);
EXPORT_SYMBOL(strcmp);
EXPORT_SYMBOL(strncmp);
EXPORT_SYMBOL(csum_partial);
EXPORT_SYMBOL(csum_partial_copy_generic);
EXPORT_SYMBOL(ip_fast_csum);
EXPORT_SYMBOL(csum_tcpudp_magic);
EXPORT_SYMBOL(__copy_tofrom_user);
EXPORT_SYMBOL(__clear_user);
EXPORT_SYMBOL(__strncpy_from_user);
EXPORT_SYMBOL(__strnlen_user);
EXPORT_SYMBOL(reloc_offset);
EXPORT_SYMBOL(_insb);
EXPORT_SYMBOL(_outsb);
EXPORT_SYMBOL(_insw);
EXPORT_SYMBOL(_outsw);
EXPORT_SYMBOL(_insl);
EXPORT_SYMBOL(_outsl);
EXPORT_SYMBOL(_insw_ns);
EXPORT_SYMBOL(_outsw_ns);
EXPORT_SYMBOL(_insl_ns);
EXPORT_SYMBOL(_outsl_ns);
EXPORT_SYMBOL(kernel_thread);
EXPORT_SYMBOL(giveup_fpu);
#ifdef CONFIG_ALTIVEC
EXPORT_SYMBOL(giveup_altivec);
#endif
EXPORT_SYMBOL(__flush_icache_range);
EXPORT_SYMBOL(flush_dcache_range);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(memscan);
EXPORT_SYMBOL(memcmp);
EXPORT_SYMBOL(memchr);
EXPORT_SYMBOL(timer_interrupt);
EXPORT_SYMBOL(console_drivers);

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -1,358 +0,0 @@
/*
* Real Time Clock interface for PPC64.
*
* Based on rtc.c by Paul Gortmaker
*
* This driver allows use of the real time clock
* from user space. It exports the /dev/rtc
* interface supporting various ioctl() and also the
* /proc/driver/rtc pseudo-file for status information.
*
* The interface supports neither RTC interrupts nor an alarm.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* 1.0 Mike Corrigan: IBM iSeries rtc support
* 1.1 Dave Engebretsen: IBM pSeries rtc support
*/
#define RTC_VERSION "1.1"
#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/miscdevice.h>
#include <linux/ioport.h>
#include <linux/fcntl.h>
#include <linux/mc146818rtc.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/proc_fs.h>
#include <linux/spinlock.h>
#include <linux/bcd.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/time.h>
#include <asm/rtas.h>
#include <asm/machdep.h>
/*
* We sponge a minor off of the misc major. No need slurping
* up another valuable major dev number for this. If you add
* an ioctl, make sure you don't conflict with SPARC's RTC
* ioctls.
*/
static ssize_t rtc_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos);
static int rtc_ioctl(struct inode *inode, struct file *file,
unsigned int cmd, unsigned long arg);
static int rtc_read_proc(char *page, char **start, off_t off,
int count, int *eof, void *data);
/*
* If this driver ever becomes modularised, it will be really nice
* to make the epoch retain its value across module reload...
*/
static unsigned long epoch = 1900; /* year corresponding to 0x00 */
static const unsigned char days_in_mo[] =
{0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};
/*
* Now all the various file operations that we export.
*/
static ssize_t rtc_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
return -EIO;
}
static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
unsigned long arg)
{
struct rtc_time wtime;
switch (cmd) {
case RTC_RD_TIME: /* Read the time/date from RTC */
{
memset(&wtime, 0, sizeof(struct rtc_time));
ppc_md.get_rtc_time(&wtime);
break;
}
case RTC_SET_TIME: /* Set the RTC */
{
struct rtc_time rtc_tm;
unsigned char mon, day, hrs, min, sec, leap_yr;
unsigned int yrs;
if (!capable(CAP_SYS_TIME))
return -EACCES;
if (copy_from_user(&rtc_tm, (struct rtc_time __user *)arg,
sizeof(struct rtc_time)))
return -EFAULT;
yrs = rtc_tm.tm_year;
mon = rtc_tm.tm_mon + 1; /* tm_mon starts at zero */
day = rtc_tm.tm_mday;
hrs = rtc_tm.tm_hour;
min = rtc_tm.tm_min;
sec = rtc_tm.tm_sec;
if (yrs < 70)
return -EINVAL;
leap_yr = ((!(yrs % 4) && (yrs % 100)) || !(yrs % 400));
if ((mon > 12) || (day == 0))
return -EINVAL;
if (day > (days_in_mo[mon] + ((mon == 2) && leap_yr)))
return -EINVAL;
if ((hrs >= 24) || (min >= 60) || (sec >= 60))
return -EINVAL;
if ( yrs > 169 )
return -EINVAL;
ppc_md.set_rtc_time(&rtc_tm);
return 0;
}
case RTC_EPOCH_READ: /* Read the epoch. */
{
return put_user (epoch, (unsigned long __user *)arg);
}
case RTC_EPOCH_SET: /* Set the epoch. */
{
/*
* There were no RTC clocks before 1900.
*/
if (arg < 1900)
return -EINVAL;
if (!capable(CAP_SYS_TIME))
return -EACCES;
epoch = arg;
return 0;
}
default:
return -EINVAL;
}
return copy_to_user((void __user *)arg, &wtime, sizeof wtime) ? -EFAULT : 0;
}
static int rtc_open(struct inode *inode, struct file *file)
{
nonseekable_open(inode, file);
return 0;
}
static int rtc_release(struct inode *inode, struct file *file)
{
return 0;
}
/*
* The various file operations we support.
*/
static struct file_operations rtc_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.read = rtc_read,
.ioctl = rtc_ioctl,
.open = rtc_open,
.release = rtc_release,
};
static struct miscdevice rtc_dev = {
.minor = RTC_MINOR,
.name = "rtc",
.fops = &rtc_fops
};
static int __init rtc_init(void)
{
int retval;
retval = misc_register(&rtc_dev);
if(retval < 0)
return retval;
#ifdef CONFIG_PROC_FS
if (create_proc_read_entry("driver/rtc", 0, NULL, rtc_read_proc, NULL)
== NULL) {
misc_deregister(&rtc_dev);
return -ENOMEM;
}
#endif
printk(KERN_INFO "i/pSeries Real Time Clock Driver v" RTC_VERSION "\n");
return 0;
}
static void __exit rtc_exit (void)
{
remove_proc_entry ("driver/rtc", NULL);
misc_deregister(&rtc_dev);
}
module_init(rtc_init);
module_exit(rtc_exit);
/*
* Info exported via "/proc/driver/rtc".
*/
static int rtc_proc_output (char *buf)
{
char *p;
struct rtc_time tm;
p = buf;
ppc_md.get_rtc_time(&tm);
/*
* There is no way to tell if the luser has the RTC set for local
* time or for Universal Standard Time (GMT). Probably local though.
*/
p += sprintf(p,
"rtc_time\t: %02d:%02d:%02d\n"
"rtc_date\t: %04d-%02d-%02d\n"
"rtc_epoch\t: %04lu\n",
tm.tm_hour, tm.tm_min, tm.tm_sec,
tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, epoch);
p += sprintf(p,
"DST_enable\t: no\n"
"BCD\t\t: yes\n"
"24hr\t\t: yes\n" );
return p - buf;
}
static int rtc_read_proc(char *page, char **start, off_t off,
int count, int *eof, void *data)
{
int len = rtc_proc_output (page);
if (len <= off+count) *eof = 1;
*start = page + off;
len -= off;
if (len>count) len = count;
if (len<0) len = 0;
return len;
}
#ifdef CONFIG_PPC_RTAS
#define MAX_RTC_WAIT 5000 /* 5 sec */
#define RTAS_CLOCK_BUSY (-2)
unsigned long rtas_get_boot_time(void)
{
int ret[8];
int error, wait_time;
unsigned long max_wait_tb;
max_wait_tb = __get_tb() + tb_ticks_per_usec * 1000 * MAX_RTC_WAIT;
do {
error = rtas_call(rtas_token("get-time-of-day"), 0, 8, ret);
if (error == RTAS_CLOCK_BUSY || rtas_is_extended_busy(error)) {
wait_time = rtas_extended_busy_delay_time(error);
/* This is boot time so we spin. */
udelay(wait_time*1000);
error = RTAS_CLOCK_BUSY;
}
} while (error == RTAS_CLOCK_BUSY && (__get_tb() < max_wait_tb));
if (error != 0 && printk_ratelimit()) {
printk(KERN_WARNING "error: reading the clock failed (%d)\n",
error);
return 0;
}
return mktime(ret[0], ret[1], ret[2], ret[3], ret[4], ret[5]);
}
/* NOTE: get_rtc_time will get an error if executed in interrupt context
* and if a delay is needed to read the clock. In this case we just
* silently return without updating rtc_tm.
*/
void rtas_get_rtc_time(struct rtc_time *rtc_tm)
{
int ret[8];
int error, wait_time;
unsigned long max_wait_tb;
max_wait_tb = __get_tb() + tb_ticks_per_usec * 1000 * MAX_RTC_WAIT;
do {
error = rtas_call(rtas_token("get-time-of-day"), 0, 8, ret);
if (error == RTAS_CLOCK_BUSY || rtas_is_extended_busy(error)) {
if (in_interrupt() && printk_ratelimit()) {
printk(KERN_WARNING "error: reading clock would delay interrupt\n");
return; /* delay not allowed */
}
wait_time = rtas_extended_busy_delay_time(error);
msleep_interruptible(wait_time);
error = RTAS_CLOCK_BUSY;
}
} while (error == RTAS_CLOCK_BUSY && (__get_tb() < max_wait_tb));
if (error != 0 && printk_ratelimit()) {
printk(KERN_WARNING "error: reading the clock failed (%d)\n",
error);
return;
}
rtc_tm->tm_sec = ret[5];
rtc_tm->tm_min = ret[4];
rtc_tm->tm_hour = ret[3];
rtc_tm->tm_mday = ret[2];
rtc_tm->tm_mon = ret[1] - 1;
rtc_tm->tm_year = ret[0] - 1900;
}
int rtas_set_rtc_time(struct rtc_time *tm)
{
int error, wait_time;
unsigned long max_wait_tb;
max_wait_tb = __get_tb() + tb_ticks_per_usec * 1000 * MAX_RTC_WAIT;
do {
error = rtas_call(rtas_token("set-time-of-day"), 7, 1, NULL,
tm->tm_year + 1900, tm->tm_mon + 1,
tm->tm_mday, tm->tm_hour, tm->tm_min,
tm->tm_sec, 0);
if (error == RTAS_CLOCK_BUSY || rtas_is_extended_busy(error)) {
if (in_interrupt())
return 1; /* probably decrementer */
wait_time = rtas_extended_busy_delay_time(error);
msleep_interruptible(wait_time);
error = RTAS_CLOCK_BUSY;
}
} while (error == RTAS_CLOCK_BUSY && (__get_tb() < max_wait_tb));
if (error != 0 && printk_ratelimit())
printk(KERN_WARNING "error: setting the clock failed (%d)\n",
error);
return 0;
}
#endif

View File

@ -1,136 +0,0 @@
/*
*
*
* PowerPC-specific semaphore code.
*
* Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* April 2001 - Reworked by Paul Mackerras <paulus@samba.org>
* to eliminate the SMP races in the old version between the updates
* of `count' and `waking'. Now we use negative `count' values to
* indicate that some process(es) are waiting for the semaphore.
*/
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/module.h>
#include <asm/atomic.h>
#include <asm/semaphore.h>
#include <asm/errno.h>
/*
* Atomically update sem->count.
* This does the equivalent of the following:
*
* old_count = sem->count;
* tmp = MAX(old_count, 0) + incr;
* sem->count = tmp;
* return old_count;
*/
static inline int __sem_update_count(struct semaphore *sem, int incr)
{
int old_count, tmp;
__asm__ __volatile__("\n"
"1: lwarx %0,0,%3\n"
" srawi %1,%0,31\n"
" andc %1,%0,%1\n"
" add %1,%1,%4\n"
" stwcx. %1,0,%3\n"
" bne 1b"
: "=&r" (old_count), "=&r" (tmp), "=m" (sem->count)
: "r" (&sem->count), "r" (incr), "m" (sem->count)
: "cc");
return old_count;
}
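On ppc64 the lwarx/stwcx. pair implements a load-reserve/store-conditional retry loop; portably, the function above is the compare-and-swap loop below (a sketch using the GCC __sync builtin, not the kernel code):

static inline int sem_update_count_sketch(int *count, int incr)
{
	int old_count, tmp;

	do {
		old_count = *count;
		tmp = (old_count > 0 ? old_count : 0) + incr;	/* MAX(old,0)+incr */
	} while (!__sync_bool_compare_and_swap(count, old_count, tmp));

	return old_count;
}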
void __up(struct semaphore *sem)
{
/*
* Note that we incremented count in up() before we came here,
* but that was ineffective since the result was <= 0, and
* any negative value of count is equivalent to 0.
* This ends up setting count to 1, unless count is now > 0
* (i.e. because some other cpu has called up() in the meantime),
* in which case we just increment count.
*/
__sem_update_count(sem, 1);
wake_up(&sem->wait);
}
EXPORT_SYMBOL(__up);
/*
* Note that when we come in to __down or __down_interruptible,
* we have already decremented count, but that decrement was
* ineffective since the result was < 0, and any negative value
* of count is equivalent to 0.
* Thus it is only when we decrement count from some value > 0
* that we have actually got the semaphore.
*/
void __sched __down(struct semaphore *sem)
{
struct task_struct *tsk = current;
DECLARE_WAITQUEUE(wait, tsk);
__set_task_state(tsk, TASK_UNINTERRUPTIBLE);
add_wait_queue_exclusive(&sem->wait, &wait);
/*
* Try to get the semaphore. If the count is > 0, then we've
* got the semaphore; we decrement count and exit the loop.
* If the count is 0 or negative, we set it to -1, indicating
* that we are asleep, and then sleep.
*/
while (__sem_update_count(sem, -1) <= 0) {
schedule();
set_task_state(tsk, TASK_UNINTERRUPTIBLE);
}
remove_wait_queue(&sem->wait, &wait);
__set_task_state(tsk, TASK_RUNNING);
/*
* If there are any more sleepers, wake one of them up so
* that it can either get the semaphore, or set count to -1
* indicating that there are still processes sleeping.
*/
wake_up(&sem->wait);
}
EXPORT_SYMBOL(__down);
int __sched __down_interruptible(struct semaphore * sem)
{
int retval = 0;
struct task_struct *tsk = current;
DECLARE_WAITQUEUE(wait, tsk);
__set_task_state(tsk, TASK_INTERRUPTIBLE);
add_wait_queue_exclusive(&sem->wait, &wait);
while (__sem_update_count(sem, -1) <= 0) {
if (signal_pending(current)) {
/*
* A signal is pending - give up trying.
* Set sem->count to 0 if it is negative,
* since we are no longer sleeping.
*/
__sem_update_count(sem, 0);
retval = -EINTR;
break;
}
schedule();
set_task_state(tsk, TASK_INTERRUPTIBLE);
}
remove_wait_queue(&sem->wait, &wait);
__set_task_state(tsk, TASK_RUNNING);
wake_up(&sem->wait);
return retval;
}
EXPORT_SYMBOL(__down_interruptible);

View File

@ -1,625 +0,0 @@
/*
* linux/arch/ppc64/kernel/vdso.c
*
* Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp.
* <benh@kernel.crashing.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/config.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/security.h>
#include <linux/bootmem.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/systemcfg.h>
#include <asm/vdso.h>
#undef DEBUG
#ifdef DEBUG
#define DBG(fmt...) printk(fmt)
#else
#define DBG(fmt...)
#endif
/*
* The vDSOs themselves are here
*/
extern char vdso64_start, vdso64_end;
extern char vdso32_start, vdso32_end;
static void *vdso64_kbase = &vdso64_start;
static void *vdso32_kbase = &vdso32_start;
unsigned int vdso64_pages;
unsigned int vdso32_pages;
/* Signal trampolines user addresses */
unsigned long vdso64_rt_sigtramp;
unsigned long vdso32_sigtramp;
unsigned long vdso32_rt_sigtramp;
/* Format of the patch table */
struct vdso_patch_def
{
u32 pvr_mask, pvr_value;
const char *gen_name;
const char *fix_name;
};
/* Table of functions to patch based on the CPU type/revision
*
* TODO: Improve by adding whole lists for each entry
*/
static struct vdso_patch_def vdso_patches[] = {
{
0xffff0000, 0x003a0000, /* POWER5 */
"__kernel_sync_dicache", "__kernel_sync_dicache_p5"
},
{
0xffff0000, 0x003b0000, /* POWER5 */
"__kernel_sync_dicache", "__kernel_sync_dicache_p5"
},
};
/*
* Some information carried around for each of the vDSOs during parsing
* at boot time.
*/
struct lib32_elfinfo
{
Elf32_Ehdr *hdr; /* ptr to ELF */
Elf32_Sym *dynsym; /* ptr to .dynsym section */
unsigned long dynsymsize; /* size of .dynsym section */
char *dynstr; /* ptr to .dynstr section */
unsigned long text; /* offset of .text section in .so */
};
struct lib64_elfinfo
{
Elf64_Ehdr *hdr;
Elf64_Sym *dynsym;
unsigned long dynsymsize;
char *dynstr;
unsigned long text;
};
#ifdef __DEBUG
static void dump_one_vdso_page(struct page *pg, struct page *upg)
{
printk("kpg: %p (c:%d,f:%08lx)", __va(page_to_pfn(pg) << PAGE_SHIFT),
page_count(pg),
pg->flags);
if (upg/* && pg != upg*/) {
printk(" upg: %p (c:%d,f:%08lx)", __va(page_to_pfn(upg) << PAGE_SHIFT),
page_count(upg),
upg->flags);
}
printk("\n");
}
static void dump_vdso_pages(struct vm_area_struct * vma)
{
int i;
if (!vma || test_thread_flag(TIF_32BIT)) {
printk("vDSO32 @ %016lx:\n", (unsigned long)vdso32_kbase);
for (i=0; i<vdso32_pages; i++) {
struct page *pg = virt_to_page(vdso32_kbase + i*PAGE_SIZE);
struct page *upg = (vma && vma->vm_mm) ?
follow_page(vma->vm_mm, vma->vm_start + i*PAGE_SIZE, 0)
: NULL;
dump_one_vdso_page(pg, upg);
}
}
if (!vma || !test_thread_flag(TIF_32BIT)) {
printk("vDSO64 @ %016lx:\n", (unsigned long)vdso64_kbase);
for (i=0; i<vdso64_pages; i++) {
struct page *pg = virt_to_page(vdso64_kbase + i*PAGE_SIZE);
struct page *upg = (vma && vma->vm_mm) ?
follow_page(vma->vm_mm, vma->vm_start + i*PAGE_SIZE, 0)
: NULL;
dump_one_vdso_page(pg, upg);
}
}
}
#endif /* DEBUG */
/*
* Keep a dummy vma_close for now, it will prevent VMA merging.
*/
static void vdso_vma_close(struct vm_area_struct * vma)
{
}
/*
* Our nopage() function maps in the actual vDSO kernel pages; they will
* be mapped read-only by do_no_page(), and eventually COW'ed, either
* right away for an initial write access, or by do_wp_page().
*/
static struct page * vdso_vma_nopage(struct vm_area_struct * vma,
unsigned long address, int *type)
{
unsigned long offset = address - vma->vm_start;
struct page *pg;
void *vbase = test_thread_flag(TIF_32BIT) ? vdso32_kbase : vdso64_kbase;
DBG("vdso_vma_nopage(current: %s, address: %016lx, off: %lx)\n",
current->comm, address, offset);
if (address < vma->vm_start || address > vma->vm_end)
return NOPAGE_SIGBUS;
/*
* Last page is systemcfg.
*/
if ((vma->vm_end - address) <= PAGE_SIZE)
pg = virt_to_page(_systemcfg);
else
pg = virt_to_page(vbase + offset);
get_page(pg);
DBG(" ->page count: %d\n", page_count(pg));
return pg;
}
static struct vm_operations_struct vdso_vmops = {
.close = vdso_vma_close,
.nopage = vdso_vma_nopage,
};
/*
* This is called from binfmt_elf, we create the special vma for the
* vDSO and insert it into the mm struct tree
*/
int arch_setup_additional_pages(struct linux_binprm *bprm, int executable_stack)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
unsigned long vdso_pages;
unsigned long vdso_base;
if (test_thread_flag(TIF_32BIT)) {
vdso_pages = vdso32_pages;
vdso_base = VDSO32_MBASE;
} else {
vdso_pages = vdso64_pages;
vdso_base = VDSO64_MBASE;
}
current->thread.vdso_base = 0;
/* vDSO has a problem and was disabled, just don't "enable" it for the
* process
*/
if (vdso_pages == 0)
return 0;
vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
if (vma == NULL)
return -ENOMEM;
memset(vma, 0, sizeof(*vma));
/*
* pick a base address for the vDSO in process space. We try to put it
* at vdso_base which is the "natural" base for it, but we might fail
* and end up putting it elsewhere.
*/
vdso_base = get_unmapped_area(NULL, vdso_base,
vdso_pages << PAGE_SHIFT, 0, 0);
if (vdso_base & ~PAGE_MASK) {
kmem_cache_free(vm_area_cachep, vma);
return (int)vdso_base;
}
current->thread.vdso_base = vdso_base;
vma->vm_mm = mm;
vma->vm_start = current->thread.vdso_base;
/*
* the VMA size is one page more than the vDSO since systemcfg
* is mapped in the last one
*/
vma->vm_end = vma->vm_start + ((vdso_pages + 1) << PAGE_SHIFT);
/*
* our vma flags don't have VM_WRITE, so by default the process isn't allowed
* to write those pages.
* gdb can break that with the ptrace interface and thus trigger COW on those
* pages, but it is then your responsibility never to do that on the "data" page
* of the vDSO, or you'll stop getting kernel updates and your nice userland
* gettimeofday will be totally dead. It's fine to use that for setting
* breakpoints in the vDSO code pages, though
*/
vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC | VM_RESERVED;
vma->vm_flags |= mm->def_flags;
vma->vm_page_prot = protection_map[vma->vm_flags & 0x7];
vma->vm_ops = &vdso_vmops;
down_write(&mm->mmap_sem);
if (insert_vm_struct(mm, vma)) {
up_write(&mm->mmap_sem);
kmem_cache_free(vm_area_cachep, vma);
return -ENOMEM;
}
mm->total_vm += (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
up_write(&mm->mmap_sem);
return 0;
}
static void * __init find_section32(Elf32_Ehdr *ehdr, const char *secname,
unsigned long *size)
{
Elf32_Shdr *sechdrs;
unsigned int i;
char *secnames;
/* Grab section headers and strings so we can tell who is who */
sechdrs = (void *)ehdr + ehdr->e_shoff;
secnames = (void *)ehdr + sechdrs[ehdr->e_shstrndx].sh_offset;
/* Find the section they want */
for (i = 1; i < ehdr->e_shnum; i++) {
if (strcmp(secnames+sechdrs[i].sh_name, secname) == 0) {
if (size)
*size = sechdrs[i].sh_size;
return (void *)ehdr + sechdrs[i].sh_offset;
}
}
*size = 0;
return NULL;
}
static void * __init find_section64(Elf64_Ehdr *ehdr, const char *secname,
unsigned long *size)
{
Elf64_Shdr *sechdrs;
unsigned int i;
char *secnames;
/* Grab section headers and strings so we can tell who is who */
sechdrs = (void *)ehdr + ehdr->e_shoff;
secnames = (void *)ehdr + sechdrs[ehdr->e_shstrndx].sh_offset;
/* Find the section they want */
for (i = 1; i < ehdr->e_shnum; i++) {
if (strcmp(secnames+sechdrs[i].sh_name, secname) == 0) {
if (size)
*size = sechdrs[i].sh_size;
return (void *)ehdr + sechdrs[i].sh_offset;
}
}
if (size)
*size = 0;
return NULL;
}
static Elf32_Sym * __init find_symbol32(struct lib32_elfinfo *lib, const char *symname)
{
unsigned int i;
char name[32], *c;
for (i = 0; i < (lib->dynsymsize / sizeof(Elf32_Sym)); i++) {
if (lib->dynsym[i].st_name == 0)
continue;
strlcpy(name, lib->dynstr + lib->dynsym[i].st_name, 32);
c = strchr(name, '@');
if (c)
*c = 0;
if (strcmp(symname, name) == 0)
return &lib->dynsym[i];
}
return NULL;
}
static Elf64_Sym * __init find_symbol64(struct lib64_elfinfo *lib, const char *symname)
{
unsigned int i;
char name[32], *c;
for (i = 0; i < (lib->dynsymsize / sizeof(Elf64_Sym)); i++) {
if (lib->dynsym[i].st_name == 0)
continue;
strlcpy(name, lib->dynstr + lib->dynsym[i].st_name, 32);
c = strchr(name, '@');
if (c)
*c = 0;
if (strcmp(symname, name) == 0)
return &lib->dynsym[i];
}
return NULL;
}
/* Note that we assume the section is .text and the symbol is relative to
* the library base
*/
static unsigned long __init find_function32(struct lib32_elfinfo *lib, const char *symname)
{
Elf32_Sym *sym = find_symbol32(lib, symname);
if (sym == NULL) {
printk(KERN_WARNING "vDSO32: function %s not found !\n", symname);
return 0;
}
return sym->st_value - VDSO32_LBASE;
}
/* Note that we assume the section is .text and the symbol is relative to
* the library base
*/
static unsigned long __init find_function64(struct lib64_elfinfo *lib, const char *symname)
{
Elf64_Sym *sym = find_symbol64(lib, symname);
if (sym == NULL) {
printk(KERN_WARNING "vDSO64: function %s not found !\n", symname);
return 0;
}
#ifdef VDS64_HAS_DESCRIPTORS
return *((u64 *)(vdso64_kbase + sym->st_value - VDSO64_LBASE)) - VDSO64_LBASE;
#else
return sym->st_value - VDSO64_LBASE;
#endif
}
static __init int vdso_do_find_sections(struct lib32_elfinfo *v32,
struct lib64_elfinfo *v64)
{
void *sect;
/*
* Locate symbol tables & text section
*/
v32->dynsym = find_section32(v32->hdr, ".dynsym", &v32->dynsymsize);
v32->dynstr = find_section32(v32->hdr, ".dynstr", NULL);
if (v32->dynsym == NULL || v32->dynstr == NULL) {
printk(KERN_ERR "vDSO32: a required symbol section was not found\n");
return -1;
}
sect = find_section32(v32->hdr, ".text", NULL);
if (sect == NULL) {
printk(KERN_ERR "vDSO32: the .text section was not found\n");
return -1;
}
v32->text = sect - vdso32_kbase;
v64->dynsym = find_section64(v64->hdr, ".dynsym", &v64->dynsymsize);
v64->dynstr = find_section64(v64->hdr, ".dynstr", NULL);
if (v64->dynsym == NULL || v64->dynstr == NULL) {
printk(KERN_ERR "vDSO64: a required symbol section was not found\n");
return -1;
}
sect = find_section64(v64->hdr, ".text", NULL);
if (sect == NULL) {
printk(KERN_ERR "vDSO64: the .text section was not found\n");
return -1;
}
v64->text = sect - vdso64_kbase;
return 0;
}
static __init void vdso_setup_trampolines(struct lib32_elfinfo *v32,
struct lib64_elfinfo *v64)
{
/*
* Find signal trampolines
*/
vdso64_rt_sigtramp = find_function64(v64, "__kernel_sigtramp_rt64");
vdso32_sigtramp = find_function32(v32, "__kernel_sigtramp32");
vdso32_rt_sigtramp = find_function32(v32, "__kernel_sigtramp_rt32");
}
static __init int vdso_fixup_datapage(struct lib32_elfinfo *v32,
struct lib64_elfinfo *v64)
{
Elf32_Sym *sym32;
Elf64_Sym *sym64;
sym32 = find_symbol32(v32, "__kernel_datapage_offset");
if (sym32 == NULL) {
printk(KERN_ERR "vDSO32: Can't find symbol __kernel_datapage_offset !\n");
return -1;
}
*((int *)(vdso32_kbase + (sym32->st_value - VDSO32_LBASE))) =
(vdso32_pages << PAGE_SHIFT) - (sym32->st_value - VDSO32_LBASE);
sym64 = find_symbol64(v64, "__kernel_datapage_offset");
if (sym64 == NULL) {
printk(KERN_ERR "vDSO64: Can't find symbol __kernel_datapage_offset !\n");
return -1;
}
*((int *)(vdso64_kbase + sym64->st_value - VDSO64_LBASE)) =
(vdso64_pages << PAGE_SHIFT) - (sym64->st_value - VDSO64_LBASE);
return 0;
}
static int vdso_do_func_patch32(struct lib32_elfinfo *v32,
struct lib64_elfinfo *v64,
const char *orig, const char *fix)
{
Elf32_Sym *sym32_gen, *sym32_fix;
sym32_gen = find_symbol32(v32, orig);
if (sym32_gen == NULL) {
printk(KERN_ERR "vDSO32: Can't find symbol %s !\n", orig);
return -1;
}
sym32_fix = find_symbol32(v32, fix);
if (sym32_fix == NULL) {
printk(KERN_ERR "vDSO32: Can't find symbol %s !\n", fix);
return -1;
}
sym32_gen->st_value = sym32_fix->st_value;
sym32_gen->st_size = sym32_fix->st_size;
sym32_gen->st_info = sym32_fix->st_info;
sym32_gen->st_other = sym32_fix->st_other;
sym32_gen->st_shndx = sym32_fix->st_shndx;
return 0;
}
static int vdso_do_func_patch64(struct lib32_elfinfo *v32,
struct lib64_elfinfo *v64,
const char *orig, const char *fix)
{
Elf64_Sym *sym64_gen, *sym64_fix;
sym64_gen = find_symbol64(v64, orig);
if (sym64_gen == NULL) {
printk(KERN_ERR "vDSO64: Can't find symbol %s !\n", orig);
return -1;
}
sym64_fix = find_symbol64(v64, fix);
if (sym64_fix == NULL) {
printk(KERN_ERR "vDSO64: Can't find symbol %s !\n", fix);
return -1;
}
sym64_gen->st_value = sym64_fix->st_value;
sym64_gen->st_size = sym64_fix->st_size;
sym64_gen->st_info = sym64_fix->st_info;
sym64_gen->st_other = sym64_fix->st_other;
sym64_gen->st_shndx = sym64_fix->st_shndx;
return 0;
}
static __init int vdso_fixup_alt_funcs(struct lib32_elfinfo *v32,
struct lib64_elfinfo *v64)
{
u32 pvr;
int i;
pvr = mfspr(SPRN_PVR);
for (i = 0; i < ARRAY_SIZE(vdso_patches); i++) {
struct vdso_patch_def *patch = &vdso_patches[i];
int match = (pvr & patch->pvr_mask) == patch->pvr_value;
DBG("patch %d (mask: %x, pvr: %x) : %s\n",
i, patch->pvr_mask, patch->pvr_value, match ? "match" : "skip");
if (!match)
continue;
DBG("replacing %s with %s...\n", patch->gen_name, patch->fix_name);
/*
* Patch the 32 bits and 64 bits symbols. Note that we do not patch
* the "." symbol on 64 bits. It would be easy to do, but doesn't
* seem to be necessary, patching the OPD symbol is enough.
*/
vdso_do_func_patch32(v32, v64, patch->gen_name, patch->fix_name);
vdso_do_func_patch64(v32, v64, patch->gen_name, patch->fix_name);
}
return 0;
}
static __init int vdso_setup(void)
{
struct lib32_elfinfo v32;
struct lib64_elfinfo v64;
v32.hdr = vdso32_kbase;
v64.hdr = vdso64_kbase;
if (vdso_do_find_sections(&v32, &v64))
return -1;
if (vdso_fixup_datapage(&v32, &v64))
return -1;
if (vdso_fixup_alt_funcs(&v32, &v64))
return -1;
vdso_setup_trampolines(&v32, &v64);
return 0;
}
void __init vdso_init(void)
{
int i;
vdso64_pages = (&vdso64_end - &vdso64_start) >> PAGE_SHIFT;
vdso32_pages = (&vdso32_end - &vdso32_start) >> PAGE_SHIFT;
DBG("vdso64_kbase: %p, 0x%x pages, vdso32_kbase: %p, 0x%x pages\n",
vdso64_kbase, vdso64_pages, vdso32_kbase, vdso32_pages);
/*
* Initialize the vDSO images in memory, that is do necessary
* fixups of vDSO symbols, locate trampolines, etc...
*/
if (vdso_setup()) {
printk(KERN_ERR "vDSO setup failure, not enabled !\n");
/* XXX should free pages here ? */
vdso64_pages = vdso32_pages = 0;
return;
}
/* Make sure pages are in the correct state */
for (i = 0; i < vdso64_pages; i++) {
struct page *pg = virt_to_page(vdso64_kbase + i*PAGE_SIZE);
ClearPageReserved(pg);
get_page(pg);
}
for (i = 0; i < vdso32_pages; i++) {
struct page *pg = virt_to_page(vdso32_kbase + i*PAGE_SIZE);
ClearPageReserved(pg);
get_page(pg);
}
get_page(virt_to_page(_systemcfg));
}
int in_gate_area_no_task(unsigned long addr)
{
return 0;
}
int in_gate_area(struct task_struct *task, unsigned long addr)
{
return 0;
}
struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
{
return NULL;
}

View File

@ -1,151 +0,0 @@
#include <asm/page.h>
#include <asm-generic/vmlinux.lds.h>
OUTPUT_ARCH(powerpc:common64)
jiffies = jiffies_64;
SECTIONS
{
/* Sections to be discarded. */
/DISCARD/ : {
*(.exitcall.exit)
}
/* Read-only sections, merged into text segment: */
.text : {
*(.text .text.*)
SCHED_TEXT
LOCK_TEXT
KPROBES_TEXT
*(.fixup)
. = ALIGN(PAGE_SIZE);
_etext = .;
}
__ex_table : {
__start___ex_table = .;
*(__ex_table)
__stop___ex_table = .;
}
__bug_table : {
__start___bug_table = .;
*(__bug_table)
__stop___bug_table = .;
}
__ftr_fixup : {
__start___ftr_fixup = .;
*(__ftr_fixup)
__stop___ftr_fixup = .;
}
RODATA
/* will be freed after init */
. = ALIGN(PAGE_SIZE);
__init_begin = .;
.init.text : {
_sinittext = .;
*(.init.text)
_einittext = .;
}
.init.data : {
*(.init.data)
}
. = ALIGN(16);
.init.setup : {
__setup_start = .;
*(.init.setup)
__setup_end = .;
}
.initcall.init : {
__initcall_start = .;
*(.initcall1.init)
*(.initcall2.init)
*(.initcall3.init)
*(.initcall4.init)
*(.initcall5.init)
*(.initcall6.init)
*(.initcall7.init)
__initcall_end = .;
}
.con_initcall.init : {
__con_initcall_start = .;
*(.con_initcall.init)
__con_initcall_end = .;
}
SECURITY_INIT
. = ALIGN(PAGE_SIZE);
.init.ramfs : {
__initramfs_start = .;
*(.init.ramfs)
__initramfs_end = .;
}
.data.percpu : {
__per_cpu_start = .;
*(.data.percpu)
__per_cpu_end = .;
}
. = ALIGN(PAGE_SIZE);
. = ALIGN(16384);
__init_end = .;
/* freed after init ends here */
/* Read/write sections */
. = ALIGN(PAGE_SIZE);
. = ALIGN(16384);
_sdata = .;
/* The initial task and kernel stack */
.data.init_task : {
*(.data.init_task)
}
. = ALIGN(PAGE_SIZE);
.data.page_aligned : {
*(.data.page_aligned)
}
.data.cacheline_aligned : {
*(.data.cacheline_aligned)
}
.data : {
*(.data .data.rel* .toc1)
*(.branch_lt)
}
.opd : {
*(.opd)
}
.got : {
__toc_start = .;
*(.got)
*(.toc)
. = ALIGN(PAGE_SIZE);
_edata = .;
}
. = ALIGN(PAGE_SIZE);
.bss : {
__bss_start = .;
*(.bss)
__bss_stop = .;
}
. = ALIGN(PAGE_SIZE);
_end = . ;
}

View File

@ -1,64 +0,0 @@
/*
* Copyright (C) 1996 Paul Mackerras.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#define GETREG(reg) \
static inline unsigned long get_ ## reg (void) \
{ unsigned long ret; asm volatile ("mf" #reg " %0" : "=r" (ret) :); return ret; }
#define SETREG(reg) \
static inline void set_ ## reg (unsigned long val) \
{ asm volatile ("mt" #reg " %0" : : "r" (val)); }
GETREG(msr)
SETREG(msrd)
GETREG(cr)
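For example, GETREG(msr) above expands (modulo whitespace) to:

static inline unsigned long get_msr(void)
{
	unsigned long ret;
	asm volatile ("mfmsr %0" : "=r" (ret) :);
	return ret;
}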
#define GSETSPR(n, name) \
static inline long get_ ## name (void) \
{ long ret; asm volatile ("mfspr %0," #n : "=r" (ret) : ); return ret; } \
static inline void set_ ## name (long val) \
{ asm volatile ("mtspr " #n ",%0" : : "r" (val)); }
GSETSPR(0, mq)
GSETSPR(1, xer)
GSETSPR(4, rtcu)
GSETSPR(5, rtcl)
GSETSPR(8, lr)
GSETSPR(9, ctr)
GSETSPR(18, dsisr)
GSETSPR(19, dar)
GSETSPR(22, dec)
GSETSPR(25, sdr1)
GSETSPR(26, srr0)
GSETSPR(27, srr1)
GSETSPR(272, sprg0)
GSETSPR(273, sprg1)
GSETSPR(274, sprg2)
GSETSPR(275, sprg3)
GSETSPR(282, ear)
GSETSPR(287, pvr)
GSETSPR(1008, hid0)
GSETSPR(1009, hid1)
GSETSPR(1010, iabr)
GSETSPR(1023, pir)
static inline void store_inst(void *p)
{
asm volatile ("dcbst 0,%0; sync; icbi 0,%0; isync" : : "r" (p));
}
static inline void cflush(void *p)
{
asm volatile ("dcbf 0,%0; icbi 0,%0" : : "r" (p));
}
static inline void cinval(void *p)
{
asm volatile ("dcbi 0,%0; icbi 0,%0" : : "r" (p));
}

View File

@ -23,6 +23,14 @@ config GENERIC_CALIBRATE_DELAY
bool
default y
config GENERIC_HARDIRQS
bool
default y
config GENERIC_IRQ_PROBE
bool
default y
# Turn off some random 386 crap that can affect device config
config ISA
bool

View File

@ -1,8 +1,8 @@
/*
* arch/v850/kernel/irq.c -- High-level interrupt handling
*
* Copyright (C) 2001,02,03,04 NEC Electronics Corporation
* Copyright (C) 2001,02,03,04 Miles Bader <miles@gnu.org>
* Copyright (C) 2001,02,03,04,05 NEC Electronics Corporation
* Copyright (C) 2001,02,03,04,05 Miles Bader <miles@gnu.org>
* Copyright (C) 1994-2000 Ralf Baechle
* Copyright (C) 1992 Linus Torvalds
*
@ -27,55 +27,15 @@
#include <asm/system.h>
/*
* Controller mappings for all interrupt sources:
* 'what should we do if we get a hw irq event on an illegal vector'.
* each architecture has to answer this themselves, it doesn't deserve
* a generic callback i think.
*/
irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned = {
[0 ... NR_IRQS-1] = {
.handler = &no_irq_type,
.lock = SPIN_LOCK_UNLOCKED
}
};
/*
* Special irq handlers.
*/
irqreturn_t no_action(int cpl, void *dev_id, struct pt_regs *regs)
void ack_bad_irq(unsigned int irq)
{
return IRQ_NONE;
}
/*
* Generic no controller code
*/
static void enable_none(unsigned int irq) { }
static unsigned int startup_none(unsigned int irq) { return 0; }
static void disable_none(unsigned int irq) { }
static void ack_none(unsigned int irq)
{
/*
* 'what should we do if we get a hw irq event on an illegal vector'.
* each architecture has to answer this themselves, it doesn't deserve
* a generic callback i think.
*/
printk("received IRQ %d with unknown interrupt type\n", irq);
}
/* startup is the same as "enable", shutdown is same as "disable" */
#define shutdown_none disable_none
#define end_none enable_none
struct hw_interrupt_type no_irq_type = {
.typename = "none",
.startup = startup_none,
.shutdown = shutdown_none,
.enable = enable_none,
.disable = disable_none,
.ack = ack_none,
.end = end_none
};
volatile unsigned long irq_err_count, spurious_count;
/*
@ -84,643 +44,68 @@ volatile unsigned long irq_err_count, spurious_count;
int show_interrupts(struct seq_file *p, void *v)
{
int i = *(loff_t *) v;
struct irqaction * action;
unsigned long flags;
int irq = *(loff_t *) v;
if (i == 0) {
if (irq == 0) {
int cpu;
seq_puts(p, " ");
for (i=0; i < 1 /*smp_num_cpus*/; i++)
seq_printf(p, "CPU%d ", i);
for (cpu=0; cpu < 1 /*smp_num_cpus*/; cpu++)
seq_printf(p, "CPU%d ", cpu);
seq_putc(p, '\n');
}
if (i < NR_IRQS) {
int j, count, num;
const char *type_name = irq_desc[i].handler->typename;
spin_lock_irqsave(&irq_desc[j].lock, flags);
action = irq_desc[i].action;
if (!action)
goto skip;
if (irq < NR_IRQS) {
unsigned long flags;
struct irqaction *action;
count = 0;
num = -1;
for (j = 0; j < NR_IRQS; j++)
if (irq_desc[j].handler->typename == type_name) {
if (i == j)
num = count;
count++;
}
spin_lock_irqsave(&irq_desc[irq].lock, flags);
seq_printf(p, "%3d: ",i);
seq_printf(p, "%10u ", kstat_irqs(i));
if (count > 1) {
int prec = (num >= 100 ? 3 : num >= 10 ? 2 : 1);
seq_printf(p, " %*s%d", 14 - prec, type_name, num);
} else
seq_printf(p, " %14s", type_name);
action = irq_desc[irq].action;
if (action) {
int j;
int count = 0;
int num = -1;
const char *type_name = irq_desc[irq].handler->typename;
for (j = 0; j < NR_IRQS; j++)
if (irq_desc[j].handler->typename == type_name){
if (irq == j)
num = count;
count++;
}
seq_printf(p, "%3d: ",irq);
seq_printf(p, "%10u ", kstat_irqs(irq));
if (count > 1) {
int prec = (num >= 100 ? 3 : num >= 10 ? 2 : 1);
seq_printf(p, " %*s%d", 14 - prec,
type_name, num);
} else
seq_printf(p, " %14s", type_name);
seq_printf(p, " %s", action->name);
for (action=action->next; action; action = action->next)
seq_printf(p, ", %s", action->name);
seq_putc(p, '\n');
skip:
spin_unlock_irqrestore(&irq_desc[j].lock, flags);
} else if (i == NR_IRQS)
seq_printf(p, "ERR: %10lu\n", irq_err_count);
return 0;
}
/*
* This should really return information about whether
* we should do bottom half handling etc. Right now we
* end up _always_ checking the bottom half, which is a
* waste of time and is not what some drivers would
* prefer.
*/
int handle_IRQ_event(unsigned int irq, struct pt_regs * regs, struct irqaction * action)
{
int status = 1; /* Force the "do bottom halves" bit */
int ret;
if (!(action->flags & SA_INTERRUPT))
local_irq_enable();
do {
ret = action->handler(irq, action->dev_id, regs);
if (ret == IRQ_HANDLED)
status |= action->flags;
action = action->next;
} while (action);
if (status & SA_SAMPLE_RANDOM)
add_interrupt_randomness(irq);
local_irq_disable();
return status;
}
/*
* Generic enable/disable code: this just calls
* down into the PIC-specific version for the actual
* hardware disable after having gotten the irq
* controller lock.
*/
/**
* disable_irq_nosync - disable an irq without waiting
* @irq: Interrupt to disable
*
* Disable the selected interrupt line. Disables of an interrupt
* nest (stack). Unlike disable_irq(), this function does not ensure existing
* instances of the IRQ handler have completed before returning.
*
* This function may be called from IRQ context.
*/
void inline disable_irq_nosync(unsigned int irq)
{
irq_desc_t *desc = irq_desc + irq;
unsigned long flags;
spin_lock_irqsave(&desc->lock, flags);
if (!desc->depth++) {
desc->status |= IRQ_DISABLED;
desc->handler->disable(irq);
}
spin_unlock_irqrestore(&desc->lock, flags);
}
/**
* disable_irq - disable an irq and wait for completion
* @irq: Interrupt to disable
*
* Disable the selected interrupt line. Disables of an interrupt
* nest (stack): for two disables you need two enables. This
* function waits for any pending IRQ handlers for this interrupt
* to complete before returning. If you use this function while
* holding a resource the IRQ handler may need you will deadlock.
*
* This function may be called - with care - from IRQ context.
*/
void disable_irq(unsigned int irq)
{
disable_irq_nosync(irq);
synchronize_irq(irq);
}
/**
* enable_irq - enable interrupt handling on an irq
* @irq: Interrupt to enable
*
* Re-enables the processing of interrupts on this IRQ line
* providing no disable_irq calls are now in effect.
*
* This function may be called from IRQ context.
*/
void enable_irq(unsigned int irq)
{
irq_desc_t *desc = irq_desc + irq;
unsigned long flags;
spin_lock_irqsave(&desc->lock, flags);
switch (desc->depth) {
case 1: {
unsigned int status = desc->status & ~IRQ_DISABLED;
desc->status = status;
if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
desc->status = status | IRQ_REPLAY;
hw_resend_irq(desc->handler,irq);
seq_printf(p, " %s", action->name);
for (action=action->next; action; action = action->next)
seq_printf(p, ", %s", action->name);
seq_putc(p, '\n');
}
desc->handler->enable(irq);
/* fall-through */
}
default:
desc->depth--;
break;
case 0:
printk("enable_irq(%u) unbalanced from %p\n", irq,
__builtin_return_address(0));
}
spin_unlock_irqrestore(&desc->lock, flags);
spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
} else if (irq == NR_IRQS)
seq_printf(p, "ERR: %10lu\n", irq_err_count);
return 0;
}
/* Handle interrupt IRQ. REGS are the registers at the time of the
interrupt. */
unsigned int handle_irq (int irq, struct pt_regs *regs)
{
/*
* We ack quickly, we don't want the irq controller
* thinking we're snobs just because some other CPU has
* disabled global interrupts (we have already done the
* INT_ACK cycles, it's too late to try to pretend to the
* controller that we aren't taking the interrupt).
*
* 0 return value means that this irq is already being
* handled by some other CPU. (or is disabled)
*/
int cpu = smp_processor_id();
irq_desc_t *desc = irq_desc + irq;
struct irqaction * action;
unsigned int status;
irq_enter();
kstat_cpu(cpu).irqs[irq]++;
spin_lock(&desc->lock);
desc->handler->ack(irq);
/*
REPLAY is when Linux resends an IRQ that was dropped earlier
WAITING is used by probe to mark irqs that are being tested
*/
status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
status |= IRQ_PENDING; /* we _want_ to handle it */
/*
* If the IRQ is disabled for whatever reason, we cannot
* use the action we have.
*/
action = NULL;
if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
action = desc->action;
status &= ~IRQ_PENDING; /* we commit to handling */
status |= IRQ_INPROGRESS; /* we are handling it */
}
desc->status = status;
/*
* If there is no IRQ handler or it was disabled, exit early.
Since we set PENDING, if another processor is handling
a different instance of this same irq, the other processor
will take care of it.
*/
if (unlikely(!action))
goto out;
/*
* Edge triggered interrupts need to remember
* pending events.
* This applies to any hw interrupts that allow a second
* instance of the same irq to arrive while we are in handle_irq
* or in the handler. But the code here only handles the _second_
* instance of the irq, not the third or fourth. So it is mostly
* useful for irq hardware that does not mask cleanly in an
* SMP environment.
*/
for (;;) {
spin_unlock(&desc->lock);
handle_IRQ_event(irq, regs, action);
spin_lock(&desc->lock);
if (likely(!(desc->status & IRQ_PENDING)))
break;
desc->status &= ~IRQ_PENDING;
}
desc->status &= ~IRQ_INPROGRESS;
out:
/*
* The ->end() handler has to deal with interrupts which got
* disabled while the handler was running.
*/
desc->handler->end(irq);
spin_unlock(&desc->lock);
__do_IRQ(irq, regs);
irq_exit();
return 1;
}
/**
* request_irq - allocate an interrupt line
* @irq: Interrupt line to allocate
* @handler: Function to be called when the IRQ occurs
* @irqflags: Interrupt type flags
* @devname: An ascii name for the claiming device
* @dev_id: A cookie passed back to the handler function
*
* This call allocates interrupt resources and enables the
* interrupt line and IRQ handling. From the point this
* call is made your handler function may be invoked. Since
* your handler function must clear any interrupt the board
* raises, you must take care both to initialise your hardware
* and to set up the interrupt handler in the right order.
*
* Dev_id must be globally unique. Normally the address of the
* device data structure is used as the cookie. Since the handler
* receives this value it makes sense to use it.
*
* If your interrupt is shared you must pass a non-NULL dev_id
* as this is required when freeing the interrupt.
*
* Flags:
*
* SA_SHIRQ Interrupt is shared
*
* SA_INTERRUPT Disable local interrupts while processing
*
* SA_SAMPLE_RANDOM The interrupt can be used for entropy
*
*/
int request_irq(unsigned int irq,
irqreturn_t (*handler)(int, void *, struct pt_regs *),
unsigned long irqflags,
const char * devname,
void *dev_id)
{
int retval;
struct irqaction * action;
#if 1
/*
* Sanity-check: shared interrupts should REALLY pass in
* a real dev-ID, otherwise we'll have trouble later trying
* to figure out which interrupt is which (messes up the
* interrupt freeing logic etc).
*/
if (irqflags & SA_SHIRQ) {
if (!dev_id)
printk("Bad boy: %s (at 0x%x) called us without a dev_id!\n", devname, (&irq)[-1]);
}
#endif
if (irq >= NR_IRQS)
return -EINVAL;
if (!handler)
return -EINVAL;
action = (struct irqaction *)
kmalloc(sizeof(struct irqaction), GFP_KERNEL);
if (!action)
return -ENOMEM;
action->handler = handler;
action->flags = irqflags;
cpus_clear(action->mask);
action->name = devname;
action->next = NULL;
action->dev_id = dev_id;
retval = setup_irq(irq, action);
if (retval)
kfree(action);
return retval;
}
EXPORT_SYMBOL(request_irq);
/**
* free_irq - free an interrupt
* @irq: Interrupt line to free
* @dev_id: Device identity to free
*
* Remove an interrupt handler. The handler is removed and if the
* interrupt line is no longer in use by any driver it is disabled.
* On a shared IRQ the caller must ensure the interrupt is disabled
* on the card it drives before calling this function. The function
* does not return until any executing interrupts for this IRQ
* have completed.
*
* This function may be called from interrupt context.
*
* Bugs: Attempting to free an irq in a handler for the same irq hangs
* the machine.
*/
void free_irq(unsigned int irq, void *dev_id)
{
irq_desc_t *desc;
struct irqaction **p;
unsigned long flags;
if (irq >= NR_IRQS)
return;
desc = irq_desc + irq;
spin_lock_irqsave(&desc->lock,flags);
p = &desc->action;
for (;;) {
struct irqaction * action = *p;
if (action) {
struct irqaction **pp = p;
p = &action->next;
if (action->dev_id != dev_id)
continue;
/* Found it - now remove it from the list of entries */
*pp = action->next;
if (!desc->action) {
desc->status |= IRQ_DISABLED;
desc->handler->shutdown(irq);
}
spin_unlock_irqrestore(&desc->lock,flags);
synchronize_irq(irq);
kfree(action);
return;
}
printk("Trying to free free IRQ%d\n",irq);
spin_unlock_irqrestore(&desc->lock,flags);
return;
}
}
EXPORT_SYMBOL(free_irq);
/*
* IRQ autodetection code..
*
* This depends on the fact that any interrupt that
* comes in on to an unassigned handler will get stuck
* with "IRQ_WAITING" cleared and the interrupt
* disabled.
*/
static DECLARE_MUTEX(probe_sem);
/**
* probe_irq_on - begin an interrupt autodetect
*
* Commence probing for an interrupt. The interrupts are scanned
* and a mask of potential interrupt lines is returned.
*
*/
unsigned long probe_irq_on(void)
{
unsigned int i;
irq_desc_t *desc;
unsigned long val;
unsigned long delay;
down(&probe_sem);
/*
* something may have generated an irq long ago and we want to
* flush such a longstanding irq before considering it as spurious.
*/
for (i = NR_IRQS-1; i > 0; i--) {
desc = irq_desc + i;
spin_lock_irq(&desc->lock);
if (!irq_desc[i].action)
irq_desc[i].handler->startup(i);
spin_unlock_irq(&desc->lock);
}
/* Wait for longstanding interrupts to trigger. */
for (delay = jiffies + HZ/50; time_after(delay, jiffies); )
/* about 20ms delay */ barrier();
/*
* enable any unassigned irqs
* (we must startup again here because if a longstanding irq
* happened in the previous stage, it may have masked itself)
*/
for (i = NR_IRQS-1; i > 0; i--) {
desc = irq_desc + i;
spin_lock_irq(&desc->lock);
if (!desc->action) {
desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
if (desc->handler->startup(i))
desc->status |= IRQ_PENDING;
}
spin_unlock_irq(&desc->lock);
}
/*
* Wait for spurious interrupts to trigger
*/
for (delay = jiffies + HZ/10; time_after(delay, jiffies); )
/* about 100ms delay */ barrier();
/*
* Now filter out any obviously spurious interrupts
*/
val = 0;
for (i = 0; i < NR_IRQS; i++) {
irq_desc_t *desc = irq_desc + i;
unsigned int status;
spin_lock_irq(&desc->lock);
status = desc->status;
if (status & IRQ_AUTODETECT) {
/* It triggered already - consider it spurious. */
if (!(status & IRQ_WAITING)) {
desc->status = status & ~IRQ_AUTODETECT;
desc->handler->shutdown(i);
} else
if (i < 32)
val |= 1 << i;
}
spin_unlock_irq(&desc->lock);
}
return val;
}
EXPORT_SYMBOL(probe_irq_on);
/*
* Return a mask of triggered interrupts (this
* can handle only legacy ISA interrupts).
*/
/**
* probe_irq_mask - scan a bitmap of interrupt lines
* @val: mask of interrupts to consider
*
* Scan the ISA bus interrupt lines and return a bitmap of
* active interrupts. The interrupt probe logic state is then
* returned to its previous value.
*
* Note: we need to scan all the irq's even though we will
* only return ISA irq numbers - just so that we reset them
* all to a known state.
*/
unsigned int probe_irq_mask(unsigned long val)
{
int i;
unsigned int mask;
mask = 0;
for (i = 0; i < NR_IRQS; i++) {
irq_desc_t *desc = irq_desc + i;
unsigned int status;
spin_lock_irq(&desc->lock);
status = desc->status;
if (status & IRQ_AUTODETECT) {
if (i < 16 && !(status & IRQ_WAITING))
mask |= 1 << i;
desc->status = status & ~IRQ_AUTODETECT;
desc->handler->shutdown(i);
}
spin_unlock_irq(&desc->lock);
}
up(&probe_sem);
return mask & val;
}
/*
* Return the one interrupt that triggered (this can
* handle any interrupt source).
*/
/**
* probe_irq_off - end an interrupt autodetect
* @val: mask of potential interrupts (unused)
*
* Scans the unused interrupt lines and returns the line which
* appears to have triggered the interrupt. If no interrupt was
* found then zero is returned. If more than one interrupt is
* found then minus the first candidate is returned to indicate
 * there is doubt.
*
* The interrupt probe logic state is returned to its previous
* value.
*
 * BUGS: When used in a module (which arguably shouldn't happen)
* nothing prevents two IRQ probe callers from overlapping. The
* results of this are non-optimal.
*/
int probe_irq_off(unsigned long val)
{
int i, irq_found, nr_irqs;
nr_irqs = 0;
irq_found = 0;
for (i = 0; i < NR_IRQS; i++) {
irq_desc_t *desc = irq_desc + i;
unsigned int status;
spin_lock_irq(&desc->lock);
status = desc->status;
if (status & IRQ_AUTODETECT) {
if (!(status & IRQ_WAITING)) {
if (!nr_irqs)
irq_found = i;
nr_irqs++;
}
desc->status = status & ~IRQ_AUTODETECT;
desc->handler->shutdown(i);
}
spin_unlock_irq(&desc->lock);
}
up(&probe_sem);
if (nr_irqs > 1)
irq_found = -irq_found;
return irq_found;
}
EXPORT_SYMBOL(probe_irq_off);
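
A sketch of the intended calling sequence for the autoprobe helpers above, for a hypothetical legacy ISA card (the register offsets and the trigger mechanism are made up):

#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <asm/io.h>

static int foo_find_irq(void __iomem *regs)
{
	unsigned long mask;
	int irq;

	mask = probe_irq_on();		/* arm all unassigned lines */
	writeb(0x01, regs + 0x04);	/* tell the card to raise an interrupt */
	mdelay(20);			/* let the interrupt arrive */
	writeb(0x00, regs + 0x04);	/* quiesce the card again */
	irq = probe_irq_off(mask);	/* 0: none, <0: ambiguous, >0: the IRQ */

	return irq > 0 ? irq : -ENODEV;
}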
/* this was setup_x86_irq but it seems pretty generic */
int setup_irq(unsigned int irq, struct irqaction * new)
{
int shared = 0;
unsigned long flags;
struct irqaction *old, **p;
irq_desc_t *desc = irq_desc + irq;
/*
* Some drivers like serial.c use request_irq() heavily,
* so we have to be careful not to interfere with a
* running system.
*/
if (new->flags & SA_SAMPLE_RANDOM) {
/*
* This function might sleep, we want to call it first,
* outside of the atomic block.
* Yes, this might clear the entropy pool if the wrong
* driver is attempted to be loaded, without actually
		 * installing a new handler. But is this really a
		 * problem? Only the sysadmin is able to do this.
*/
rand_initialize_irq(irq);
}
/*
* The following block of code has to be executed atomically
*/
spin_lock_irqsave(&desc->lock,flags);
p = &desc->action;
if ((old = *p) != NULL) {
/* Can't share interrupts unless both agree to */
if (!(old->flags & new->flags & SA_SHIRQ)) {
spin_unlock_irqrestore(&desc->lock,flags);
return -EBUSY;
}
/* add new interrupt at end of irq queue */
do {
p = &old->next;
old = *p;
} while (old);
shared = 1;
}
*p = new;
if (!shared) {
desc->depth = 0;
desc->status &= ~(IRQ_DISABLED | IRQ_AUTODETECT | IRQ_WAITING | IRQ_INPROGRESS);
desc->handler->startup(irq);
}
spin_unlock_irqrestore(&desc->lock,flags);
/* register_irq_proc(irq); */
return 0;
}
/* Initialize irq handling for IRQs.
BASE_IRQ, BASE_IRQ+INTERVAL, ..., BASE_IRQ+NUM*INTERVAL
to IRQ_TYPE. An IRQ_TYPE of 0 means to use a generic interrupt type. */
@ -736,9 +121,3 @@ init_irq_handlers (int base_irq, int num, int interval,
base_irq += interval;
}
}
#if defined(CONFIG_PROC_FS) && defined(CONFIG_SYSCTL)
void init_irq_proc(void)
{
}
#endif /* CONFIG_PROC_FS && CONFIG_SYSCTL */

View File

@ -226,22 +226,42 @@ config SCHED_SMT
source "kernel/Kconfig.preempt"
config K8_NUMA
bool "K8 NUMA support"
select NUMA
config NUMA
bool "Non Uniform Memory Access (NUMA) Support"
depends on SMP
help
Enable NUMA (Non Unified Memory Architecture) support for
AMD Opteron Multiprocessor systems. The kernel will try to allocate
memory used by a CPU on the local memory controller of the CPU
and add some more NUMA awareness to the kernel.
This code is recommended on all multiprocessor Opteron systems
and normally doesn't hurt on others.
Enable NUMA (Non Uniform Memory Access) support. The kernel
will try to allocate memory used by a CPU on the local memory
controller of the CPU and add some more NUMA awareness to the kernel.
This code is recommended on all multiprocessor Opteron systems.
If the system is EM64T, you should say N unless your system is EM64T
NUMA.
config K8_NUMA
bool "Old style AMD Opteron NUMA detection"
depends on NUMA
default y
help
Enable K8 NUMA node topology detection. You should say Y here if
	 you have a multiprocessor AMD K8 system. This uses an old
	 method to read the NUMA configuration directly from the builtin
Northbridge of Opteron. It is recommended to use X86_64_ACPI_NUMA
instead, which also takes priority if both are compiled in.
# Dummy CONFIG option to select ACPI_NUMA from drivers/acpi/Kconfig.
config X86_64_ACPI_NUMA
bool "ACPI NUMA detection"
depends on NUMA
select ACPI
select ACPI_NUMA
default y
help
Enable ACPI SRAT based node topology detection.
config NUMA_EMU
bool "NUMA emulation support"
select NUMA
depends on SMP
bool "NUMA emulation"
depends on NUMA
help
Enable NUMA emulation. A flat machine will be split
into virtual nodes when booted with "numa=fake=N", where N is the
@ -252,9 +272,6 @@ config ARCH_DISCONTIGMEM_ENABLE
depends on NUMA
default y
config NUMA
bool
default n
config ARCH_DISCONTIGMEM_ENABLE
def_bool y
@ -374,6 +391,14 @@ config X86_MCE_INTEL
Additional support for intel specific MCE features such as
the thermal monitor.
config X86_MCE_AMD
bool "AMD MCE features"
depends on X86_MCE && X86_LOCAL_APIC
default y
help
Additional support for AMD specific MCE features such as
the DRAM Error Threshold.
config PHYSICAL_START
hex "Physical address where the kernel is loaded" if EMBEDDED
default "0x100000"
@ -502,7 +527,7 @@ config IA32_EMULATION
left.
config IA32_AOUT
bool "IA32 a.out support"
tristate "IA32 a.out support"
depends on IA32_EMULATION
help
Support old a.out binaries in the 32bit emulation.

View File

@ -2,15 +2,6 @@ menu "Kernel hacking"
source "lib/Kconfig.debug"
# !SMP for now because the context switch early causes GPF in segment reloading
# and the GS base checking does the wrong thing then, causing a hang.
config CHECKING
bool "Additional run-time checks"
depends on DEBUG_KERNEL && !SMP
help
Enables some internal consistency checks for kernel debugging.
You should normally say N.
config INIT_DEBUG
bool "Debug __init statements"
depends on DEBUG_KERNEL

View File

@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
# Linux kernel version: 2.6.13-git11
# Mon Sep 12 16:16:16 2005
# Linux kernel version: 2.6.14-git7
# Sat Nov 5 15:55:50 2005
#
CONFIG_X86_64=y
CONFIG_64BIT=y
@ -35,7 +35,7 @@ CONFIG_POSIX_MQUEUE=y
# CONFIG_BSD_PROCESS_ACCT is not set
CONFIG_SYSCTL=y
# CONFIG_AUDIT is not set
# CONFIG_HOTPLUG is not set
CONFIG_HOTPLUG=y
CONFIG_KOBJECT_UEVENT=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
@ -93,10 +93,11 @@ CONFIG_PREEMPT_NONE=y
# CONFIG_PREEMPT_VOLUNTARY is not set
# CONFIG_PREEMPT is not set
CONFIG_PREEMPT_BKL=y
CONFIG_NUMA=y
CONFIG_K8_NUMA=y
CONFIG_X86_64_ACPI_NUMA=y
# CONFIG_NUMA_EMU is not set
CONFIG_ARCH_DISCONTIGMEM_ENABLE=y
CONFIG_NUMA=y
CONFIG_ARCH_DISCONTIGMEM_DEFAULT=y
CONFIG_ARCH_SPARSEMEM_ENABLE=y
CONFIG_SELECT_MEMORY_MODEL=y
@ -107,9 +108,10 @@ CONFIG_DISCONTIGMEM=y
CONFIG_FLAT_NODE_MEM_MAP=y
CONFIG_NEED_MULTIPLE_NODES=y
# CONFIG_SPARSEMEM_STATIC is not set
CONFIG_SPLIT_PTLOCK_CPUS=4
CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID=y
CONFIG_HAVE_DEC_LOCK=y
CONFIG_NR_CPUS=32
CONFIG_HOTPLUG_CPU=y
CONFIG_HPET_TIMER=y
CONFIG_X86_PM_TIMER=y
CONFIG_HPET_EMULATE_RTC=y
@ -117,6 +119,7 @@ CONFIG_GART_IOMMU=y
CONFIG_SWIOTLB=y
CONFIG_X86_MCE=y
CONFIG_X86_MCE_INTEL=y
CONFIG_X86_MCE_AMD=y
CONFIG_PHYSICAL_START=0x100000
# CONFIG_KEXEC is not set
CONFIG_SECCOMP=y
@ -136,11 +139,15 @@ CONFIG_PM=y
# CONFIG_PM_DEBUG is not set
CONFIG_SOFTWARE_SUSPEND=y
CONFIG_PM_STD_PARTITION=""
CONFIG_SUSPEND_SMP=y
#
# ACPI (Advanced Configuration and Power Interface) Support
#
CONFIG_ACPI=y
CONFIG_ACPI_SLEEP=y
CONFIG_ACPI_SLEEP_PROC_FS=y
CONFIG_ACPI_SLEEP_PROC_SLEEP=y
CONFIG_ACPI_AC=y
CONFIG_ACPI_BATTERY=y
CONFIG_ACPI_BUTTON=y
@ -148,6 +155,7 @@ CONFIG_ACPI_BUTTON=y
CONFIG_ACPI_HOTKEY=m
CONFIG_ACPI_FAN=y
CONFIG_ACPI_PROCESSOR=y
CONFIG_ACPI_HOTPLUG_CPU=y
CONFIG_ACPI_THERMAL=y
CONFIG_ACPI_NUMA=y
# CONFIG_ACPI_ASUS is not set
@ -158,7 +166,7 @@ CONFIG_ACPI_BLACKLIST_YEAR=2001
CONFIG_ACPI_EC=y
CONFIG_ACPI_POWER=y
CONFIG_ACPI_SYSTEM=y
# CONFIG_ACPI_CONTAINER is not set
CONFIG_ACPI_CONTAINER=y
#
# CPU Frequency scaling
@ -293,7 +301,6 @@ CONFIG_IPV6=y
# Network testing
#
# CONFIG_NET_PKTGEN is not set
# CONFIG_NETFILTER_NETLINK is not set
# CONFIG_HAMRADIO is not set
# CONFIG_IRDA is not set
# CONFIG_BT is not set
@ -311,6 +318,11 @@ CONFIG_PREVENT_FIRMWARE_BUILD=y
# CONFIG_FW_LOADER is not set
# CONFIG_DEBUG_DRIVER is not set
#
# Connector - unified userspace <-> kernelspace linker
#
# CONFIG_CONNECTOR is not set
#
# Memory Technology Devices (MTD)
#
@ -354,6 +366,11 @@ CONFIG_IOSCHED_NOOP=y
# CONFIG_IOSCHED_AS is not set
CONFIG_IOSCHED_DEADLINE=y
CONFIG_IOSCHED_CFQ=y
# CONFIG_DEFAULT_AS is not set
CONFIG_DEFAULT_DEADLINE=y
# CONFIG_DEFAULT_CFQ is not set
# CONFIG_DEFAULT_NOOP is not set
CONFIG_DEFAULT_IOSCHED="cfq"
# CONFIG_ATA_OVER_ETH is not set
#
@ -450,6 +467,7 @@ CONFIG_BLK_DEV_SD=y
CONFIG_SCSI_SPI_ATTRS=y
# CONFIG_SCSI_FC_ATTRS is not set
# CONFIG_SCSI_ISCSI_ATTRS is not set
# CONFIG_SCSI_SAS_ATTRS is not set
#
# SCSI low-level drivers
@ -469,20 +487,24 @@ CONFIG_AIC79XX_DEBUG_MASK=0
# CONFIG_AIC79XX_REG_PRETTY_PRINT is not set
# CONFIG_MEGARAID_NEWGEN is not set
# CONFIG_MEGARAID_LEGACY is not set
# CONFIG_MEGARAID_SAS is not set
CONFIG_SCSI_SATA=y
# CONFIG_SCSI_SATA_AHCI is not set
# CONFIG_SCSI_SATA_SVW is not set
CONFIG_SCSI_ATA_PIIX=y
# CONFIG_SCSI_SATA_MV is not set
# CONFIG_SCSI_SATA_NV is not set
# CONFIG_SCSI_SATA_PROMISE is not set
CONFIG_SCSI_SATA_NV=y
# CONFIG_SCSI_PDC_ADMA is not set
# CONFIG_SCSI_SATA_QSTOR is not set
# CONFIG_SCSI_SATA_PROMISE is not set
# CONFIG_SCSI_SATA_SX4 is not set
# CONFIG_SCSI_SATA_SIL is not set
# CONFIG_SCSI_SATA_SIL24 is not set
# CONFIG_SCSI_SATA_SIS is not set
# CONFIG_SCSI_SATA_ULI is not set
CONFIG_SCSI_SATA_VIA=y
# CONFIG_SCSI_SATA_VITESSE is not set
CONFIG_SCSI_SATA_INTEL_COMBINED=y
# CONFIG_SCSI_BUSLOGIC is not set
# CONFIG_SCSI_DMX3191D is not set
# CONFIG_SCSI_EATA is not set
@ -525,6 +547,7 @@ CONFIG_BLK_DEV_DM=y
CONFIG_FUSION=y
CONFIG_FUSION_SPI=y
# CONFIG_FUSION_FC is not set
# CONFIG_FUSION_SAS is not set
CONFIG_FUSION_MAX_SGE=128
# CONFIG_FUSION_CTL is not set
@ -564,6 +587,7 @@ CONFIG_NET_ETHERNET=y
CONFIG_MII=y
# CONFIG_HAPPYMEAL is not set
# CONFIG_SUNGEM is not set
# CONFIG_CASSINI is not set
CONFIG_NET_VENDOR_3COM=y
CONFIG_VORTEX=y
# CONFIG_TYPHOON is not set
@ -740,7 +764,43 @@ CONFIG_LEGACY_PTY_COUNT=256
#
# Watchdog Cards
#
# CONFIG_WATCHDOG is not set
CONFIG_WATCHDOG=y
# CONFIG_WATCHDOG_NOWAYOUT is not set
#
# Watchdog Device Drivers
#
CONFIG_SOFT_WATCHDOG=y
# CONFIG_ACQUIRE_WDT is not set
# CONFIG_ADVANTECH_WDT is not set
# CONFIG_ALIM1535_WDT is not set
# CONFIG_ALIM7101_WDT is not set
# CONFIG_SC520_WDT is not set
# CONFIG_EUROTECH_WDT is not set
# CONFIG_IB700_WDT is not set
# CONFIG_IBMASR is not set
# CONFIG_WAFER_WDT is not set
# CONFIG_I6300ESB_WDT is not set
# CONFIG_I8XX_TCO is not set
# CONFIG_SC1200_WDT is not set
# CONFIG_60XX_WDT is not set
# CONFIG_SBC8360_WDT is not set
# CONFIG_CPU5_WDT is not set
# CONFIG_W83627HF_WDT is not set
# CONFIG_W83877F_WDT is not set
# CONFIG_W83977F_WDT is not set
# CONFIG_MACHZ_WDT is not set
#
# PCI-based Watchdog Cards
#
# CONFIG_PCIPCWATCHDOG is not set
# CONFIG_WDTPCI is not set
#
# USB-based Watchdog Cards
#
# CONFIG_USBPCWATCHDOG is not set
CONFIG_HW_RANDOM=y
# CONFIG_NVRAM is not set
CONFIG_RTC=y
@ -767,6 +827,7 @@ CONFIG_MAX_RAW_DEVS=256
# TPM devices
#
# CONFIG_TCG_TPM is not set
# CONFIG_TELCLOCK is not set
#
# I2C support
@ -783,6 +844,7 @@ CONFIG_MAX_RAW_DEVS=256
#
CONFIG_HWMON=y
# CONFIG_HWMON_VID is not set
# CONFIG_SENSORS_HDAPS is not set
# CONFIG_HWMON_DEBUG_CHIP is not set
#
@ -886,12 +948,15 @@ CONFIG_USB_UHCI_HCD=y
# USB Device Class drivers
#
# CONFIG_OBSOLETE_OSS_USB_DRIVER is not set
# CONFIG_USB_BLUETOOTH_TTY is not set
# CONFIG_USB_ACM is not set
CONFIG_USB_PRINTER=y
#
# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' may also be needed; see USB_STORAGE Help for more information
# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
#
#
# may also be needed; see USB_STORAGE Help for more information
#
CONFIG_USB_STORAGE=y
# CONFIG_USB_STORAGE_DEBUG is not set
@ -924,6 +989,7 @@ CONFIG_USB_HIDINPUT=y
# CONFIG_USB_XPAD is not set
# CONFIG_USB_ATI_REMOTE is not set
# CONFIG_USB_KEYSPAN_REMOTE is not set
# CONFIG_USB_APPLETOUCH is not set
#
# USB Imaging devices
@ -1005,7 +1071,7 @@ CONFIG_USB_MON=y
#
# CONFIG_EDD is not set
# CONFIG_DELL_RBU is not set
CONFIG_DCDBAS=m
# CONFIG_DCDBAS is not set
#
# File systems
@ -1037,7 +1103,7 @@ CONFIG_INOTIFY=y
# CONFIG_QUOTA is not set
CONFIG_DNOTIFY=y
CONFIG_AUTOFS_FS=y
# CONFIG_AUTOFS4_FS is not set
CONFIG_AUTOFS4_FS=y
# CONFIG_FUSE_FS is not set
#
@ -1068,7 +1134,7 @@ CONFIG_TMPFS=y
CONFIG_HUGETLBFS=y
CONFIG_HUGETLB_PAGE=y
CONFIG_RAMFS=y
# CONFIG_RELAYFS_FS is not set
CONFIG_RELAYFS_FS=y
#
# Miscellaneous filesystems
@ -1186,7 +1252,9 @@ CONFIG_DETECT_SOFTLOCKUP=y
# CONFIG_DEBUG_KOBJECT is not set
# CONFIG_DEBUG_INFO is not set
CONFIG_DEBUG_FS=y
# CONFIG_DEBUG_VM is not set
# CONFIG_FRAME_POINTER is not set
# CONFIG_RCU_TORTURE_TEST is not set
CONFIG_INIT_DEBUG=y
# CONFIG_IOMMU_DEBUG is not set
CONFIG_KPROBES=y

View File

@ -36,9 +36,6 @@
#undef WARN_OLD
#undef CORE_DUMP /* probably broken */
extern int ia32_setup_arg_pages(struct linux_binprm *bprm,
unsigned long stack_top, int exec_stack);
static int load_aout_binary(struct linux_binprm *, struct pt_regs * regs);
static int load_aout_library(struct file*);

View File

@ -335,7 +335,8 @@ static void elf32_init(struct pt_regs *regs)
me->thread.es = __USER_DS;
}
int setup_arg_pages(struct linux_binprm *bprm, unsigned long stack_top, int executable_stack)
int ia32_setup_arg_pages(struct linux_binprm *bprm, unsigned long stack_top,
int executable_stack)
{
unsigned long stack_base;
struct vm_area_struct *mpnt;
@ -389,6 +390,7 @@ int setup_arg_pages(struct linux_binprm *bprm, unsigned long stack_top, int exec
return 0;
}
EXPORT_SYMBOL(ia32_setup_arg_pages);
static unsigned long
elf32_map (struct file *filep, unsigned long addr, struct elf_phdr *eppnt, int prot, int type)

View File

@ -11,6 +11,7 @@ obj-y := process.o signal.o entry.o traps.o irq.o \
obj-$(CONFIG_X86_MCE) += mce.o
obj-$(CONFIG_X86_MCE_INTEL) += mce_intel.o
obj-$(CONFIG_X86_MCE_AMD) += mce_amd.o
obj-$(CONFIG_MTRR) += ../../i386/kernel/cpu/mtrr/
obj-$(CONFIG_ACPI) += acpi/
obj-$(CONFIG_X86_MSR) += msr.o

View File

@ -196,7 +196,7 @@ static __u32 __init search_agp_bridge(u32 *order, int *valid_agp)
void __init iommu_hole_init(void)
{
int fix, num;
u32 aper_size, aper_alloc = 0, aper_order, last_aper_order = 0;
u32 aper_size, aper_alloc = 0, aper_order = 0, last_aper_order = 0;
u64 aper_base, last_aper_base = 0;
int valid_agp = 0;

View File

@ -833,6 +833,16 @@ int setup_profiling_timer(unsigned int multiplier)
return 0;
}
#ifdef CONFIG_X86_MCE_AMD
void setup_threshold_lvt(unsigned long lvt_off)
{
unsigned int v = 0;
unsigned long reg = (lvt_off << 4) + 0x500;
v |= THRESHOLD_APIC_VECTOR;
apic_write(reg, v);
}
#endif /* CONFIG_X86_MCE_AMD */
#undef APIC_DIVISOR
/*

View File

@ -23,8 +23,7 @@
#include <asm/e820.h>
#include <asm/proto.h>
#include <asm/bootsetup.h>
extern char _end[];
#include <asm/sections.h>
/*
* PFN of last memory page.

View File

@ -612,6 +612,9 @@ retint_kernel:
ENTRY(thermal_interrupt)
apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt
ENTRY(threshold_interrupt)
apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt
#ifdef CONFIG_SMP
ENTRY(reschedule_interrupt)
apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt

View File

@ -12,6 +12,7 @@
#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <asm/desc.h>
#include <asm/segment.h>
#include <asm/page.h>
@ -70,7 +71,7 @@ startup_32:
movl %eax, %cr4
/* Setup early boot stage 4 level pagetables */
movl $(init_level4_pgt - __START_KERNEL_map), %eax
movl $(boot_level4_pgt - __START_KERNEL_map), %eax
movl %eax, %cr3
/* Setup EFER (Extended Feature Enable Register) */
@ -113,7 +114,7 @@ startup_64:
movq %rax, %cr4
/* Setup early boot stage 4 level pagetables. */
movq $(init_level4_pgt - __START_KERNEL_map), %rax
movq $(boot_level4_pgt - __START_KERNEL_map), %rax
movq %rax, %cr3
/* Check if nx is implemented */
@ -240,20 +241,10 @@ ljumpvector:
ENTRY(stext)
ENTRY(_stext)
/*
* This default setting generates an ident mapping at address 0x100000
* and a mapping for the kernel that precisely maps virtual address
* 0xffffffff80000000 to physical address 0x000000. (always using
* 2Mbyte large pages provided by PAE mode)
*/
.org 0x1000
ENTRY(init_level4_pgt)
.quad 0x0000000000002007 + __PHYSICAL_START /* -> level3_ident_pgt */
.fill 255,8,0
.quad 0x000000000000a007 + __PHYSICAL_START
.fill 254,8,0
/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
.quad 0x0000000000003007 + __PHYSICAL_START /* -> level3_kernel_pgt */
/* This gets initialized in x86_64_start_kernel */
.fill 512,8,0
.org 0x2000
ENTRY(level3_ident_pgt)
@ -350,6 +341,24 @@ ENTRY(wakeup_level4_pgt)
.quad 0x0000000000003007 + __PHYSICAL_START /* -> level3_kernel_pgt */
#endif
#ifndef CONFIG_HOTPLUG_CPU
__INITDATA
#endif
/*
* This default setting generates an ident mapping at address 0x100000
* and a mapping for the kernel that precisely maps virtual address
* 0xffffffff80000000 to physical address 0x000000. (always using
* 2Mbyte large pages provided by PAE mode)
*/
.align PAGE_SIZE
ENTRY(boot_level4_pgt)
.quad 0x0000000000002007 + __PHYSICAL_START /* -> level3_ident_pgt */
.fill 255,8,0
.quad 0x000000000000a007 + __PHYSICAL_START
.fill 254,8,0
/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
.quad 0x0000000000003007 + __PHYSICAL_START /* -> level3_kernel_pgt */
.data
.align 16

View File

@ -19,14 +19,15 @@
#include <asm/bootsetup.h>
#include <asm/setup.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <asm/sections.h>
/* Don't add a printk in there. printk relies on the PDA which is not initialized
yet. */
static void __init clear_bss(void)
{
extern char __bss_start[], __bss_end[];
memset(__bss_start, 0,
(unsigned long) __bss_end - (unsigned long) __bss_start);
(unsigned long) __bss_stop - (unsigned long) __bss_start);
}
#define NEW_CL_POINTER 0x228 /* Relative to real mode data */
@ -75,8 +76,6 @@ static void __init setup_boot_cpu_data(void)
boot_cpu_data.x86_mask = eax & 0xf;
}
extern char _end[];
void __init x86_64_start_kernel(char * real_mode_data)
{
char *s;
@ -86,6 +85,13 @@ void __init x86_64_start_kernel(char * real_mode_data)
set_intr_gate(i, early_idt_handler);
asm volatile("lidt %0" :: "m" (idt_descr));
clear_bss();
/*
* switch to init_level4_pgt from boot_level4_pgt
*/
memcpy(init_level4_pgt, boot_level4_pgt, PTRS_PER_PGD*sizeof(pgd_t));
asm volatile("movq %0,%%cr3" :: "r" (__pa_symbol(&init_level4_pgt)));
pda_init(0);
copy_bootdata(real_mode_data);
#ifdef CONFIG_SMP

View File

@ -492,6 +492,7 @@ void invalidate_interrupt5(void);
void invalidate_interrupt6(void);
void invalidate_interrupt7(void);
void thermal_interrupt(void);
void threshold_interrupt(void);
void i8254_timer_resume(void);
static void setup_timer_hardware(void)
@ -580,6 +581,7 @@ void __init init_IRQ(void)
set_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
#endif
set_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
set_intr_gate(THRESHOLD_APIC_VECTOR, threshold_interrupt);
#ifdef CONFIG_X86_LOCAL_APIC
/* self generated IPI for local APIC timer */

View File

@ -57,7 +57,7 @@ int nr_ioapic_registers[MAX_IO_APICS];
* Rough estimation of how many shared IRQs there are, can
* be changed anytime.
*/
#define MAX_PLUS_SHARED_IRQS NR_IRQS
#define MAX_PLUS_SHARED_IRQS NR_IRQ_VECTORS
#define PIN_MAP_SIZE (MAX_PLUS_SHARED_IRQS + NR_IRQS)
/*
@ -85,6 +85,7 @@ int vector_irq[NR_VECTORS] __read_mostly = { [0 ... NR_VECTORS - 1] = -1};
int pin; \
struct irq_pin_list *entry = irq_2_pin + irq; \
\
BUG_ON(irq >= NR_IRQS); \
for (;;) { \
unsigned int reg; \
pin = entry->pin; \
@ -127,6 +128,8 @@ static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
}
#endif
static u8 gsi_2_irq[NR_IRQ_VECTORS] = { [0 ... NR_IRQ_VECTORS-1] = 0xFF };
/*
* The common case is 1:1 IRQ<->pin mappings. Sometimes there are
* shared ISA-space IRQs, so we have to support them. We are super
@ -137,6 +140,7 @@ static void add_pin_to_irq(unsigned int irq, int apic, int pin)
static int first_free_entry = NR_IRQS;
struct irq_pin_list *entry = irq_2_pin + irq;
BUG_ON(irq >= NR_IRQS);
while (entry->next)
entry = irq_2_pin + entry->next;
@ -144,7 +148,7 @@ static void add_pin_to_irq(unsigned int irq, int apic, int pin)
entry->next = first_free_entry;
entry = irq_2_pin + entry->next;
if (++first_free_entry >= PIN_MAP_SIZE)
panic("io_apic.c: whoops");
panic("io_apic.c: ran out of irq_2_pin entries!");
}
entry->apic = apic;
entry->pin = pin;
@ -420,6 +424,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
best_guess = irq;
}
}
BUG_ON(best_guess >= NR_IRQS);
return best_guess;
}
@ -610,6 +615,64 @@ static inline int irq_trigger(int idx)
return MPBIOS_trigger(idx);
}
static int next_irq = 16;
/*
* gsi_irq_sharing -- Name overload! "irq" can be either a legacy IRQ
* in the range 0-15, a linux IRQ in the range 0-223, or a GSI number
* from ACPI, which can reach 800 in large boxen.
*
* Compact the sparse GSI space into a sequential IRQ series and reuse
* vectors if possible.
*/
int gsi_irq_sharing(int gsi)
{
int i, tries, vector;
BUG_ON(gsi >= NR_IRQ_VECTORS);
if (platform_legacy_irq(gsi))
return gsi;
if (gsi_2_irq[gsi] != 0xFF)
return (int)gsi_2_irq[gsi];
tries = NR_IRQS;
try_again:
vector = assign_irq_vector(gsi);
/*
* Sharing vectors means sharing IRQs, so scan irq_vectors for previous
* use of vector and if found, return that IRQ. However, we never want
* to share legacy IRQs, which usually have a different trigger mode
* than PCI.
*/
for (i = 0; i < NR_IRQS; i++)
if (IO_APIC_VECTOR(i) == vector)
break;
if (platform_legacy_irq(i)) {
if (--tries >= 0) {
IO_APIC_VECTOR(i) = 0;
goto try_again;
}
panic("gsi_irq_sharing: didn't find an IRQ using vector 0x%02X for GSI %d", vector, gsi);
}
if (i < NR_IRQS) {
gsi_2_irq[gsi] = i;
printk(KERN_INFO "GSI %d sharing vector 0x%02X and IRQ %d\n",
gsi, vector, i);
return i;
}
i = next_irq++;
BUG_ON(i >= NR_IRQS);
gsi_2_irq[gsi] = i;
IO_APIC_VECTOR(i) = vector;
printk(KERN_INFO "GSI %d assigned vector 0x%02X and IRQ %d\n",
gsi, vector, i);
return i;
}
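
To make the compaction concrete (numbers hypothetical): a GSI beyond the IRQ range, say GSI 400 on a large box, first gets a vector from assign_irq_vector(). If some existing IRQ already owns that vector (found by the scan over IO_APIC_VECTOR()), the GSI is simply mapped onto that IRQ and the pair share the vector; otherwise the GSI is given the next free IRQ slot (next_irq, starting just above the legacy range) and the result is cached in gsi_2_irq[] so later lookups are constant time. Legacy IRQs 0-15 are never shared because their trigger mode usually differs from PCI.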
static int pin_2_irq(int idx, int apic, int pin)
{
int irq, i;
@ -639,6 +702,7 @@ static int pin_2_irq(int idx, int apic, int pin)
while (i < apic)
irq += nr_ioapic_registers[i++];
irq += pin;
irq = gsi_irq_sharing(irq);
break;
}
default:
@ -648,6 +712,7 @@ static int pin_2_irq(int idx, int apic, int pin)
break;
}
}
BUG_ON(irq >= NR_IRQS);
/*
* PCI IRQ command line redirection. Yes, limits are hardcoded.
@ -663,6 +728,7 @@ static int pin_2_irq(int idx, int apic, int pin)
}
}
}
BUG_ON(irq >= NR_IRQS);
return irq;
}
@ -690,8 +756,8 @@ int assign_irq_vector(int irq)
{
static int current_vector = FIRST_DEVICE_VECTOR, offset = 0;
BUG_ON(irq >= NR_IRQ_VECTORS);
if (IO_APIC_VECTOR(irq) > 0)
BUG_ON(irq != AUTO_ASSIGN && (unsigned)irq >= NR_IRQ_VECTORS);
if (irq != AUTO_ASSIGN && IO_APIC_VECTOR(irq) > 0)
return IO_APIC_VECTOR(irq);
next:
current_vector += 8;
@ -699,9 +765,8 @@ next:
goto next;
if (current_vector >= FIRST_SYSTEM_VECTOR) {
offset++;
if (!(offset%8))
return -ENOSPC;
/* If we run out of vectors on large boxen, must share them. */
offset = (offset + 1) % 8;
current_vector = FIRST_DEVICE_VECTOR + offset;
}
@ -1917,6 +1982,7 @@ int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int a
entry.polarity = active_high_low;
entry.mask = 1; /* Disabled (masked) */
irq = gsi_irq_sharing(irq);
/*
* IRQs < 16 are already in the irq_2_pin[] map
*/

View File

@ -37,7 +37,7 @@ static unsigned long bank[NR_BANKS] = { [0 ... NR_BANKS-1] = ~0UL };
static unsigned long console_logged;
static int notify_user;
static int rip_msr;
static int mce_bootlog;
static int mce_bootlog = 1;
/*
* Lockless MCE logging infrastructure.
@ -347,7 +347,11 @@ static void __cpuinit mce_cpu_quirks(struct cpuinfo_x86 *c)
/* disable GART TBL walk error reporting, which trips off
incorrectly with the IOMMU & 3ware & Cerberus. */
clear_bit(10, &bank[4]);
/* Lots of broken BIOS around that don't clear them
by default and leave crap in there. Don't log. */
mce_bootlog = 0;
}
}
static void __cpuinit mce_cpu_features(struct cpuinfo_x86 *c)
@ -356,6 +360,9 @@ static void __cpuinit mce_cpu_features(struct cpuinfo_x86 *c)
case X86_VENDOR_INTEL:
mce_intel_feature_init(c);
break;
case X86_VENDOR_AMD:
mce_amd_feature_init(c);
break;
default:
break;
}
@ -495,16 +502,16 @@ static int __init mcheck_disable(char *str)
/* mce=off disables machine check. Note you can reenable it later
using sysfs.
mce=TOLERANCELEVEL (number, see above)
mce=bootlog Log MCEs from before booting. Disabled by default to work
around buggy BIOS that leave bogus MCEs. */
mce=bootlog Log MCEs from before booting. Disabled by default on AMD.
mce=nobootlog Don't log MCEs from before booting. */
static int __init mcheck_enable(char *str)
{
if (*str == '=')
str++;
if (!strcmp(str, "off"))
mce_dont_init = 1;
else if (!strcmp(str, "bootlog"))
mce_bootlog = 1;
else if (!strcmp(str, "bootlog") || !strcmp(str,"nobootlog"))
mce_bootlog = str[0] == 'b';
else if (isdigit(str[0]))
get_option(&str, &tolerant);
else

View File

@ -0,0 +1,538 @@
/*
* (c) 2005 Advanced Micro Devices, Inc.
* Your use of this code is subject to the terms and conditions of the
* GNU general public license version 2. See "COPYING" or
* http://www.gnu.org/licenses/gpl.html
*
* Written by Jacob Shin - AMD, Inc.
*
* Support : jacob.shin@amd.com
*
* MC4_MISC0 DRAM ECC Error Threshold available under AMD K8 Rev F.
* MC4_MISC0 exists per physical processor.
*
*/
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kobject.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/sysdev.h>
#include <linux/sysfs.h>
#include <asm/apic.h>
#include <asm/mce.h>
#include <asm/msr.h>
#include <asm/percpu.h>
#define PFX "mce_threshold: "
#define VERSION "version 1.00.9"
#define NR_BANKS 5
#define THRESHOLD_MAX 0xFFF
#define INT_TYPE_APIC 0x00020000
#define MASK_VALID_HI 0x80000000
#define MASK_LVTOFF_HI 0x00F00000
#define MASK_COUNT_EN_HI 0x00080000
#define MASK_INT_TYPE_HI 0x00060000
#define MASK_OVERFLOW_HI 0x00010000
#define MASK_ERR_COUNT_HI 0x00000FFF
#define MASK_OVERFLOW 0x0001000000000000L
struct threshold_bank {
unsigned int cpu;
u8 bank;
u8 interrupt_enable;
u16 threshold_limit;
struct kobject kobj;
};
static struct threshold_bank threshold_defaults = {
.interrupt_enable = 0,
.threshold_limit = THRESHOLD_MAX,
};
#ifdef CONFIG_SMP
static unsigned char shared_bank[NR_BANKS] = {
0, 0, 0, 0, 1
};
#endif
static DEFINE_PER_CPU(unsigned char, bank_map); /* see which banks are on */
/*
* CPU Initialization
*/
/* must be called with correct cpu affinity */
static void threshold_restart_bank(struct threshold_bank *b,
int reset, u16 old_limit)
{
u32 mci_misc_hi, mci_misc_lo;
rdmsr(MSR_IA32_MC0_MISC + b->bank * 4, mci_misc_lo, mci_misc_hi);
if (b->threshold_limit < (mci_misc_hi & THRESHOLD_MAX))
reset = 1; /* limit cannot be lower than err count */
if (reset) { /* reset err count and overflow bit */
mci_misc_hi =
(mci_misc_hi & ~(MASK_ERR_COUNT_HI | MASK_OVERFLOW_HI)) |
(THRESHOLD_MAX - b->threshold_limit);
} else if (old_limit) { /* change limit w/o reset */
int new_count = (mci_misc_hi & THRESHOLD_MAX) +
(old_limit - b->threshold_limit);
mci_misc_hi = (mci_misc_hi & ~MASK_ERR_COUNT_HI) |
(new_count & THRESHOLD_MAX);
}
b->interrupt_enable ?
(mci_misc_hi = (mci_misc_hi & ~MASK_INT_TYPE_HI) | INT_TYPE_APIC) :
(mci_misc_hi &= ~MASK_INT_TYPE_HI);
mci_misc_hi |= MASK_COUNT_EN_HI;
wrmsr(MSR_IA32_MC0_MISC + b->bank * 4, mci_misc_lo, mci_misc_hi);
}
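
A worked sketch of the count encoding used above: the hardware counter ticks upward and interrupts on overflow past THRESHOLD_MAX, so a limit of N errors is programmed as THRESHOLD_MAX - N of preloaded count. With THRESHOLD_MAX == 0xFFF, threshold_limit == 10 preloads 0xFF5, leaving ten errors of headroom before THRESHOLD_APIC_VECTOR fires (illustration only, names as defined earlier in this file):

static u32 preload_for_limit(u16 threshold_limit)
{
	/* value OR'ed into the MASK_ERR_COUNT_HI field of MCi_MISC */
	return THRESHOLD_MAX - threshold_limit;
}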
void __cpuinit mce_amd_feature_init(struct cpuinfo_x86 *c)
{
int bank;
u32 mci_misc_lo, mci_misc_hi;
unsigned int cpu = smp_processor_id();
for (bank = 0; bank < NR_BANKS; ++bank) {
rdmsr(MSR_IA32_MC0_MISC + bank * 4, mci_misc_lo, mci_misc_hi);
/* !valid, !counter present, bios locked */
if (!(mci_misc_hi & MASK_VALID_HI) ||
!(mci_misc_hi & MASK_VALID_HI >> 1) ||
(mci_misc_hi & MASK_VALID_HI >> 2))
continue;
per_cpu(bank_map, cpu) |= (1 << bank);
#ifdef CONFIG_SMP
if (shared_bank[bank] && cpu_core_id[cpu])
continue;
#endif
setup_threshold_lvt((mci_misc_hi & MASK_LVTOFF_HI) >> 20);
threshold_defaults.cpu = cpu;
threshold_defaults.bank = bank;
threshold_restart_bank(&threshold_defaults, 0, 0);
}
}
/*
* APIC Interrupt Handler
*/
/*
 * The threshold interrupt handler services THRESHOLD_APIC_VECTOR.
 * The interrupt fires when error_count reaches threshold_limit.
 * The handler simply logs an mcelog entry with a software-defined bank number.
*/
asmlinkage void mce_threshold_interrupt(void)
{
int bank;
struct mce m;
ack_APIC_irq();
irq_enter();
memset(&m, 0, sizeof(m));
rdtscll(m.tsc);
m.cpu = smp_processor_id();
/* assume first bank caused it */
for (bank = 0; bank < NR_BANKS; ++bank) {
m.bank = MCE_THRESHOLD_BASE + bank;
rdmsrl(MSR_IA32_MC0_MISC + bank * 4, m.misc);
if (m.misc & MASK_OVERFLOW) {
mce_log(&m);
goto out;
}
}
out:
irq_exit();
}
/*
* Sysfs Interface
*/
static struct sysdev_class threshold_sysclass = {
set_kset_name("threshold"),
};
static DEFINE_PER_CPU(struct sys_device, device_threshold);
struct threshold_attr {
struct attribute attr;
ssize_t(*show) (struct threshold_bank *, char *);
ssize_t(*store) (struct threshold_bank *, const char *, size_t count);
};
static DEFINE_PER_CPU(struct threshold_bank *, threshold_banks[NR_BANKS]);
static cpumask_t affinity_set(unsigned int cpu)
{
cpumask_t oldmask = current->cpus_allowed;
cpumask_t newmask = CPU_MASK_NONE;
cpu_set(cpu, newmask);
set_cpus_allowed(current, newmask);
return oldmask;
}
static void affinity_restore(cpumask_t oldmask)
{
set_cpus_allowed(current, oldmask);
}
#define SHOW_FIELDS(name) \
static ssize_t show_ ## name(struct threshold_bank * b, char *buf) \
{ \
return sprintf(buf, "%lx\n", (unsigned long) b->name); \
}
SHOW_FIELDS(interrupt_enable)
SHOW_FIELDS(threshold_limit)
static ssize_t store_interrupt_enable(struct threshold_bank *b,
const char *buf, size_t count)
{
char *end;
cpumask_t oldmask;
unsigned long new = simple_strtoul(buf, &end, 0);
if (end == buf)
return -EINVAL;
b->interrupt_enable = !!new;
oldmask = affinity_set(b->cpu);
threshold_restart_bank(b, 0, 0);
affinity_restore(oldmask);
return end - buf;
}
static ssize_t store_threshold_limit(struct threshold_bank *b,
const char *buf, size_t count)
{
char *end;
cpumask_t oldmask;
u16 old;
unsigned long new = simple_strtoul(buf, &end, 0);
if (end == buf)
return -EINVAL;
if (new > THRESHOLD_MAX)
new = THRESHOLD_MAX;
if (new < 1)
new = 1;
old = b->threshold_limit;
b->threshold_limit = new;
oldmask = affinity_set(b->cpu);
threshold_restart_bank(b, 0, old);
affinity_restore(oldmask);
return end - buf;
}
static ssize_t show_error_count(struct threshold_bank *b, char *buf)
{
u32 high, low;
cpumask_t oldmask;
oldmask = affinity_set(b->cpu);
rdmsr(MSR_IA32_MC0_MISC + b->bank * 4, low, high); /* ignore low 32 */
affinity_restore(oldmask);
return sprintf(buf, "%x\n",
(high & 0xFFF) - (THRESHOLD_MAX - b->threshold_limit));
}
static ssize_t store_error_count(struct threshold_bank *b,
const char *buf, size_t count)
{
cpumask_t oldmask;
oldmask = affinity_set(b->cpu);
threshold_restart_bank(b, 1, 0);
affinity_restore(oldmask);
return 1;
}
#define THRESHOLD_ATTR(_name,_mode,_show,_store) { \
.attr = {.name = __stringify(_name), .mode = _mode }, \
.show = _show, \
.store = _store, \
};
#define ATTR_FIELDS(name) \
static struct threshold_attr name = \
THRESHOLD_ATTR(name, 0644, show_## name, store_## name)
ATTR_FIELDS(interrupt_enable);
ATTR_FIELDS(threshold_limit);
ATTR_FIELDS(error_count);
static struct attribute *default_attrs[] = {
&interrupt_enable.attr,
&threshold_limit.attr,
&error_count.attr,
NULL
};
#define to_bank(k) container_of(k,struct threshold_bank,kobj)
#define to_attr(a) container_of(a,struct threshold_attr,attr)
static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
struct threshold_bank *b = to_bank(kobj);
struct threshold_attr *a = to_attr(attr);
ssize_t ret;
ret = a->show ? a->show(b, buf) : -EIO;
return ret;
}
static ssize_t store(struct kobject *kobj, struct attribute *attr,
const char *buf, size_t count)
{
struct threshold_bank *b = to_bank(kobj);
struct threshold_attr *a = to_attr(attr);
ssize_t ret;
ret = a->store ? a->store(b, buf, count) : -EIO;
return ret;
}
static struct sysfs_ops threshold_ops = {
.show = show,
.store = store,
};
static struct kobj_type threshold_ktype = {
.sysfs_ops = &threshold_ops,
.default_attrs = default_attrs,
};
/* symlinks sibling shared banks to first core. first core owns dir/files. */
static __cpuinit int threshold_create_bank(unsigned int cpu, int bank)
{
int err = 0;
struct threshold_bank *b = 0;
#ifdef CONFIG_SMP
if (cpu_core_id[cpu] && shared_bank[bank]) { /* symlink */
char name[16];
unsigned lcpu = first_cpu(cpu_core_map[cpu]);
if (cpu_core_id[lcpu])
goto out; /* first core not up yet */
b = per_cpu(threshold_banks, lcpu)[bank];
if (!b)
goto out;
sprintf(name, "bank%i", bank);
err = sysfs_create_link(&per_cpu(device_threshold, cpu).kobj,
&b->kobj, name);
if (err)
goto out;
per_cpu(threshold_banks, cpu)[bank] = b;
goto out;
}
#endif
b = kmalloc(sizeof(struct threshold_bank), GFP_KERNEL);
if (!b) {
err = -ENOMEM;
goto out;
}
memset(b, 0, sizeof(struct threshold_bank));
b->cpu = cpu;
b->bank = bank;
b->interrupt_enable = 0;
b->threshold_limit = THRESHOLD_MAX;
kobject_set_name(&b->kobj, "bank%i", bank);
b->kobj.parent = &per_cpu(device_threshold, cpu).kobj;
b->kobj.ktype = &threshold_ktype;
err = kobject_register(&b->kobj);
if (err) {
kfree(b);
goto out;
}
per_cpu(threshold_banks, cpu)[bank] = b;
out:
return err;
}
/* create dir/files for all valid threshold banks */
static __cpuinit int threshold_create_device(unsigned int cpu)
{
int bank;
int err = 0;
per_cpu(device_threshold, cpu).id = cpu;
per_cpu(device_threshold, cpu).cls = &threshold_sysclass;
err = sysdev_register(&per_cpu(device_threshold, cpu));
if (err)
goto out;
for (bank = 0; bank < NR_BANKS; ++bank) {
if (!(per_cpu(bank_map, cpu) & 1 << bank))
continue;
err = threshold_create_bank(cpu, bank);
if (err)
goto out;
}
out:
return err;
}
#ifdef CONFIG_HOTPLUG_CPU
/*
 * Let's be hotplug friendly.
 * On multi-core processors, the first core always takes ownership of the
 * shared sysfs dir/files, and the rest of the cores are symlinked to it.
*/
/* cpu hotplug call removes all symlinks before first core dies */
static __cpuinit void threshold_remove_bank(unsigned int cpu, int bank)
{
struct threshold_bank *b;
char name[16];
b = per_cpu(threshold_banks, cpu)[bank];
if (!b)
return;
if (shared_bank[bank] && atomic_read(&b->kobj.kref.refcount) > 2) {
sprintf(name, "bank%i", bank);
sysfs_remove_link(&per_cpu(device_threshold, cpu).kobj, name);
per_cpu(threshold_banks, cpu)[bank] = 0;
} else {
kobject_unregister(&b->kobj);
kfree(per_cpu(threshold_banks, cpu)[bank]);
}
}
static __cpuinit void threshold_remove_device(unsigned int cpu)
{
int bank;
for (bank = 0; bank < NR_BANKS; ++bank) {
if (!(per_cpu(bank_map, cpu) & 1 << bank))
continue;
threshold_remove_bank(cpu, bank);
}
sysdev_unregister(&per_cpu(device_threshold, cpu));
}
/* link all existing siblings when first core comes up */
static __cpuinit int threshold_create_symlinks(unsigned int cpu)
{
int bank, err = 0;
unsigned int lcpu = 0;
if (cpu_core_id[cpu])
return 0;
for_each_cpu_mask(lcpu, cpu_core_map[cpu]) {
if (lcpu == cpu)
continue;
for (bank = 0; bank < NR_BANKS; ++bank) {
if (!(per_cpu(bank_map, cpu) & 1 << bank))
continue;
if (!shared_bank[bank])
continue;
err = threshold_create_bank(lcpu, bank);
}
}
return err;
}
/* remove all symlinks before first core dies. */
static __cpuinit void threshold_remove_symlinks(unsigned int cpu)
{
int bank;
unsigned int lcpu = 0;
if (cpu_core_id[cpu])
return;
for_each_cpu_mask(lcpu, cpu_core_map[cpu]) {
if (lcpu == cpu)
continue;
for (bank = 0; bank < NR_BANKS; ++bank) {
if (!(per_cpu(bank_map, cpu) & 1 << bank))
continue;
if (!shared_bank[bank])
continue;
threshold_remove_bank(lcpu, bank);
}
}
}
#else /* !CONFIG_HOTPLUG_CPU */
static __cpuinit void threshold_create_symlinks(unsigned int cpu)
{
}
static __cpuinit void threshold_remove_symlinks(unsigned int cpu)
{
}
static void threshold_remove_device(unsigned int cpu)
{
}
#endif
/* get notified when a cpu comes on/off */
static __cpuinit int threshold_cpu_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
/* cpu was unsigned int to begin with */
unsigned int cpu = (unsigned long)hcpu;
if (cpu >= NR_CPUS)
goto out;
switch (action) {
case CPU_ONLINE:
threshold_create_device(cpu);
threshold_create_symlinks(cpu);
break;
case CPU_DOWN_PREPARE:
threshold_remove_symlinks(cpu);
break;
case CPU_DOWN_FAILED:
threshold_create_symlinks(cpu);
break;
case CPU_DEAD:
threshold_remove_device(cpu);
break;
default:
break;
}
out:
return NOTIFY_OK;
}
static struct notifier_block threshold_cpu_notifier = {
.notifier_call = threshold_cpu_callback,
};
static __init int threshold_init_device(void)
{
int err;
int lcpu = 0;
err = sysdev_class_register(&threshold_sysclass);
if (err)
goto out;
/* to hit CPUs online before the notifier is up */
for_each_online_cpu(lcpu) {
err = threshold_create_device(lcpu);
if (err)
goto out;
}
register_cpu_notifier(&threshold_cpu_notifier);
out:
return err;
}
device_initcall(threshold_init_device);
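
A userspace sketch of the resulting sysfs interface. The path below is inferred from the sysdev class name ("threshold") and the per-cpu device/bank kobjects registered above, so treat the layout as an assumption to verify on a live system:

#include <stdio.h>

int main(void)
{
	/* assumed path: threshold<cpu>/bank<N> under the sysdev class dir */
	const char *base = "/sys/devices/system/threshold/threshold0/bank4";
	char path[128];
	unsigned int count;
	FILE *f;

	/* error_count is printed with "%x" by show_error_count() */
	snprintf(path, sizeof(path), "%s/error_count", base);
	f = fopen(path, "r");
	if (!f || fscanf(f, "%x", &count) != 1)
		return 1;
	fclose(f);
	printf("bank4 error_count: %u\n", count);

	/* store_threshold_limit() clamps the new value to 1..THRESHOLD_MAX */
	snprintf(path, sizeof(path), "%s/threshold_limit", base);
	f = fopen(path, "w");
	if (!f)
		return 1;
	fprintf(f, "100\n");
	fclose(f);
	return 0;
}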

View File

@ -42,7 +42,7 @@ int acpi_found_madt;
* Various Linux-internal data structures created from the
* MP-table.
*/
int apic_version [MAX_APICS];
unsigned char apic_version [MAX_APICS];
unsigned char mp_bus_id_to_type [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 };
int mp_bus_id_to_pci_bus [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 };
@ -65,7 +65,9 @@ unsigned long mp_lapic_addr = 0;
/* Processor that is doing the boot up */
unsigned int boot_cpu_id = -1U;
/* Internal processor count */
static unsigned int num_processors = 0;
unsigned int num_processors __initdata = 0;
unsigned disabled_cpus __initdata;
/* Bitmask of physically existing CPUs */
physid_mask_t phys_cpu_present_map = PHYSID_MASK_NONE;
@ -106,11 +108,14 @@ static int __init mpf_checksum(unsigned char *mp, int len)
static void __init MP_processor_info (struct mpc_config_processor *m)
{
int ver, cpu;
int cpu;
unsigned char ver;
static int found_bsp=0;
if (!(m->mpc_cpuflag & CPU_ENABLED))
if (!(m->mpc_cpuflag & CPU_ENABLED)) {
disabled_cpus++;
return;
}
printk(KERN_INFO "Processor #%d %d:%d APIC version %d\n",
m->mpc_apicid,
@ -129,12 +134,14 @@ static void __init MP_processor_info (struct mpc_config_processor *m)
}
cpu = num_processors++;
if (m->mpc_apicid > MAX_APICS) {
#if MAX_APICS < 255
if ((int)m->mpc_apicid > MAX_APICS) {
printk(KERN_ERR "Processor #%d INVALID. (Max ID: %d).\n",
m->mpc_apicid, MAX_APICS);
return;
}
#endif
ver = m->mpc_apicver;
physid_set(m->mpc_apicid, phys_cpu_present_map);
@ -218,7 +225,7 @@ static void __init MP_intsrc_info (struct mpc_config_intsrc *m)
m->mpc_irqtype, m->mpc_irqflag & 3,
(m->mpc_irqflag >> 2) & 3, m->mpc_srcbus,
m->mpc_srcbusirq, m->mpc_dstapic, m->mpc_dstirq);
if (++mp_irq_entries == MAX_IRQ_SOURCES)
if (++mp_irq_entries >= MAX_IRQ_SOURCES)
panic("Max # of irq sources exceeded!!\n");
}
@ -549,7 +556,7 @@ void __init get_smp_config (void)
* Read the physical hardware table. Anything here will
* override the defaults.
*/
if (!smp_read_mpc((void *)(unsigned long)mpf->mpf_physptr)) {
if (!smp_read_mpc(phys_to_virt(mpf->mpf_physptr))) {
smp_found_config = 0;
printk(KERN_ERR "BIOS bug, MP table errors detected!...\n");
printk(KERN_ERR "... disabling SMP support. (tell your hw vendor)\n");

View File

@ -220,6 +220,12 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
uses the normal dma_mask for alloc_coherent. */
dma_mask &= *dev->dma_mask;
/* Why <=? Even when the mask is smaller than 4GB it is often larger
than 16MB and in this case we have a chance of finding fitting memory
in the next higher zone first. If not retry with true GFP_DMA. -AK */
if (dma_mask <= 0xffffffff)
gfp |= GFP_DMA32;
again:
memory = dma_alloc_pages(dev, gfp, get_order(size));
if (memory == NULL)
@ -245,7 +251,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
}
if (!(gfp & GFP_DMA)) {
gfp |= GFP_DMA;
gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
goto again;
}
return NULL;
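
A condensed restatement of the fallback order these two hunks implement (simplified; not the literal function):

/* Masks <= 4GB try ZONE_DMA32 first, since it usually fits and is much
 * larger than the 16MB ZONE_DMA; only fall back to true GFP_DMA when
 * that allocation cannot satisfy the mask. */
static unsigned int pick_dma_gfp(u64 dma_mask, unsigned int gfp)
{
	if (dma_mask <= 0xffffffff)
		gfp |= GFP_DMA32;
	return gfp;
}
/* on allocation failure: gfp = (gfp & ~GFP_DMA32) | GFP_DMA; and retry */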

View File

@ -144,7 +144,8 @@ void cpu_idle_wait(void)
do {
ssleep(1);
for_each_online_cpu(cpu) {
if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu))
if (cpu_isset(cpu, map) &&
!per_cpu(cpu_idle_state, cpu))
cpu_clear(cpu, map);
}
cpus_and(map, map, cpu_online_map);
@ -275,7 +276,8 @@ void __show_regs(struct pt_regs * regs)
system_utsname.version);
printk("RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->rip);
printk_address(regs->rip);
printk("\nRSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss, regs->rsp, regs->eflags);
printk("\nRSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss, regs->rsp,
regs->eflags);
printk("RAX: %016lx RBX: %016lx RCX: %016lx\n",
regs->rax, regs->rbx, regs->rcx);
printk("RDX: %016lx RSI: %016lx RDI: %016lx\n",
@ -427,15 +429,14 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long rsp,
struct pt_regs * childregs;
struct task_struct *me = current;
childregs = ((struct pt_regs *) (THREAD_SIZE + (unsigned long) p->thread_info)) - 1;
childregs = ((struct pt_regs *)
(THREAD_SIZE + (unsigned long) p->thread_info)) - 1;
*childregs = *regs;
childregs->rax = 0;
childregs->rsp = rsp;
if (rsp == ~0UL) {
if (rsp == ~0UL)
childregs->rsp = (unsigned long)childregs;
}
p->thread.rsp = (unsigned long) childregs;
p->thread.rsp0 = (unsigned long) (childregs+1);
@ -457,7 +458,8 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long rsp,
p->thread.io_bitmap_max = 0;
return -ENOMEM;
}
memcpy(p->thread.io_bitmap_ptr, me->thread.io_bitmap_ptr, IO_BITMAP_BYTES);
memcpy(p->thread.io_bitmap_ptr, me->thread.io_bitmap_ptr,
IO_BITMAP_BYTES);
}
/*
@ -494,7 +496,8 @@ out:
* - fold all the options into a flag word and test it with a single test.
* - could test fs/gs bitsliced
*/
struct task_struct *__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
struct thread_struct *prev = &prev_p->thread,
*next = &next_p->thread;
@ -565,7 +568,8 @@ struct task_struct *__switch_to(struct task_struct *prev_p, struct task_struct *
prev->userrsp = read_pda(oldrsp);
write_pda(oldrsp, next->userrsp);
write_pda(pcurrent, next_p);
write_pda(kernelstack, (unsigned long)next_p->thread_info + THREAD_SIZE - PDA_STACKOFFSET);
write_pda(kernelstack,
(unsigned long)next_p->thread_info + THREAD_SIZE - PDA_STACKOFFSET);
/*
* Now maybe reload the debug registers
@ -646,7 +650,9 @@ asmlinkage long sys_fork(struct pt_regs *regs)
return do_fork(SIGCHLD, regs->rsp, regs, 0, NULL, NULL);
}
asmlinkage long sys_clone(unsigned long clone_flags, unsigned long newsp, void __user *parent_tid, void __user *child_tid, struct pt_regs *regs)
asmlinkage long
sys_clone(unsigned long clone_flags, unsigned long newsp,
void __user *parent_tid, void __user *child_tid, struct pt_regs *regs)
{
if (!newsp)
newsp = regs->rsp;
@ -682,7 +688,8 @@ unsigned long get_wchan(struct task_struct *p)
return 0;
fp = *(u64 *)(p->thread.rsp);
do {
if (fp < (unsigned long)stack || fp > (unsigned long)stack+THREAD_SIZE)
if (fp < (unsigned long)stack ||
fp > (unsigned long)stack+THREAD_SIZE)
return 0;
rip = *(u64 *)(fp+8);
if (!in_sched_functions(rip))
@ -717,8 +724,8 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
task->thread.gsindex = 0;
task->thread.gs = addr;
if (doit) {
load_gs_index(0);
ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr);
load_gs_index(0);
ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr);
}
}
put_cpu();
@ -735,7 +742,7 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
set_32bit_tls(task, FS_TLS, addr);
if (doit) {
load_TLS(&task->thread, cpu);
asm volatile("movl %0,%%fs" :: "r" (FS_TLS_SEL));
asm volatile("movl %0,%%fs" :: "r"(FS_TLS_SEL));
}
task->thread.fsindex = FS_TLS_SEL;
task->thread.fs = 0;
@ -745,8 +752,8 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
if (doit) {
/* set the selector to 0 to not confuse
__switch_to */
asm volatile("movl %0,%%fs" :: "r" (0));
ret = checking_wrmsrl(MSR_FS_BASE, addr);
asm volatile("movl %0,%%fs" :: "r" (0));
ret = checking_wrmsrl(MSR_FS_BASE, addr);
}
}
put_cpu();
@ -755,9 +762,9 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
unsigned long base;
if (task->thread.fsindex == FS_TLS_SEL)
base = read_32bit_tls(task, FS_TLS);
else if (doit) {
else if (doit)
rdmsrl(MSR_FS_BASE, base);
} else
else
base = task->thread.fs;
ret = put_user(base, (unsigned long __user *)addr);
break;
@ -766,9 +773,9 @@ long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
unsigned long base;
if (task->thread.gsindex == GS_TLS_SEL)
base = read_32bit_tls(task, GS_TLS);
else if (doit) {
else if (doit)
rdmsrl(MSR_KERNEL_GS_BASE, base);
} else
else
base = task->thread.gs;
ret = put_user(base, (unsigned long __user *)addr);
break;

View File

@ -77,6 +77,7 @@ static inline void kb_wait(void)
void machine_shutdown(void)
{
unsigned long flags;
/* Stop the cpus and apics */
#ifdef CONFIG_SMP
int reboot_cpu_id;
@ -98,7 +99,7 @@ void machine_shutdown(void)
smp_send_stop();
#endif
local_irq_disable();
local_irq_save(flags);
#ifndef CONFIG_SMP
disable_local_APIC();
@ -106,7 +107,7 @@ void machine_shutdown(void)
disable_IO_APIC();
local_irq_enable();
local_irq_restore(flags);
}
void machine_emergency_restart(void)
@ -120,7 +121,7 @@ void machine_emergency_restart(void)
/* Could also try the reset bit in the Hammer NB */
switch (reboot_type) {
case BOOT_KBD:
for (i=0; i<100; i++) {
for (i=0; i<10; i++) {
kb_wait();
udelay(50);
outb(0xfe,0x64); /* pulse reset low */

View File

@ -60,6 +60,7 @@
#include <asm/setup.h>
#include <asm/mach_apic.h>
#include <asm/numa.h>
#include <asm/sections.h>
/*
* Machine setup..
@ -103,7 +104,6 @@ struct edid_info edid_info;
struct e820map e820;
extern int root_mountflags;
extern char _text, _etext, _edata, _end;
char command_line[COMMAND_LINE_SIZE];
@ -412,7 +412,6 @@ contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
{
unsigned long bootmap_size, bootmap;
memory_present(0, start_pfn, end_pfn);
bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size);
if (bootmap == -1L)
@ -571,6 +570,8 @@ void __init setup_arch(char **cmdline_p)
init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));
zap_low_mappings(0);
#ifdef CONFIG_ACPI
/*
* Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
@ -657,8 +658,6 @@ void __init setup_arch(char **cmdline_p)
}
#endif
sparse_init();
paging_init();
check_ioapic();
@ -793,7 +792,7 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
#endif
bits = 0;
while ((1 << bits) < c->x86_num_cores)
while ((1 << bits) < c->x86_max_cores)
bits++;
/* Low order bits define the core id (index of core in socket) */
@ -823,10 +822,10 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
if (!node_online(node))
node = nearby_node(apicid);
}
cpu_to_node[cpu] = node;
numa_set_node(cpu, node);
printk(KERN_INFO "CPU %d(%d) -> Node %d -> Core %d\n",
cpu, c->x86_num_cores, node, cpu_core_id[cpu]);
cpu, c->x86_max_cores, node, cpu_core_id[cpu]);
#endif
#endif
}
@ -875,9 +874,9 @@ static int __init init_amd(struct cpuinfo_x86 *c)
display_cacheinfo(c);
if (c->extended_cpuid_level >= 0x80000008) {
c->x86_num_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
if (c->x86_num_cores & (c->x86_num_cores - 1))
c->x86_num_cores = 1;
c->x86_max_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
if (c->x86_max_cores & (c->x86_max_cores - 1))
c->x86_max_cores = 1;
amd_detect_cmp(c);
}
@ -889,54 +888,44 @@ static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
u32 eax, ebx, ecx, edx;
int index_msb, tmp;
int index_msb, core_bits;
int cpu = smp_processor_id();
cpuid(1, &eax, &ebx, &ecx, &edx);
c->apicid = phys_pkg_id(0);
if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
return;
cpuid(1, &eax, &ebx, &ecx, &edx);
smp_num_siblings = (ebx & 0xff0000) >> 16;
if (smp_num_siblings == 1) {
printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
} else if (smp_num_siblings > 1) {
index_msb = 31;
/*
* At this point we only support two siblings per
* processor package.
*/
} else if (smp_num_siblings > 1 ) {
if (smp_num_siblings > NR_CPUS) {
printk(KERN_WARNING "CPU: Unsupported number of the siblings %d", smp_num_siblings);
smp_num_siblings = 1;
return;
}
tmp = smp_num_siblings;
while ((tmp & 0x80000000 ) == 0) {
tmp <<=1 ;
index_msb--;
}
if (smp_num_siblings & (smp_num_siblings - 1))
index_msb++;
index_msb = get_count_order(smp_num_siblings);
phys_proc_id[cpu] = phys_pkg_id(index_msb);
printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
phys_proc_id[cpu]);
smp_num_siblings = smp_num_siblings / c->x86_num_cores;
smp_num_siblings = smp_num_siblings / c->x86_max_cores;
tmp = smp_num_siblings;
index_msb = 31;
while ((tmp & 0x80000000) == 0) {
tmp <<=1 ;
index_msb--;
}
if (smp_num_siblings & (smp_num_siblings - 1))
index_msb++;
index_msb = get_count_order(smp_num_siblings) ;
cpu_core_id[cpu] = phys_pkg_id(index_msb);
core_bits = get_count_order(c->x86_max_cores);
if (c->x86_num_cores > 1)
cpu_core_id[cpu] = phys_pkg_id(index_msb) &
((1 << core_bits) - 1);
if (c->x86_max_cores > 1)
printk(KERN_INFO "CPU: Processor Core ID: %d\n",
cpu_core_id[cpu]);
}
@ -975,7 +964,7 @@ static void srat_detect_node(void)
node = apicid_to_node[hard_smp_processor_id()];
if (node == NUMA_NO_NODE)
node = 0;
cpu_to_node[cpu] = node;
numa_set_node(cpu, node);
if (acpi_numa > 0)
printk(KERN_INFO "CPU %d -> Node %d\n", cpu, node);
@ -993,13 +982,18 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
unsigned eax = cpuid_eax(0x80000008);
c->x86_virt_bits = (eax >> 8) & 0xff;
c->x86_phys_bits = eax & 0xff;
/* CPUID workaround for Intel 0F34 CPU */
if (c->x86_vendor == X86_VENDOR_INTEL &&
c->x86 == 0xF && c->x86_model == 0x3 &&
c->x86_mask == 0x4)
c->x86_phys_bits = 36;
}
if (c->x86 == 15)
c->x86_cache_alignment = c->x86_clflush_size * 2;
if (c->x86 >= 15)
set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
c->x86_num_cores = intel_num_cpu_cores(c);
c->x86_max_cores = intel_num_cpu_cores(c);
srat_detect_node();
}
@ -1037,7 +1031,7 @@ void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
c->x86_model_id[0] = '\0'; /* Unset */
c->x86_clflush_size = 64;
c->x86_cache_alignment = c->x86_clflush_size;
c->x86_num_cores = 1;
c->x86_max_cores = 1;
c->extended_cpuid_level = 0;
memset(&c->x86_capability, 0, sizeof c->x86_capability);
@ -1060,10 +1054,10 @@ void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
c->x86 = (tfms >> 8) & 0xf;
c->x86_model = (tfms >> 4) & 0xf;
c->x86_mask = tfms & 0xf;
if (c->x86 == 0xf) {
if (c->x86 == 0xf)
c->x86 += (tfms >> 20) & 0xff;
if (c->x86 >= 0x6)
c->x86_model += ((tfms >> 16) & 0xF) << 4;
}
if (c->x86_capability[0] & (1<<19))
c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
} else {
@ -1271,13 +1265,12 @@ static int show_cpuinfo(struct seq_file *m, void *v)
seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
#ifdef CONFIG_SMP
if (smp_num_siblings * c->x86_num_cores > 1) {
if (smp_num_siblings * c->x86_max_cores > 1) {
int cpu = c - cpu_data;
seq_printf(m, "physical id\t: %d\n", phys_proc_id[cpu]);
seq_printf(m, "siblings\t: %d\n",
c->x86_num_cores * smp_num_siblings);
seq_printf(m, "siblings\t: %d\n", cpus_weight(cpu_core_map[cpu]));
seq_printf(m, "core id\t\t: %d\n", cpu_core_id[cpu]);
seq_printf(m, "cpu cores\t: %d\n", c->x86_num_cores);
seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
}
#endif

View File

@ -141,7 +141,6 @@ void pda_init(int cpu)
panic("cannot allocate irqstack for cpu %d", cpu);
}
asm volatile("movq %0,%%cr3" :: "r" (__pa_symbol(&init_level4_pgt)));
pda->irqstackptr += IRQSTACKSIZE-64;
}
@ -197,6 +196,7 @@ void __cpuinit cpu_init (void)
/* CPU 0 is initialised in head64.c */
if (cpu != 0) {
pda_init(cpu);
zap_low_mappings(cpu);
} else
estacks = boot_exception_stacks;

View File

@ -110,6 +110,15 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, unsigned
COPY(r14);
COPY(r15);
/* Kernel saves and restores only the CS segment register on signals,
* which is the bare minimum needed to allow mixed 32/64-bit code.
* App's signal handler can save/restore other segments if needed. */
{
unsigned cs;
err |= __get_user(cs, &sc->cs);
regs->cs = cs | 3; /* Force into user mode */
}
{
unsigned int tmpflags;
err |= __get_user(tmpflags, &sc->eflags);
@ -187,6 +196,7 @@ setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, unsigned lo
{
int err = 0;
err |= __put_user(regs->cs, &sc->cs);
err |= __put_user(0, &sc->gs);
err |= __put_user(0, &sc->fs);
@ -318,7 +328,14 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
regs->rsp = (unsigned long)frame;
/* Set up the CS register to run signal handlers in 64-bit mode,
even if the handler happens to be interrupting 32-bit code. */
regs->cs = __USER_CS;
/* This, by contrast, has nothing to do with segment registers -
see include/asm-x86_64/uaccess.h for details. */
set_fs(USER_DS);
regs->eflags &= ~TF_MASK;
if (test_thread_flag(TIF_SINGLESTEP))
ptrace_notify(SIGTRAP);

View File

@ -28,8 +28,6 @@
#include <asm/proto.h>
#include <asm/apicdef.h>
#define __cpuinit __init
/*
* Smarter SMP flushing macros.
* c/o Linus Torvalds.
@ -452,13 +450,14 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
void smp_stop_cpu(void)
{
unsigned long flags;
/*
* Remove this CPU:
*/
cpu_clear(smp_processor_id(), cpu_online_map);
local_irq_disable();
local_irq_save(flags);
disable_local_APIC();
local_irq_enable();
local_irq_restore(flags);
}
static void smp_really_stop_cpu(void *dummy)

View File

@ -64,6 +64,7 @@
int smp_num_siblings = 1;
/* Package ID of each logical CPU */
u8 phys_proc_id[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = BAD_APICID };
/* core ID of each logical CPU */
u8 cpu_core_id[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = BAD_APICID };
/* Bitmask of currently online CPUs */
@ -87,7 +88,10 @@ struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
/* Set when the idlers are all forked */
int smp_threads_ready;
/* representing HT siblings of each logical CPU */
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
/* representing HT and core siblings of each logical CPU */
cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_core_map);
@ -434,30 +438,59 @@ void __cpuinit smp_callin(void)
cpu_set(cpuid, cpu_callin_map);
}
/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;
static inline void set_cpu_sibling_map(int cpu)
{
int i;
struct cpuinfo_x86 *c = cpu_data;
cpu_set(cpu, cpu_sibling_setup_map);
if (smp_num_siblings > 1) {
for_each_cpu(i) {
if (cpu_core_id[cpu] == cpu_core_id[i]) {
for_each_cpu_mask(i, cpu_sibling_setup_map) {
if (phys_proc_id[cpu] == phys_proc_id[i] &&
cpu_core_id[cpu] == cpu_core_id[i]) {
cpu_set(i, cpu_sibling_map[cpu]);
cpu_set(cpu, cpu_sibling_map[i]);
cpu_set(i, cpu_core_map[cpu]);
cpu_set(cpu, cpu_core_map[i]);
}
}
} else {
cpu_set(cpu, cpu_sibling_map[cpu]);
}
if (current_cpu_data.x86_num_cores > 1) {
for_each_cpu(i) {
if (phys_proc_id[cpu] == phys_proc_id[i]) {
cpu_set(i, cpu_core_map[cpu]);
cpu_set(cpu, cpu_core_map[i]);
}
}
} else {
if (current_cpu_data.x86_max_cores == 1) {
cpu_core_map[cpu] = cpu_sibling_map[cpu];
c[cpu].booted_cores = 1;
return;
}
for_each_cpu_mask(i, cpu_sibling_setup_map) {
if (phys_proc_id[cpu] == phys_proc_id[i]) {
cpu_set(i, cpu_core_map[cpu]);
cpu_set(cpu, cpu_core_map[i]);
/*
* Does this new cpu bring up a new core?
*/
if (cpus_weight(cpu_sibling_map[cpu]) == 1) {
/*
* count each core already in this package
* towards the new cpu's booted_cores
*/
if (first_cpu(cpu_sibling_map[i]) == i)
c[cpu].booted_cores++;
/*
* increment the core count for all
* the other cpus in this package
*/
if (i != cpu)
c[i].booted_cores++;
} else if (i != cpu && !c[cpu].booted_cores)
c[cpu].booted_cores = c[i].booted_cores;
}
}
}
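
The rewritten set_cpu_sibling_map() keys HT siblings on the (phys_proc_id, cpu_core_id) pair rather than the core id alone, and maintains booted_cores as cores come up. A self-contained model of that bookkeeping, stripped of the smp_num_siblings and x86_max_cores special cases (the four-CPU topology below is invented: one package, two cores, two HT threads per core):

    #include <stdio.h>

    #define NCPUS 4

    static int phys_proc_id[NCPUS] = { 0, 0, 0, 0 };
    static int cpu_core_id[NCPUS]  = { 0, 0, 1, 1 };

    static unsigned sibling_map[NCPUS], core_map[NCPUS], setup_map;
    static int booted_cores[NCPUS];

    static int weight(unsigned m) { int n = 0; for (; m; m >>= 1) n += m & 1; return n; }
    static int first(unsigned m) { int i = 0; while (!(m & 1)) { m >>= 1; i++; } return i; }

    static void set_sibling_map(int cpu)
    {
        int i;

        setup_map |= 1u << cpu;

        /* pass 1: HT siblings share both the package and the core id */
        for (i = 0; i < NCPUS; i++) {
            if (!(setup_map & (1u << i)))
                continue;
            if (phys_proc_id[cpu] == phys_proc_id[i] &&
                cpu_core_id[cpu] == cpu_core_id[i]) {
                sibling_map[cpu] |= 1u << i;
                sibling_map[i]   |= 1u << cpu;
            }
        }

        /* pass 2: core siblings share the package; count freshly booted cores */
        for (i = 0; i < NCPUS; i++) {
            if (!(setup_map & (1u << i)) || phys_proc_id[cpu] != phys_proc_id[i])
                continue;
            core_map[cpu] |= 1u << i;
            core_map[i]   |= 1u << cpu;
            if (weight(sibling_map[cpu]) == 1) {      /* cpu opens a new core */
                if (first(sibling_map[i]) == i)       /* count each core once */
                    booted_cores[cpu]++;
                if (i != cpu)
                    booted_cores[i]++;                /* tell the other cpus */
            } else if (i != cpu && !booted_cores[cpu])
                booted_cores[cpu] = booted_cores[i];  /* inherit from a sibling */
        }
    }

    int main(void)
    {
        int cpu;
        for (cpu = 0; cpu < NCPUS; cpu++)
            set_sibling_map(cpu);
        /* with this topology every cpu ends up with booted_cores == 2 */
        for (cpu = 0; cpu < NCPUS; cpu++)
            printf("cpu%d: siblings %#x, core map %#x, booted_cores %d\n",
                   cpu, sibling_map[cpu], core_map[cpu], booted_cores[cpu]);
        return 0;
    }
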
@ -879,6 +912,9 @@ static __init void disable_smp(void)
}
#ifdef CONFIG_HOTPLUG_CPU
int additional_cpus __initdata = -1;
/*
* cpu_possible_map should be static: it cannot change as CPUs
* are onlined or offlined. The reason is per-cpu data structures
@ -887,14 +923,38 @@ static __init void disable_smp(void)
* cpu_present_map on the other hand can change dynamically.
* When cpu-hotplug is not compiled in, we fall back to the current
* behaviour, which is cpu_possible == cpu_present.
* If cpu-hotplug is supported, then we need to preallocate for all
* those NR_CPUS, hence cpu_possible_map represents the entire NR_CPUS range.
* - Ashok Raj
*
* Three ways to find out the number of additional hotplug CPUs:
* - If the BIOS specified disabled CPUs in ACPI/mptables, use that.
* - Otherwise use half of the available CPUs, or 2, whichever is more.
* - The user can override it with additional_cpus=NUM.
* We do this because additional CPUs waste a lot of memory.
* -AK
*/
__init void prefill_possible_map(void)
{
int i;
for (i = 0; i < NR_CPUS; i++)
int possible;
if (additional_cpus == -1) {
if (disabled_cpus > 0) {
additional_cpus = disabled_cpus;
} else {
additional_cpus = num_processors / 2;
if (additional_cpus == 0)
additional_cpus = 2;
}
}
possible = num_processors + additional_cpus;
if (possible > NR_CPUS)
possible = NR_CPUS;
printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
possible,
max_t(int, possible - num_processors, 0));
for (i = 0; i < possible; i++)
cpu_set(i, cpu_possible_map);
}
#endif
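
The new prefill_possible_map() sizes cpu_possible_map once at boot: BIOS-reported disabled CPUs win, otherwise half the booted processors with a floor of two, the user can override either with additional_cpus=, and the sum is clamped to NR_CPUS. The arithmetic on its own, with made-up inputs:

    #include <stdio.h>

    #define NR_CPUS 8   /* pretend the kernel was built for at most 8 CPUs */

    static int possible_cpus(int num_processors, int disabled_cpus,
                             int additional_cpus)
    {
        if (additional_cpus == -1) {              /* no additional_cpus= given */
            if (disabled_cpus > 0)
                additional_cpus = disabled_cpus;  /* trust BIOS/ACPI */
            else {
                additional_cpus = num_processors / 2;
                if (additional_cpus == 0)
                    additional_cpus = 2;          /* always leave some headroom */
            }
        }
        int possible = num_processors + additional_cpus;
        if (possible > NR_CPUS)
            possible = NR_CPUS;                   /* clamp to the static limit */
        return possible;
    }

    int main(void)
    {
        printf("%d\n", possible_cpus(4, 0, -1));  /* 4 booted, no hints -> 6 */
        printf("%d\n", possible_cpus(4, 2, -1));  /* BIOS disabled two  -> 6 */
        printf("%d\n", possible_cpus(4, 0, 16));  /* user asked for 16  -> 8 */
        return 0;
    }
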
@ -965,6 +1025,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
nmi_watchdog_default();
current_cpu_data = boot_cpu_data;
current_thread_info()->cpu = 0; /* needed? */
set_cpu_sibling_map(0);
if (smp_sanity_check(max_cpus) < 0) {
printk(KERN_INFO "SMP disabled\n");
@ -1008,8 +1069,6 @@ void __init smp_prepare_boot_cpu(void)
int me = smp_processor_id();
cpu_set(me, cpu_online_map);
cpu_set(me, cpu_callout_map);
cpu_set(0, cpu_sibling_map[0]);
cpu_set(0, cpu_core_map[0]);
per_cpu(cpu_state, me) = CPU_ONLINE;
}
@ -1062,9 +1121,6 @@ int __cpuinit __cpu_up(unsigned int cpu)
*/
void __init smp_cpus_done(unsigned int max_cpus)
{
#ifndef CONFIG_HOTPLUG_CPU
zap_low_mappings();
#endif
smp_cleanup_boot();
#ifdef CONFIG_X86_IO_APIC
@ -1081,15 +1137,24 @@ void __init smp_cpus_done(unsigned int max_cpus)
static void remove_siblinginfo(int cpu)
{
int sibling;
struct cpuinfo_x86 *c = cpu_data;
for_each_cpu_mask(sibling, cpu_core_map[cpu]) {
cpu_clear(cpu, cpu_core_map[sibling]);
/*
* last thread sibling in this cpu core going down
*/
if (cpus_weight(cpu_sibling_map[cpu]) == 1)
c[sibling].booted_cores--;
}
for_each_cpu_mask(sibling, cpu_sibling_map[cpu])
cpu_clear(cpu, cpu_sibling_map[sibling]);
for_each_cpu_mask(sibling, cpu_core_map[cpu])
cpu_clear(cpu, cpu_core_map[sibling]);
cpus_clear(cpu_sibling_map[cpu]);
cpus_clear(cpu_core_map[cpu]);
phys_proc_id[cpu] = BAD_APICID;
cpu_core_id[cpu] = BAD_APICID;
cpu_clear(cpu, cpu_sibling_setup_map);
}
void remove_cpu_from_maps(void)
@ -1153,6 +1218,12 @@ void __cpu_die(unsigned int cpu)
printk(KERN_ERR "CPU %u didn't die...\n", cpu);
}
static __init int setup_additional_cpus(char *s)
{
return get_option(&s, &additional_cpus);
}
__setup("additional_cpus=", setup_additional_cpus);
#else /* ... !CONFIG_HOTPLUG_CPU */
int __cpu_disable(void)

View File

@ -154,17 +154,3 @@ asmlinkage long sys_uname(struct new_utsname __user * name)
err |= copy_to_user(&name->machine, "i686", 5);
return err ? -EFAULT : 0;
}
asmlinkage long sys_time64(long __user * tloc)
{
struct timeval now;
int i;
do_gettimeofday(&now);
i = now.tv_sec;
if (tloc) {
if (put_user(i,tloc))
i = -EFAULT;
}
return i;
}
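
The removed sys_time64() reimplemented time(2) by hand: take the seconds half of gettimeofday, optionally store it through the user pointer, return it. The generic sys_time() appears to provide the same contract, so the duplicate could go. A user-space rendition, for illustration only:

    #include <stdio.h>
    #include <stddef.h>
    #include <sys/time.h>

    static long time64_like(long *tloc)
    {
        struct timeval now;
        long i;

        gettimeofday(&now, NULL);
        i = now.tv_sec;
        if (tloc)
            *tloc = i;   /* the kernel used put_user() and could return -EFAULT */
        return i;
    }

    int main(void)
    {
        long t;
        long r = time64_like(&t);
        printf("returned %ld, stored %ld\n", r, t);
        return 0;
    }
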

View File

@ -428,19 +428,6 @@ static void __kprobes do_trap(int trapnr, int signr, char *str,
{
conditional_sti(regs);
#ifdef CONFIG_CHECKING
{
unsigned long gs;
struct x8664_pda *pda = cpu_pda + safe_smp_processor_id();
rdmsrl(MSR_GS_BASE, gs);
if (gs != (unsigned long)pda) {
wrmsrl(MSR_GS_BASE, pda);
printk("%s: wrong gs %lx expected %p rip %lx\n", str, gs, pda,
regs->rip);
}
}
#endif
if (user_mode(regs)) {
struct task_struct *tsk = current;
@ -513,20 +500,6 @@ asmlinkage void __kprobes do_general_protection(struct pt_regs * regs,
{
conditional_sti(regs);
#ifdef CONFIG_CHECKING
{
unsigned long gs;
struct x8664_pda *pda = cpu_pda + safe_smp_processor_id();
rdmsrl(MSR_GS_BASE, gs);
if (gs != (unsigned long)pda) {
wrmsrl(MSR_GS_BASE, pda);
oops_in_progress++;
printk("general protection handler: wrong gs %lx expected %p\n", gs, pda);
oops_in_progress--;
}
}
#endif
if (user_mode(regs)) {
struct task_struct *tsk = current;
@ -665,19 +638,6 @@ asmlinkage void __kprobes do_debug(struct pt_regs * regs,
struct task_struct *tsk = current;
siginfo_t info;
#ifdef CONFIG_CHECKING
{
/* RED-PEN interaction with debugger - could destroy gs */
unsigned long gs;
struct x8664_pda *pda = cpu_pda + safe_smp_processor_id();
rdmsrl(MSR_GS_BASE, gs);
if (gs != (unsigned long)pda) {
wrmsrl(MSR_GS_BASE, pda);
printk("debug handler: wrong gs %lx expected %p\n", gs, pda);
}
}
#endif
get_debugreg(condition, 6);
if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
@ -888,6 +848,10 @@ asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void)
{
}
asmlinkage void __attribute__((weak)) mce_threshold_interrupt(void)
{
}
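
The empty mce_threshold_interrupt() is declared weak, like smp_thermal_interrupt() just above it: configurations without the real handler link against the stub, and a strong definition elsewhere silently replaces it. The mechanism in miniature (the handler name here is invented for the sketch):

    #include <stdio.h>

    /* weak default: used only when no strong definition exists at link time */
    __attribute__((weak)) void threshold_handler(void)
    {
        printf("default (weak) handler\n");
    }

    /* Linking in another file with a plain
     *     void threshold_handler(void) { ... }
     * would override the stub without any #ifdef. */

    int main(void)
    {
        threshold_handler();
        return 0;
    }
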
/*
* 'math_state_restore()' saves the current math information in the
* old math state array, and gets the new ones from the current task

View File

@ -50,7 +50,7 @@ SECTIONS
*(.bss.page_aligned)
*(.bss)
}
__bss_end = .;
__bss_stop = .;
. = ALIGN(PAGE_SIZE);
. = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
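
The linker-script hunk renames __bss_end to __bss_stop, so any C code that takes the symbol's address must be renamed in step. Such linker-defined symbols are reached by declaring them extern with array type; a runnable sketch against the two symbols a default userspace link already provides (__bss_start and end, analogous to the kernel's pair):

    #include <stdio.h>

    /* defined by the default GNU ld script, not by any C file; only their
     * addresses are meaningful */
    extern char __bss_start[];
    extern char end[];

    int main(void)
    {
        printf(".bss spans %p..%p (%ld bytes)\n",
               (void *)__bss_start, (void *)end,
               (long)(end - __bss_start));
        return 0;
    }
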

View File

@ -203,3 +203,6 @@ EXPORT_SYMBOL(flush_tlb_page);
#endif
EXPORT_SYMBOL(cpu_khz);
EXPORT_SYMBOL(load_gs_index);

Some files were not shown because too many files have changed in this diff.