Mirror of https://github.com/torvalds/linux.git, synced 2024-10-27 07:16:27 +00:00
Merge branches 'x86-efi-for-linus', 'x86-gart-for-linus', 'x86-irq-for-linus' and 'x86-mce-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'x86-efi-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86, efi: Ensure that the entirity of a region is mapped
  x86, efi: Pass a minimal map to SetVirtualAddressMap()
  x86, efi: Merge contiguous memory regions of the same type and attribute
  x86, efi: Consolidate EFI nx control
  x86, efi: Remove virtual-mode SetVirtualAddressMap call

* 'x86-gart-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86, gart: Don't enforce GART aperture lower-bound by alignment

* 'x86-irq-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86: Don't unmask disabled irqs when migrating them
  x86: Skip migrating IRQF_PER_CPU irqs in fixup_irqs()

* 'x86-mce-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86, mce: Drop the default decoding notifier
  x86, MCE: Do not taint when handling correctable errors
This commit is contained in:
commit ac2941f59a
arch/x86/include/asm/efi.h
@@ -90,6 +90,7 @@ extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size,
 #endif /* CONFIG_X86_32 */
 
 extern int add_efi_memmap;
+extern void efi_set_executable(efi_memory_desc_t *md, bool executable);
 extern void efi_memblock_x86_reserve_range(void);
 extern void efi_call_phys_prelog(void);
 extern void efi_call_phys_epilog(void);
arch/x86/include/asm/mce.h
@@ -142,8 +142,6 @@ static inline void winchip_mcheck_init(struct cpuinfo_x86 *c) {}
 static inline void enable_p5_mce(void) {}
 #endif
 
-extern void (*x86_mce_decode_callback)(struct mce *m);
-
 void mce_setup(struct mce *m);
 void mce_log(struct mce *m);
 DECLARE_PER_CPU(struct sys_device, mce_dev);
arch/x86/kernel/aperture_64.c
@@ -30,6 +30,22 @@
 #include <asm/amd_nb.h>
 #include <asm/x86_init.h>
 
+/*
+ * Using 512M as goal, in case kexec will load kernel_big
+ * that will do the on-position decompress, and could overlap with
+ * with the gart aperture that is used.
+ * Sequence:
+ * kernel_small
+ * ==> kexec (with kdump trigger path or gart still enabled)
+ * ==> kernel_small (gart area become e820_reserved)
+ * ==> kexec (with kdump trigger path or gart still enabled)
+ * ==> kerne_big (uncompressed size will be big than 64M or 128M)
+ * So don't use 512M below as gart iommu, leave the space for kernel
+ * code for safe.
+ */
+#define GART_MIN_ADDR	(512ULL << 20)
+#define GART_MAX_ADDR	(1ULL   << 32)
+
 int gart_iommu_aperture;
 int gart_iommu_aperture_disabled __initdata;
 int gart_iommu_aperture_allowed __initdata;
@@ -70,21 +86,9 @@ static u32 __init allocate_aperture(void)
 	 * memory. Unfortunately we cannot move it up because that would
 	 * make the IOMMU useless.
 	 */
-	/*
-	 * using 512M as goal, in case kexec will load kernel_big
-	 * that will do the on position decompress, and could overlap with
-	 * that position with gart that is used.
-	 * sequende:
-	 * kernel_small
-	 * ==> kexec (with kdump trigger path or previous doesn't shutdown gart)
-	 * ==> kernel_small(gart area become e820_reserved)
-	 * ==> kexec (with kdump trigger path or previous doesn't shutdown gart)
-	 * ==> kerne_big (uncompressed size will be big than 64M or 128M)
-	 * so don't use 512M below as gart iommu, leave the space for kernel
-	 * code for safe
-	 */
-	addr = memblock_find_in_range(0, 1ULL<<32, aper_size, 512ULL<<20);
-	if (addr == MEMBLOCK_ERROR || addr + aper_size > 0xffffffff) {
+	addr = memblock_find_in_range(GART_MIN_ADDR, GART_MAX_ADDR,
+				      aper_size, aper_size);
+	if (addr == MEMBLOCK_ERROR || addr + aper_size > GART_MAX_ADDR) {
 		printk(KERN_ERR
 			"Cannot allocate aperture memory hole (%lx,%uK)\n",
 				addr, aper_size>>10);
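The hunk above swaps an implicit lower bound for an explicit one: the old call to memblock_find_in_range() started the search at 0 but passed 512M as the alignment, so any hole it returned happened to sit at or above 512M; the new call states the [GART_MIN_ADDR, GART_MAX_ADDR) window directly and only asks for natural (aper_size) alignment. A minimal user-space sketch of that bounded, aligned search follows; find_in_range() and the single free region are illustrative stand-ins, not the memblock API.

/* Toy model of picking an aperture hole: search one free region
 * [free_base, free_end) for 'size' bytes aligned to 'align', but never
 * below 'start' and never reaching past 'end'. Mirrors the intent of
 * memblock_find_in_range(GART_MIN_ADDR, GART_MAX_ADDR, aper_size, aper_size).
 */
#include <stdint.h>
#include <stdio.h>

#define GART_MIN_ADDR (512ULL << 20)	/* leave the low 512M for the kernel image */
#define GART_MAX_ADDR (1ULL << 32)	/* the GART aperture must stay below 4G */

static uint64_t find_in_range(uint64_t free_base, uint64_t free_end,
			      uint64_t start, uint64_t end,
			      uint64_t size, uint64_t align)
{
	uint64_t base = free_base > start ? free_base : start;

	base = (base + align - 1) & ~(align - 1);	/* round up to alignment */
	if (base + size > free_end || base + size > end)
		return 0;				/* no hole found */
	return base;
}

int main(void)
{
	uint64_t aper_size = 64ULL << 20;		/* 64M aperture */
	/* free memory starts at 16M in this toy example */
	uint64_t addr = find_in_range(16ULL << 20, 4ULL << 30,
				      GART_MIN_ADDR, GART_MAX_ADDR,
				      aper_size, aper_size);

	printf("aperture hole at %#llx\n", (unsigned long long)addr);
	return 0;
}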
arch/x86/kernel/cpu/mcheck/mce.c
@@ -105,20 +105,6 @@ static int cpu_missing;
 ATOMIC_NOTIFIER_HEAD(x86_mce_decoder_chain);
 EXPORT_SYMBOL_GPL(x86_mce_decoder_chain);
 
-static int default_decode_mce(struct notifier_block *nb, unsigned long val,
-			       void *data)
-{
-	pr_emerg(HW_ERR "No human readable MCE decoding support on this CPU type.\n");
-	pr_emerg(HW_ERR "Run the message through 'mcelog --ascii' to decode.\n");
-
-	return NOTIFY_STOP;
-}
-
-static struct notifier_block mce_dec_nb = {
-	.notifier_call = default_decode_mce,
-	.priority      = -1,
-};
-
 /* MCA banks polled by the period polling timer for corrected events */
 DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = {
 	[0 ... BITS_TO_LONGS(MAX_NR_BANKS)-1] = ~0UL
@@ -212,6 +198,8 @@ void mce_log(struct mce *mce)
 
 static void print_mce(struct mce *m)
 {
+	int ret = 0;
+
 	pr_emerg(HW_ERR "CPU %d: Machine Check Exception: %Lx Bank %d: %016Lx\n",
 	       m->extcpu, m->mcgstatus, m->bank, m->status);
 
@@ -239,7 +227,11 @@ static void print_mce(struct mce *m)
 	 * Print out human-readable details about the MCE error,
 	 * (if the CPU has an implementation for that)
 	 */
-	atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, m);
+	ret = atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, m);
+	if (ret == NOTIFY_STOP)
+		return;
+
+	pr_emerg_ratelimited(HW_ERR "Run the above through 'mcelog --ascii'\n");
 }
 
 #define PANIC_TIMEOUT 5 /* 5 seconds */
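With default_decode_mce() and its notifier gone (see the hunk at -105 above), print_mce() now inspects the chain's return value itself: a registered decoder that handles the event returns NOTIFY_STOP and suppresses the generic 'mcelog --ascii' hint. A minimal user-space sketch of that fallback pattern; the decoder_fn type, decode_chain array and bank-4 check are illustrative, not the kernel's notifier API.

#include <stdio.h>
#include <stddef.h>

#define NOTIFY_DONE 0
#define NOTIFY_STOP 1

struct mce_event { int bank; unsigned long long status; };

typedef int (*decoder_fn)(const struct mce_event *);

/* A CPU-specific decoder would return NOTIFY_STOP once it has printed
 * a human-readable description of the error. */
static int edac_style_decoder(const struct mce_event *m)
{
	if (m->bank != 4)
		return NOTIFY_DONE;	/* not ours, let someone else try */
	printf("decoded: northbridge error, status %#llx\n", m->status);
	return NOTIFY_STOP;
}

static decoder_fn decode_chain[] = { edac_style_decoder, NULL };

static void print_mce(const struct mce_event *m)
{
	int ret = NOTIFY_DONE;

	printf("Machine Check Exception: bank %d status %#llx\n",
	       m->bank, m->status);

	for (decoder_fn *fn = decode_chain; *fn; fn++) {
		ret = (*fn)(m);
		if (ret == NOTIFY_STOP)
			return;		/* a decoder handled it, stop here */
	}
	/* fallback hint, formerly emitted by the default notifier */
	printf("Run the above through 'mcelog --ascii'\n");
}

int main(void)
{
	struct mce_event decoded = { .bank = 4, .status = 0xb200000000070f0fULL };
	struct mce_event raw     = { .bank = 1, .status = 0x9400000000000151ULL };

	print_mce(&decoded);	/* claimed by the decoder, no hint printed */
	print_mce(&raw);	/* nobody decoded it, hint is printed */
	return 0;
}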
@@ -590,7 +582,6 @@ void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
 		if (!(flags & MCP_DONTLOG) && !mce_dont_log_ce) {
 			mce_log(&m);
 			atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, &m);
-			add_taint(TAINT_MACHINE_CHECK);
 		}
 
 		/*
@@ -1722,8 +1713,6 @@ __setup("mce", mcheck_enable);
 
 int __init mcheck_init(void)
 {
-	atomic_notifier_chain_register(&x86_mce_decoder_chain, &mce_dec_nb);
-
 	mcheck_intel_therm_init();
 
 	return 0;
arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -187,8 +187,6 @@ static int therm_throt_process(bool new_event, int event, int level)
 				this_cpu,
 				level == CORE_LEVEL ? "Core" : "Package",
 				state->count);
-
-		add_taint(TAINT_MACHINE_CHECK);
 		return 1;
 	}
 	if (old_event) {
@@ -393,7 +391,6 @@ static void unexpected_thermal_interrupt(void)
 {
 	printk(KERN_ERR "CPU%d: Unexpected LVT thermal interrupt!\n",
 			smp_processor_id());
-	add_taint(TAINT_MACHINE_CHECK);
 }
 
 static void (*smp_thermal_vector)(void) = unexpected_thermal_interrupt;
arch/x86/kernel/irq.c
@@ -249,7 +249,7 @@ void fixup_irqs(void)
 
 		data = irq_desc_get_irq_data(desc);
 		affinity = data->affinity;
-		if (!irq_has_action(irq) ||
+		if (!irq_has_action(irq) || irqd_is_per_cpu(data) ||
 		    cpumask_subset(affinity, cpu_online_mask)) {
 			raw_spin_unlock(&desc->lock);
 			continue;
@@ -276,7 +276,8 @@ void fixup_irqs(void)
 		else if (!(warned++))
 			set_affinity = 0;
 
-		if (!irqd_can_move_in_process_context(data) && chip->irq_unmask)
+		if (!irqd_can_move_in_process_context(data) &&
+		    !irqd_irq_disabled(data) && chip->irq_unmask)
 			chip->irq_unmask(data);
 
 		raw_spin_unlock(&desc->lock);
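Taken together, the two irq.c hunks narrow what fixup_irqs() does while a CPU goes offline: IRQs flagged per-CPU are skipped outright, and an IRQ that its driver has disabled is no longer unmasked as a side effect of being migrated. A condensed sketch of the resulting decision flow, using plain booleans in place of the genirq predicates (irqd_is_per_cpu(), irqd_irq_disabled(), and so on):

#include <stdbool.h>
#include <stdio.h>

struct irq_state {
	bool has_action;	/* a handler is installed */
	bool per_cpu;		/* IRQF_PER_CPU: bound to one CPU, never migrated */
	bool affinity_ok;	/* affinity already within online CPUs */
	bool can_move_in_process_context;
	bool disabled;		/* currently disabled by its driver */
};

static void fixup_one_irq(int irq, const struct irq_state *d)
{
	/* Skip: no handler, per-CPU irqs must not be migrated,
	 * or the affinity mask is already fine. */
	if (!d->has_action || d->per_cpu || d->affinity_ok) {
		printf("irq %d: left alone\n", irq);
		return;
	}

	printf("irq %d: retargeting affinity\n", irq);

	/* Migration required masking the irq; only unmask it again
	 * if the driver had not disabled it in the first place. */
	if (!d->can_move_in_process_context && !d->disabled)
		printf("irq %d: unmasked after move\n", irq);
}

int main(void)
{
	struct irq_state timer = { .has_action = true, .per_cpu = true };
	struct irq_state nic   = { .has_action = true, .disabled = true };

	fixup_one_irq(0, &timer);	/* per-CPU: skipped */
	fixup_one_irq(42, &nic);	/* migrated but stays masked */
	return 0;
}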
arch/x86/platform/efi/efi.c
@@ -145,17 +145,6 @@ static void virt_efi_reset_system(int reset_type,
 		       data_size, data);
 }
 
-static efi_status_t virt_efi_set_virtual_address_map(
-	unsigned long memory_map_size,
-	unsigned long descriptor_size,
-	u32 descriptor_version,
-	efi_memory_desc_t *virtual_map)
-{
-	return efi_call_virt4(set_virtual_address_map,
-			      memory_map_size, descriptor_size,
-			      descriptor_version, virtual_map);
-}
-
 static efi_status_t __init phys_efi_set_virtual_address_map(
 	unsigned long memory_map_size,
 	unsigned long descriptor_size,
@@ -468,11 +457,25 @@ void __init efi_init(void)
 #endif
 }
 
+void __init efi_set_executable(efi_memory_desc_t *md, bool executable)
+{
+	u64 addr, npages;
+
+	addr = md->virt_addr;
+	npages = md->num_pages;
+
+	memrange_efi_to_native(&addr, &npages);
+
+	if (executable)
+		set_memory_x(addr, npages);
+	else
+		set_memory_nx(addr, npages);
+}
+
 static void __init runtime_code_page_mkexec(void)
 {
 	efi_memory_desc_t *md;
 	void *p;
-	u64 addr, npages;
 
 	/* Make EFI runtime service code area executable */
 	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
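The new efi_set_executable() helper centralizes what runtime_code_page_mkexec() and efi_64.c used to open-code: take an EFI descriptor, re-express its 4 KiB EFI pages as a native page range, then call set_memory_x() or set_memory_nx(). A small user-space sketch of just the address math; it assumes a 4 KiB native page size and models only the rounding behaviour of memrange_efi_to_native(), not its exact macro form.

#include <stdint.h>
#include <stdio.h>

#define EFI_PAGE_SHIFT	12		/* EFI always uses 4 KiB pages */
#define PAGE_SHIFT	12		/* assume a 4 KiB native page size */

struct efi_memory_desc {
	uint64_t virt_addr;
	uint64_t num_pages;		/* in EFI (4 KiB) pages */
};

/* Re-express an EFI descriptor as a native page-aligned range, the job
 * memrange_efi_to_native() does before set_memory_x()/set_memory_nx(). */
static void efi_range_to_native(uint64_t *addr, uint64_t *npages)
{
	uint64_t end = *addr + (*npages << EFI_PAGE_SHIFT);

	*addr &= ~((1ULL << PAGE_SHIFT) - 1);			/* round start down */
	*npages = (end - *addr + (1ULL << PAGE_SHIFT) - 1) >> PAGE_SHIFT;
}

int main(void)
{
	struct efi_memory_desc md = { .virt_addr = 0xfed01800, .num_pages = 3 };
	uint64_t addr = md.virt_addr, npages = md.num_pages;

	efi_range_to_native(&addr, &npages);
	printf("native range: %#llx, %llu pages\n",
	       (unsigned long long)addr, (unsigned long long)npages);
	return 0;
}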
@@ -481,10 +484,7 @@ static void __init runtime_code_page_mkexec(void)
 		if (md->type != EFI_RUNTIME_SERVICES_CODE)
 			continue;
 
-		addr = md->virt_addr;
-		npages = md->num_pages;
-		memrange_efi_to_native(&addr, &npages);
-		set_memory_x(addr, npages);
+		efi_set_executable(md, true);
 	}
 }
 
@@ -498,13 +498,42 @@ static void __init runtime_code_page_mkexec(void)
  */
 void __init efi_enter_virtual_mode(void)
 {
-	efi_memory_desc_t *md;
+	efi_memory_desc_t *md, *prev_md = NULL;
 	efi_status_t status;
 	unsigned long size;
 	u64 end, systab, addr, npages, end_pfn;
-	void *p, *va;
+	void *p, *va, *new_memmap = NULL;
+	int count = 0;
 
 	efi.systab = NULL;
+
+	/* Merge contiguous regions of the same type and attribute */
+	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
+		u64 prev_size;
+		md = p;
+
+		if (!prev_md) {
+			prev_md = md;
+			continue;
+		}
+
+		if (prev_md->type != md->type ||
+		    prev_md->attribute != md->attribute) {
+			prev_md = md;
+			continue;
+		}
+
+		prev_size = prev_md->num_pages << EFI_PAGE_SHIFT;
+
+		if (md->phys_addr == (prev_md->phys_addr + prev_size)) {
+			prev_md->num_pages += md->num_pages;
+			md->type = EFI_RESERVED_TYPE;
+			md->attribute = 0;
+			continue;
+		}
+		prev_md = md;
+	}
+
 	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
 		md = p;
 		if (!(md->attribute & EFI_MEMORY_RUNTIME))
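The first pass added above folds physically contiguous descriptors with identical type and attribute into one entry, marking the absorbed descriptors EFI_RESERVED_TYPE so later loops skip them; fewer, larger regions then reach SetVirtualAddressMap(). The same merge can be sketched in user space over a plain array (struct desc and the type constants below are simplified stand-ins for efi_memory_desc_t):

#include <stdint.h>
#include <stdio.h>

#define EFI_PAGE_SHIFT		12
#define EFI_RESERVED_TYPE	0
#define EFI_CONVENTIONAL	7

struct desc {
	uint32_t type;
	uint64_t phys_addr;
	uint64_t num_pages;
	uint64_t attribute;
};

/* Merge physically contiguous neighbours with the same type/attribute,
 * mirroring the first pass added to efi_enter_virtual_mode(). */
static void merge_regions(struct desc *map, int n)
{
	struct desc *prev = NULL;

	for (int i = 0; i < n; i++) {
		struct desc *md = &map[i];
		uint64_t prev_size;

		if (!prev || prev->type != md->type ||
		    prev->attribute != md->attribute) {
			prev = md;
			continue;
		}

		prev_size = prev->num_pages << EFI_PAGE_SHIFT;
		if (md->phys_addr == prev->phys_addr + prev_size) {
			prev->num_pages += md->num_pages;
			md->type = EFI_RESERVED_TYPE;	/* swallowed by prev */
			md->attribute = 0;
			continue;
		}
		prev = md;
	}
}

int main(void)
{
	struct desc map[] = {
		{ EFI_CONVENTIONAL, 0x100000, 16, 0x8 },
		{ EFI_CONVENTIONAL, 0x110000, 32, 0x8 },	/* contiguous: merged */
		{ EFI_CONVENTIONAL, 0x200000,  8, 0x8 },	/* gap: kept */
	};

	merge_regions(map, 3);
	for (int i = 0; i < 3; i++)
		printf("entry %d: type %u pages %llu\n", i, (unsigned)map[i].type,
		       (unsigned long long)map[i].num_pages);
	return 0;
}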
@@ -541,15 +570,21 @@ void __init efi_enter_virtual_mode(void)
 			systab += md->virt_addr - md->phys_addr;
 			efi.systab = (efi_system_table_t *) (unsigned long) systab;
 		}
+		new_memmap = krealloc(new_memmap,
+				      (count + 1) * memmap.desc_size,
+				      GFP_KERNEL);
+		memcpy(new_memmap + (count * memmap.desc_size), md,
+		       memmap.desc_size);
+		count++;
 	}
 
 	BUG_ON(!efi.systab);
 
 	status = phys_efi_set_virtual_address_map(
-		memmap.desc_size * memmap.nr_map,
+		memmap.desc_size * count,
 		memmap.desc_size,
 		memmap.desc_version,
-		memmap.phys_map);
+		(efi_memory_desc_t *)__pa(new_memmap));
 
 	if (status != EFI_SUCCESS) {
 		printk(KERN_ALERT "Unable to switch EFI into virtual mode "
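The other half of this hunk builds new_memmap: only descriptors carrying EFI_MEMORY_RUNTIME are copied into a buffer grown one entry at a time with krealloc(), and that trimmed map (desc_size * count bytes) is what gets passed to SetVirtualAddressMap() instead of the full firmware map. A user-space sketch of the same grow-and-append pattern using realloc(); the descriptor layout is simplified and error handling kept minimal.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct desc { unsigned type; unsigned long long attribute; };
#define EFI_MEMORY_RUNTIME	0x8000000000000000ULL

int main(void)
{
	struct desc full_map[] = {
		{ 7, 0 },			/* conventional memory: skipped */
		{ 5, EFI_MEMORY_RUNTIME },	/* runtime services code: copied */
		{ 6, EFI_MEMORY_RUNTIME },	/* runtime services data: copied */
	};
	size_t desc_size = sizeof(struct desc);
	unsigned char *new_map = NULL;
	int count = 0;

	for (size_t i = 0; i < sizeof(full_map) / desc_size; i++) {
		if (!(full_map[i].attribute & EFI_MEMORY_RUNTIME))
			continue;
		/* grow by one descriptor and append, as the kernel does
		 * with krealloc()/memcpy() */
		new_map = realloc(new_map, (count + 1) * desc_size);
		if (!new_map)
			return 1;
		memcpy(new_map + count * desc_size, &full_map[i], desc_size);
		count++;
	}

	printf("passing %d of %zu descriptors (%zu bytes)\n",
	       count, sizeof(full_map) / desc_size, count * desc_size);
	free(new_map);
	return 0;
}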
@@ -572,11 +607,12 @@ void __init efi_enter_virtual_mode(void)
 	efi.set_variable = virt_efi_set_variable;
 	efi.get_next_high_mono_count = virt_efi_get_next_high_mono_count;
 	efi.reset_system = virt_efi_reset_system;
-	efi.set_virtual_address_map = virt_efi_set_virtual_address_map;
+	efi.set_virtual_address_map = NULL;
 	if (__supported_pte_mask & _PAGE_NX)
 		runtime_code_page_mkexec();
 	early_iounmap(memmap.map, memmap.nr_map * memmap.desc_size);
 	memmap.map = NULL;
+	kfree(new_memmap);
 }
 
 /*
arch/x86/platform/efi/efi_64.c
@@ -41,22 +41,7 @@
 static pgd_t save_pgd __initdata;
 static unsigned long efi_flags __initdata;
 
-static void __init early_mapping_set_exec(unsigned long start,
-					  unsigned long end,
-					  int executable)
-{
-	unsigned long num_pages;
-
-	start &= PMD_MASK;
-	end = (end + PMD_SIZE - 1) & PMD_MASK;
-	num_pages = (end - start) >> PAGE_SHIFT;
-	if (executable)
-		set_memory_x((unsigned long)__va(start), num_pages);
-	else
-		set_memory_nx((unsigned long)__va(start), num_pages);
-}
-
-static void __init early_runtime_code_mapping_set_exec(int executable)
+static void __init early_code_mapping_set_exec(int executable)
 {
 	efi_memory_desc_t *md;
 	void *p;
@@ -67,11 +52,8 @@ static void __init early_runtime_code_mapping_set_exec(int executable)
 	/* Make EFI runtime service code area executable */
 	for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
 		md = p;
-		if (md->type == EFI_RUNTIME_SERVICES_CODE) {
-			unsigned long end;
-			end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
-			early_mapping_set_exec(md->phys_addr, end, executable);
-		}
+		if (md->type == EFI_RUNTIME_SERVICES_CODE)
+			efi_set_executable(md, executable);
 	}
 }
 
@@ -79,7 +61,7 @@ void __init efi_call_phys_prelog(void)
 {
 	unsigned long vaddress;
 
-	early_runtime_code_mapping_set_exec(1);
+	early_code_mapping_set_exec(1);
 	local_irq_save(efi_flags);
 	vaddress = (unsigned long)__va(0x0UL);
 	save_pgd = *pgd_offset_k(0x0UL);
@@ -95,7 +77,7 @@ void __init efi_call_phys_epilog(void)
 	set_pgd(pgd_offset_k(0x0UL), save_pgd);
 	__flush_tlb_all();
 	local_irq_restore(efi_flags);
-	early_runtime_code_mapping_set_exec(0);
+	early_code_mapping_set_exec(0);
 }
 
 void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size,
@@ -107,8 +89,10 @@ void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size,
 		return ioremap(phys_addr, size);
 
 	last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size);
-	if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size)
-		return NULL;
+	if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size) {
+		unsigned long top = last_map_pfn << PAGE_SHIFT;
+		efi_ioremap(top, size - (top - phys_addr), type);
+	}
 
 	return (void __iomem *)__va(phys_addr);
 }
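The efi_ioremap() change covers the case where init_memory_mapping() stops short of the requested end: rather than returning NULL, the function now calls itself on the unmapped tail until the whole region is covered. A standalone sketch of that "map, then recurse on the remainder" pattern, with map_chunk() as a stand-in mapper that covers at most a fixed stride per call:

#include <stdio.h>

#define CHUNK	(2UL << 20)	/* pretend the mapper covers at most 2M per call */

/* Stand-in for init_memory_mapping(): returns the end of what actually
 * got mapped, which may fall short of the requested end. */
static unsigned long map_chunk(unsigned long start, unsigned long end)
{
	unsigned long mapped_end = start + CHUNK < end ? start + CHUNK : end;

	printf("mapped [%#lx, %#lx)\n", start, mapped_end);
	return mapped_end;
}

/* Keep going until the requested range is fully covered, the way
 * efi_ioremap() now re-invokes itself on the unmapped tail. */
static void map_region(unsigned long phys_addr, unsigned long size)
{
	unsigned long top = map_chunk(phys_addr, phys_addr + size);

	if (top < phys_addr + size)
		map_region(top, size - (top - phys_addr));
}

int main(void)
{
	map_region(0x80000000UL, 5UL << 20);	/* 5M region, mapped in 2M steps */
	return 0;
}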