Merge tag 'loongarch-fixes-6.12-2' of git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson

LoongArch fixes for v6.12-final

-----BEGIN PGP SIGNATURE-----

iQJKBAABCAA0FiEEzOlt8mkP+tbeiYy5AoYrw/LiJnoFAmczFHIWHGNoZW5odWFj
YWlAa2VybmVsLm9yZwAKCRAChivD8uImenR2D/4uxRWT46702vWN6V4miQwMm3vc
OYjR8VZewj2qa1hPhxn42IyPnqeZMBkDVDgSSNAUHqh0Csp4uLjgaWVakxabwn6o
8ZgcZ4T78ln7qIvGkJ2J+JXTS9iiK7k4aQRaBwlor8is/HIUKCcYryl/oPbgq0Uh
gb9aSrVuUyctLrRTzXKQdJZnP3+okHW9Ql2HBKtd0nKUxBHFkgDenhi6urAQU91+
jUoDVr11I47JHOMRu8fxHEhUepX7TmHJDGRDPPFcFCkGfcLOiDXZ8E/Cdguw5g9R
7sK6OuOlT17QRTiqDnk8fq/AN23IXcj0JYFzCn17xHd2lNSx2zW+aUu2Up/gu1aR
wGHyO8fmeZ8FSpPgUOL8ark3Mf/1yllXWgw9EOP9HmTzKNVSct54B0UItORDzYNW
xNJ17OoFToQZfI7eXLbMxyRNbiJJu/ZueYbZarnCu5jiFBdfRtPmbvd+BUOujg0R
cTd5swrQcmwWuFiG+4w4Hnqt14BvcG2kELYJLYnFmVDjXlUp/PZ2gVI/j7l2KcEY
96YFuwTTXAJUG0cKzpA6uGNgCTeaJTFQx/pRrUOP2OjLunvlEq6n8K3fui4KDSwl
s1eIRCdrVvz5vFTXT53RT0vE1jHSy2MsbcIoJwiN21G0EM1QKsGOu9esWdBx69+i
WkHDCnc+aVqGrUZALw==
=kp9x
-----END PGP SIGNATURE-----

Pull LoongArch fixes from Huacai Chen:

 - Set up the logical-physical CPU mapping for all possible CPUs, in
   order to avoid CPU hotplug issues

 - Fix some KASAN bugs

 - Fix the AP booting issue in VM mode

 - Some trivial cleanups

* tag 'loongarch-fixes-6.12-2' of git://git.kernel.org/pub/scm/linux/kernel/git/chenhuacai/linux-loongson:
  LoongArch: Fix AP booting issue in VM mode
  LoongArch: Add WriteCombine shadow mapping in KASAN
  LoongArch: Disable KASAN if PGDIR_SIZE is too large for cpu_vabits
  LoongArch: Make KASAN work with 5-level page-tables
  LoongArch: Define a default value for VM_DATA_DEFAULT_FLAGS
  LoongArch: Fix early_numa_add_cpu() usage for FDT systems
  LoongArch: For all possible CPUs setup logical-physical CPU mapping
commit c5f4045118
@@ -25,6 +25,7 @@
 /* 64-bit segment value. */
 #define XKPRANGE_UC_SEG		(0x8000)
 #define XKPRANGE_CC_SEG		(0x9000)
+#define XKPRANGE_WC_SEG		(0xa000)
 #define XKVRANGE_VC_SEG		(0xffff)
 
 /* Cached */
@@ -41,20 +42,28 @@
 #define XKPRANGE_UC_SHADOW_SIZE		(XKPRANGE_UC_SIZE >> KASAN_SHADOW_SCALE_SHIFT)
 #define XKPRANGE_UC_SHADOW_END		(XKPRANGE_UC_KASAN_OFFSET + XKPRANGE_UC_SHADOW_SIZE)
 
+/* WriteCombine */
+#define XKPRANGE_WC_START		WRITECOMBINE_BASE
+#define XKPRANGE_WC_SIZE		XRANGE_SIZE
+#define XKPRANGE_WC_KASAN_OFFSET	XKPRANGE_UC_SHADOW_END
+#define XKPRANGE_WC_SHADOW_SIZE		(XKPRANGE_WC_SIZE >> KASAN_SHADOW_SCALE_SHIFT)
+#define XKPRANGE_WC_SHADOW_END		(XKPRANGE_WC_KASAN_OFFSET + XKPRANGE_WC_SHADOW_SIZE)
+
 /* VMALLOC (Cached or UnCached) */
 #define XKVRANGE_VC_START		MODULES_VADDR
 #define XKVRANGE_VC_SIZE		round_up(KFENCE_AREA_END - MODULES_VADDR + 1, PGDIR_SIZE)
-#define XKVRANGE_VC_KASAN_OFFSET	XKPRANGE_UC_SHADOW_END
+#define XKVRANGE_VC_KASAN_OFFSET	XKPRANGE_WC_SHADOW_END
 #define XKVRANGE_VC_SHADOW_SIZE		(XKVRANGE_VC_SIZE >> KASAN_SHADOW_SCALE_SHIFT)
 #define XKVRANGE_VC_SHADOW_END		(XKVRANGE_VC_KASAN_OFFSET + XKVRANGE_VC_SHADOW_SIZE)
 
 /* KAsan shadow memory start right after vmalloc. */
 #define KASAN_SHADOW_START		round_up(KFENCE_AREA_END, PGDIR_SIZE)
 #define KASAN_SHADOW_SIZE		(XKVRANGE_VC_SHADOW_END - XKPRANGE_CC_KASAN_OFFSET)
-#define KASAN_SHADOW_END		round_up(KASAN_SHADOW_START + KASAN_SHADOW_SIZE, PGDIR_SIZE)
+#define KASAN_SHADOW_END		(round_up(KASAN_SHADOW_START + KASAN_SHADOW_SIZE, PGDIR_SIZE) - 1)
 
 #define XKPRANGE_CC_SHADOW_OFFSET	(KASAN_SHADOW_START + XKPRANGE_CC_KASAN_OFFSET)
 #define XKPRANGE_UC_SHADOW_OFFSET	(KASAN_SHADOW_START + XKPRANGE_UC_KASAN_OFFSET)
+#define XKPRANGE_WC_SHADOW_OFFSET	(KASAN_SHADOW_START + XKPRANGE_WC_KASAN_OFFSET)
 #define XKVRANGE_VC_SHADOW_OFFSET	(KASAN_SHADOW_START + XKVRANGE_VC_KASAN_OFFSET)
 
 extern bool kasan_early_stage;
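For readers tracing the new layout: each XK*RANGE segment gets a slice of
shadow memory, and a kernel address is translated by scaling its offset
within the segment and adding the segment's shadow slot. Below is a minimal
user-space sketch of that arithmetic, with made-up constants standing in for
the real LoongArch values (this is an illustration, not the kernel's code):

	#include <stdio.h>
	#include <stdint.h>

	#define SCALE_SHIFT	3		/* one shadow byte covers 8 bytes */
	#define SEG_SIZE	(1ULL << 20)	/* stand-in for XRANGE_SIZE */

	/* stand-ins for XKPRANGE_CC/UC/WC_KASAN_OFFSET: slots are stacked
	 * back to back, so each slot's end is the next slot's start */
	#define CC_OFF	0
	#define UC_OFF	(CC_OFF + (SEG_SIZE >> SCALE_SHIFT))
	#define WC_OFF	(UC_OFF + (SEG_SIZE >> SCALE_SHIFT))

	static uint64_t shadow_of(uint64_t seg_off, uint64_t addr_in_seg)
	{
		return seg_off + (addr_in_seg >> SCALE_SHIFT);
	}

	int main(void)
	{
		/* the new WriteCombine slot sits between UC and VC, which
		 * is why XKVRANGE_VC_KASAN_OFFSET moved from
		 * XKPRANGE_UC_SHADOW_END to XKPRANGE_WC_SHADOW_END */
		printf("CC slot %#llx, UC slot %#llx, WC slot %#llx\n",
		       (unsigned long long)CC_OFF,
		       (unsigned long long)UC_OFF,
		       (unsigned long long)WC_OFF);
		printf("shadow of WC+0x100 = %#llx\n",
		       (unsigned long long)shadow_of(WC_OFF, 0x100));
		return 0;
	}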
@@ -113,10 +113,7 @@ struct page *tlb_virt_to_page(unsigned long kaddr);
 extern int __virt_addr_valid(volatile void *kaddr);
 #define virt_addr_valid(kaddr)	__virt_addr_valid((volatile void *)(kaddr))
 
-#define VM_DATA_DEFAULT_FLAGS \
-	(VM_READ | VM_WRITE | \
-	((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
-	VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+#define VM_DATA_DEFAULT_FLAGS	VM_DATA_FLAGS_TSK_EXEC
 
 #include <asm-generic/memory_model.h>
 #include <asm-generic/getorder.h>
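The replacement leans on the generic helper from include/linux/mm.h; quoting
its definition from memory (worth double-checking against the tree), it
expands to the same flags the removed lines spelled out by hand:

	#define VM_DATA_FLAGS_TSK_EXEC	(VM_READ | VM_WRITE | TASK_EXEC | \
					 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

where TASK_EXEC is ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0),
so the behavior should be unchanged.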
@@ -58,48 +58,48 @@ void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
 	return ioremap_cache(phys, size);
 }
 
-static int cpu_enumerated = 0;
-
 #ifdef CONFIG_SMP
-static int set_processor_mask(u32 id, u32 flags)
+static int set_processor_mask(u32 id, u32 pass)
 {
-	int nr_cpus;
-	int cpu, cpuid = id;
+	int cpu = -1, cpuid = id;
 
-	if (!cpu_enumerated)
-		nr_cpus = NR_CPUS;
-	else
-		nr_cpus = nr_cpu_ids;
-
-	if (num_processors >= nr_cpus) {
+	if (num_processors >= NR_CPUS) {
 		pr_warn(PREFIX "nr_cpus limit of %i reached."
-			" processor 0x%x ignored.\n", nr_cpus, cpuid);
+			" processor 0x%x ignored.\n", NR_CPUS, cpuid);
 
 		return -ENODEV;
 
 	}
 
 	if (cpuid == loongson_sysconf.boot_cpu_id)
 		cpu = 0;
-	else
-		cpu = find_first_zero_bit(cpumask_bits(cpu_present_mask), NR_CPUS);
-
-	if (!cpu_enumerated)
-		set_cpu_possible(cpu, true);
 
-	if (flags & ACPI_MADT_ENABLED) {
+	switch (pass) {
+	case 1: /* Pass 1 handle enabled processors */
+		if (cpu < 0)
+			cpu = find_first_zero_bit(cpumask_bits(cpu_present_mask), NR_CPUS);
 		num_processors++;
 		set_cpu_present(cpu, true);
-		__cpu_number_map[cpuid] = cpu;
-		__cpu_logical_map[cpu] = cpuid;
-	} else
+		break;
+	case 2: /* Pass 2 handle disabled processors */
+		if (cpu < 0)
+			cpu = find_first_zero_bit(cpumask_bits(cpu_possible_mask), NR_CPUS);
 		disabled_cpus++;
+		break;
+	default:
+		return cpu;
+	}
+
+	set_cpu_possible(cpu, true);
+	__cpu_number_map[cpuid] = cpu;
+	__cpu_logical_map[cpu] = cpuid;
 
 	return cpu;
 }
 #endif
 
 static int __init
-acpi_parse_processor(union acpi_subtable_headers *header, const unsigned long end)
+acpi_parse_p1_processor(union acpi_subtable_headers *header, const unsigned long end)
 {
 	struct acpi_madt_core_pic *processor = NULL;
 
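The two-pass scheme is easier to see outside the kernel: enabled processors
claim the low logical ids first, then disabled (hot-pluggable) ones are
appended, so a later hotplug never reshuffles the mapping. A self-contained
sketch of the idea, with hypothetical data (plain C, not kernel code):

	#include <stdbool.h>
	#include <stdio.h>

	#define NCPU 8

	static bool present[NCPU];
	static int logical_map[NCPU], nmapped;

	static void map_cpu(int physid, bool enabled)
	{
		int cpu = nmapped++;		/* next free logical id */

		if (enabled)
			present[cpu] = true;	/* pass 1: bootable now */
		logical_map[cpu] = physid;	/* pass 2 only reserves the slot */
	}

	int main(void)
	{
		int phys[] = { 4, 1, 6, 3 };

		/* pass 1: enabled CPUs get the low logical numbers */
		map_cpu(phys[0], true);
		map_cpu(phys[1], true);
		/* pass 2: disabled CPUs are appended, mapping stays stable */
		map_cpu(phys[2], false);
		map_cpu(phys[3], false);

		for (int i = 0; i < nmapped; i++)
			printf("logical %d -> physical %d (%s)\n", i,
			       logical_map[i],
			       present[i] ? "present" : "possible only");
		return 0;
	}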
@@ -110,12 +110,29 @@ acpi_parse_processor(union acpi_subtable_headers *header, const unsigned long en
 	acpi_table_print_madt_entry(&header->common);
 #ifdef CONFIG_SMP
 	acpi_core_pic[processor->core_id] = *processor;
-	set_processor_mask(processor->core_id, processor->flags);
+	if (processor->flags & ACPI_MADT_ENABLED)
+		set_processor_mask(processor->core_id, 1);
 #endif
 
 	return 0;
 }
 
+static int __init
+acpi_parse_p2_processor(union acpi_subtable_headers *header, const unsigned long end)
+{
+	struct acpi_madt_core_pic *processor = NULL;
+
+	processor = (struct acpi_madt_core_pic *)header;
+	if (BAD_MADT_ENTRY(processor, end))
+		return -EINVAL;
+
+#ifdef CONFIG_SMP
+	if (!(processor->flags & ACPI_MADT_ENABLED))
+		set_processor_mask(processor->core_id, 2);
+#endif
+
+	return 0;
+}
 static int __init
 acpi_parse_eio_master(union acpi_subtable_headers *header, const unsigned long end)
 {
@@ -143,12 +160,14 @@ static void __init acpi_process_madt(void)
 	}
 #endif
 	acpi_table_parse_madt(ACPI_MADT_TYPE_CORE_PIC,
-			acpi_parse_processor, MAX_CORE_PIC);
+			acpi_parse_p1_processor, MAX_CORE_PIC);
+
+	acpi_table_parse_madt(ACPI_MADT_TYPE_CORE_PIC,
+			acpi_parse_p2_processor, MAX_CORE_PIC);
 
 	acpi_table_parse_madt(ACPI_MADT_TYPE_EIO_PIC,
 			acpi_parse_eio_master, MAX_IO_PICS);
 
-	cpu_enumerated = 1;
 	loongson_sysconf.nr_cpus = num_processors;
 }
@@ -310,6 +329,10 @@ static int __ref acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
 	int nid;
 
 	nid = acpi_get_node(handle);
+
+	if (nid != NUMA_NO_NODE)
+		nid = early_cpu_to_node(cpu);
+
 	if (nid != NUMA_NO_NODE) {
 		set_cpuid_to_node(physid, nid);
 		node_set(nid, numa_nodes_parsed);
@@ -324,12 +347,14 @@ int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, u32 acpi_id, int *pcpu
 {
 	int cpu;
 
-	cpu = set_processor_mask(physid, ACPI_MADT_ENABLED);
-	if (cpu < 0) {
+	cpu = cpu_number_map(physid);
+	if (cpu < 0 || cpu >= nr_cpu_ids) {
 		pr_info(PREFIX "Unable to map lapic to logical cpu number\n");
-		return cpu;
+		return -ERANGE;
 	}
+
+	num_processors++;
 	set_cpu_present(cpu, true);
 	acpi_map_cpu2node(handle, cpu, physid);
 
 	*pcpu = cpu;
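With the mapping fixed at boot, the hotplug path above shrinks from an
allocation to a lookup. A self-contained sketch of that shape, with a
hypothetical lookup table (not the kernel's cpu_number_map() implementation):

	#include <stdio.h>

	#define NCPU 4

	static int number_map[NCPU] = { 1, 3, 0, 2 };	/* hypothetical physical -> logical */
	static int present[NCPU];
	static int num_processors;

	/* mirrors the new acpi_map_cpu(): look up, validate, mark present */
	static int online_cpu(int physid)
	{
		int cpu = (physid >= 0 && physid < NCPU) ? number_map[physid] : -1;

		if (cpu < 0 || cpu >= NCPU)	/* slot was never reserved */
			return -34;		/* -ERANGE */

		num_processors++;		/* counted only when it comes online */
		present[cpu] = 1;
		return cpu;
	}

	int main(void)
	{
		printf("physical 3 -> logical %d\n", online_cpu(3));
		return 0;
	}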
@@ -51,11 +51,18 @@ static u64 paravt_steal_clock(int cpu)
 }
 
 #ifdef CONFIG_SMP
+static struct smp_ops native_ops;
+
 static void pv_send_ipi_single(int cpu, unsigned int action)
 {
 	int min, old;
 	irq_cpustat_t *info = &per_cpu(irq_stat, cpu);
 
+	if (unlikely(action == ACTION_BOOT_CPU)) {
+		native_ops.send_ipi_single(cpu, action);
+		return;
+	}
+
 	old = atomic_fetch_or(BIT(action), &info->message);
 	if (old)
 		return;
@@ -75,6 +82,11 @@ static void pv_send_ipi_mask(const struct cpumask *mask, unsigned int action)
 	if (cpumask_empty(mask))
 		return;
 
+	if (unlikely(action == ACTION_BOOT_CPU)) {
+		native_ops.send_ipi_mask(mask, action);
+		return;
+	}
+
 	action = BIT(action);
 	for_each_cpu(i, mask) {
 		info = &per_cpu(irq_stat, i);
@@ -147,6 +159,8 @@ static void pv_init_ipi(void)
 {
 	int r, swi;
 
+	/* Init native ipi irq for ACTION_BOOT_CPU */
+	native_ops.init_ipi();
 	swi = get_percpu_irq(INT_SWI0);
 	if (swi < 0)
 		panic("SWI0 IRQ mapping failed\n");
@@ -193,6 +207,7 @@ int __init pv_ipi_init(void)
 		return 0;
 
 #ifdef CONFIG_SMP
+	native_ops = mp_ops;
 	mp_ops.init_ipi = pv_init_ipi;
 	mp_ops.send_ipi_single = pv_send_ipi_single;
 	mp_ops.send_ipi_mask = pv_send_ipi_mask;
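The fix follows a save-then-override pattern: snapshot the native smp_ops
before hooking them, and delegate the one action that must bypass the
paravirtualized mailbox (ACTION_BOOT_CPU) back to the saved copy. A reduced,
self-contained illustration of that pattern (stand-in types and actions, not
the kernel's structures):

	#include <stdio.h>

	struct ops { void (*send)(int cpu, unsigned int action); };

	static void native_send(int cpu, unsigned int action)
	{
		printf("native IPI to cpu%d, action %u\n", cpu, action);
	}

	static struct ops native;			/* snapshot of the original table */
	static struct ops live = { .send = native_send };

	static void pv_send(int cpu, unsigned int action)
	{
		if (action == 0) {			/* stand-in for ACTION_BOOT_CPU */
			native.send(cpu, action);	/* boot IPIs take the hardware path */
			return;
		}
		printf("pv mailbox IPI to cpu%d, action %u\n", cpu, action);
	}

	static void install_pv(void)
	{
		native = live;		/* save native ops first ... */
		live.send = pv_send;	/* ... then override the hook */
	}

	int main(void)
	{
		install_pv();
		live.send(1, 0);	/* delegated to the native path */
		live.send(1, 5);	/* handled by the pv path */
		return 0;
	}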
@@ -302,7 +302,7 @@ static void __init fdt_smp_setup(void)
 		__cpu_number_map[cpuid] = cpu;
 		__cpu_logical_map[cpu] = cpuid;
 
-		early_numa_add_cpu(cpu, 0);
+		early_numa_add_cpu(cpuid, 0);
 		set_cpuid_to_node(cpuid, 0);
 	}
 
@@ -331,11 +331,11 @@ void __init loongson_prepare_cpus(unsigned int max_cpus)
 	int i = 0;
 
 	parse_acpi_topology();
+	cpu_data[0].global_id = cpu_logical_map(0);
 
 	for (i = 0; i < loongson_sysconf.nr_cpus; i++) {
 		set_cpu_present(i, true);
 		csr_mail_send(0, __cpu_logical_map[i], 0);
-		cpu_data[i].global_id = __cpu_logical_map[i];
 	}
 
 	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
@@ -380,6 +380,7 @@ void loongson_init_secondary(void)
 		cpu_logical_map(cpu) / loongson_sysconf.cores_per_package;
 	cpu_data[cpu].core = pptt_enabled ? cpu_data[cpu].core :
 		cpu_logical_map(cpu) % loongson_sysconf.cores_per_package;
+	cpu_data[cpu].global_id = cpu_logical_map(cpu);
 }
 
 void loongson_smp_finish(void)
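Both smp.c fixes hinge on the two id spaces in play: __cpu_number_map goes
physical to logical, __cpu_logical_map goes logical to physical, and the
NUMA bookkeeping keys off the physical id, which is why the FDT path now
passes cpuid rather than cpu. A small self-contained sketch of the two maps
(hypothetical enumeration order, not kernel code):

	#include <stdio.h>

	#define NCPU 4

	static int cpu_number_map[NCPU];	/* physical id -> logical id */
	static int cpu_logical_map[NCPU];	/* logical id -> physical id */

	int main(void)
	{
		int phys[] = { 2, 0, 3, 1 };	/* order CPUs appear in the FDT */

		for (int cpu = 0; cpu < NCPU; cpu++) {
			int cpuid = phys[cpu];

			cpu_number_map[cpuid] = cpu;
			cpu_logical_map[cpu] = cpuid;
			/* NUMA registration wants the physical id (cpuid),
			 * which is what the early_numa_add_cpu() fix restores */
		}
		for (int cpu = 0; cpu < NCPU; cpu++)
			printf("logical %d <-> physical %d\n",
			       cpu, cpu_logical_map[cpu]);
		return 0;
	}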
@@ -13,6 +13,13 @@
 
 static pgd_t kasan_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
 
+#ifdef __PAGETABLE_P4D_FOLDED
+#define __pgd_none(early, pgd) (0)
+#else
+#define __pgd_none(early, pgd) (early ? (pgd_val(pgd) == 0) : \
+(__pa(pgd_val(pgd)) == (unsigned long)__pa(kasan_early_shadow_p4d)))
+#endif
+
 #ifdef __PAGETABLE_PUD_FOLDED
 #define __p4d_none(early, p4d) (0)
 #else
@@ -55,6 +62,9 @@ void *kasan_mem_to_shadow(const void *addr)
 		case XKPRANGE_UC_SEG:
 			offset = XKPRANGE_UC_SHADOW_OFFSET;
 			break;
+		case XKPRANGE_WC_SEG:
+			offset = XKPRANGE_WC_SHADOW_OFFSET;
+			break;
 		case XKVRANGE_VC_SEG:
 			offset = XKVRANGE_VC_SHADOW_OFFSET;
 			break;
@@ -79,6 +89,8 @@ const void *kasan_shadow_to_mem(const void *shadow_addr)
 
 	if (addr >= XKVRANGE_VC_SHADOW_OFFSET)
 		return (void *)(((addr - XKVRANGE_VC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKVRANGE_VC_START);
+	else if (addr >= XKPRANGE_WC_SHADOW_OFFSET)
+		return (void *)(((addr - XKPRANGE_WC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_WC_START);
 	else if (addr >= XKPRANGE_UC_SHADOW_OFFSET)
 		return (void *)(((addr - XKPRANGE_UC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_UC_START);
 	else if (addr >= XKPRANGE_CC_SHADOW_OFFSET)
@@ -142,6 +154,19 @@ static pud_t *__init kasan_pud_offset(p4d_t *p4dp, unsigned long addr, int node,
 	return pud_offset(p4dp, addr);
 }
 
+static p4d_t *__init kasan_p4d_offset(pgd_t *pgdp, unsigned long addr, int node, bool early)
+{
+	if (__pgd_none(early, pgdp_get(pgdp))) {
+		phys_addr_t p4d_phys = early ?
+			__pa_symbol(kasan_early_shadow_p4d) : kasan_alloc_zeroed_page(node);
+		if (!early)
+			memcpy(__va(p4d_phys), kasan_early_shadow_p4d, sizeof(kasan_early_shadow_p4d));
+		pgd_populate(&init_mm, pgdp, (p4d_t *)__va(p4d_phys));
+	}
+
+	return p4d_offset(pgdp, addr);
+}
+
 static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr,
 				      unsigned long end, int node, bool early)
 {
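Every level hook in this file follows the same template: if the upper-level
entry is still "none", install either the shared early shadow table or a
freshly zeroed page, then descend. A user-space sketch of that template
(stand-in types and sizes, not the kernel's page-table API):

	#include <stdlib.h>
	#include <string.h>

	static char early_shadow[4096];	/* stand-in for kasan_early_shadow_p4d */

	/* populate a missing entry, then descend: "early" shares the
	 * read-only shadow table, the late pass gets a private copy */
	static void *level_offset(void **entry, int early)
	{
		if (*entry == NULL) {		/* __pgd_none() stand-in */
			if (early)
				*entry = early_shadow;
			else {
				*entry = calloc(1, sizeof(early_shadow));
				/* the real code also copies the early shadow
				 * so mapped ranges keep their contents */
				memcpy(*entry, early_shadow, sizeof(early_shadow));
			}
		}
		return *entry;
	}

	int main(void)
	{
		void *p4d = NULL;

		level_offset(&p4d, 1);	/* early pass: shared table */
		p4d = NULL;
		level_offset(&p4d, 0);	/* late pass: private copy */
		return 0;
	}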
@@ -178,19 +203,19 @@ static void __init kasan_pud_populate(p4d_t *p4dp, unsigned long addr,
 	do {
 		next = pud_addr_end(addr, end);
 		kasan_pmd_populate(pudp, addr, next, node, early);
-	} while (pudp++, addr = next, addr != end);
+	} while (pudp++, addr = next, addr != end && __pud_none(early, READ_ONCE(*pudp)));
 }
 
 static void __init kasan_p4d_populate(pgd_t *pgdp, unsigned long addr,
 				      unsigned long end, int node, bool early)
 {
 	unsigned long next;
-	p4d_t *p4dp = p4d_offset(pgdp, addr);
+	p4d_t *p4dp = kasan_p4d_offset(pgdp, addr, node, early);
 
 	do {
 		next = p4d_addr_end(addr, end);
 		kasan_pud_populate(p4dp, addr, next, node, early);
-	} while (p4dp++, addr = next, addr != end);
+	} while (p4dp++, addr = next, addr != end && __p4d_none(early, READ_ONCE(*p4dp)));
 }
 
 static void __init kasan_pgd_populate(unsigned long addr, unsigned long end,
@@ -218,7 +243,7 @@ static void __init kasan_map_populate(unsigned long start, unsigned long end,
 asmlinkage void __init kasan_early_init(void)
 {
 	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PGDIR_SIZE));
-	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE));
+	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END + 1, PGDIR_SIZE));
 }
 
 static inline void kasan_set_pgd(pgd_t *pgdp, pgd_t pgdval)
@@ -233,7 +258,7 @@ static void __init clear_pgds(unsigned long start, unsigned long end)
 	 * swapper_pg_dir. pgd_clear() can't be used
 	 * here because it's nop on 2,3-level pagetable setups
 	 */
-	for (; start < end; start += PGDIR_SIZE)
+	for (; start < end; start = pgd_addr_end(start, end))
 		kasan_set_pgd((pgd_t *)pgd_offset_k(start), __pgd(0));
 }
 
@@ -242,6 +267,17 @@ void __init kasan_init(void)
 	u64 i;
 	phys_addr_t pa_start, pa_end;
 
+	/*
+	 * If PGDIR_SIZE is too large for cpu_vabits, KASAN_SHADOW_END will
+	 * overflow UINTPTR_MAX and then looks like a user space address.
+	 * For example, PGDIR_SIZE of CONFIG_4KB_4LEVEL is 2^39, which is too
+	 * large for Loongson-2K series whose cpu_vabits = 39.
+	 */
+	if (KASAN_SHADOW_END < vm_map_base) {
+		pr_warn("PGDIR_SIZE too large for cpu_vabits, KernelAddressSanitizer disabled.\n");
+		return;
+	}
+
 	/*
 	 * PGD was populated as invalid_pmd_table or invalid_pud_table
 	 * in pagetable_init() which depends on how many levels of page
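A worked example of the wrap-around the new check rejects. Only the
PGDIR_SIZE = 2^39 figure comes from the comment above; the addresses are
hypothetical, chosen so the sum overflows:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t pgdir_size   = 1ULL << 39;			/* CONFIG_4KB_4LEVEL */
		uint64_t shadow_start = 0xffffff8000000000ULL;		/* hypothetical */
		uint64_t shadow_size  = 2 * pgdir_size;			/* hypothetical */
		uint64_t vm_map_base  = 0xffff800000000000ULL;		/* hypothetical */

		/* round_up(start + size, pgdir_size) - 1, as in KASAN_SHADOW_END */
		uint64_t end = ((shadow_start + shadow_size + pgdir_size - 1)
				& ~(pgdir_size - 1)) - 1;

		/* the sum wrapped past 2^64, so "end" is now a low address
		 * that compares below vm_map_base, and KASAN must bail out */
		printf("KASAN_SHADOW_END = %#llx -> %s\n",
		       (unsigned long long)end,
		       end < vm_map_base ? "disable KASAN" : "ok");
		return 0;
	}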
|
Loading…
Reference in New Issue
Block a user