Merge branch 'x86/urgent' into x86/asm, to fix semantic conflict
'cpu_has_pse' has changed to boot_cpu_has(X86_FEATURE_PSE); fix this
up in the merge commit when merging in the x86/urgent tree, which
includes the following commit:

  103f6112f2 ("x86/mm/xen: Suppress hugetlbfs in PV guests")
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit b2eafe890d
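Concretely, the conflict resolution turns the line added by that commit,

    #define hugepages_supported() cpu_has_pse

into the spelling used in the hugetlb.h hunk below,

    #define hugepages_supported() boot_cpu_has(X86_FEATURE_PSE)

(the pre-merge form is my reconstruction of the old accessor style; the
post-merge line is taken verbatim from the diff).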
@@ -19,7 +19,7 @@ ffffff0000000000 - ffffff7fffffffff (=39 bits) %esp fixup stacks
 ffffffef00000000 - ffffffff00000000 (=64 GB) EFI region mapping space
 ... unused hole ...
 ffffffff80000000 - ffffffffa0000000 (=512 MB) kernel text mapping, from phys 0
-ffffffffa0000000 - ffffffffff5fffff (=1525 MB) module mapping space
+ffffffffa0000000 - ffffffffff5fffff (=1526 MB) module mapping space
 ffffffffff600000 - ffffffffffdfffff (=8 MB) vsyscalls
 ffffffffffe00000 - ffffffffffffffff (=2 MB) unused hole
@@ -31,8 +31,8 @@ vmalloc space is lazily synchronized into the different PML4 pages of
 the processes using the page fault handler, with init_level4_pgt as
 reference.

-Current X86-64 implementations only support 40 bits of address space,
-but we support up to 46 bits. This expands into MBZ space in the page tables.
+Current X86-64 implementations support up to 46 bits of address space (64 TB),
+which is our current limit. This expands into MBZ space in the page tables.

 We map EFI runtime services in the 'efi_pgd' PGD in a 64Gb large virtual
 memory window (this size is arbitrary, it can be raised later if needed).
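Two quick sanity checks on the numbers above (illustrative arithmetic, not
part of the patch): the module mapping range is inclusive, and 46 address
bits indeed cover 64 TB.

    #include <stdio.h>

    int main(void)
    {
            unsigned long long mod_start = 0xffffffffa0000000ULL;
            unsigned long long mod_end   = 0xffffffffff5fffffULL;

            /* inclusive range, hence the +1; >> 20 converts bytes to MB */
            printf("module space: %llu MB\n", (mod_end - mod_start + 1) >> 20); /* 1526 */

            /* 46 address bits -> 2^46 bytes; >> 40 converts bytes to TB */
            printf("46-bit space: %llu TB\n", (1ULL << 46) >> 40);              /* 64 */
            return 0;
    }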
@@ -4,6 +4,7 @@
 #include <asm/page.h>
 #include <asm-generic/hugetlb.h>

+#define hugepages_supported() boot_cpu_has(X86_FEATURE_PSE)

 static inline int is_hugepage_only_range(struct mm_struct *mm,
                                          unsigned long addr,
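For readers unfamiliar with the hook: hugepages_supported() is what the
generic hugetlb code consults before setting anything up. A simplified
sketch of the consumer side (paraphrased from mm/hugetlb.c, details elided):

    /* Simplified sketch, not the verbatim kernel code: if the CPU (or a
     * Xen PV guest with PSE masked) reports no PSE support, hugetlbfs
     * never initializes. */
    static int __init hugetlb_init(void)
    {
            if (!hugepages_supported())
                    return 0;

            /* ... allocate default pools, register sysfs/procfs files ... */
            return 0;
    }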
@@ -152,6 +152,11 @@ static struct clocksource hyperv_cs = {
        .flags = CLOCK_SOURCE_IS_CONTINUOUS,
 };

+static unsigned char hv_get_nmi_reason(void)
+{
+       return 0;
+}
+
 static void __init ms_hyperv_init_platform(void)
 {
        /*
@@ -191,6 +196,13 @@ static void __init ms_hyperv_init_platform(void)
        machine_ops.crash_shutdown = hv_machine_crash_shutdown;
 #endif
        mark_tsc_unstable("running on Hyper-V");
+
+       /*
+        * Generation 2 instances don't support reading the NMI status from
+        * 0x61 port.
+        */
+       if (efi_enabled(EFI_BOOT))
+               x86_platform.get_nmi_reason = hv_get_nmi_reason;
 }

 const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = {
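Why the stub helps (my summary, hedged): the stock x86 get_nmi_reason hook
decodes the legacy NMI status/control port 0x61, which UEFI-booted
(Generation 2) Hyper-V guests do not emulate, so the platform hook is
pointed at a stub that simply reports "no reason". Roughly:

    /* Illustrative sketch of the default hook being overridden: it reads
     * the legacy NMI status/control port, absent on Gen2 instances. */
    static unsigned char default_get_nmi_reason_sketch(void)
    {
            return inb(0x61);       /* NMI status/control port on PC-compatible systems */
    }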
@@ -362,6 +362,7 @@ struct sbridge_pvt {

        /* Memory type detection */
        bool is_mirrored, is_lockstep, is_close_pg;
+       bool is_chan_hash;

        /* Fifo double buffers */
        struct mce mce_entry[MCE_LOG_LEN];
@@ -1060,6 +1061,20 @@ static inline u8 sad_pkg_ha(u8 pkg)
        return (pkg >> 2) & 0x1;
 }

+static int haswell_chan_hash(int idx, u64 addr)
+{
+       int i;
+
+       /*
+        * XOR even bits from 12:26 to bit0 of idx,
+        * odd bits from 13:27 to bit1
+        */
+       for (i = 12; i < 28; i += 2)
+               idx ^= (addr >> i) & 3;
+
+       return idx;
+}
+
 /****************************************************************************
                        Memory check routines
  ****************************************************************************/
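To make the bit folding concrete, here is a stand-alone copy of the hash fed
a few made-up addresses (illustrative only, not part of the driver):

    #include <stdio.h>
    #include <stdint.h>

    /* Same logic as haswell_chan_hash() above, outside the kernel. */
    static int chan_hash(int idx, uint64_t addr)
    {
            int i;

            for (i = 12; i < 28; i += 2)
                    idx ^= (addr >> i) & 3;

            return idx;
    }

    int main(void)
    {
            /* Even address bits fold into bit 0 of idx, odd bits into bit 1. */
            printf("%d\n", chan_hash(0, 0x1000)); /* bit 12 set        -> 1 */
            printf("%d\n", chan_hash(0, 0x2000)); /* bit 13 set        -> 2 */
            printf("%d\n", chan_hash(0, 0x3000)); /* bits 12+13 set    -> 3 */
            printf("%d\n", chan_hash(0, 0x5000)); /* bits 12+14 cancel -> 0 */
            return 0;
    }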
@@ -1616,6 +1631,10 @@ static int get_dimm_config(struct mem_ctl_info *mci)
                                        KNL_MAX_CHANNELS : NUM_CHANNELS;
        u64 knl_mc_sizes[KNL_MAX_CHANNELS];

+       if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL) {
+               pci_read_config_dword(pvt->pci_ha0, HASWELL_HASYSDEFEATURE2, &reg);
+               pvt->is_chan_hash = GET_BITFIELD(reg, 21, 21);
+       }
        if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL ||
            pvt->info.type == KNIGHTS_LANDING)
                pci_read_config_dword(pvt->pci_sad1, SAD_TARGET, &reg);
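GET_BITFIELD(reg, 21, 21) pulls out the single bit 21 of that register, which
the driver then treats as "channel hashing enabled". For a one-bit field the
macro reduces to a plain shift-and-mask (sketch, not the driver's macro):

    /* Equivalent of GET_BITFIELD(reg, 21, 21) for a one-bit field. */
    static inline unsigned int chan_hash_enabled(unsigned int reg)
    {
            return (reg >> 21) & 0x1;
    }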
@@ -2118,12 +2137,15 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
        }

        ch_way = TAD_CH(reg) + 1;
-       sck_way = 1 << TAD_SOCK(reg);
+       sck_way = TAD_SOCK(reg);

        if (ch_way == 3)
                idx = addr >> 6;
-       else
+       else {
                idx = (addr >> (6 + sck_way + shiftup)) & 0x3;
+               if (pvt->is_chan_hash)
+                       idx = haswell_chan_hash(idx, addr);
+       }
        idx = idx % ch_way;

        /*
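My reading of the sck_way change, hedged: the old code stored the socket
interleave factor (1 << TAD_SOCK(reg)) in sck_way and then added it into a
shift count, so with TAD_SOCK(reg) = 2 and shiftup = 0 the index came from
addr >> (6 + 4) instead of addr >> (6 + 2). Keeping the raw exponent in
sck_way makes the shift count correct; places that need the actual factor
now compute 1 << sck_way explicitly, as in the sck_xch hunk below.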
@@ -2157,7 +2179,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
        switch(ch_way) {
        case 2:
        case 4:
-               sck_xch = 1 << sck_way * (ch_way >> 1);
+               sck_xch = (1 << sck_way) * (ch_way >> 1);
                break;
        default:
                sprintf(msg, "Invalid mirror set. Can't decode addr");
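The added parentheses matter because in C the multiplication binds tighter
than the shift, so the old expression parsed as 1 << (sck_way * (ch_way >> 1)).
A tiny illustration with made-up values:

    #include <stdio.h>

    int main(void)
    {
            int sck_way = 2, ch_way = 4;    /* made-up example values */

            /* '*' binds tighter than '<<': the old form shifts by 2 * 2 = 4 */
            printf("%d\n", 1 << sck_way * (ch_way >> 1));   /* 16 */
            /* fixed form: socket interleave factor times half the channels */
            printf("%d\n", (1 << sck_way) * (ch_way >> 1)); /* 8  */
            return 0;
    }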
@@ -2193,7 +2215,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,

        ch_addr = addr - offset;
        ch_addr >>= (6 + shiftup);
-       ch_addr /= ch_way * sck_way;
+       ch_addr /= sck_xch;
        ch_addr <<= (6 + shiftup);
        ch_addr |= addr & ((1 << (6 + shiftup)) - 1);
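Last piece of the same fix (again my reading): now that sck_way holds the
interleave exponent rather than the factor, ch_way * sck_way no longer equals
the combined socket-and-channel interleave, so the reverse translation of the
channel address divides by the sck_xch value computed above instead.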