x86/cpufeature: Remove cpu_has_gbpages
Signed-off-by: Borislav Petkov <bp@suse.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1459266123-21878-6-git-send-email-bp@alien8.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit b8291adc19
parent 62436a4d36
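The conversion is mechanical: the cpu_has_gbpages wrapper macro is deleted from cpufeature.h and every call site tests the feature bit directly. A minimal sketch of the pattern, using the probe_page_size_mask() site from the diff below as the example (illustration only, not part of the commit):

/* before: call sites go through a per-feature wrapper macro */
#define cpu_has_gbpages		boot_cpu_has(X86_FEATURE_GBPAGES)

	if (direct_gbpages && cpu_has_gbpages)
		page_size_mask |= 1 << PG_LEVEL_1G;

/* after: the macro is gone; call sites use boot_cpu_has() directly */
	if (direct_gbpages && boot_cpu_has(X86_FEATURE_GBPAGES))
		page_size_mask |= 1 << PG_LEVEL_1G;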
@@ -130,7 +130,6 @@ extern const char * const x86_bug_flags[NBUGINTS*32];
 #define cpu_has_avx		boot_cpu_has(X86_FEATURE_AVX)
 #define cpu_has_avx2		boot_cpu_has(X86_FEATURE_AVX2)
 #define cpu_has_clflush		boot_cpu_has(X86_FEATURE_CLFLUSH)
-#define cpu_has_gbpages		boot_cpu_has(X86_FEATURE_GBPAGES)
 #define cpu_has_pat		boot_cpu_has(X86_FEATURE_PAT)
 #define cpu_has_xsave		boot_cpu_has(X86_FEATURE_XSAVE)
 #define cpu_has_xsaves		boot_cpu_has(X86_FEATURE_XSAVES)
@@ -3836,7 +3836,8 @@ reset_tdp_shadow_zero_bits_mask(struct kvm_vcpu *vcpu,
 		__reset_rsvds_bits_mask(vcpu, &context->shadow_zero_check,
 					boot_cpu_data.x86_phys_bits,
 					context->shadow_root_level, false,
-					cpu_has_gbpages, true, true);
+					boot_cpu_has(X86_FEATURE_GBPAGES),
+					true, true);
 	else
 		__reset_rsvds_bits_mask_ept(&context->shadow_zero_check,
 					    boot_cpu_data.x86_phys_bits,
@@ -162,7 +162,7 @@ static __init int setup_hugepagesz(char *opt)
 	unsigned long ps = memparse(opt, &opt);
 	if (ps == PMD_SIZE) {
 		hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
-	} else if (ps == PUD_SIZE && cpu_has_gbpages) {
+	} else if (ps == PUD_SIZE && boot_cpu_has(X86_FEATURE_GBPAGES)) {
 		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
 	} else {
 		printk(KERN_ERR "hugepagesz: Unsupported page size %lu M\n",
@@ -177,7 +177,7 @@ __setup("hugepagesz=", setup_hugepagesz);
 static __init int gigantic_pages_init(void)
 {
 	/* With compaction or CMA we can allocate gigantic pages at runtime */
-	if (cpu_has_gbpages && !size_to_hstate(1UL << PUD_SHIFT))
+	if (boot_cpu_has(X86_FEATURE_GBPAGES) && !size_to_hstate(1UL << PUD_SHIFT))
 		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
 	return 0;
 }
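Usage note (not part of the commit): setup_hugepagesz() above parses the hugepagesz= boot parameter, so on a CPU with the gbpages feature a pool of 1 GB pages can be requested on the kernel command line, for example (hypothetical pool size):

hugepagesz=1G hugepages=4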
@@ -173,7 +173,7 @@ static void __init probe_page_size_mask(void)
 		__supported_pte_mask &= ~_PAGE_GLOBAL;

 	/* Enable 1 GB linear kernel mappings if available: */
-	if (direct_gbpages && cpu_has_gbpages) {
+	if (direct_gbpages && boot_cpu_has(X86_FEATURE_GBPAGES)) {
 		printk(KERN_INFO "Using GB pages for direct mapping\n");
 		page_size_mask |= 1 << PG_LEVEL_1G;
 	} else {
@@ -378,7 +378,7 @@ EXPORT_SYMBOL(iounmap);
 int __init arch_ioremap_pud_supported(void)
 {
 #ifdef CONFIG_X86_64
-	return cpu_has_gbpages;
+	return boot_cpu_has(X86_FEATURE_GBPAGES);
 #else
 	return 0;
 #endif
@@ -1055,7 +1055,7 @@ static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
 	/*
 	 * Map everything starting from the Gb boundary, possibly with 1G pages
 	 */
-	while (cpu_has_gbpages && end - start >= PUD_SIZE) {
+	while (boot_cpu_has(X86_FEATURE_GBPAGES) && end - start >= PUD_SIZE) {
 		set_pud(pud, __pud(cpa->pfn << PAGE_SHIFT | _PAGE_PSE |
 				   massage_pgprot(pud_pgprot)));

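Background (not part of the commit): X86_FEATURE_GBPAGES reflects the pdpe1gb CPUID flag, leaf 0x80000001, EDX bit 26. A self-contained user-space sketch that reads the same bit, assuming a GCC/Clang toolchain providing <cpuid.h>:

#include <cpuid.h>
#include <stdio.h>

/* pdpe1gb: 1 GB page support, CPUID leaf 0x80000001, EDX bit 26 */
int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(0x80000001, &eax, &ebx, &ecx, &edx))
		return 1;	/* extended CPUID leaf not available */

	printf("1 GB pages %ssupported\n", (edx & (1u << 26)) ? "" : "not ");
	return 0;
}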