Merge branch 'x86/urgent' into x86/cache
Pick up pending upstream fixes to meet dependencies
commit 4fedcde702
@@ -536,7 +536,7 @@ static inline void __fpregs_load_activate(void)
         struct fpu *fpu = &current->thread.fpu;
         int cpu = smp_processor_id();
 
-        if (WARN_ON_ONCE(current->mm == NULL))
+        if (WARN_ON_ONCE(current->flags & PF_KTHREAD))
                 return;
 
         if (!fpregs_state_valid(fpu, cpu)) {
@@ -567,11 +567,11 @@ static inline void __fpregs_load_activate(void)
  * otherwise.
  *
  * The FPU context is only stored/restored for a user task and
- * ->mm is used to distinguish between kernel and user threads.
+ * PF_KTHREAD is used to distinguish between kernel and user threads.
  */
 static inline void switch_fpu_prepare(struct fpu *old_fpu, int cpu)
 {
-        if (static_cpu_has(X86_FEATURE_FPU) && current->mm) {
+        if (static_cpu_has(X86_FEATURE_FPU) && !(current->flags & PF_KTHREAD)) {
                 if (!copy_fpregs_to_fpstate(old_fpu))
                         old_fpu->last_cpu = -1;
                 else
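
The two hunks above stop using "task has an mm" as the kernel-thread test and key off PF_KTHREAD instead, because a kernel thread can temporarily adopt a user mm (as with use_mm()) and would then look like a user task. A standalone toy model of that distinction, not taken from this diff: PF_KTHREAD's value matches include/linux/sched.h, everything else (the struct, the task entries) is invented for illustration.

#include <stdio.h>
#include <stddef.h>

#define PF_KTHREAD 0x00200000   /* same bit value as include/linux/sched.h */

struct toy_task {
        const char *comm;
        unsigned int flags;
        void *mm;               /* NULL for most kernel threads */
};

static int is_user_task_by_mm(const struct toy_task *t)
{
        return t->mm != NULL;
}

static int is_user_task_by_flags(const struct toy_task *t)
{
        return !(t->flags & PF_KTHREAD);
}

int main(void)
{
        int some_mm;            /* address used as a stand-in for a real mm pointer */
        struct toy_task tasks[] = {
                { "bash",           0,          &some_mm },
                { "kworker/0:1",    PF_KTHREAD, NULL },
                { "kthread+use_mm", PF_KTHREAD, &some_mm },
        };

        for (size_t i = 0; i < sizeof(tasks) / sizeof(tasks[0]); i++)
                printf("%-15s user_by_mm=%d user_by_flags=%d\n", tasks[i].comm,
                       is_user_task_by_mm(&tasks[i]),
                       is_user_task_by_flags(&tasks[i]));
        return 0;
}

The third entry is the interesting case: judged by mm it would be treated as a user task, judged by PF_KTHREAD it is still a kernel thread, which is the situation the FPU code wants to get right.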
@@ -52,6 +52,9 @@
 
 #define INTEL_FAM6_CANNONLAKE_MOBILE    0x66
 
+#define INTEL_FAM6_ICELAKE_X            0x6A
+#define INTEL_FAM6_ICELAKE_XEON_D       0x6C
+#define INTEL_FAM6_ICELAKE_DESKTOP      0x7D
 #define INTEL_FAM6_ICELAKE_MOBILE       0x7E
 
 /* "Small Core" Processors (Atom) */
@@ -1464,7 +1464,8 @@ static void apic_pending_intr_clear(void)
         if (queued) {
                 if (boot_cpu_has(X86_FEATURE_TSC) && cpu_khz) {
                         ntsc = rdtsc();
-                        max_loops = (cpu_khz << 10) - (ntsc - tsc);
+                        max_loops = (long long)cpu_khz << 10;
+                        max_loops -= ntsc - tsc;
                 } else {
                         max_loops--;
                 }
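
The apic fix above is plain integer overflow: cpu_khz is a 32-bit unsigned quantity in the kernel, so cpu_khz << 10 wraps for clock rates above roughly 4.19 GHz and only the wrapped result ended up in max_loops. Widening to long long before the shift keeps the full value. A minimal userspace sketch with a made-up 4.3 GHz value:

#include <stdio.h>

int main(void)
{
        unsigned int cpu_khz = 4300000; /* hypothetical 4.3 GHz clock, in kHz */

        long long wrapped = cpu_khz << 10;              /* 32-bit shift wraps, then widens */
        long long widened = (long long)cpu_khz << 10;   /* widen first, as the new code does */

        printf("cpu_khz << 10            = %lld (wrapped)\n", wrapped);
        printf("(long long)cpu_khz << 10 = %lld\n", widened);
        return 0;
}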
@@ -789,13 +789,16 @@ static struct syscore_ops mc_syscore_ops = {
         .resume                 = mc_bp_resume,
 };
 
-static int mc_cpu_online(unsigned int cpu)
+static int mc_cpu_starting(unsigned int cpu)
 {
-        struct device *dev;
-
-        dev = get_cpu_device(cpu);
         microcode_update_cpu(cpu);
         pr_debug("CPU%d added\n", cpu);
+        return 0;
+}
+
+static int mc_cpu_online(unsigned int cpu)
+{
+        struct device *dev = get_cpu_device(cpu);
 
         if (sysfs_create_group(&dev->kobj, &mc_attr_group))
                 pr_err("Failed to create group for CPU%d\n", cpu);
@@ -872,6 +875,8 @@ int __init microcode_init(void)
                 goto out_ucode_group;
 
         register_syscore_ops(&mc_syscore_ops);
+        cpuhp_setup_state_nocalls(CPUHP_AP_MICROCODE_LOADER, "x86/microcode:starting",
+                                  mc_cpu_starting, NULL);
         cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/microcode:online",
                                   mc_cpu_online, mc_cpu_down_prep);
 
@@ -360,6 +360,9 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
         struct list_head *head;
         struct rdtgroup *entry;
 
+        if (!is_mbm_local_enabled())
+                return;
+
         r_mba = &rdt_resources_all[RDT_RESOURCE_MBA];
         closid = rgrp->closid;
         rmid = rgrp->mon.rmid;
@@ -796,8 +796,12 @@ static int rdt_bit_usage_show(struct kernfs_open_file *of,
                               struct seq_file *seq, void *v)
 {
         struct rdt_resource *r = of->kn->parent->priv;
-        u32 sw_shareable = 0, hw_shareable = 0;
-        u32 exclusive = 0, pseudo_locked = 0;
+        /*
+         * Use unsigned long even though only 32 bits are used to ensure
+         * test_bit() is used safely.
+         */
+        unsigned long sw_shareable = 0, hw_shareable = 0;
+        unsigned long exclusive = 0, pseudo_locked = 0;
         struct rdt_domain *dom;
         int i, hwb, swb, excl, psl;
         enum rdtgrp_mode mode;
@@ -842,10 +846,10 @@ static int rdt_bit_usage_show(struct kernfs_open_file *of,
                 }
                 for (i = r->cache.cbm_len - 1; i >= 0; i--) {
                         pseudo_locked = dom->plr ? dom->plr->cbm : 0;
-                        hwb = test_bit(i, (unsigned long *)&hw_shareable);
-                        swb = test_bit(i, (unsigned long *)&sw_shareable);
-                        excl = test_bit(i, (unsigned long *)&exclusive);
-                        psl = test_bit(i, (unsigned long *)&pseudo_locked);
+                        hwb = test_bit(i, &hw_shareable);
+                        swb = test_bit(i, &sw_shareable);
+                        excl = test_bit(i, &exclusive);
+                        psl = test_bit(i, &pseudo_locked);
                         if (hwb && swb)
                                 seq_putc(seq, 'X');
                         else if (hwb && !swb)
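
Both rdt_bit_usage_show() hunks address the same hazard: handing the address of a 32-bit variable to bit helpers that expect unsigned long makes them read 64 bits on x86-64, so the scan can pick up whatever happens to sit next to the variable. A userspace sketch of that effect, assuming a little-endian LP64 machine; toy_test_bit() is a simplified stand-in for the kernel's test_bit() and the values are invented.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* simplified stand-in for test_bit() on an array of unsigned long */
static int toy_test_bit(unsigned int nr, const unsigned long *addr)
{
        return (addr[nr / (8 * sizeof(long))] >> (nr % (8 * sizeof(long)))) & 1;
}

int main(void)
{
        struct {
                uint32_t cbm;           /* the 32-bit mask we mean to scan */
                uint32_t neighbour;     /* unrelated data right after it */
        } s = { .cbm = 0x0000000f, .neighbour = 0xffffffff };

        unsigned long widened;
        unsigned long val = s.cbm;      /* fixed pattern: local copy, upper bits zero */

        /* buggy pattern: a 64-bit read starting at the 32-bit field
         * (modelled with memcpy so the sketch itself stays well defined) */
        memcpy(&widened, &s, sizeof(widened));

        printf("bit 35 via 64-bit read over the u32: %d (neighbour leaks in)\n",
               toy_test_bit(35, &widened));
        printf("bit 35 via local unsigned long copy: %d\n",
               toy_test_bit(35, &val));
        return 0;
}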
@@ -2486,26 +2490,19 @@ out_destroy:
  */
 static void cbm_ensure_valid(u32 *_val, struct rdt_resource *r)
 {
-        /*
-         * Convert the u32 _val to an unsigned long required by all the bit
-         * operations within this function. No more than 32 bits of this
-         * converted value can be accessed because all bit operations are
-         * additionally provided with cbm_len that is initialized during
-         * hardware enumeration using five bits from the EAX register and
-         * thus never can exceed 32 bits.
-         */
-        unsigned long *val = (unsigned long *)_val;
+        unsigned long val = *_val;
         unsigned int cbm_len = r->cache.cbm_len;
         unsigned long first_bit, zero_bit;
 
-        if (*val == 0)
+        if (val == 0)
                 return;
 
-        first_bit = find_first_bit(val, cbm_len);
-        zero_bit = find_next_zero_bit(val, cbm_len, first_bit);
+        first_bit = find_first_bit(&val, cbm_len);
+        zero_bit = find_next_zero_bit(&val, cbm_len, first_bit);
 
         /* Clear any remaining bits to ensure contiguous region */
-        bitmap_clear(val, zero_bit, cbm_len - zero_bit);
+        bitmap_clear(&val, zero_bit, cbm_len - zero_bit);
+        *_val = (u32)val;
 }
 
 /*
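
cbm_ensure_valid() now copies the 32-bit mask into a local unsigned long, sanitizes that, and writes it back, rather than aliasing the caller's u32 through an unsigned long pointer. A userspace sketch of the same contiguity logic, with plain loops standing in for find_first_bit(), find_next_zero_bit() and bitmap_clear(), and an invented mask value:

#include <stdio.h>

static void toy_cbm_ensure_valid(unsigned int *_val, unsigned int cbm_len)
{
        unsigned long val = *_val;
        unsigned int first_bit, zero_bit, i;

        if (val == 0)
                return;

        /* first set bit */
        for (first_bit = 0; first_bit < cbm_len; first_bit++)
                if (val & (1UL << first_bit))
                        break;

        /* first clear bit at or above first_bit */
        for (zero_bit = first_bit; zero_bit < cbm_len; zero_bit++)
                if (!(val & (1UL << zero_bit)))
                        break;

        /* clear everything above that so the region stays contiguous */
        for (i = zero_bit; i < cbm_len; i++)
                val &= ~(1UL << i);

        *_val = (unsigned int)val;
}

int main(void)
{
        unsigned int cbm = 0x0000f0f0;  /* two separate runs of bits */

        toy_cbm_ensure_valid(&cbm, 20);
        printf("sanitized cbm: 0x%08x\n", cbm);  /* keeps only the low run, 0x000000f0 */
        return 0;
}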
@@ -2534,7 +2531,12 @@ static int __init_one_rdt_domain(struct rdt_domain *d, struct rdt_resource *r,
                 if (closid_allocated(i) && i != closid) {
                         mode = rdtgroup_mode_by_closid(i);
                         if (mode == RDT_MODE_PSEUDO_LOCKSETUP)
-                                break;
+                                /*
+                                 * ctrl values for locksetup aren't relevant
+                                 * until the schemata is written, and the mode
+                                 * becomes RDT_MODE_PSEUDO_LOCKED.
+                                 */
+                                continue;
                         /*
                          * If CDP is active include peer domain's
                          * usage to ensure there is no overlap
@@ -102,7 +102,7 @@ static void __kernel_fpu_begin(void)
 
         kernel_fpu_disable();
 
-        if (current->mm) {
+        if (!(current->flags & PF_KTHREAD)) {
                 if (!test_thread_flag(TIF_NEED_FPU_LOAD)) {
                         set_thread_flag(TIF_NEED_FPU_LOAD);
                         /*
@@ -5,6 +5,7 @@
 
 #include <linux/compat.h>
 #include <linux/cpu.h>
+#include <linux/pagemap.h>
 
 #include <asm/fpu/internal.h>
 #include <asm/fpu/signal.h>
@@ -61,6 +62,11 @@ static inline int save_fsave_header(struct task_struct *tsk, void __user *buf)
         struct user_i387_ia32_struct env;
         struct _fpstate_32 __user *fp = buf;
 
+        fpregs_lock();
+        if (!test_thread_flag(TIF_NEED_FPU_LOAD))
+                copy_fxregs_to_kernel(&tsk->thread.fpu);
+        fpregs_unlock();
+
         convert_from_fxsr(&env, tsk);
 
         if (__copy_to_user(buf, &env, sizeof(env)) ||
@@ -189,15 +195,7 @@ retry:
         fpregs_unlock();
 
         if (ret) {
-                int aligned_size;
-                int nr_pages;
-
-                aligned_size = offset_in_page(buf_fx) + fpu_user_xstate_size;
-                nr_pages = DIV_ROUND_UP(aligned_size, PAGE_SIZE);
-
-                ret = get_user_pages_unlocked((unsigned long)buf_fx, nr_pages,
-                                              NULL, FOLL_WRITE);
-                if (ret == nr_pages)
+                if (!fault_in_pages_writeable(buf_fx, fpu_user_xstate_size))
                         goto retry;
                 return -EFAULT;
         }
@@ -758,7 +758,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
                          BREAK_INSTR_SIZE);
         bpt->type = BP_POKE_BREAKPOINT;
 
-        return err;
+        return 0;
 }
 
 int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
@@ -199,7 +199,7 @@ static inline p4d_t *early_p4d_offset(pgd_t *pgd, unsigned long addr)
         if (!pgtable_l5_enabled())
                 return (p4d_t *)pgd;
 
-        p4d = __pa_nodebug(pgd_val(*pgd)) & PTE_PFN_MASK;
+        p4d = pgd_val(*pgd) & PTE_PFN_MASK;
         p4d += __START_KERNEL_map - phys_base;
         return (p4d_t *)p4d + p4d_index(addr);
 }
@@ -52,7 +52,7 @@ static __initdata struct kaslr_memory_region {
 } kaslr_regions[] = {
         { &page_offset_base, 0 },
         { &vmalloc_base, 0 },
-        { &vmemmap_base, 1 },
+        { &vmemmap_base, 0 },
 };
 
 /* Get size in bytes used by the memory region */
@@ -78,6 +78,7 @@ void __init kernel_randomize_memory(void)
         unsigned long rand, memory_tb;
         struct rnd_state rand_state;
         unsigned long remain_entropy;
+        unsigned long vmemmap_size;
 
         vaddr_start = pgtable_l5_enabled() ? __PAGE_OFFSET_BASE_L5 : __PAGE_OFFSET_BASE_L4;
         vaddr = vaddr_start;
@@ -109,6 +110,14 @@ void __init kernel_randomize_memory(void)
         if (memory_tb < kaslr_regions[0].size_tb)
                 kaslr_regions[0].size_tb = memory_tb;
 
+        /*
+         * Calculate the vmemmap region size in TBs, aligned to a TB
+         * boundary.
+         */
+        vmemmap_size = (kaslr_regions[0].size_tb << (TB_SHIFT - PAGE_SHIFT)) *
+                        sizeof(struct page);
+        kaslr_regions[2].size_tb = DIV_ROUND_UP(vmemmap_size, 1UL << TB_SHIFT);
+
         /* Calculate entropy available between regions */
         remain_entropy = vaddr_end - vaddr_start;
         for (i = 0; i < ARRAY_SIZE(kaslr_regions); i++)
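
The three KASLR hunks size the vmemmap region from the actual direct-map size instead of hardcoding one terabyte: size_tb terabytes of RAM need size_tb << (TB_SHIFT - PAGE_SHIFT) struct page entries, and the total is rounded up to whole terabytes. A quick userspace check of that arithmetic, assuming the usual x86-64 values (TB_SHIFT 40, PAGE_SHIFT 12, a 64-byte struct page, 64-bit longs):

#include <stdio.h>

#define TB_SHIFT        40
#define PAGE_SHIFT      12
#define STRUCT_PAGE_SIZE 64UL   /* typical sizeof(struct page) */

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned long size_tb;

        for (size_tb = 1; size_tb <= 64; size_tb *= 4) {
                unsigned long vmemmap_size =
                        (size_tb << (TB_SHIFT - PAGE_SHIFT)) * STRUCT_PAGE_SIZE;
                unsigned long vmemmap_tb =
                        DIV_ROUND_UP(vmemmap_size, 1UL << TB_SHIFT);

                printf("%3lu TB of RAM -> vmemmap %13lu bytes (%lu TB region)\n",
                       size_tb, vmemmap_size, vmemmap_tb);
        }
        return 0;
}

With a 64-byte struct page the region only exceeds 1 TB once the direct map passes 64 TB, which is why the old hardcoded size appeared to work on smaller machines.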
@@ -101,6 +101,7 @@ enum cpuhp_state {
         CPUHP_AP_IRQ_BCM2836_STARTING,
         CPUHP_AP_IRQ_MIPS_GIC_STARTING,
         CPUHP_AP_ARM_MVEBU_COHERENCY,
+        CPUHP_AP_MICROCODE_LOADER,
         CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
         CPUHP_AP_PERF_X86_STARTING,
         CPUHP_AP_PERF_X86_AMD_IBS_STARTING,
mm/vmalloc.c
@@ -2123,9 +2123,9 @@ static inline void set_area_direct_map(const struct vm_struct *area,
 /* Handle removing and resetting vm mappings related to the vm_struct. */
 static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages)
 {
-        unsigned long addr = (unsigned long)area->addr;
         unsigned long start = ULONG_MAX, end = 0;
         int flush_reset = area->flags & VM_FLUSH_RESET_PERMS;
+        int flush_dmap = 0;
         int i;
 
         /*
@@ -2135,8 +2135,8 @@ static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages)
          * execute permissions, without leaving a RW+X window.
          */
         if (flush_reset && !IS_ENABLED(CONFIG_ARCH_HAS_SET_DIRECT_MAP)) {
-                set_memory_nx(addr, area->nr_pages);
-                set_memory_rw(addr, area->nr_pages);
+                set_memory_nx((unsigned long)area->addr, area->nr_pages);
+                set_memory_rw((unsigned long)area->addr, area->nr_pages);
         }
 
         remove_vm_area(area->addr);
@@ -2160,9 +2160,11 @@ static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages)
          * the vm_unmap_aliases() flush includes the direct map.
          */
         for (i = 0; i < area->nr_pages; i++) {
-                if (page_address(area->pages[i])) {
+                unsigned long addr = (unsigned long)page_address(area->pages[i]);
+                if (addr) {
                         start = min(addr, start);
-                        end = max(addr, end);
+                        end = max(addr + PAGE_SIZE, end);
+                        flush_dmap = 1;
                 }
         }
 
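
In the loop above the flush range is now derived from each page's direct-map address, end is pushed one PAGE_SIZE past the start of the last page so that page is actually covered, and flush_dmap records whether any page was found at all. A userspace sketch of that min/max accumulation with invented addresses:

#include <stdio.h>
#include <limits.h>

#define PAGE_SIZE 4096UL

int main(void)
{
        /* pretend direct-map addresses of three pages (not contiguous, unordered) */
        unsigned long pages[] = { 0x100000UL, 0x108000UL, 0x103000UL };
        unsigned long start = ULONG_MAX, end = 0;
        int flush_dmap = 0;
        unsigned int i;

        for (i = 0; i < sizeof(pages) / sizeof(pages[0]); i++) {
                unsigned long addr = pages[i];

                if (addr) {
                        start = addr < start ? addr : start;
                        end = addr + PAGE_SIZE > end ? addr + PAGE_SIZE : end;
                        flush_dmap = 1;
                }
        }

        printf("flush range [0x%lx, 0x%lx), %lu bytes, flush_dmap=%d\n",
               start, end, end - start, flush_dmap);
        return 0;
}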
@@ -2172,7 +2174,7 @@ static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages)
          * reset the direct map permissions to the default.
          */
         set_area_direct_map(area, set_direct_map_invalid_noflush);
-        _vm_unmap_aliases(start, end, 1);
+        _vm_unmap_aliases(start, end, flush_dmap);
         set_area_direct_map(area, set_direct_map_default_noflush);
 }
 