Merge branch 'fixes' into next
Bring in our fixes branch for this cycle, which avoids some small conflicts with upcoming commits.
commit 960e370813
@@ -22,7 +22,7 @@
 |        nios2: | TODO |
 |     openrisc: | TODO |
 |       parisc: | TODO |
-|      powerpc: |  ok  |
+|      powerpc: | TODO |
 |        riscv: |  ok  |
 |         s390: |  ok  |
 |           sh: | TODO |

@@ -49,16 +49,18 @@ Register preservation rules
 Register preservation rules match the ELF ABI calling sequence with the
 following differences:
 
-=========== ============= ========================================
 --- For the sc instruction, differences with the ELF ABI ---
+=========== ============= ========================================
 r0          Volatile      (System call number.)
 r3          Volatile      (Parameter 1, and return value.)
 r4-r8       Volatile      (Parameters 2-6.)
 cr0         Volatile      (cr0.SO is the return error condition.)
 cr1, cr5-7  Nonvolatile
 lr          Nonvolatile
+=========== ============= ========================================
 
 --- For the scv 0 instruction, differences with the ELF ABI ---
+=========== ============= ========================================
 r0          Volatile      (System call number.)
 r3          Volatile      (Parameter 1, and return value.)
 r4-r8       Volatile      (Parameters 2-6.)

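For reference, the sc rules above can be exercised from userspace with a raw wrapper along these lines (a minimal sketch, not part of this patch; the function name is hypothetical): the syscall number goes in r0, parameters in r3-r8, the result comes back in r3, and cr0.SO signals an error, while lr and cr1, cr5-7 survive the call.

static long raw_syscall1(long nr, long arg)
{
	register long r0 asm("r0") = nr;	/* system call number */
	register long r3 asm("r3") = arg;	/* parameter 1, return value */

	asm volatile("sc\n\t"
		     "bns+ 1f\n\t"		/* cr0.SO clear: success */
		     "neg %1,%1\n"		/* cr0.SO set: return -errno */
		     "1:"
		     : "+r" (r0), "+r" (r3)
		     :
		     : "r4", "r5", "r6", "r7", "r8", "r9", "r10", "r11", "r12",
		       "cr0", "ctr", "xer", "memory");

	return r3;
}
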
@@ -116,7 +116,6 @@ config PPC
 	#
 	select ARCH_32BIT_OFF_T if PPC32
 	select ARCH_HAS_DEBUG_VIRTUAL
-	select ARCH_HAS_DEBUG_VM_PGTABLE
 	select ARCH_HAS_DEVMEM_IS_ALLOWED
 	select ARCH_HAS_ELF_RANDOMIZE
 	select ARCH_HAS_FORTIFY_SOURCE

@@ -860,6 +859,18 @@ config PPC_SUBPAGE_PROT
 
 	  If unsure, say N here.
 
+config PPC_PROT_SAO_LPAR
+	bool "Support PROT_SAO mappings in LPARs"
+	depends on PPC_BOOK3S_64
+	help
+	  This option adds support for PROT_SAO mappings from userspace
+	  inside LPARs on supported CPUs.
+
+	  This may cause issues when performing guest migration from
+	  a CPU that supports SAO to one that does not.
+
+	  If unsure, say N here.
+
 config PPC_COPRO_BASE
 	bool
 
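As a userspace-facing sketch of what this option enables (a hypothetical example; it assumes a CPU with SAO support, and CONFIG_PPC_PROT_SAO_LPAR=y when run inside an LPAR), a mapping requests strong access ordering via the PROT_SAO flag defined further down in this diff — compare the prot_sao selftest added at the end:

#include <stdio.h>
#include <sys/mman.h>

#ifndef PROT_SAO
#define PROT_SAO 0x10	/* powerpc-specific, see uapi/asm/mman.h below */
#endif

int main(void)
{
	void *p = mmap(NULL, 64 * 1024, PROT_READ | PROT_WRITE | PROT_SAO,
		       MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap(PROT_SAO)");	/* e.g. CPU or LPAR lacks SAO */
		return 1;
	}
	return 0;
}
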
@@ -239,14 +239,14 @@ static inline void early_init_mmu_secondary(void)
 
 extern void hash__setup_initial_memory_limit(phys_addr_t first_memblock_base,
					      phys_addr_t first_memblock_size);
-extern void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
-					      phys_addr_t first_memblock_size);
 static inline void setup_initial_memory_limit(phys_addr_t first_memblock_base,
					      phys_addr_t first_memblock_size)
 {
-	if (early_radix_enabled())
-		return radix__setup_initial_memory_limit(first_memblock_base,
-						   first_memblock_size);
+	/*
+	 * Hash has more strict restrictions. At this point we don't
+	 * know which translations we will pick. Hence go with hash
+	 * restrictions.
+	 */
 	return hash__setup_initial_memory_limit(first_memblock_base,
					   first_memblock_size);
 }

@@ -20,13 +20,9 @@
 #define _PAGE_RW		(_PAGE_READ | _PAGE_WRITE)
 #define _PAGE_RWX		(_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC)
 #define _PAGE_PRIVILEGED	0x00008 /* kernel access only */
-
-#define _PAGE_CACHE_CTL		0x00030 /* Bits for the folowing cache modes */
-			/* No bits set is normal cacheable memory */
-			/* 0x00010 unused, is SAO bit on radix POWER9 */
+#define _PAGE_SAO		0x00010 /* Strong access order */
 #define _PAGE_NON_IDEMPOTENT	0x00020 /* non idempotent memory */
 #define _PAGE_TOLERANT		0x00030 /* tolerant memory, cache inhibited */
-
 #define _PAGE_DIRTY		0x00080 /* C: page changed */
 #define _PAGE_ACCESSED		0x00100 /* R: page referenced */
 /*

@@ -828,6 +824,8 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 	return hash__set_pte_at(mm, addr, ptep, pte, percpu);
 }
 
+#define _PAGE_CACHE_CTL	(_PAGE_SAO | _PAGE_NON_IDEMPOTENT | _PAGE_TOLERANT)
+
 #define pgprot_noncached pgprot_noncached
 static inline pgprot_t pgprot_noncached(pgprot_t prot)
 {

@@ -196,7 +196,7 @@ static inline void cpu_feature_keys_init(void) { }
 #define CPU_FTR_SPURR			LONG_ASM_CONST(0x0000000001000000)
 #define CPU_FTR_DSCR			LONG_ASM_CONST(0x0000000002000000)
 #define CPU_FTR_VSX			LONG_ASM_CONST(0x0000000004000000)
-// Free					LONG_ASM_CONST(0x0000000008000000)
+#define CPU_FTR_SAO			LONG_ASM_CONST(0x0000000008000000)
 #define CPU_FTR_CP_USE_DCBTZ		LONG_ASM_CONST(0x0000000010000000)
 #define CPU_FTR_UNALIGNED_LD_STD	LONG_ASM_CONST(0x0000000020000000)
 #define CPU_FTR_ASYM_SMT		LONG_ASM_CONST(0x0000000040000000)

@@ -441,7 +441,7 @@ static inline void cpu_feature_keys_init(void) { }
	    CPU_FTR_MMCRA | CPU_FTR_SMT | \
	    CPU_FTR_COHERENT_ICACHE | \
	    CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
-	    CPU_FTR_DSCR | CPU_FTR_ASYM_SMT | \
+	    CPU_FTR_DSCR | CPU_FTR_SAO | CPU_FTR_ASYM_SMT | \
	    CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
	    CPU_FTR_CFAR | CPU_FTR_HVMODE | \
	    CPU_FTR_VMX_COPY | CPU_FTR_HAS_PPR | CPU_FTR_DABRX )

@@ -450,7 +450,7 @@ static inline void cpu_feature_keys_init(void) { }
	    CPU_FTR_MMCRA | CPU_FTR_SMT | \
	    CPU_FTR_COHERENT_ICACHE | \
	    CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
-	    CPU_FTR_DSCR | \
+	    CPU_FTR_DSCR | CPU_FTR_SAO | \
	    CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
	    CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \
	    CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | \

@@ -461,7 +461,7 @@ static inline void cpu_feature_keys_init(void) { }
	    CPU_FTR_MMCRA | CPU_FTR_SMT | \
	    CPU_FTR_COHERENT_ICACHE | \
	    CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
-	    CPU_FTR_DSCR | \
+	    CPU_FTR_DSCR | CPU_FTR_SAO | \
	    CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
	    CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \
	    CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_ARCH_207S | \

@@ -479,7 +479,7 @@ static inline void cpu_feature_keys_init(void) { }
	    CPU_FTR_MMCRA | CPU_FTR_SMT | \
	    CPU_FTR_COHERENT_ICACHE | \
	    CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
-	    CPU_FTR_DSCR | \
+	    CPU_FTR_DSCR | CPU_FTR_SAO | \
	    CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
	    CPU_FTR_CFAR | CPU_FTR_HVMODE | CPU_FTR_VMX_COPY | \
	    CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_ARCH_207S | \

@@ -13,20 +13,43 @@
 #include <linux/pkeys.h>
 #include <asm/cpu_has_feature.h>
 
-#ifdef CONFIG_PPC_MEM_KEYS
 static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot,
		unsigned long pkey)
 {
-	return pkey_to_vmflag_bits(pkey);
+#ifdef CONFIG_PPC_MEM_KEYS
+	return (((prot & PROT_SAO) ? VM_SAO : 0) | pkey_to_vmflag_bits(pkey));
+#else
+	return ((prot & PROT_SAO) ? VM_SAO : 0);
+#endif
 }
 #define arch_calc_vm_prot_bits(prot, pkey) arch_calc_vm_prot_bits(prot, pkey)
 
 static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
 {
-	return __pgprot(vmflag_to_pte_pkey_bits(vm_flags));
+#ifdef CONFIG_PPC_MEM_KEYS
+	return (vm_flags & VM_SAO) ?
+		__pgprot(_PAGE_SAO | vmflag_to_pte_pkey_bits(vm_flags)) :
+		__pgprot(0 | vmflag_to_pte_pkey_bits(vm_flags));
+#else
+	return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
+#endif
 }
 #define arch_vm_get_page_prot(vm_flags) arch_vm_get_page_prot(vm_flags)
-#endif
 
+static inline bool arch_validate_prot(unsigned long prot, unsigned long addr)
+{
+	if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM | PROT_SAO))
+		return false;
+	if (prot & PROT_SAO) {
+		if (!cpu_has_feature(CPU_FTR_SAO))
+			return false;
+		if (firmware_has_feature(FW_FEATURE_LPAR) &&
+		    !IS_ENABLED(CONFIG_PPC_PROT_SAO_LPAR))
+			return false;
+	}
+	return true;
+}
+#define arch_validate_prot arch_validate_prot
+
 #endif /* CONFIG_PPC64 */
 #endif /* _ASM_POWERPC_MMAN_H */

@@ -82,6 +82,8 @@
  */
 #include <asm/nohash/pte-book3e.h>
 
+#define _PAGE_SAO	0
+
 #define PTE_RPN_MASK	(~((1UL << PTE_RPN_SHIFT) - 1))
 
 /*

@@ -11,7 +11,7 @@
 #include <asm-generic/mman-common.h>
 
 
-#define PROT_SAO	0x10		/* Unsupported since v5.9 */
+#define PROT_SAO	0x10		/* Strong Access Ordering */
 
 #define MAP_RENAME      MAP_ANONYMOUS   /* In SunOS terminology */
 #define MAP_NORESERVE   0x40            /* don't reserve swap pages */

@@ -120,7 +120,8 @@ u64 dma_iommu_get_required_mask(struct device *dev)
 	if (!tbl)
 		return 0;
 
-	mask = 1ULL < (fls_long(tbl->it_offset + tbl->it_size) - 1);
+	mask = 1ULL << (fls_long(tbl->it_offset + tbl->it_size) +
+			tbl->it_page_shift - 1);
 	mask += mask - 1;
 
 	return mask;

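A worked example of the corrected arithmetic (a standalone sketch with hypothetical table values, not kernel code): for it_offset + it_size = 0x10000 IOMMU pages, fls_long() returns 17, and with it_page_shift = 16 the window ends at 1ULL << 32, so the required mask is 0x1ffffffff. The old "1ULL <" typo evaluated a comparison instead of a shift, yielding a nonsense mask (typically 1):

#include <stdio.h>

/* fls_long() equivalent for a nonzero 64-bit value */
static int fls64(unsigned long long v)
{
	return 64 - __builtin_clzll(v);
}

static unsigned long long required_mask(unsigned long long npages,
					unsigned int page_shift)
{
	unsigned long long mask = 1ULL << (fls64(npages) + page_shift - 1);

	mask += mask - 1;	/* set all lower bits */
	return mask;
}

int main(void)
{
	/* prints 0x1ffffffff for a 4GB window of 64K pages */
	printf("0x%llx\n", required_mask(0x10000, 16));
	return 0;
}
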
@@ -653,7 +653,7 @@ static struct dt_cpu_feature_match __initdata
	{"processor-control-facility-v3", feat_enable_dbell, CPU_FTR_DBELL},
	{"processor-utilization-of-resources-register", feat_enable_purr, 0},
	{"no-execute", feat_enable, 0},
-	/* strong-access-ordering is unused */
+	{"strong-access-ordering", feat_enable, CPU_FTR_SAO},
	{"cache-inhibited-large-page", feat_enable_large_ci, 0},
	{"coprocessor-icswx", feat_enable, 0},
	{"hypervisor-virtualization-interrupt", feat_enable_hvi, 0},

@@ -113,6 +113,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_TM)
	ld	r11,exception_marker@toc(r2)
	std	r11,-16(r10)		/* "regshere" marker */
 
+BEGIN_FTR_SECTION
+	HMT_MEDIUM
+END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
+
	/*
	 * RECONCILE_IRQ_STATE without calling trace_hardirqs_off(), which
	 * would clobber syscall parameters. Also we always enter with IRQs

@@ -548,7 +548,7 @@ void notrace restore_math(struct pt_regs *regs)
	 * are live for the user thread).
	 */
	if ((!(msr & MSR_FP)) && should_restore_fp())
-		new_msr |= MSR_FP | current->thread.fpexc_mode;
+		new_msr |= MSR_FP;
 
	if ((!(msr & MSR_VEC)) && should_restore_altivec())
		new_msr |= MSR_VEC;
@@ -559,11 +559,17 @@ void notrace restore_math(struct pt_regs *regs)
	}
 
	if (new_msr) {
+		unsigned long fpexc_mode = 0;
+
		msr_check_and_set(new_msr);
 
-		if (new_msr & MSR_FP)
+		if (new_msr & MSR_FP) {
			do_restore_fp();
 
+			// This also covers VSX, because VSX implies FP
+			fpexc_mode = current->thread.fpexc_mode;
+		}
+
		if (new_msr & MSR_VEC)
			do_restore_altivec();
 
@@ -572,7 +578,7 @@ void notrace restore_math(struct pt_regs *regs)
 
		msr_check_and_clear(new_msr);
 
-		regs->msr |= new_msr;
+		regs->msr |= new_msr | fpexc_mode;
	}
 }
 #endif

@@ -50,7 +50,7 @@ $(obj-vdso32): %.o: %.S FORCE
 
 # actual build commands
 quiet_cmd_vdso32ld = VDSO32L $@
-      cmd_vdso32ld = $(VDSOCC) $(c_flags) $(CC32FLAGS) -o $@ $(call cc-ldoption, -Wl$(comma)--orphan-handling=warn) -Wl,-T$(filter %.lds,$^) $(filter %.o,$^)
+      cmd_vdso32ld = $(VDSOCC) $(c_flags) $(CC32FLAGS) -o $@ -Wl,-T$(filter %.lds,$^) $(filter %.o,$^)
 quiet_cmd_vdso32as = VDSO32A $@
       cmd_vdso32as = $(VDSOCC) $(a_flags) $(CC32FLAGS) -c -o $@ $<
 
@@ -111,7 +111,6 @@ SECTIONS
		*(.note.GNU-stack)
		*(.data .data.* .gnu.linkonce.d.* .sdata*)
		*(.bss .sbss .dynbss .dynsbss)
-		*(.glink .iplt .plt .rela*)
	}
 }
 
@@ -34,7 +34,7 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE
 
 # actual build commands
 quiet_cmd_vdso64ld = VDSO64L $@
-      cmd_vdso64ld = $(CC) $(c_flags) -o $@ -Wl,-T$(filter %.lds,$^) $(filter %.o,$^) $(call cc-ldoption, -Wl$(comma)--orphan-handling=warn)
+      cmd_vdso64ld = $(CC) $(c_flags) -o $@ -Wl,-T$(filter %.lds,$^) $(filter %.o,$^)
 
 # install commands for the unstripped file
 quiet_cmd_vdso_install = INSTALL $@

@@ -30,7 +30,7 @@ SECTIONS
	. = ALIGN(16);
	.text : {
		*(.text .stub .text.* .gnu.linkonce.t.* __ftr_alt_*)
-		*(.sfpr)
+		*(.sfpr .glink)
	} :text
	PROVIDE(__etext = .);
	PROVIDE(_etext = .);
@@ -111,7 +111,6 @@ SECTIONS
		*(.branch_lt)
		*(.data .data.* .gnu.linkonce.d.* .sdata*)
		*(.bss .sbss .dynbss .dynsbss)
-		*(.glink .iplt .plt .rela*)
	}
 }
 
@@ -232,6 +232,8 @@ unsigned long htab_convert_pte_flags(unsigned long pteflags)
		rflags |= HPTE_R_I;
	else if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_NON_IDEMPOTENT)
		rflags |= (HPTE_R_I | HPTE_R_G);
+	else if ((pteflags & _PAGE_CACHE_CTL) == _PAGE_SAO)
+		rflags |= (HPTE_R_W | HPTE_R_I | HPTE_R_M);
	else
		/*
		 * Add memory coherence if cache inhibited is not set

@@ -734,21 +734,6 @@ void radix__mmu_cleanup_all(void)
	}
 }
 
-void radix__setup_initial_memory_limit(phys_addr_t first_memblock_base,
-				phys_addr_t first_memblock_size)
-{
-	/*
-	 * We don't currently support the first MEMBLOCK not mapping 0
-	 * physical on those processors
-	 */
-	BUG_ON(first_memblock_base != 0);
-
-	/*
-	 * Radix mode is not limited by RMA / VRMA addressing.
-	 */
-	ppc64_rma_size = ULONG_MAX;
-}
-
 #ifdef CONFIG_MEMORY_HOTPLUG
 static void free_pte_table(pte_t *pte_start, pmd_t *pmd)
 {

@@ -452,9 +452,16 @@ void __init mmu_early_init_devtree(void)
	if (!(mfmsr() & MSR_HV))
		early_check_vec5();
 
-	if (early_radix_enabled())
+	if (early_radix_enabled()) {
		radix__early_init_devtree();
-	else
+		/*
+		 * We have finalized the translation we are going to use by now.
+		 * Radix mode is not limited by RMA / VRMA addressing.
+		 * Hence don't limit memblock allocations.
+		 */
+		ppc64_rma_size = ULONG_MAX;
+		memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
+	} else
		hash__early_init_devtree();
 }
 #endif /* CONFIG_PPC_BOOK3S_64 */

@@ -1557,9 +1557,16 @@ nocheck:
	ret = 0;
  out:
	if (has_branch_stack(event)) {
-		power_pmu_bhrb_enable(event);
-		cpuhw->bhrb_filter = ppmu->bhrb_filter_map(
-					event->attr.branch_sample_type);
+		u64 bhrb_filter = -1;
+
+		if (ppmu->bhrb_filter_map)
+			bhrb_filter = ppmu->bhrb_filter_map(
+				event->attr.branch_sample_type);
+
+		if (bhrb_filter != -1) {
+			cpuhw->bhrb_filter = bhrb_filter;
+			power_pmu_bhrb_enable(event);
+		}
	}
 
	perf_pmu_enable(event->pmu);
@@ -1881,7 +1888,6 @@ static int power_pmu_event_init(struct perf_event *event)
	int n;
	int err;
	struct cpu_hw_events *cpuhw;
-	u64 bhrb_filter;
 
	if (!ppmu)
		return -ENOENT;
@@ -1987,7 +1993,10 @@ static int power_pmu_event_init(struct perf_event *event)
	err = power_check_constraints(cpuhw, events, cflags, n + 1);
 
	if (has_branch_stack(event)) {
-		bhrb_filter = ppmu->bhrb_filter_map(
+		u64 bhrb_filter = -1;
+
+		if (ppmu->bhrb_filter_map)
+			bhrb_filter = ppmu->bhrb_filter_map(
				event->attr.branch_sample_type);
 
		if (bhrb_filter == -1) {

@@ -1289,7 +1289,7 @@ static int trace_imc_prepare_sample(struct trace_imc_data *mem,
	header->misc = 0;
 
	if (cpu_has_feature(CPU_FTR_ARCH_31)) {
-		switch (IMC_TRACE_RECORD_VAL_HVPR(mem->val)) {
+		switch (IMC_TRACE_RECORD_VAL_HVPR(be64_to_cpu(READ_ONCE(mem->val)))) {
		case 0:/* when MSR HV and PR not set in the trace-record */
			header->misc |= PERF_RECORD_MISC_GUEST_KERNEL;
			break;
@@ -1297,7 +1297,7 @@ static int trace_imc_prepare_sample(struct trace_imc_data *mem,
			header->misc |= PERF_RECORD_MISC_GUEST_USER;
			break;
		case 2: /* MSR HV is 1 and PR is 0 */
-			header->misc |= PERF_RECORD_MISC_HYPERVISOR;
+			header->misc |= PERF_RECORD_MISC_KERNEL;
			break;
		case 3: /* MSR HV is 1 and PR is 1 */
			header->misc |= PERF_RECORD_MISC_USER;

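Restated for clarity (a hypothetical helper, not in the patch): the two MSR bits (HV, PR) carried in each trace-imc record select the perf misc classification, with case 2 now reported as kernel rather than hypervisor:

#include <linux/perf_event.h>

static unsigned short hvpr_to_misc(unsigned int hvpr)
{
	switch (hvpr) {
	case 0: return PERF_RECORD_MISC_GUEST_KERNEL;	/* HV=0, PR=0 */
	case 1: return PERF_RECORD_MISC_GUEST_USER;	/* HV=0, PR=1 */
	case 2: return PERF_RECORD_MISC_KERNEL;		/* HV=1, PR=0 */
	default: return PERF_RECORD_MISC_USER;		/* HV=1, PR=1 */
	}
}
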
@@ -36,7 +36,7 @@ config PPC_BOOK3S_6xx
	select PPC_HAVE_PMU_SUPPORT
	select PPC_HAVE_KUEP
	select PPC_HAVE_KUAP
-	select HAVE_ARCH_VMAP_STACK
+	select HAVE_ARCH_VMAP_STACK if !ADB_PMU
 
 config PPC_BOOK3S_601
	bool "PowerPC 601"

@@ -1223,7 +1223,7 @@ static void __init pnv_probe_idle_states(void)
		return;
	}
 
-	if (pvr_version_is(PVR_POWER9))
+	if (cpu_has_feature(CPU_FTR_ARCH_300))
		pnv_power9_idle_init();
 
	for (i = 0; i < nr_pnv_idle_states; i++)

@@ -822,7 +822,7 @@ free_stats:
	kfree(stats);
	return rc ? rc : seq_buf_used(&s);
 }
-DEVICE_ATTR_RO(perf_stats);
+DEVICE_ATTR_ADMIN_RO(perf_stats);
 
 static ssize_t flags_show(struct device *dev,
			  struct device_attribute *attr, char *buf)

@@ -361,7 +361,10 @@ static void __init fixup_cede0_latency(void)
	for (i = 0; i < nr_xcede_records; i++) {
		struct xcede_latency_record *record = &payload->records[i];
		u64 latency_tb = be64_to_cpu(record->latency_ticks);
-		u64 latency_us = tb_to_ns(latency_tb) / NSEC_PER_USEC;
+		u64 latency_us = DIV_ROUND_UP_ULL(tb_to_ns(latency_tb), NSEC_PER_USEC);
+
+		if (latency_us == 0)
+			pr_warn("cpuidle: xcede record %d has an unrealistic latency of 0us.\n", i);
 
		if (latency_us < min_latency_us)
			min_latency_us = latency_us;
|
||||
* Perform the fix-up.
|
||||
*/
|
||||
if (min_latency_us < dedicated_states[1].exit_latency) {
|
||||
u64 cede0_latency = min_latency_us - 1;
|
||||
/*
|
||||
* We set a minimum of 1us wakeup latency for cede0 to
|
||||
* distinguish it from snooze
|
||||
*/
|
||||
u64 cede0_latency = 1;
|
||||
|
||||
if (cede0_latency <= 0)
|
||||
cede0_latency = min_latency_us;
|
||||
if (min_latency_us > cede0_latency)
|
||||
cede0_latency = min_latency_us - 1;
|
||||
|
||||
dedicated_states[1].exit_latency = cede0_latency;
|
||||
dedicated_states[1].target_residency = 10 * (cede0_latency);
|
||||
|
@@ -49,6 +49,8 @@
 #include <linux/cuda.h>
+#ifdef CONFIG_PPC_PMAC
 #include <asm/prom.h>
+#endif
 #ifdef CONFIG_BOOTX_TEXT
 #include <asm/btext.h>
 #endif
 
@@ -321,6 +321,8 @@ extern unsigned int kobjsize(const void *objp);
 
 #if defined(CONFIG_X86)
 # define VM_PAT VM_ARCH_1	/* PAT reserves whole VMA at once (x86) */
+#elif defined(CONFIG_PPC)
+# define VM_SAO VM_ARCH_1	/* Strong Access Ordering (powerpc) */
 #elif defined(CONFIG_PARISC)
 # define VM_GROWSUP VM_ARCH_1
 #elif defined(CONFIG_IA64)

@@ -114,6 +114,8 @@ IF_HAVE_PG_IDLE(PG_idle, "idle")
 
 #if defined(CONFIG_X86)
 #define __VM_ARCH_SPECIFIC_1 {VM_PAT, "pat"}
+#elif defined(CONFIG_PPC)
+#define __VM_ARCH_SPECIFIC_1 {VM_SAO, "sao"}
 #elif defined(CONFIG_PARISC) || defined(CONFIG_IA64)
 #define __VM_ARCH_SPECIFIC_1 {VM_GROWSUP, "growsup"}
 #elif !defined(CONFIG_MMU)

mm/ksm.c

@@ -2453,6 +2453,10 @@ int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
		if (vma_is_dax(vma))
			return 0;
 
+#ifdef VM_SAO
+		if (*vm_flags & VM_SAO)
+			return 0;
+#endif
 #ifdef VM_SPARC_ADI
		if (*vm_flags & VM_SPARC_ADI)
			return 0;

@@ -2,6 +2,7 @@
 hugetlb_vs_thp_test
 subpage_prot
 tempfile
+prot_sao
 segv_errors
 wild_bctr
 large_vm_fork_separation

@@ -2,7 +2,7 @@
 noarg:
	$(MAKE) -C ../
 
-TEST_GEN_PROGS := hugetlb_vs_thp_test subpage_prot segv_errors wild_bctr \
+TEST_GEN_PROGS := hugetlb_vs_thp_test subpage_prot prot_sao segv_errors wild_bctr \
		  large_vm_fork_separation bad_accesses pkey_exec_prot \
		  pkey_siginfo stack_expansion_signal stack_expansion_ldst
 
@@ -14,6 +14,8 @@ include ../../lib.mk
 
 $(TEST_GEN_PROGS): ../harness.c ../utils.c
 
+$(OUTPUT)/prot_sao: ../utils.c
+
 $(OUTPUT)/wild_bctr: CFLAGS += -m64
 $(OUTPUT)/large_vm_fork_separation: CFLAGS += -m64
 $(OUTPUT)/bad_accesses: CFLAGS += -m64

tools/testing/selftests/powerpc/mm/prot_sao.c (new file, 48 lines)

@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2016, Michael Ellerman, IBM Corp.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <unistd.h>
+
+#include <asm/cputable.h>
+
+#include "utils.h"
+
+#define SIZE (64 * 1024)
+
+int test_prot_sao(void)
+{
+	char *p;
+
+	/*
+	 * SAO was introduced in 2.06 and removed in 3.1. It's disabled in
+	 * guests/LPARs by default, so also skip if we are running in a guest.
+	 */
+	SKIP_IF(!have_hwcap(PPC_FEATURE_ARCH_2_06) ||
+		have_hwcap2(PPC_FEATURE2_ARCH_3_1) ||
+		access("/proc/device-tree/rtas/ibm,hypertas-functions", F_OK) == 0);
+
+	/*
+	 * Ensure we can ask for PROT_SAO.
+	 * We can't really verify that it does the right thing, but at least we
+	 * confirm the kernel will accept it.
+	 */
+	p = mmap(NULL, SIZE, PROT_READ | PROT_WRITE | PROT_SAO,
+		 MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+	FAIL_IF(p == MAP_FAILED);
+
+	/* Write to the mapping, to at least cause a fault */
+	memset(p, 0xaa, SIZE);
+
+	return 0;
+}
+
+int main(void)
+{
+	return test_harness(test_prot_sao, "prot-sao");
+}