From 97e981324d492340e33baa9680780046377b561f Mon Sep 17 00:00:00 2001 From: Vineet Gupta Date: Thu, 6 Dec 2018 00:17:50 -0800 Subject: [PATCH 01/14] ARCv2: boot log: BPU return stack depth Signed-off-by: Vineet Gupta --- arch/arc/include/asm/arcregs.h | 2 +- arch/arc/kernel/setup.c | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h index 49bfbd879caa..9f10d32ee1bd 100644 --- a/arch/arc/include/asm/arcregs.h +++ b/arch/arc/include/asm/arcregs.h @@ -283,7 +283,7 @@ struct cpuinfo_arc_cache { }; struct cpuinfo_arc_bpu { - unsigned int ver, full, num_cache, num_pred; + unsigned int ver, full, num_cache, num_pred, ret_stk; }; struct cpuinfo_arc_ccm { diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c index 2e018b8c2e19..33e232145d83 100644 --- a/arch/arc/kernel/setup.c +++ b/arch/arc/kernel/setup.c @@ -195,6 +195,7 @@ static void read_arc_build_cfg_regs(void) cpu->bpu.full = bpu.ft; cpu->bpu.num_cache = 256 << bpu.bce; cpu->bpu.num_pred = 2048 << bpu.pte; + cpu->bpu.ret_stk = 4 << bpu.rse; if (cpu->core.family >= 0x54) { unsigned int exec_ctrl; @@ -299,10 +300,10 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len) if (cpu->bpu.ver) n += scnprintf(buf + n, len - n, - "BPU\t\t: %s%s match, cache:%d, Predict Table:%d", + "BPU\t\t: %s%s match, cache:%d, Predict Table:%d Return stk: %d", IS_AVAIL1(cpu->bpu.full, "full"), IS_AVAIL1(!cpu->bpu.full, "partial"), - cpu->bpu.num_cache, cpu->bpu.num_pred); + cpu->bpu.num_cache, cpu->bpu.num_pred, cpu->bpu.ret_stk); if (is_isa_arcv2()) { struct bcr_lpb lpb; From 7dd380c338f1ec20ed46607ccd03541a7683cd67 Mon Sep 17 00:00:00 2001 From: Vineet Gupta Date: Wed, 9 Jan 2019 09:36:00 -0800 Subject: [PATCH 02/14] ARC: boot log: print Action point details This now prints the number of action points {2,4,8} and {min,full} targets supported. Signed-off-by: Vineet Gupta --- arch/arc/include/asm/arcregs.h | 10 +++++++++- arch/arc/kernel/setup.c | 22 +++++++++++++++------- 2 files changed, 24 insertions(+), 8 deletions(-) diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h index 9f10d32ee1bd..f1b86cef0905 100644 --- a/arch/arc/include/asm/arcregs.h +++ b/arch/arc/include/asm/arcregs.h @@ -216,6 +216,14 @@ struct bcr_fp_arcv2 { #endif }; +struct bcr_actionpoint { +#ifdef CONFIG_CPU_BIG_ENDIAN + unsigned int pad:21, min:1, num:2, ver:8; +#else + unsigned int ver:8, num:2, min:1, pad:21; +#endif +}; + #include struct bcr_bpu_arcompact { @@ -302,7 +310,7 @@ struct cpuinfo_arc { struct { unsigned int swap:1, norm:1, minmax:1, barrel:1, crc:1, swape:1, pad1:2, fpu_sp:1, fpu_dp:1, dual:1, dual_enb:1, pad2:4, - debug:1, ap:1, smart:1, rtt:1, pad3:4, + ap_num:4, ap_full:1, smart:1, rtt:1, pad3:1, timer0:1, timer1:1, rtc:1, gfrc:1, pad4:4; } extn; struct bcr_mpy extn_mpy; diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c index 33e232145d83..feb90093e6b1 100644 --- a/arch/arc/kernel/setup.c +++ b/arch/arc/kernel/setup.c @@ -123,6 +123,7 @@ static void read_arc_build_cfg_regs(void) struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()]; const struct id_to_str *tbl; struct bcr_isa_arcv2 isa; + struct bcr_actionpoint ap; FIX_PTR(cpu); @@ -208,8 +209,11 @@ static void read_arc_build_cfg_regs(void) } } - READ_BCR(ARC_REG_AP_BCR, bcr); - cpu->extn.ap = bcr.ver ? 
1 : 0;
+	READ_BCR(ARC_REG_AP_BCR, ap);
+	if (ap.ver) {
+		cpu->extn.ap_num = 2 << ap.num;
+		cpu->extn.ap_full = !!ap.min;
+	}

 	READ_BCR(ARC_REG_SMART_BCR, bcr);
 	cpu->extn.smart = bcr.ver ? 1 : 0;
@@ -217,8 +221,6 @@ static void read_arc_build_cfg_regs(void)
 	READ_BCR(ARC_REG_RTT_BCR, bcr);
 	cpu->extn.rtt = bcr.ver ? 1 : 0;

-	cpu->extn.debug = cpu->extn.ap | cpu->extn.smart | cpu->extn.rtt;
-
 	READ_BCR(ARC_REG_ISA_CFG_BCR, isa);

 	/* some hacks for lack of feature BCR info in old ARC700 cores */
@@ -337,11 +339,17 @@ static char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len)
 			       IS_AVAIL1(cpu->extn.fpu_sp, "SP "),
 			       IS_AVAIL1(cpu->extn.fpu_dp, "DP "));

-	if (cpu->extn.debug)
-		n += scnprintf(buf + n, len - n, "DEBUG\t\t: %s%s%s\n",
-			       IS_AVAIL1(cpu->extn.ap, "ActionPoint "),
+	if (cpu->extn.ap_num | cpu->extn.smart | cpu->extn.rtt) {
+		n += scnprintf(buf + n, len - n, "DEBUG\t\t: %s%s",
 			       IS_AVAIL1(cpu->extn.smart, "smaRT "),
 			       IS_AVAIL1(cpu->extn.rtt, "RTT "));
+		if (cpu->extn.ap_num) {
+			n += scnprintf(buf + n, len - n, "ActionPoint %d/%s",
+				       cpu->extn.ap_num,
+				       cpu->extn.ap_full ? "full":"min");
+		}
+		n += scnprintf(buf + n, len - n, "\n");
+	}

 	if (cpu->dccm.sz || cpu->iccm.sz)
 		n += scnprintf(buf + n, len - n, "Extn [CCM]\t: DCCM @ %x, %d KB / ICCM: @ %x, %d KB\n",

From 4e868f8419cb4cb558c5d428e7ab5629cef864c7 Mon Sep 17 00:00:00 2001
From: Eugeniy Paltsev
Date: Thu, 13 Dec 2018 18:42:57 +0300
Subject: [PATCH 03/14] ARC: fix __ffs return value to avoid build warnings

|  CC      mm/nobootmem.o
|In file included from ./include/asm-generic/bug.h:18:0,
|                 from ./arch/arc/include/asm/bug.h:32,
|                 from ./include/linux/bug.h:5,
|                 from ./include/linux/mmdebug.h:5,
|                 from ./include/linux/gfp.h:5,
|                 from ./include/linux/slab.h:15,
|                 from mm/nobootmem.c:14:
|mm/nobootmem.c: In function '__free_pages_memory':
|./include/linux/kernel.h:845:29: warning: comparison of distinct pointer types lacks a cast
|   (!!(sizeof((typeof(x) *)1 == (typeof(y) *)1)))
|                             ^
|./include/linux/kernel.h:859:4: note: in expansion of macro '__typecheck'
|   (__typecheck(x, y) && __no_side_effects(x, y))
|    ^~~~~~~~~~~
|./include/linux/kernel.h:869:24: note: in expansion of macro '__safe_cmp'
|  __builtin_choose_expr(__safe_cmp(x, y), \
|                        ^~~~~~~~~~
|./include/linux/kernel.h:878:19: note: in expansion of macro '__careful_cmp'
| #define min(x, y) __careful_cmp(x, y, <)
|                   ^~~~~~~~~~~~~
|mm/nobootmem.c:104:11: note: in expansion of macro 'min'
|   order = min(MAX_ORDER - 1UL, __ffs(start));

Change the __ffs return value from 'int' to 'unsigned long', as is done
in other implementations (asm-generic, x86, etc...), to avoid
build-time warnings in places where the type is strictly checked.

As __ffs may only return values in the [0-31] interval, changing the
return type to unsigned is valid.
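For reference, a minimal standalone sketch (not kernel code; names and
values are illustrative) of why the kernel's min() fired: it compares
the operand types via pointers, so 'int' vs 'unsigned long' trips
-Wcompare-distinct-pointer-types:

------------------->8------------------------------
#include <stdio.h>

/* simplified copy of the type check the kernel's min() performs */
#define __typecheck(x, y) \
	(!!(sizeof((__typeof__(x) *)1 == (__typeof__(y) *)1)))

static int old_ffs(unsigned long w)		/* old: returns int */
{
	return __builtin_ctzl(w);
}

static unsigned long new_ffs(unsigned long w)	/* new: unsigned long */
{
	return __builtin_ctzl(w);
}

int main(void)
{
	unsigned long start = 0x1000;

	/* warns: 'unsigned long *' vs 'int *' are distinct pointer types */
	printf("%d\n", __typecheck(10UL, old_ffs(start)));

	/* clean: both operands are unsigned long */
	printf("%d\n", __typecheck(10UL, new_ffs(start)));
	return 0;
}
------------------->8------------------------------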
Signed-off-by: Eugeniy Paltsev
Signed-off-by: Vineet Gupta
---
 arch/arc/include/asm/bitops.h | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/arch/arc/include/asm/bitops.h b/arch/arc/include/asm/bitops.h
index ee9246184033..202b74c339f0 100644
--- a/arch/arc/include/asm/bitops.h
+++ b/arch/arc/include/asm/bitops.h
@@ -340,7 +340,7 @@ static inline __attribute__ ((const)) int __fls(unsigned long x)
 /*
  * __ffs: Similar to ffs, but zero based (0-31)
  */
-static inline __attribute__ ((const)) int __ffs(unsigned long word)
+static inline __attribute__ ((const)) unsigned long __ffs(unsigned long word)
 {
 	if (!word)
 		return word;
@@ -400,9 +400,9 @@ static inline __attribute__ ((const)) int ffs(unsigned long x)
 /*
  * __ffs: Similar to ffs, but zero based (0-31)
  */
-static inline __attribute__ ((const)) int __ffs(unsigned long x)
+static inline __attribute__ ((const)) unsigned long __ffs(unsigned long x)
 {
-	int n;
+	unsigned long n;

 	asm volatile(
 	"	ffs.f	%0, %1		\n"  /* 0:31; 31(Z) if src 0 */

From 76e6086760563164d91f61f1e3e58c6a2a031fa5 Mon Sep 17 00:00:00 2001
From: Masahiro Yamada
Date: Sun, 16 Dec 2018 23:16:21 +0900
Subject: [PATCH 04/14] arc: remove redundant kernel-space generic-y

This commit removes redundant generic-y defines in
arch/arc/include/asm/Kbuild.

It is redundant to define generic-y when an arch-specific
implementation exists in arch/$(ARCH)/include/asm/*.h

Remove the following generic-y:

    dma-mapping.h
    fb.h
    kmap_types.h
    pci.h

Signed-off-by: Masahiro Yamada
Signed-off-by: Vineet Gupta
---
 arch/arc/include/asm/Kbuild | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild
index feed50ce89fa..caa270261521 100644
--- a/arch/arc/include/asm/Kbuild
+++ b/arch/arc/include/asm/Kbuild
@@ -3,23 +3,19 @@ generic-y += bugs.h
 generic-y += compat.h
 generic-y += device.h
 generic-y += div64.h
-generic-y += dma-mapping.h
 generic-y += emergency-restart.h
 generic-y += extable.h
-generic-y += fb.h
 generic-y += ftrace.h
 generic-y += hardirq.h
 generic-y += hw_irq.h
 generic-y += irq_regs.h
 generic-y += irq_work.h
-generic-y += kmap_types.h
 generic-y += local.h
 generic-y += local64.h
 generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
 generic-y += msi.h
 generic-y += parport.h
-generic-y += pci.h
 generic-y += percpu.h
 generic-y += preempt.h
 generic-y += topology.h

From a3010a0465383300f909f62b8a83f83ffa7b2517 Mon Sep 17 00:00:00 2001
From: Eugeniy Paltsev
Date: Wed, 19 Dec 2018 19:16:16 +0300
Subject: [PATCH 05/14] ARC: adjust memblock_reserve of kernel memory

In setup_arch_memory we reserve the memory area wherein the kernel is
located. The current implementation may reserve more memory than is
actually required when CONFIG_LINUX_LINK_BASE is not equal to
CONFIG_LINUX_RAM_BASE. This happens because we calculate the start of
the reserved region relative to CONFIG_LINUX_RAM_BASE but the end of
the region relative to CONFIG_LINUX_LINK_BASE.

For example in case of the HSDK board we wasted 256MiB of physical
memory:
------------------->8------------------------------
Memory: 770416K/1048576K available (5496K kernel code,
    240K rwdata, 1064K rodata, 2200K init, 275K bss,
    278160K reserved, 0K cma-reserved)
------------------->8------------------------------

Fix that.
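To make the arithmetic concrete, a sketch with assumed HSDK-like values
(the kernel end address below is made up for illustration):

------------------->8------------------------------
CONFIG_LINUX_RAM_BASE  (== low_mem_start) = 0x8000_0000
CONFIG_LINUX_LINK_BASE                    = 0x9000_0000
__pa(_end)             (example value)    = 0x9080_0000

before: memblock_reserve(0x8000_0000, 0x9080_0000 - 0x8000_0000)
        -> 264 MiB reserved for an 8 MiB kernel image
after:  memblock_reserve(0x9000_0000, 0x9080_0000 - 0x9000_0000)
        -> 8 MiB reserved, exactly the kernel image

difference: 0x1000_0000 = 256 MiB no longer wasted
------------------->8------------------------------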
Fixes: 9ed68785f7f2b ("ARC: mm: Decouple RAM base address from kernel link addr")
Cc: stable@vger.kernel.org #4.14+
Signed-off-by: Eugeniy Paltsev
Signed-off-by: Vineet Gupta
---
 arch/arc/mm/init.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
index 43bf4c3a1290..e1ab2d7f1d64 100644
--- a/arch/arc/mm/init.c
+++ b/arch/arc/mm/init.c
@@ -119,7 +119,8 @@ void __init setup_arch_memory(void)
 	 */
 	memblock_add_node(low_mem_start, low_mem_sz, 0);

-	memblock_reserve(low_mem_start, __pa(_end) - low_mem_start);
+	memblock_reserve(CONFIG_LINUX_LINK_BASE,
+			 __pa(_end) - CONFIG_LINUX_LINK_BASE);

 #ifdef CONFIG_BLK_DEV_INITRD
 	if (phys_initrd_size) {

From 3affbf0e154ee351add6fcc254c59c3f3947fa8f Mon Sep 17 00:00:00 2001
From: Eugeniy Paltsev
Date: Mon, 17 Dec 2018 12:54:23 +0300
Subject: [PATCH 06/14] ARC: perf: map generic branches to correct hardware
 condition

So far we've mapped branches to "ijmp" which also counts conditional
branches NOT taken. This makes us different from other architectures
such as ARM which seem to be counting only taken branches.

So use the "ijmptak" hardware condition instead, which counts only jump
instructions that are taken. The 'ijmptak' event is available on both
ARCompact and ARCv2 ISA based cores.

Signed-off-by: Eugeniy Paltsev
Cc: stable@vger.kernel.org
Signed-off-by: Vineet Gupta
[vgupta: reworked changelog]
---
 arch/arc/include/asm/perf_event.h | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/arch/arc/include/asm/perf_event.h b/arch/arc/include/asm/perf_event.h
index 9185541035cc..6958545390f0 100644
--- a/arch/arc/include/asm/perf_event.h
+++ b/arch/arc/include/asm/perf_event.h
@@ -103,7 +103,8 @@ static const char * const arc_pmu_ev_hw_map[] = {

 	/* counts condition */
 	[PERF_COUNT_HW_INSTRUCTIONS] = "iall",
-	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmp", /* Excludes ZOL jumps */
+	/* All jump instructions that are taken */
+	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmptak",
 	[PERF_COUNT_ARC_BPOK] = "bpok",	  /* NP-NT, PT-T, PNT-NT */
 #ifdef CONFIG_ISA_ARCV2
 	[PERF_COUNT_HW_BRANCH_MISSES] = "bpmp",

From 14f81a91ad29a57d6c3cf123ad9489f2ca9133fb Mon Sep 17 00:00:00 2001
From: Eugeniy Paltsev
Date: Thu, 13 Dec 2018 19:56:18 +0300
Subject: [PATCH 07/14] ARC: perf: trivial code cleanup

* Use BIT(), lower_32_bits(), upper_32_bits() macros; fix code style
  violations.
* Use u32, u64, s64 instead of uint32_t, uint64_t, int64_t.
* Fix the description comment as this code doesn't belong only to
  ARC700 anymore.
* Use SPDX License Identifier.
* Remove useless ifdefs. The ifdef around the 'arc_pmu_match' structure
  declaration is useless as we refer to 'arc_pmu_match' in several
  places which aren't guarded with an ifdef. Nevertheless the 'ARC'
  option selects 'OF' unconditionally, so we can simply get rid of this
  ifdef.

Acked-by: Vineet Gupta
Signed-off-by: Eugeniy Paltsev
Signed-off-by: Vineet Gupta
---
 arch/arc/kernel/perf_event.c | 85 ++++++++++++++++++------------------
 1 file changed, 42 insertions(+), 43 deletions(-)

diff --git a/arch/arc/kernel/perf_event.c b/arch/arc/kernel/perf_event.c
index 8aec462d90fb..693f32d60c35 100644
--- a/arch/arc/kernel/perf_event.c
+++ b/arch/arc/kernel/perf_event.c
@@ -1,15 +1,10 @@
-/*
- * Linux performance counter support for ARC700 series
- *
- * Copyright (C) 2013-2015 Synopsys, Inc. (www.synopsys.com)
- *
- * This code is inspired by the perf support of various other architectures.
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - */ +// SPDX-License-Identifier: GPL-2.0+ +// +// Linux performance counter support for ARC CPUs. +// This code is inspired by the perf support of various other architectures. +// +// Copyright (C) 2013-2018 Synopsys, Inc. (www.synopsys.com) + #include #include #include @@ -19,6 +14,9 @@ #include #include +/* HW holds 8 symbols + one for null terminator */ +#define ARCPMU_EVENT_NAME_LEN 9 + struct arc_pmu { struct pmu pmu; unsigned int irq; @@ -49,6 +47,7 @@ static int callchain_trace(unsigned int addr, void *data) { struct arc_callchain_trace *ctrl = data; struct perf_callchain_entry_ctx *entry = ctrl->perf_stuff; + perf_callchain_store(entry, addr); if (ctrl->depth++ < 3) @@ -57,8 +56,8 @@ static int callchain_trace(unsigned int addr, void *data) return -1; } -void -perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) +void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, + struct pt_regs *regs) { struct arc_callchain_trace ctrl = { .depth = 0, @@ -68,8 +67,8 @@ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *re arc_unwind_core(NULL, regs, callchain_trace, &ctrl); } -void -perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs) +void perf_callchain_user(struct perf_callchain_entry_ctx *entry, + struct pt_regs *regs) { /* * User stack can't be unwound trivially with kernel dwarf unwinder @@ -82,10 +81,10 @@ static struct arc_pmu *arc_pmu; static DEFINE_PER_CPU(struct arc_pmu_cpu, arc_pmu_cpu); /* read counter #idx; note that counter# != event# on ARC! */ -static uint64_t arc_pmu_read_counter(int idx) +static u64 arc_pmu_read_counter(int idx) { - uint32_t tmp; - uint64_t result; + u32 tmp; + u64 result; /* * ARC supports making 'snapshots' of the counters, so we don't @@ -94,7 +93,7 @@ static uint64_t arc_pmu_read_counter(int idx) write_aux_reg(ARC_REG_PCT_INDEX, idx); tmp = read_aux_reg(ARC_REG_PCT_CONTROL); write_aux_reg(ARC_REG_PCT_CONTROL, tmp | ARC_REG_PCT_CONTROL_SN); - result = (uint64_t) (read_aux_reg(ARC_REG_PCT_SNAPH)) << 32; + result = (u64) (read_aux_reg(ARC_REG_PCT_SNAPH)) << 32; result |= read_aux_reg(ARC_REG_PCT_SNAPL); return result; @@ -103,9 +102,9 @@ static uint64_t arc_pmu_read_counter(int idx) static void arc_perf_event_update(struct perf_event *event, struct hw_perf_event *hwc, int idx) { - uint64_t prev_raw_count = local64_read(&hwc->prev_count); - uint64_t new_raw_count = arc_pmu_read_counter(idx); - int64_t delta = new_raw_count - prev_raw_count; + u64 prev_raw_count = local64_read(&hwc->prev_count); + u64 new_raw_count = arc_pmu_read_counter(idx); + s64 delta = new_raw_count - prev_raw_count; /* * We aren't afraid of hwc->prev_count changing beneath our feet @@ -155,7 +154,7 @@ static int arc_pmu_event_init(struct perf_event *event) int ret; if (!is_sampling_event(event)) { - hwc->sample_period = arc_pmu->max_period; + hwc->sample_period = arc_pmu->max_period; hwc->last_period = hwc->sample_period; local64_set(&hwc->period_left, hwc->sample_period); } @@ -192,6 +191,7 @@ static int arc_pmu_event_init(struct perf_event *event) pr_debug("init cache event with h/w %08x \'%s\'\n", (int)hwc->config, arc_pmu_ev_hw_map[ret]); return 0; + default: return -ENOENT; } @@ -200,7 +200,7 @@ static int arc_pmu_event_init(struct perf_event *event) /* starts all counters */ static void 
arc_pmu_enable(struct pmu *pmu) { - uint32_t tmp; + u32 tmp; tmp = read_aux_reg(ARC_REG_PCT_CONTROL); write_aux_reg(ARC_REG_PCT_CONTROL, (tmp & 0xffff0000) | 0x1); } @@ -208,7 +208,7 @@ static void arc_pmu_enable(struct pmu *pmu) /* stops all counters */ static void arc_pmu_disable(struct pmu *pmu) { - uint32_t tmp; + u32 tmp; tmp = read_aux_reg(ARC_REG_PCT_CONTROL); write_aux_reg(ARC_REG_PCT_CONTROL, (tmp & 0xffff0000) | 0x0); } @@ -228,7 +228,7 @@ static int arc_pmu_event_set_period(struct perf_event *event) local64_set(&hwc->period_left, left); hwc->last_period = period; overflow = 1; - } else if (unlikely(left <= 0)) { + } else if (unlikely(left <= 0)) { /* left underflowed by less than period. */ left += period; local64_set(&hwc->period_left, left); @@ -246,8 +246,8 @@ static int arc_pmu_event_set_period(struct perf_event *event) write_aux_reg(ARC_REG_PCT_INDEX, idx); /* Write value */ - write_aux_reg(ARC_REG_PCT_COUNTL, (u32)value); - write_aux_reg(ARC_REG_PCT_COUNTH, (value >> 32)); + write_aux_reg(ARC_REG_PCT_COUNTL, lower_32_bits(value)); + write_aux_reg(ARC_REG_PCT_COUNTH, upper_32_bits(value)); perf_event_update_userpage(event); @@ -277,7 +277,7 @@ static void arc_pmu_start(struct perf_event *event, int flags) /* Enable interrupt for this counter */ if (is_sampling_event(event)) write_aux_reg(ARC_REG_PCT_INT_CTRL, - read_aux_reg(ARC_REG_PCT_INT_CTRL) | (1 << idx)); + read_aux_reg(ARC_REG_PCT_INT_CTRL) | BIT(idx)); /* enable ARC pmu here */ write_aux_reg(ARC_REG_PCT_INDEX, idx); /* counter # */ @@ -295,9 +295,9 @@ static void arc_pmu_stop(struct perf_event *event, int flags) * Reset interrupt flag by writing of 1. This is required * to make sure pending interrupt was not left. */ - write_aux_reg(ARC_REG_PCT_INT_ACT, 1 << idx); + write_aux_reg(ARC_REG_PCT_INT_ACT, BIT(idx)); write_aux_reg(ARC_REG_PCT_INT_CTRL, - read_aux_reg(ARC_REG_PCT_INT_CTRL) & ~(1 << idx)); + read_aux_reg(ARC_REG_PCT_INT_CTRL) & ~BIT(idx)); } if (!(event->hw.state & PERF_HES_STOPPED)) { @@ -349,9 +349,10 @@ static int arc_pmu_add(struct perf_event *event, int flags) if (is_sampling_event(event)) { /* Mimic full counter overflow as other arches do */ - write_aux_reg(ARC_REG_PCT_INT_CNTL, (u32)arc_pmu->max_period); + write_aux_reg(ARC_REG_PCT_INT_CNTL, + lower_32_bits(arc_pmu->max_period)); write_aux_reg(ARC_REG_PCT_INT_CNTH, - (arc_pmu->max_period >> 32)); + upper_32_bits(arc_pmu->max_period)); } write_aux_reg(ARC_REG_PCT_CONFIG, 0); @@ -392,7 +393,7 @@ static irqreturn_t arc_pmu_intr(int irq, void *dev) idx = __ffs(active_ints); /* Reset interrupt flag by writing of 1 */ - write_aux_reg(ARC_REG_PCT_INT_ACT, 1 << idx); + write_aux_reg(ARC_REG_PCT_INT_ACT, BIT(idx)); /* * On reset of "interrupt active" bit corresponding @@ -400,7 +401,7 @@ static irqreturn_t arc_pmu_intr(int irq, void *dev) * Now we need to re-enable interrupt for the counter. 
 */
 		write_aux_reg(ARC_REG_PCT_INT_CTRL,
-			read_aux_reg(ARC_REG_PCT_INT_CTRL) | (1 << idx));
+			read_aux_reg(ARC_REG_PCT_INT_CTRL) | BIT(idx));

 		event = pmu_cpu->act_counter[idx];
 		hwc = &event->hw;
@@ -414,7 +415,7 @@ static irqreturn_t arc_pmu_intr(int irq, void *dev)
 			arc_pmu_stop(event, 0);
 		}

-		active_ints &= ~(1U << idx);
+		active_ints &= ~BIT(idx);
 	} while (active_ints);

 done:
@@ -450,10 +451,10 @@ static int arc_pmu_device_probe(struct platform_device *pdev)

 	union cc_name {
 		struct {
-			uint32_t word0, word1;
+			u32 word0, word1;
 			char sentinel;
 		} indiv;
-		char str[9];
+		char str[ARCPMU_EVENT_NAME_LEN];
 	} cc_name;

@@ -481,9 +482,9 @@ static int arc_pmu_device_probe(struct platform_device *pdev)

 	pr_info("ARC perf\t: %d counters (%d bits), %d conditions%s\n",
 		arc_pmu->n_counters, counter_size, cc_bcr.c,
-		has_interrupts ? ", [overflow IRQ support]":"");
+		has_interrupts ? ", [overflow IRQ support]" : "");

-	cc_name.str[8] = 0;
+	cc_name.str[ARCPMU_EVENT_NAME_LEN - 1] = 0;
 	for (i = 0; i < PERF_COUNT_ARC_HW_MAX; i++)
 		arc_pmu->ev_hw_idx[i] = -1;

@@ -538,14 +539,12 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
 	return perf_pmu_register(&arc_pmu->pmu, pdev->name, PERF_TYPE_RAW);
 }

-#ifdef CONFIG_OF
 static const struct of_device_id arc_pmu_match[] = {
 	{ .compatible = "snps,arc700-pct" },
 	{ .compatible = "snps,archs-pct" },
 	{},
 };
 MODULE_DEVICE_TABLE(of, arc_pmu_match);
-#endif

 static struct platform_driver arc_pmu_driver = {
 	.driver = {

From 0e956150fe09fa4430c42a9bbe48a72967fa0012 Mon Sep 17 00:00:00 2001
From: Eugeniy Paltsev
Date: Thu, 13 Dec 2018 19:56:19 +0300
Subject: [PATCH 08/14] ARC: perf: introduce Kernel PMU events support

Export all available ARC architected hardware events as kernel PMU
events to make non-generic events accessible.

ARC PMU HW allows us to read the list of all available event names, so
we generate the kernel PMU event list dynamically in
arc_pmu_device_probe(), using the human-readable event names we get
from HW instead of a pre-defined event list.
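Once registered, the raw events show up in perf list (output below) and
can be requested by name; a hypothetical example session (which events
exist depends on the PCT hardware build):

-------------------------->8--------------------------
$ perf stat -e arc_pmu/bdslot/,arc_pmu/bflush/ -- sleep 1
-------------------------->8--------------------------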
-------------------------->8-------------------------- $ perf list [snip] arc_pmu/bdata64/ [Kernel PMU event] arc_pmu/bdcstall/ [Kernel PMU event] arc_pmu/bdslot/ [Kernel PMU event] arc_pmu/bfbmp/ [Kernel PMU event] arc_pmu/bfirqex/ [Kernel PMU event] arc_pmu/bflgstal/ [Kernel PMU event] arc_pmu/bflush/ [Kernel PMU event] -------------------------->8-------------------------- Signed-off-by: Eugeniy Paltsev Signed-off-by: Vineet Gupta --- arch/arc/kernel/perf_event.c | 106 ++++++++++++++++++++++++++++++++++- 1 file changed, 105 insertions(+), 1 deletion(-) diff --git a/arch/arc/kernel/perf_event.c b/arch/arc/kernel/perf_event.c index 693f32d60c35..d60aaaead421 100644 --- a/arch/arc/kernel/perf_event.c +++ b/arch/arc/kernel/perf_event.c @@ -17,12 +17,28 @@ /* HW holds 8 symbols + one for null terminator */ #define ARCPMU_EVENT_NAME_LEN 9 +enum arc_pmu_attr_groups { + ARCPMU_ATTR_GR_EVENTS, + ARCPMU_ATTR_GR_FORMATS, + ARCPMU_NR_ATTR_GR +}; + +struct arc_pmu_raw_event_entry { + char name[ARCPMU_EVENT_NAME_LEN]; +}; + struct arc_pmu { struct pmu pmu; unsigned int irq; int n_counters; + int n_events; u64 max_period; int ev_hw_idx[PERF_COUNT_ARC_HW_MAX]; + + struct arc_pmu_raw_event_entry *raw_entry; + struct attribute **attrs; + struct perf_pmu_events_attr *attr; + const struct attribute_group *attr_groups[ARCPMU_NR_ATTR_GR + 1]; }; struct arc_pmu_cpu { @@ -192,6 +208,17 @@ static int arc_pmu_event_init(struct perf_event *event) (int)hwc->config, arc_pmu_ev_hw_map[ret]); return 0; + case PERF_TYPE_RAW: + if (event->attr.config >= arc_pmu->n_events) + return -ENOENT; + + hwc->config |= event->attr.config; + pr_debug("init raw event with idx %lld \'%s\'\n", + event->attr.config, + arc_pmu->raw_entry[event->attr.config].name); + + return 0; + default: return -ENOENT; } @@ -442,6 +469,67 @@ static void arc_cpu_pmu_irq_init(void *data) write_aux_reg(ARC_REG_PCT_INT_ACT, 0xffffffff); } +/* Event field occupies the bottom 15 bits of our config field */ +PMU_FORMAT_ATTR(event, "config:0-14"); +static struct attribute *arc_pmu_format_attrs[] = { + &format_attr_event.attr, + NULL, +}; + +static struct attribute_group arc_pmu_format_attr_gr = { + .name = "format", + .attrs = arc_pmu_format_attrs, +}; + +static ssize_t arc_pmu_events_sysfs_show(struct device *dev, + struct device_attribute *attr, + char *page) +{ + struct perf_pmu_events_attr *pmu_attr; + + pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr); + return sprintf(page, "event=0x%04llx\n", pmu_attr->id); +} + +/* + * We don't add attrs here as we don't have pre-defined list of perf events. + * We will generate and add attrs dynamically in probe() after we read HW + * configuration. 
+ */ +static struct attribute_group arc_pmu_events_attr_gr = { + .name = "events", +}; + +static void arc_pmu_add_raw_event_attr(int j, char *str) +{ + memmove(arc_pmu->raw_entry[j].name, str, ARCPMU_EVENT_NAME_LEN - 1); + arc_pmu->attr[j].attr.attr.name = arc_pmu->raw_entry[j].name; + arc_pmu->attr[j].attr.attr.mode = VERIFY_OCTAL_PERMISSIONS(0444); + arc_pmu->attr[j].attr.show = arc_pmu_events_sysfs_show; + arc_pmu->attr[j].id = j; + arc_pmu->attrs[j] = &(arc_pmu->attr[j].attr.attr); +} + +static int arc_pmu_raw_alloc(struct device *dev) +{ + arc_pmu->attr = devm_kmalloc_array(dev, arc_pmu->n_events + 1, + sizeof(*arc_pmu->attr), GFP_KERNEL | __GFP_ZERO); + if (!arc_pmu->attr) + return -ENOMEM; + + arc_pmu->attrs = devm_kmalloc_array(dev, arc_pmu->n_events + 1, + sizeof(*arc_pmu->attrs), GFP_KERNEL | __GFP_ZERO); + if (!arc_pmu->attrs) + return -ENOMEM; + + arc_pmu->raw_entry = devm_kmalloc_array(dev, arc_pmu->n_events, + sizeof(*arc_pmu->raw_entry), GFP_KERNEL | __GFP_ZERO); + if (!arc_pmu->raw_entry) + return -ENOMEM; + + return 0; +} + static int arc_pmu_device_probe(struct platform_device *pdev) { struct arc_reg_pct_build pct_bcr; @@ -473,6 +561,11 @@ static int arc_pmu_device_probe(struct platform_device *pdev) if (!arc_pmu) return -ENOMEM; + arc_pmu->n_events = cc_bcr.c; + + if (arc_pmu_raw_alloc(&pdev->dev)) + return -ENOMEM; + has_interrupts = is_isa_arcv2() ? pct_bcr.i : 0; arc_pmu->n_counters = pct_bcr.c; @@ -504,8 +597,14 @@ static int arc_pmu_device_probe(struct platform_device *pdev) arc_pmu->ev_hw_idx[i] = j; } } + + arc_pmu_add_raw_event_attr(j, cc_name.str); } + arc_pmu_events_attr_gr.attrs = arc_pmu->attrs; + arc_pmu->attr_groups[ARCPMU_ATTR_GR_EVENTS] = &arc_pmu_events_attr_gr; + arc_pmu->attr_groups[ARCPMU_ATTR_GR_FORMATS] = &arc_pmu_format_attr_gr; + arc_pmu->pmu = (struct pmu) { .pmu_enable = arc_pmu_enable, .pmu_disable = arc_pmu_disable, @@ -515,6 +614,7 @@ static int arc_pmu_device_probe(struct platform_device *pdev) .start = arc_pmu_start, .stop = arc_pmu_stop, .read = arc_pmu_read, + .attr_groups = arc_pmu->attr_groups, }; if (has_interrupts) { @@ -536,7 +636,11 @@ static int arc_pmu_device_probe(struct platform_device *pdev) } else arc_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT; - return perf_pmu_register(&arc_pmu->pmu, pdev->name, PERF_TYPE_RAW); + /* + * perf parser doesn't really like '-' symbol in events name, so let's + * use '_' in arc pct name as it goes to kernel PMU event prefix. + */ + return perf_pmu_register(&arc_pmu->pmu, "arc_pct", PERF_TYPE_RAW); } static const struct of_device_id arc_pmu_match[] = { From baf9cc85ba01f32cf2ee79042a64b874a58cfb92 Mon Sep 17 00:00:00 2001 From: Eugeniy Paltsev Date: Thu, 13 Dec 2018 19:56:20 +0300 Subject: [PATCH 09/14] ARC: perf: move HW events mapping to separate function Move HW events mapping to separate function to make code more readable. 
Signed-off-by: Eugeniy Paltsev
Signed-off-by: Vineet Gupta
---
 arch/arc/kernel/perf_event.c | 48 +++++++++++++++++++++++++-----------
 1 file changed, 33 insertions(+), 15 deletions(-)

diff --git a/arch/arc/kernel/perf_event.c b/arch/arc/kernel/perf_event.c
index d60aaaead421..248c7b61690a 100644
--- a/arch/arc/kernel/perf_event.c
+++ b/arch/arc/kernel/perf_event.c
@@ -530,11 +530,39 @@ static int arc_pmu_raw_alloc(struct device *dev)
 	return 0;
 }

+static inline bool event_in_hw_event_map(int i, char *name)
+{
+	if (!arc_pmu_ev_hw_map[i])
+		return false;
+
+	if (!strlen(arc_pmu_ev_hw_map[i]))
+		return false;
+
+	if (strcmp(arc_pmu_ev_hw_map[i], name))
+		return false;
+
+	return true;
+}
+
+static void arc_pmu_map_hw_event(int j, char *str)
+{
+	int i;
+
+	/* See if HW condition has been mapped to a perf event_id */
+	for (i = 0; i < ARRAY_SIZE(arc_pmu_ev_hw_map); i++) {
+		if (event_in_hw_event_map(i, str)) {
+			pr_debug("mapping perf event %2d to h/w event \'%8s\' (idx %d)\n",
+				 i, str, j);
+			arc_pmu->ev_hw_idx[i] = j;
+		}
+	}
+}
+
 static int arc_pmu_device_probe(struct platform_device *pdev)
 {
 	struct arc_reg_pct_build pct_bcr;
 	struct arc_reg_cc_build cc_bcr;
-	int i, j, has_interrupts;
+	int i, has_interrupts;
 	int counter_size;	/* in bits */

 	union cc_name {
@@ -582,23 +610,13 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
 		arc_pmu->ev_hw_idx[i] = -1;

 	/* loop thru all available h/w condition indexes */
-	for (j = 0; j < cc_bcr.c; j++) {
-		write_aux_reg(ARC_REG_CC_INDEX, j);
+	for (i = 0; i < cc_bcr.c; i++) {
+		write_aux_reg(ARC_REG_CC_INDEX, i);
 		cc_name.indiv.word0 = read_aux_reg(ARC_REG_CC_NAME0);
 		cc_name.indiv.word1 = read_aux_reg(ARC_REG_CC_NAME1);

-		/* See if it has been mapped to a perf event_id */
-		for (i = 0; i < ARRAY_SIZE(arc_pmu_ev_hw_map); i++) {
-			if (arc_pmu_ev_hw_map[i] &&
-			    !strcmp(arc_pmu_ev_hw_map[i], cc_name.str) &&
-			    strlen(arc_pmu_ev_hw_map[i])) {
-				pr_debug("mapping perf event %2d to h/w event \'%8s\' (idx %d)\n",
-					 i, cc_name.str, j);
-				arc_pmu->ev_hw_idx[i] = j;
-			}
-		}
-
-		arc_pmu_add_raw_event_attr(j, cc_name.str);
+		arc_pmu_map_hw_event(i, cc_name.str);
+		arc_pmu_add_raw_event_attr(i, cc_name.str);
 	}

From 29133260f7c2e4ce50b4da6bf0674331bc0a4da5 Mon Sep 17 00:00:00 2001
From: Eugeniy Paltsev
Date: Thu, 13 Dec 2018 19:56:21 +0300
Subject: [PATCH 10/14] ARC: perf: avoid kernel killing where it is possible

No, not gonna die tonight: convert the BUG_ON()s in the PMU probe path
into warnings with a graceful -EINVAL return, so a misconfigured PCT no
longer kills the kernel.

Signed-off-by: Eugeniy Paltsev
Signed-off-by: Vineet Gupta
---
 arch/arc/kernel/perf_event.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/arch/arc/kernel/perf_event.c b/arch/arc/kernel/perf_event.c
index 248c7b61690a..861a8aea51f9 100644
--- a/arch/arc/kernel/perf_event.c
+++ b/arch/arc/kernel/perf_event.c
@@ -580,10 +580,12 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
 		return -ENODEV;
 	}
 	BUILD_BUG_ON(ARC_PERF_MAX_COUNTERS > 32);
-	BUG_ON(pct_bcr.c > ARC_PERF_MAX_COUNTERS);
+	if (WARN_ON(pct_bcr.c > ARC_PERF_MAX_COUNTERS))
+		return -EINVAL;

 	READ_BCR(ARC_REG_CC_BUILD, cc_bcr);
-	BUG_ON(!cc_bcr.v); /* Counters exist but No countable conditions ? */
+	if (WARN(!cc_bcr.v, "Counters exist but No countable conditions?"))
+		return -EINVAL;

 	arc_pmu = devm_kzalloc(&pdev->dev, sizeof(struct arc_pmu), GFP_KERNEL);
 	if (!arc_pmu)

From ab6c03676cb190156603cf4c5ecf97aa406c9c53 Mon Sep 17 00:00:00 2001
From: Vineet Gupta
Date: Mon, 17 Dec 2018 14:11:19 -0800
Subject: [PATCH 11/14] ARC: show_regs: lockdep: avoid page allocator...
 and use smaller/on-stack buffer instead

The motivation for this change was a lockdep splat like the one below.

| potentially unexpected fatal signal 11.
| BUG: sleeping function called from invalid context at ../mm/page_alloc.c:4317
| in_atomic(): 1, irqs_disabled(): 0, pid: 57, name: segv
| no locks held by segv/57.
| Preemption disabled at:
| [<8182f17e>] get_signal+0x4a6/0x7c4
| CPU: 0 PID: 57 Comm: segv Not tainted 4.17.0+ #23
|
| Stack Trace:
|  arc_unwind_core.constprop.1+0xd0/0xf4
|  __might_sleep+0x1f6/0x234
|  __get_free_pages+0x174/0xca0
|  show_regs+0x22/0x330
|  get_signal+0x4ac/0x7c4     # print_fatal_signals() -> preempt_disable()
|  do_signal+0x30/0x224
|  resume_user_mode_begin+0x90/0xd8

So the signal handling core calls show_regs() with preemption disabled,
but an ensuing GFP_KERNEL page allocator call is flagged by lockdep.

We could have switched to GFP_NOWAIT, but it turns out that is not
enough anyway, and eliding the page allocator call leads to less code
and fewer instruction traces to sift through when debugging pesky
crashes.

FWIW, this patch doesn't cure the lockdep splat (which the next patch
does).

Reviewed-by: William Kucharski
Signed-off-by: Vineet Gupta
---
 arch/arc/kernel/troubleshoot.c | 26 ++++++++++++--------------
 1 file changed, 12 insertions(+), 14 deletions(-)

diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c
index e8d9fb452346..5c6663321e87 100644
--- a/arch/arc/kernel/troubleshoot.c
+++ b/arch/arc/kernel/troubleshoot.c
@@ -18,6 +18,8 @@
 #include
 #include

+#define ARC_PATH_MAX 256
+
 /*
  * Common routine to print scratch regs (r0-r12) or callee regs (r13-r25)
  *  -Prints 3 regs per line and a CR.
@@ -58,11 +60,12 @@ static void show_callee_regs(struct callee_regs *cregs)
 	print_reg_file(&(cregs->r13), 13);
 }

-static void print_task_path_n_nm(struct task_struct *tsk, char *buf)
+static void print_task_path_n_nm(struct task_struct *tsk)
 {
 	char *path_nm = NULL;
 	struct mm_struct *mm;
 	struct file *exe_file;
+	char buf[ARC_PATH_MAX];

 	mm = get_task_mm(tsk);
 	if (!mm)
@@ -72,7 +75,7 @@
 	mmput(mm);

 	if (exe_file) {
-		path_nm = file_path(exe_file, buf, 255);
+		path_nm = file_path(exe_file, buf, ARC_PATH_MAX-1);
 		fput(exe_file);
 	}

@@ -80,10 +83,9 @@ done:
 	pr_info("Path: %s\n", !IS_ERR(path_nm) ? path_nm : "?");
 }

-static void show_faulting_vma(unsigned long address, char *buf)
+static void show_faulting_vma(unsigned long address)
 {
 	struct vm_area_struct *vma;
-	char *nm = buf;
 	struct mm_struct *active_mm = current->active_mm;

 	/* can't use print_vma_addr() yet as it doesn't check for
@@ -96,8 +98,11 @@
 	 * if the container VMA is not found
 	 */
 	if (vma && (vma->vm_start <= address)) {
+		char buf[ARC_PATH_MAX];
+		char *nm = "?";
+
 		if (vma->vm_file) {
-			nm = file_path(vma->vm_file, buf, PAGE_SIZE - 1);
+			nm = file_path(vma->vm_file, buf, ARC_PATH_MAX-1);
 			if (IS_ERR(nm))
 				nm = "?";
 		}
@@ -173,13 +178,8 @@ void show_regs(struct pt_regs *regs)
 {
 	struct task_struct *tsk = current;
 	struct callee_regs *cregs;
-	char *buf;

-	buf = (char *)__get_free_page(GFP_KERNEL);
-	if (!buf)
-		return;
-
-	print_task_path_n_nm(tsk, buf);
+	print_task_path_n_nm(tsk);
 	show_regs_print_info(KERN_INFO);

 	show_ecr_verbose(regs);
@@ -189,7 +189,7 @@ void show_regs(struct pt_regs *regs)
 		(void *)regs->blink, (void *)regs->ret);

 	if (user_mode(regs))
-		show_faulting_vma(regs->ret, buf); /* faulting code, not data */
+		show_faulting_vma(regs->ret); /* faulting code, not data */

 	pr_info("[STAT32]: 0x%08lx", regs->status32);

@@ -221,8 +221,6 @@ void show_regs(struct pt_regs *regs)
 	cregs = (struct callee_regs *)current->thread.callee_reg;
 	if (cregs)
 		show_callee_regs(cregs);
-
-	free_page((unsigned long)buf);
 }

 void show_kernel_fault_diag(const char *str, struct pt_regs *regs,

From f731a8e89f8c78985707c626680f3e24c7a60772 Mon Sep 17 00:00:00 2001
From: Vineet Gupta
Date: Tue, 18 Dec 2018 10:39:58 -0800
Subject: [PATCH 12/14] ARC: show_regs: lockdep: re-enable preemption

The signal handling core calls show_regs() with preemption disabled,
which on ARC takes mmap_sem for mm/vma access, causing a lockdep splat.

| [ARCLinux]# ./segv-null-ptr
| potentially unexpected fatal signal 11.
| BUG: sleeping function called from invalid context at kernel/fork.c:1011
| in_atomic(): 1, irqs_disabled(): 0, pid: 70, name: segv-null-ptr
| no locks held by segv-null-ptr/70.
| CPU: 0 PID: 70 Comm: segv-null-ptr Not tainted 4.18.0+ #69
|
| Stack Trace:
|  arc_unwind_core+0xcc/0x100
|  ___might_sleep+0x17a/0x190
|  mmput+0x16/0xb8
|  show_regs+0x52/0x310
|  get_signal+0x5ee/0x610
|  do_signal+0x2c/0x218
|  resume_user_mode_begin+0x90/0xd8

Work around this by re-enabling preemption temporarily.

Note that the preemption disabling in core code around show_regs() was
introduced by commit 3a9f84d354ce ("signals, debug: fix BUG: using
smp_processor_id() in preemptible code in print_fatal_signal()") to
silence a different lockdep splat seen on x86 back in 2009.
Cc:
Signed-off-by: Vineet Gupta
---
 arch/arc/kernel/troubleshoot.c | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c
index 5c6663321e87..215f515442e0 100644
--- a/arch/arc/kernel/troubleshoot.c
+++ b/arch/arc/kernel/troubleshoot.c
@@ -179,6 +179,12 @@ void show_regs(struct pt_regs *regs)
 	struct task_struct *tsk = current;
 	struct callee_regs *cregs;

+	/*
+	 * generic code calls us with preemption disabled, but some calls
+	 * here could sleep, so re-enable to avoid lockdep splat
+	 */
+	preempt_enable();
+
 	print_task_path_n_nm(tsk);
 	show_regs_print_info(KERN_INFO);

@@ -221,6 +227,8 @@ void show_regs(struct pt_regs *regs)
 	cregs = (struct callee_regs *)current->thread.callee_reg;
 	if (cregs)
 		show_callee_regs(cregs);
+
+	preempt_disable();
 }

 void show_kernel_fault_diag(const char *str, struct pt_regs *regs,

From 4d447455e73b47c43dd35fcc38ed823d3182a474 Mon Sep 17 00:00:00 2001
From: Vineet Gupta
Date: Mon, 10 Dec 2018 16:56:45 -0800
Subject: [PATCH 13/14] ARC: mm: do_page_fault fixes #1: relinquish mmap_sem
 if signal arrives while in handle_mm_fault

do_page_fault() forgot to relinquish mmap_sem if a signal came while
handling handle_mm_fault() - due to say a ctrl+c or oom etc. This would
later cause a deadlock by acquiring it twice.

This came to light when running the libc testsuite tst-tls3-malloc test
but is likely also the cause of previously seen LTP failures. Using
lockdep clearly showed what the issue was.

| # while true; do ./tst-tls3-malloc ; done
| Didn't expect signal from child: got `Segmentation fault'
| ^C
| ============================================
| WARNING: possible recursive locking detected
| 4.17.0+ #25 Not tainted
| --------------------------------------------
| tst-tls3-malloc/510 is trying to acquire lock:
| 606c7728 (&mm->mmap_sem){++++}, at: __might_fault+0x28/0x5c
|
| but task is already holding lock:
| 606c7728 (&mm->mmap_sem){++++}, at: do_page_fault+0x9c/0x2a0
|
| other info that might help us debug this:
|  Possible unsafe locking scenario:
|
|       CPU0
|       ----
|  lock(&mm->mmap_sem);
|  lock(&mm->mmap_sem);
|
| *** DEADLOCK ***
| ------------------------------------------------------------

What the change does is not obvious (note to myself): prior code was

| do_page_fault
|
| down_read()		<-- lock taken
| handle_mm_fault	<-- signal pending as this runs
| if fatal_signal_pending
|	if VM_FAULT_ERROR
|		up_read
|	if user_mode
|		return	<-- lock still held, this was the BUG

New code

| do_page_fault
|
| down_read()		<-- lock taken
| handle_mm_fault	<-- signal pending as this runs
| if fatal_signal_pending
|	if VM_FAULT_RETRY
|		return	<-- not same case as above, but still OK since
|			    core mm already relinq lock for FAULT_RETRY
|	...
|
| < Now falls through for bug case above >
|
| up_read()		<-- lock relinquished

Cc: stable@vger.kernel.org
Signed-off-by: Vineet Gupta
---
 arch/arc/mm/fault.c | 13 +++++++++----
 1 file changed, 9 insertions(+), 4 deletions(-)

diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
index a1d723197084..8df1638259f3 100644
--- a/arch/arc/mm/fault.c
+++ b/arch/arc/mm/fault.c
@@ -141,12 +141,17 @@ good_area:
 	 */
 	fault = handle_mm_fault(vma, address, flags);

-	/* If Pagefault was interrupted by SIGKILL, exit page fault "early" */
 	if (fatal_signal_pending(current)) {
-		if ((fault & VM_FAULT_ERROR) && !(fault & VM_FAULT_RETRY))
-			up_read(&mm->mmap_sem);
-		if (user_mode(regs))
+
+		/*
+		 * if fault retry, mmap_sem already relinquished by core mm
+		 * so OK to return to user mode (with signal handled first)
+		 */
+		if (fault & VM_FAULT_RETRY) {
+			if (!user_mode(regs))
+				goto no_context;
 			return;
+		}
 	}

 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);

From e6a72b7daeeb521753803550f0ed711152bb2555 Mon Sep 17 00:00:00 2001
From: Eugeniy Paltsev
Date: Mon, 14 Jan 2019 18:16:48 +0300
Subject: [PATCH 14/14] ARCv2: lib: memset: fix doing prefetchw outside of
 buffer

ARCv2 optimized memset uses the PREFETCHW instruction for prefetching
the next cache line, but doesn't ensure that the line is not past the
end of the buffer. PREFETCHW changes the line ownership and marks it
dirty, which can cause issues in SMP config when the next line was
already owned by another core. Fix the issue by avoiding the PREFETCHW.

Some more details:

The current code has 3 logical loops (ignoring the unaligned part)
(a) Big loop for doing aligned 64 bytes per iteration with PREALLOC
(b) Loop for 32 x 2 bytes with PREFETCHW
(c) any left over bytes

loop (a) was already eliding the last 64 bytes, so PREALLOC was safe.
The fix was removing PREFETCHW from (b).

Another potential issue (applicable to configs with 32 or 128 byte L1
cache line) is that PREALLOC assumes a 64 byte cache line and may not
do the right thing, specially for 32b. While it would be easy to adapt,
there are no known configs with those line sizes, so for now, just
compile out PREALLOC in such cases.

Signed-off-by: Eugeniy Paltsev
Cc: stable@vger.kernel.org #4.4+
Signed-off-by: Vineet Gupta
[vgupta: rewrote changelog, used asm .macro vs. "C" macro]
---
 arch/arc/lib/memset-archs.S | 40 +++++++++++++++++++++++++++++--------
 1 file changed, 32 insertions(+), 8 deletions(-)

diff --git a/arch/arc/lib/memset-archs.S b/arch/arc/lib/memset-archs.S
index 62ad4bcb841a..f230bb7092fd 100644
--- a/arch/arc/lib/memset-archs.S
+++ b/arch/arc/lib/memset-archs.S
@@ -7,11 +7,39 @@
  */

 #include
+#include

-#undef PREALLOC_NOT_AVAIL
+/*
+ * The memset implementation below is optimized to use prefetchw and prealloc
+ * instruction in case of CPU with 64B L1 data cache line (L1_CACHE_SHIFT == 6)
+ * If you want to implement optimized memset for other possible L1 data cache
+ * line lengths (32B and 128B) you should rewrite code carefully checking
+ * we don't call any prefetchw/prealloc instruction for L1 cache lines which
+ * don't belong to the memset area.
+ */ + +#if L1_CACHE_SHIFT == 6 + +.macro PREALLOC_INSTR reg, off + prealloc [\reg, \off] +.endm + +.macro PREFETCHW_INSTR reg, off + prefetchw [\reg, \off] +.endm + +#else + +.macro PREALLOC_INSTR +.endm + +.macro PREFETCHW_INSTR +.endm + +#endif ENTRY_CFI(memset) - prefetchw [r0] ; Prefetch the write location + PREFETCHW_INSTR r0, 0 ; Prefetch the first write location mov.f 0, r2 ;;; if size is zero jz.d [blink] @@ -48,11 +76,8 @@ ENTRY_CFI(memset) lpnz @.Lset64bytes ;; LOOP START -#ifdef PREALLOC_NOT_AVAIL - prefetchw [r3, 64] ;Prefetch the next write location -#else - prealloc [r3, 64] -#endif + PREALLOC_INSTR r3, 64 ; alloc next line w/o fetching + #ifdef CONFIG_ARC_HAS_LL64 std.ab r4, [r3, 8] std.ab r4, [r3, 8] @@ -85,7 +110,6 @@ ENTRY_CFI(memset) lsr.f lp_count, r2, 5 ;Last remaining max 124 bytes lpnz .Lset32bytes ;; LOOP START - prefetchw [r3, 32] ;Prefetch the next write location #ifdef CONFIG_ARC_HAS_LL64 std.ab r4, [r3, 8] std.ab r4, [r3, 8]