mirror of
https://github.com/torvalds/linux.git
synced 2024-12-03 17:41:22 +00:00
ec6347bb43
In reaction to a proposal to introduce a memcpy_mcsafe_fast() implementation Linus points out that memcpy_mcsafe() is poorly named relative to communicating the scope of the interface. Specifically what addresses are valid to pass as source, destination, and what faults / exceptions are handled. Of particular concern is that even though x86 might be able to handle the semantics of copy_mc_to_user() with its common copy_user_generic() implementation other archs likely need / want an explicit path for this case: On Fri, May 1, 2020 at 11:28 AM Linus Torvalds <torvalds@linux-foundation.org> wrote: > > On Thu, Apr 30, 2020 at 6:21 PM Dan Williams <dan.j.williams@intel.com> wrote: > > > > However now I see that copy_user_generic() works for the wrong reason. > > It works because the exception on the source address due to poison > > looks no different than a write fault on the user address to the > > caller, it's still just a short copy. So it makes copy_to_user() work > > for the wrong reason relative to the name. > > Right. > > And it won't work that way on other architectures. On x86, we have a > generic function that can take faults on either side, and we use it > for both cases (and for the "in_user" case too), but that's an > artifact of the architecture oddity. > > In fact, it's probably wrong even on x86 - because it can hide bugs - > but writing those things is painful enough that everybody prefers > having just one function. Replace a single top-level memcpy_mcsafe() with either copy_mc_to_user(), or copy_mc_to_kernel(). Introduce an x86 copy_mc_fragile() name as the rename for the low-level x86 implementation formerly named memcpy_mcsafe(). It is used as the slow / careful backend that is supplanted by a fast copy_mc_generic() in a follow-on patch. One side-effect of this reorganization is that separating copy_mc_64.S to its own file means that perf no longer needs to track dependencies for its memcpy_64.S benchmarks. [ bp: Massage a bit. 
] Signed-off-by: Dan Williams <dan.j.williams@intel.com> Signed-off-by: Borislav Petkov <bp@suse.de> Reviewed-by: Tony Luck <tony.luck@intel.com> Acked-by: Michael Ellerman <mpe@ellerman.id.au> Cc: <stable@vger.kernel.org> Link: http://lore.kernel.org/r/CAHk-=wjSqtXAqfUJxFtWNwmguFASTgB0dz1dT3V-78Quiezqbg@mail.gmail.com Link: https://lkml.kernel.org/r/160195561680.2163339.11574962055305783722.stgit@dwillia2-desk3.amr.corp.intel.com
67 lines
1.8 KiB
Makefile
# SPDX-License-Identifier: GPL-2.0
#
# Makefile for ppc-specific library files..
#

ccflags-$(CONFIG_PPC64)	:= $(NO_MINIMAL_TOC)

# code-patching and feature-fixups run too early for ftrace instrumentation.
CFLAGS_REMOVE_code-patching.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_feature-fixups.o = $(CC_FLAGS_FTRACE)

# These run before KASAN shadow memory is set up.
KASAN_SANITIZE_code-patching.o := n
KASAN_SANITIZE_feature-fixups.o := n

ifdef CONFIG_KASAN
CFLAGS_code-patching.o += -DDISABLE_BRANCH_PROFILING
CFLAGS_feature-fixups.o += -DDISABLE_BRANCH_PROFILING
endif

obj-y += alloc.o code-patching.o feature-fixups.o pmem.o inst.o test_code-patching.o

ifndef CONFIG_KASAN
obj-y	+=	string.o memcmp_$(BITS).o
obj-$(CONFIG_PPC32)	+= strlen_32.o
endif

obj-$(CONFIG_PPC32)	+= div64.o copy_32.o crtsavres.o

obj-$(CONFIG_FUNCTION_ERROR_INJECTION)	+= error-inject.o

# See corresponding test in arch/powerpc/Makefile
# 64-bit linker creates .sfpr on demand for final link (vmlinux),
# so it is only needed for modules, and only for older linkers which
# do not support --save-restore-funcs
ifeq ($(call ld-ifversion, -lt, 225000000, y),y)
extra-$(CONFIG_PPC64)	+= crtsavres.o
endif

obj-$(CONFIG_PPC_BOOK3S_64) += copyuser_power7.o copypage_power7.o \
			       memcpy_power7.o

obj64-y	+= copypage_64.o copyuser_64.o mem_64.o hweight_64.o \
	   memcpy_64.o copy_mc_64.o

ifndef CONFIG_PPC_QUEUED_SPINLOCKS
obj64-$(CONFIG_SMP)	+= locks.o
endif

obj64-$(CONFIG_ALTIVEC)	+= vmx-helper.o
obj64-$(CONFIG_KPROBES_SANITY_TEST)	+= test_emulate_step.o \
					   test_emulate_step_exec_instr.o

obj-y			+= checksum_$(BITS).o checksum_wrappers.o \
			   string_$(BITS).o

obj-y			+= sstep.o
obj-$(CONFIG_PPC_FPU)	+= ldstfp.o
obj64-y			+= quad.o

obj-$(CONFIG_PPC_LIB_RHEAP) += rheap.o

obj-$(CONFIG_FTR_FIXUP_SELFTEST) += feature-fixups-test.o

obj-$(CONFIG_ALTIVEC)	+= xor_vmx.o xor_vmx_glue.o
CFLAGS_xor_vmx.o += -maltivec $(call cc-option,-mabi=altivec)

# Fold the 64-bit-only objects into obj-y when building for PPC64.
obj-$(CONFIG_PPC64) += $(obj64-y)