#
# Makefile for ppc-specific library files..
#
# Build every object in this directory (and its subdirectories) with
# -Werror when CONFIG_PPC_WERROR is enabled.
subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror

# 64-bit objects get the flags collected in NO_MINIMAL_TOC
# (defined outside this file by the powerpc build system).
ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC)
# Strip the ftrace instrumentation flags from the patching helpers.
# NOTE(review): presumably because these helpers are themselves used
# while patching the running kernel (e.g. by ftrace) — confirm.
CFLAGS_REMOVE_code-patching.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_feature-fixups.o = $(CC_FLAGS_FTRACE)
# Library objects built for both 32-bit and 64-bit kernels.
obj-y += string.o alloc.o code-patching.o feature-fixups.o

# 32-bit-only helpers: 64-bit division, copy routines, and the
# register save/restore stubs.
obj-$(CONFIG_PPC32) += div64.o copy_32.o crtsavres.o
# See corresponding test in arch/powerpc/Makefile
# 64-bit linker creates .sfpr on demand for final link (vmlinux),
# so it is only needed for modules, and only for older linkers which
# do not support --save-restore-funcs
ifeq ($(call ld-ifversion, -lt, 225000000, y),y)
extra-$(CONFIG_PPC64) += crtsavres.o
endif
# Core 64-bit string/memory primitives, accumulated in obj64-y and
# merged into obj-y at the bottom of this file when CONFIG_PPC64=y.
obj64-y += copypage_64.o copyuser_64.o mem_64.o hweight_64.o \
	   copyuser_power7.o string_64.o copypage_power7.o memcpy_power7.o \
	   memcpy_64.o memcmp_64.o
# Optional 64-bit objects, gated on their respective Kconfig symbols.
obj64-$(CONFIG_SMP) += locks.o
obj64-$(CONFIG_ALTIVEC) += vmx-helper.o
obj64-$(CONFIG_KPROBES_SANITY_TEST) += test_emulate_step.o
# Checksum helpers; $(BITS) selects the 32- or 64-bit implementation.
obj-y += checksum_$(BITS).o checksum_wrappers.o

# Single-step / instruction emulation support.
obj-$(CONFIG_PPC_EMULATE_SSTEP) += sstep.o ldstfp.o
# Quadword load/store emulation (lq/stq etc.) is 64-bit only.
obj64-$(CONFIG_PPC_EMULATE_SSTEP) += quad.o

# Remote heap allocator.
obj-$(CONFIG_PPC_LIB_RHEAP) += rheap.o

# Self-test for the feature-fixup patching code above.
obj-$(CONFIG_FTR_FIXUP_SELFTEST) += feature-fixups-test.o
# xor_vmx.o holds the Altivec XOR kernels and must be built with
# -maltivec; xor_vmx_glue.o is deliberately built without it, so the
# compiler cannot emit Altivec instructions outside the
# enable_kernel_altivec() region.
obj-$(CONFIG_ALTIVEC) += xor_vmx.o xor_vmx_glue.o
CFLAGS_xor_vmx.o += -maltivec $(call cc-option,-mabi=altivec)

# Finally, fold the 64-bit-only objects collected in obj64-y into the
# build when CONFIG_PPC64 is set.
obj-$(CONFIG_PPC64) += $(obj64-y)