Merge 4.16-rc3 into staging-next
We want the IIO/Staging fixes in here, and to resolve a merge problem
with the move of the fsl-mc code.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
commit 36e9f7203e
.gitignore | 4
@@ -127,3 +127,7 @@ all.config

 # Kdevelop4
 *.kdev4
+
+#Automatically generated by ASN.1 compiler
+net/ipv4/netfilter/nf_nat_snmp_basic-asn1.c
+net/ipv4/netfilter/nf_nat_snmp_basic-asn1.h
Documentation/ABI/testing/sysfs-devices-platform-dock | 39 (new file)
@@ -0,0 +1,39 @@
What:		/sys/devices/platform/dock.N/docked
Date:		Dec, 2006
KernelVersion:	2.6.19
Contact:	linux-acpi@vger.kernel.org
Description:
		(RO) Value 1 or 0 indicates whether the software believes the
		laptop is docked in a docking station.

What:		/sys/devices/platform/dock.N/undock
Date:		Dec, 2006
KernelVersion:	2.6.19
Contact:	linux-acpi@vger.kernel.org
Description:
		(WO) Writing to this file causes the software to initiate an
		undock request to the firmware.

What:		/sys/devices/platform/dock.N/uid
Date:		Feb, 2007
KernelVersion:	v2.6.21
Contact:	linux-acpi@vger.kernel.org
Description:
		(RO) Displays the docking station the laptop is docked to.

What:		/sys/devices/platform/dock.N/flags
Date:		May, 2007
KernelVersion:	v2.6.21
Contact:	linux-acpi@vger.kernel.org
Description:
		(RO) Show dock station flags, useful for checking if undock
		request has been made by the user (from the immediate_undock
		option).

What:		/sys/devices/platform/dock.N/type
Date:		Aug, 2008
KernelVersion:	v2.6.27
Contact:	linux-acpi@vger.kernel.org
Description:
		(RO) Display the dock station type- dock_station, ata_bay or
		battery_bay.
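For context, these dock attributes are plain text files, so a userspace check is a one-line read. A minimal sketch (the dock index 0 and the lack of error reporting are illustrative assumptions, not part of the ABI):

    #include <stdio.h>

    int main(void)
    {
        char buf[8];
        /* dock.0 is an assumed instance of the dock.N pattern above */
        FILE *f = fopen("/sys/devices/platform/dock.0/docked", "r");

        if (!f)
            return 1;
        if (fgets(buf, sizeof(buf), f))
            printf("docked: %s", buf); /* prints "1" or "0" */
        fclose(f);
        return 0;
    }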
@@ -108,6 +108,8 @@ Description:	CPU topology files that describe a logical CPU's relationship

 What:		/sys/devices/system/cpu/cpuidle/current_driver
 		/sys/devices/system/cpu/cpuidle/current_governer_ro
+		/sys/devices/system/cpu/cpuidle/available_governors
+		/sys/devices/system/cpu/cpuidle/current_governor
 Date:		September 2007
 Contact:	Linux kernel mailing list <linux-kernel@vger.kernel.org>
 Description:	Discover cpuidle policy and mechanism
@@ -119,13 +121,84 @@ Description:	Discover cpuidle policy and mechanism
 		Idle policy (governor) is differentiated from idle mechanism
 		(driver)

-		current_driver: displays current idle mechanism
+		current_driver: (RO) displays current idle mechanism

-		current_governor_ro: displays current idle policy
+		current_governor_ro: (RO) displays current idle policy
+
+		With the cpuidle_sysfs_switch boot option enabled (meant for
+		developer testing), the following three attributes are visible
+		instead:
+
+		current_driver: same as described above
+
+		available_governors: (RO) displays a space separated list of
+		available governors
+
+		current_governor: (RW) displays current idle policy. Users can
+		switch the governor at runtime by writing to this file.

 		See files in Documentation/cpuidle/ for more information.

+
+What:		/sys/devices/system/cpu/cpuX/cpuidle/stateN/name
+		/sys/devices/system/cpu/cpuX/cpuidle/stateN/latency
+		/sys/devices/system/cpu/cpuX/cpuidle/stateN/power
+		/sys/devices/system/cpu/cpuX/cpuidle/stateN/time
+		/sys/devices/system/cpu/cpuX/cpuidle/stateN/usage
+Date:		September 2007
+KernelVersion:	v2.6.24
+Contact:	Linux power management list <linux-pm@vger.kernel.org>
+Description:
+		The directory /sys/devices/system/cpu/cpuX/cpuidle contains per
+		logical CPU specific cpuidle information for each online cpu X.
+		The processor idle states which are available for use have the
+		following attributes:
+
+		name: (RO) Name of the idle state (string).
+
+		latency: (RO) The latency to exit out of this idle state (in
+		microseconds).
+
+		power: (RO) The power consumed while in this idle state (in
+		milliwatts).
+
+		time: (RO) The total time spent in this idle state (in microseconds).
+
+		usage: (RO) Number of times this state was entered (a count).
+
+
+What:		/sys/devices/system/cpu/cpuX/cpuidle/stateN/desc
+Date:		February 2008
+KernelVersion:	v2.6.25
+Contact:	Linux power management list <linux-pm@vger.kernel.org>
+Description:
+		(RO) A small description about the idle state (string).
+
+
+What:		/sys/devices/system/cpu/cpuX/cpuidle/stateN/disable
+Date:		March 2012
+KernelVersion:	v3.10
+Contact:	Linux power management list <linux-pm@vger.kernel.org>
+Description:
+		(RW) Option to disable this idle state (bool). The behavior and
+		the effect of the disable variable depends on the implementation
+		of a particular governor. In the ladder governor, for example,
+		it is not coherent, i.e. if one is disabling a light state, then
+		all deeper states are disabled as well, but the disable variable
+		does not reflect it. Likewise, if one enables a deep state but a
+		lighter state still is disabled, then this has no effect.
+
+
+What:		/sys/devices/system/cpu/cpuX/cpuidle/stateN/residency
+Date:		March 2014
+KernelVersion:	v3.15
+Contact:	Linux power management list <linux-pm@vger.kernel.org>
+Description:
+		(RO) Display the target residency i.e. the minimum amount of
+		time (in microseconds) this cpu should spend in this idle state
+		to make the transition worth the effort.

 What:		/sys/devices/system/cpu/cpu#/cpufreq/*
 Date:		pre-git history
 Contact:	linux-pm@vger.kernel.org
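To make the per-state layout above concrete, here is a hedged userspace sketch that walks the stateN directories of cpu0 and prints each state's name (the loop bound of 8 is an arbitrary assumption; real code would stop when the directory is missing, as this does):

    #include <stdio.h>

    int main(void)
    {
        char path[128], name[64];
        FILE *f;
        int n;

        for (n = 0; n < 8; n++) { /* assumed upper bound on idle states */
            snprintf(path, sizeof(path),
                     "/sys/devices/system/cpu/cpu0/cpuidle/state%d/name", n);
            f = fopen(path, "r");
            if (!f)
                break; /* no more states */
            if (fgets(name, sizeof(name), f))
                printf("state%d: %s", n, name);
            fclose(f);
        }
        return 0;
    }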
Documentation/ABI/testing/sysfs-platform-dptf | 40 (new file)
@@ -0,0 +1,40 @@
What:		/sys/bus/platform/devices/INT3407:00/dptf_power/charger_type
Date:		Jul, 2016
KernelVersion:	v4.10
Contact:	linux-acpi@vger.kernel.org
Description:
		(RO) The charger type - Traditional, Hybrid or NVDC.

What:		/sys/bus/platform/devices/INT3407:00/dptf_power/adapter_rating_mw
Date:		Jul, 2016
KernelVersion:	v4.10
Contact:	linux-acpi@vger.kernel.org
Description:
		(RO) Adapter rating in milliwatts (the maximum Adapter power).
		Must be 0 if no AC Adaptor is plugged in.

What:		/sys/bus/platform/devices/INT3407:00/dptf_power/max_platform_power_mw
Date:		Jul, 2016
KernelVersion:	v4.10
Contact:	linux-acpi@vger.kernel.org
Description:
		(RO) Maximum platform power that can be supported by the battery
		in milliwatts.

What:		/sys/bus/platform/devices/INT3407:00/dptf_power/platform_power_source
Date:		Jul, 2016
KernelVersion:	v4.10
Contact:	linux-acpi@vger.kernel.org
Description:
		(RO) Display the platform power source
		0x00 = DC
		0x01 = AC
		0x02 = USB
		0x03 = Wireless Charger

What:		/sys/bus/platform/devices/INT3407:00/dptf_power/battery_steady_power
Date:		Jul, 2016
KernelVersion:	v4.10
Contact:	linux-acpi@vger.kernel.org
Description:
		(RO) The maximum sustained power for battery in milliwatts.
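The platform_power_source values form a small fixed encoding; a C rendering of that table is a natural way to consume it (the enum and constant names are illustrative assumptions, not taken from the kernel):

    /* Values read from dptf_power/platform_power_source, per the table above.
     * The names below are illustrative, only the numeric values are ABI. */
    enum dptf_power_source {
        DPTF_POWER_SOURCE_DC       = 0x00,
        DPTF_POWER_SOURCE_AC       = 0x01,
        DPTF_POWER_SOURCE_USB      = 0x02,
        DPTF_POWER_SOURCE_WIRELESS = 0x03, /* wireless charger */
    };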
@@ -58,7 +58,12 @@ Like with atomic_t, the rule of thumb is:

 - RMW operations that have a return value are fully ordered.

-Except for test_and_set_bit_lock() which has ACQUIRE semantics and
+ - RMW operations that are conditional are unordered on FAILURE,
+   otherwise the above rules apply. In the case of test_and_{}_bit() operations,
+   if the bit in memory is unchanged by the operation then it is deemed to have
+   failed.
+
+Except for a successful test_and_set_bit_lock() which has ACQUIRE semantics and
 clear_bit_unlock() which has RELEASE semantics.

 Since a platform only has a single means of achieving atomic operations
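The ACQUIRE/RELEASE pairing called out above is exactly what makes a single bit usable as a lock. A minimal in-kernel-style sketch (the wrapper names and bit position are illustrative assumptions; test_and_set_bit_lock() and clear_bit_unlock() are the real kernel primitives):

    #include <linux/bitops.h>

    #define MY_LOCK_BIT 0 /* illustrative bit position */

    /* Spin until the bit is ours; test_and_set_bit_lock() gives ACQUIRE
     * ordering on success, so loads in the critical section cannot be
     * reordered before the lock is taken. */
    static void my_bit_lock(unsigned long *word)
    {
        while (test_and_set_bit_lock(MY_LOCK_BIT, word))
            cpu_relax(); /* usual busy-wait hint */
    }

    /* clear_bit_unlock() gives RELEASE ordering, so stores in the
     * critical section cannot be reordered past the unlock. */
    static void my_bit_unlock(unsigned long *word)
    {
        clear_bit_unlock(MY_LOCK_BIT, word);
    }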
Documentation/devicetree/bindings/power/mti,mips-cpc.txt | 8 (new file)
@@ -0,0 +1,8 @@
Binding for MIPS Cluster Power Controller (CPC).

This binding allows a system to specify where the CPC registers are
located.

Required properties:
compatible : Should be "mti,mips-cpc".
regs: Should describe the address & size of the CPC register region.
@@ -0,0 +1,62 @@
#
# Feature name:          membarrier-sync-core
#         Kconfig:       ARCH_HAS_MEMBARRIER_SYNC_CORE
#         description:   arch supports core serializing membarrier
#
# Architecture requirements
#
# * arm64
#
# Rely on eret context synchronization when returning from IPI handler, and
# when returning to user-space.
#
# * x86
#
# x86-32 uses IRET as return from interrupt, which takes care of the IPI.
# However, it uses both IRET and SYSEXIT to go back to user-space. The IRET
# instruction is core serializing, but not SYSEXIT.
#
# x86-64 uses IRET as return from interrupt, which takes care of the IPI.
# However, it can return to user-space through either SYSRETL (compat code),
# SYSRETQ, or IRET.
#
# Given that neither SYSRET{L,Q}, nor SYSEXIT, are core serializing, we rely
# instead on write_cr3() performed by switch_mm() to provide core serialization
# after changing the current mm, and deal with the special case of kthread ->
# uthread (temporarily keeping current mm into active_mm) by issuing a
# sync_core_before_usermode() in that specific case.
#
    -----------------------
    |         arch |status|
    -----------------------
    |       alpha: | TODO |
    |         arc: | TODO |
    |         arm: | TODO |
    |       arm64: |  ok  |
    |    blackfin: | TODO |
    |         c6x: | TODO |
    |        cris: | TODO |
    |         frv: | TODO |
    |       h8300: | TODO |
    |     hexagon: | TODO |
    |        ia64: | TODO |
    |        m32r: | TODO |
    |        m68k: | TODO |
    |       metag: | TODO |
    |  microblaze: | TODO |
    |        mips: | TODO |
    |     mn10300: | TODO |
    |       nios2: | TODO |
    |    openrisc: | TODO |
    |      parisc: | TODO |
    |     powerpc: | TODO |
    |        s390: | TODO |
    |       score: | TODO |
    |          sh: | TODO |
    |       sparc: | TODO |
    |        tile: | TODO |
    |          um: | TODO |
    |   unicore32: | TODO |
    |         x86: |  ok  |
    |      xtensa: | TODO |
    -----------------------
@@ -3,4 +3,4 @@
 ==================================

 .. kernel-doc:: drivers/gpu/drm/tve200/tve200_drv.c
-   :doc: Faraday TV Encoder 200
+   :doc: Faraday TV Encoder TVE200 DRM Driver
@@ -28,8 +28,10 @@ Supported adapters:
  * Intel Wildcat Point (PCH)
  * Intel Wildcat Point-LP (PCH)
  * Intel BayTrail (SOC)
+ * Intel Braswell (SOC)
  * Intel Sunrise Point-H (PCH)
  * Intel Sunrise Point-LP (PCH)
+ * Intel Kaby Lake-H (PCH)
  * Intel DNV (SOC)
  * Intel Broxton (SOC)
  * Intel Lewisburg (PCH)
@@ -21,37 +21,23 @@ Implementation
 --------------

 Mutexes are represented by 'struct mutex', defined in include/linux/mutex.h
-and implemented in kernel/locking/mutex.c. These locks use a three
-state atomic counter (->count) to represent the different possible
-transitions that can occur during the lifetime of a lock:
-
-	1: unlocked
-	0: locked, no waiters
-	negative: locked, with potential waiters
-
-In its most basic form it also includes a wait-queue and a spinlock
-that serializes access to it. CONFIG_SMP systems can also include
-a pointer to the lock task owner (->owner) as well as a spinner MCS
-lock (->osq), both described below in (ii).
+and implemented in kernel/locking/mutex.c. These locks use an atomic variable
+(->owner) to keep track of the lock state during its lifetime. Field owner
+actually contains 'struct task_struct *' to the current lock owner and it is
+therefore NULL if not currently owned. Since task_struct pointers are aligned
+at at least L1_CACHE_BYTES, low bits (3) are used to store extra state (e.g.,
+if waiter list is non-empty). In its most basic form it also includes a
+wait-queue and a spinlock that serializes access to it. Furthermore,
+CONFIG_MUTEX_SPIN_ON_OWNER=y systems use a spinner MCS lock (->osq), described
+below in (ii).

 When acquiring a mutex, there are three possible paths that can be
 taken, depending on the state of the lock:

-(i) fastpath: tries to atomically acquire the lock by decrementing the
-    counter. If it was already taken by another task it goes to the next
-    possible path. This logic is architecture specific. On x86-64, the
-    locking fastpath is 2 instructions:
-
-    0000000000000e10 <mutex_lock>:
-    e21:   f0 ff 0b         lock decl (%rbx)
-    e24:   79 08            jns    e2e <mutex_lock+0x1e>
-
-    the unlocking fastpath is equally tight:
-
-    0000000000000bc0 <mutex_unlock>:
-    bc8:   f0 ff 07         lock incl (%rdi)
-    bcb:   7f 0a            jg     bd7 <mutex_unlock+0x17>
-
+(i) fastpath: tries to atomically acquire the lock by cmpxchg()ing the owner with
+    the current task. This only works in the uncontended case (cmpxchg() checks
+    against 0UL, so all 3 state bits above have to be 0). If the lock is
+    contended it goes to the next possible path.

 (ii) midpath: aka optimistic spinning, tries to spin for acquisition
      while the lock owner is running and there are no other tasks ready
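The rewritten fastpath in (i) boils down to one compare-and-swap on ->owner. A minimal sketch of the idea in C11 atomics (the names, the flag layout, and the user-space setting are illustrative assumptions, not the kernel's exact code):

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Illustrative sketch of the cmpxchg() fastpath: the lock word holds a
     * task pointer whose low 3 bits carry flags, so an acquire can only
     * succeed when the whole word is 0 (unowned, no flag bits set). */
    struct mutex_sketch {
        _Atomic unsigned long owner; /* task pointer | flag bits */
    };

    static bool mutex_trylock_fastpath(struct mutex_sketch *m, unsigned long curr)
    {
        unsigned long expected = 0UL; /* unowned and no waiter bits */

        return atomic_compare_exchange_strong(&m->owner, &expected, curr);
    }

If the compare-and-swap fails, the caller falls through to the mid/slow paths described next, exactly as the text outlines.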
@@ -143,11 +129,10 @@ Test if the mutex is taken:
 Disadvantages
 -------------

-Unlike its original design and purpose, 'struct mutex' is larger than
-most locks in the kernel. E.g: on x86-64 it is 40 bytes, almost twice
-as large as 'struct semaphore' (24 bytes) and tied, along with rwsems,
-for the largest lock in the kernel. Larger structure sizes mean more
-CPU cache and memory footprint.
+Unlike its original design and purpose, 'struct mutex' is among the largest
+locks in the kernel. E.g: on x86-64 it is 32 bytes, where 'struct semaphore'
+is 24 bytes and rw_semaphore is 40 bytes. Larger structure sizes mean more CPU
+cache and memory footprint.

 When to use mutexes
 -------------------
@@ -13,6 +13,7 @@ The following technologies are described:
  * Generic Segmentation Offload - GSO
  * Generic Receive Offload - GRO
  * Partial Generic Segmentation Offload - GSO_PARTIAL
+ * SCTP accelleration with GSO - GSO_BY_FRAGS

 TCP Segmentation Offload
 ========================
@@ -49,6 +50,10 @@ datagram into multiple IPv4 fragments. Many of the requirements for UDP
 fragmentation offload are the same as TSO. However the IPv4 ID for
 fragments should not increment as a single IPv4 datagram is fragmented.

+UFO is deprecated: modern kernels will no longer generate UFO skbs, but can
+still receive them from tuntap and similar devices. Offload of UDP-based
+tunnel protocols is still supported.
+
 IPIP, SIT, GRE, UDP Tunnel, and Remote Checksum Offloads
 ========================================================

@@ -83,10 +88,10 @@ SKB_GSO_UDP_TUNNEL_CSUM. These two additional tunnel types reflect the
 fact that the outer header also requests to have a non-zero checksum
 included in the outer header.

-Finally there is SKB_GSO_REMCSUM which indicates that a given tunnel header
-has requested a remote checksum offload. In this case the inner headers
-will be left with a partial checksum and only the outer header checksum
-will be computed.
+Finally there is SKB_GSO_TUNNEL_REMCSUM which indicates that a given tunnel
+header has requested a remote checksum offload. In this case the inner
+headers will be left with a partial checksum and only the outer header
+checksum will be computed.

 Generic Segmentation Offload
 ============================
@@ -128,3 +133,28 @@ values for if the header was simply duplicated. The one exception to this
 is the outer IPv4 ID field. It is up to the device drivers to guarantee
 that the IPv4 ID field is incremented in the case that a given header does
 not have the DF bit set.
+
+SCTP accelleration with GSO
+===========================
+
+SCTP - despite the lack of hardware support - can still take advantage of
+GSO to pass one large packet through the network stack, rather than
+multiple small packets.
+
+This requires a different approach to other offloads, as SCTP packets
+cannot be just segmented to (P)MTU. Rather, the chunks must be contained in
+IP segments, padding respected. So unlike regular GSO, SCTP can't just
+generate a big skb, set gso_size to the fragmentation point and deliver it
+to IP layer.
+
+Instead, the SCTP protocol layer builds an skb with the segments correctly
+padded and stored as chained skbs, and skb_segment() splits based on those.
+To signal this, gso_size is set to the special value GSO_BY_FRAGS.
+
+Therefore, any code in the core networking stack must be aware of the
+possibility that gso_size will be GSO_BY_FRAGS and handle that case
+appropriately. (For size checks, the skb_gso_validate_*_len family of
+helpers do this automatically.)
+
+This also affects drivers with the NETIF_F_FRAGLIST & NETIF_F_GSO_SCTP bits
+set. Note also that NETIF_F_GSO_SCTP is included in NETIF_F_GSO_SOFTWARE.
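As a concrete illustration of the "must be aware" requirement, core-stack size logic typically guards itself along these lines (a hedged sketch built from existing helpers; the wrapper function name is an illustrative assumption):

    #include <linux/skbuff.h>

    /* Illustrative helper: GSO_BY_FRAGS is a reserved gso_size value, so it
     * must never be fed into arithmetic that expects a real segment size. */
    static bool skb_gso_size_is_real(const struct sk_buff *skb)
    {
        return skb_is_gso(skb) &&
               skb_shinfo(skb)->gso_size != GSO_BY_FRAGS;
    }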
@@ -108,7 +108,7 @@ The topology of a system is described in the units of:

     The number of online threads is also printed in /proc/cpuinfo "siblings."

-  - topology_sibling_mask():
+  - topology_sibling_cpumask():

     The cpumask contains all online threads in the core to which a thread
     belongs.
@@ -7909,7 +7909,6 @@ S:	Maintained
 F:	scripts/leaking_addresses.pl

 LED SUBSYSTEM
-M:	Richard Purdie <rpurdie@rpsys.net>
 M:	Jacek Anaszewski <jacek.anaszewski@gmail.com>
 M:	Pavel Machek <pavel@ucw.cz>
 L:	linux-leds@vger.kernel.org
@@ -9213,6 +9212,7 @@ MIPS GENERIC PLATFORM
 M:	Paul Burton <paul.burton@mips.com>
 L:	linux-mips@linux-mips.org
 S:	Supported
+F:	Documentation/devicetree/bindings/power/mti,mips-cpc.txt
 F:	arch/mips/generic/
 F:	arch/mips/tools/generic-board-config.sh
@@ -9952,6 +9952,7 @@ F:	drivers/nfc/nxp-nci

 OBJTOOL
 M:	Josh Poimboeuf <jpoimboe@redhat.com>
+M:	Peter Zijlstra <peterz@infradead.org>
 S:	Supported
 F:	tools/objtool/
Makefile | 2
@@ -2,7 +2,7 @@
 VERSION = 4
 PATCHLEVEL = 16
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc3
 NAME = Fearless Coyote

 # *DOCUMENTATION*
@@ -6,7 +6,6 @@
  * Atomic exchange routines.
  */

-#define __ASM__MB
 #define ____xchg(type, args...)	__xchg ## type ## _local(args)
 #define ____cmpxchg(type, args...)	__cmpxchg ## type ## _local(args)
 #include <asm/xchg.h>
@@ -33,10 +32,6 @@
 	cmpxchg_local((ptr), (o), (n));					\
 })

-#ifdef CONFIG_SMP
-#undef __ASM__MB
-#define __ASM__MB	"\tmb\n"
-#endif
 #undef ____xchg
 #undef ____cmpxchg
 #define ____xchg(type, args...)	__xchg ##type(args)
@@ -64,7 +59,6 @@
 	cmpxchg((ptr), (o), (n));					\
 })

-#undef __ASM__MB
 #undef ____cmpxchg

 #endif /* _ALPHA_CMPXCHG_H */
@@ -12,6 +12,10 @@
  * Atomic exchange.
  * Since it can be used to implement critical sections
  * it must clobber "memory" (also for interrupts in UP).
+ *
+ * The leading and the trailing memory barriers guarantee that these
+ * operations are fully ordered.
+ *
  */

 static inline unsigned long
@@ -19,6 +23,7 @@ ____xchg(_u8, volatile char *m, unsigned long val)
 {
 	unsigned long ret, tmp, addr64;

+	smp_mb();
 	__asm__ __volatile__(
 	"	andnot	%4,7,%3\n"
 	"	insbl	%1,%4,%1\n"
@@ -28,12 +33,12 @@ ____xchg(_u8, volatile char *m, unsigned long val)
 	"	or	%1,%2,%2\n"
 	"	stq_c	%2,0(%3)\n"
 	"	beq	%2,2f\n"
-		__ASM__MB
 	".subsection 2\n"
 	"2:	br	1b\n"
 	".previous"
 	: "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
 	: "r" ((long)m), "1" (val) : "memory");
+	smp_mb();

 	return ret;
 }
@@ -43,6 +48,7 @@ ____xchg(_u16, volatile short *m, unsigned long val)
 {
 	unsigned long ret, tmp, addr64;

+	smp_mb();
 	__asm__ __volatile__(
 	"	andnot	%4,7,%3\n"
 	"	inswl	%1,%4,%1\n"
@@ -52,12 +58,12 @@ ____xchg(_u16, volatile short *m, unsigned long val)
 	"	or	%1,%2,%2\n"
 	"	stq_c	%2,0(%3)\n"
 	"	beq	%2,2f\n"
-		__ASM__MB
 	".subsection 2\n"
 	"2:	br	1b\n"
 	".previous"
 	: "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64)
 	: "r" ((long)m), "1" (val) : "memory");
+	smp_mb();

 	return ret;
 }
@@ -67,17 +73,18 @@ ____xchg(_u32, volatile int *m, unsigned long val)
 {
 	unsigned long dummy;

+	smp_mb();
 	__asm__ __volatile__(
 	"1:	ldl_l %0,%4\n"
 	"	bis $31,%3,%1\n"
 	"	stl_c %1,%2\n"
 	"	beq %1,2f\n"
-		__ASM__MB
 	".subsection 2\n"
 	"2:	br 1b\n"
 	".previous"
 	: "=&r" (val), "=&r" (dummy), "=m" (*m)
 	: "rI" (val), "m" (*m) : "memory");
+	smp_mb();

 	return val;
 }
@@ -87,17 +94,18 @@ ____xchg(_u64, volatile long *m, unsigned long val)
 {
 	unsigned long dummy;

+	smp_mb();
 	__asm__ __volatile__(
 	"1:	ldq_l %0,%4\n"
 	"	bis $31,%3,%1\n"
 	"	stq_c %1,%2\n"
 	"	beq %1,2f\n"
-		__ASM__MB
 	".subsection 2\n"
 	"2:	br 1b\n"
 	".previous"
 	: "=&r" (val), "=&r" (dummy), "=m" (*m)
 	: "rI" (val), "m" (*m) : "memory");
+	smp_mb();

 	return val;
 }
@@ -128,10 +136,12 @@ ____xchg(, volatile void *ptr, unsigned long x, int size)
  * store NEW in MEM.  Return the initial value in MEM.  Success is
  * indicated by comparing RETURN with OLD.
  *
- * The memory barrier should be placed in SMP only when we actually
- * make the change. If we don't change anything (so if the returned
- * prev is equal to old) then we aren't acquiring anything new and
- * we don't need any memory barrier as far I can tell.
+ * The leading and the trailing memory barriers guarantee that these
+ * operations are fully ordered.
+ *
+ * The trailing memory barrier is placed in SMP unconditionally, in
+ * order to guarantee that dependency ordering is preserved when a
+ * dependency is headed by an unsuccessful operation.
  */

 static inline unsigned long
@@ -139,6 +149,7 @@ ____cmpxchg(_u8, volatile char *m, unsigned char old, unsigned char new)
 {
 	unsigned long prev, tmp, cmp, addr64;

+	smp_mb();
 	__asm__ __volatile__(
 	"	andnot	%5,7,%4\n"
 	"	insbl	%1,%5,%1\n"
@@ -150,13 +161,13 @@ ____cmpxchg(_u8, volatile char *m, unsigned char old, unsigned char new)
 	"	or	%1,%2,%2\n"
 	"	stq_c	%2,0(%4)\n"
 	"	beq	%2,3f\n"
-		__ASM__MB
 	"2:\n"
 	".subsection 2\n"
 	"3:	br	1b\n"
 	".previous"
 	: "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
 	: "r" ((long)m), "Ir" (old), "1" (new) : "memory");
+	smp_mb();

 	return prev;
 }
@@ -166,6 +177,7 @@ ____cmpxchg(_u16, volatile short *m, unsigned short old, unsigned short new)
 {
 	unsigned long prev, tmp, cmp, addr64;

+	smp_mb();
 	__asm__ __volatile__(
 	"	andnot	%5,7,%4\n"
 	"	inswl	%1,%5,%1\n"
@@ -177,13 +189,13 @@ ____cmpxchg(_u16, volatile short *m, unsigned short old, unsigned short new)
 	"	or	%1,%2,%2\n"
 	"	stq_c	%2,0(%4)\n"
 	"	beq	%2,3f\n"
-		__ASM__MB
 	"2:\n"
 	".subsection 2\n"
 	"3:	br	1b\n"
 	".previous"
 	: "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64)
 	: "r" ((long)m), "Ir" (old), "1" (new) : "memory");
+	smp_mb();

 	return prev;
 }
@@ -193,6 +205,7 @@ ____cmpxchg(_u32, volatile int *m, int old, int new)
 {
 	unsigned long prev, cmp;

+	smp_mb();
 	__asm__ __volatile__(
 	"1:	ldl_l %0,%5\n"
 	"	cmpeq %0,%3,%1\n"
@@ -200,13 +213,13 @@ ____cmpxchg(_u32, volatile int *m, int old, int new)
 	"	mov %4,%1\n"
 	"	stl_c %1,%2\n"
 	"	beq %1,3f\n"
-		__ASM__MB
 	"2:\n"
 	".subsection 2\n"
 	"3:	br 1b\n"
 	".previous"
 	: "=&r"(prev), "=&r"(cmp), "=m"(*m)
 	: "r"((long) old), "r"(new), "m"(*m) : "memory");
+	smp_mb();

 	return prev;
 }
@@ -216,6 +229,7 @@ ____cmpxchg(_u64, volatile long *m, unsigned long old, unsigned long new)
 {
 	unsigned long prev, cmp;

+	smp_mb();
 	__asm__ __volatile__(
 	"1:	ldq_l %0,%5\n"
 	"	cmpeq %0,%3,%1\n"
@@ -223,13 +237,13 @@ ____cmpxchg(_u64, volatile long *m, unsigned long old, unsigned long new)
 	"	mov %4,%1\n"
 	"	stq_c %1,%2\n"
 	"	beq %1,3f\n"
-		__ASM__MB
 	"2:\n"
 	".subsection 2\n"
 	"3:	br 1b\n"
 	".previous"
 	: "=&r"(prev), "=&r"(cmp), "=m"(*m)
 	: "r"((long) old), "r"(new), "m"(*m) : "memory");
+	smp_mb();

 	return prev;
 }
@@ -23,7 +23,8 @@ void die(const char *str, struct pt_regs *regs, unsigned long address);

 #define BUG()	do {								\
 	pr_warn("BUG: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); \
 	dump_stack();								\
+	barrier_before_unreachable();						\
 	__builtin_trap();							\
 } while (0)

 #define HAVE_ARCH_BUG
@@ -373,7 +373,7 @@ static void arc_chk_core_config(void)
 {
 	struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
 	int saved = 0, present = 0;
-	char *opt_nm = NULL;;
+	char *opt_nm = NULL;

 	if (!cpu->extn.timer0)
 		panic("Timer0 is not present!\n");
@@ -366,7 +366,7 @@ static void init_unwind_hdr(struct unwind_table *table,
 		return;

 ret_err:
-	panic("Attention !!! Dwarf FDE parsing errors\n");;
+	panic("Attention !!! Dwarf FDE parsing errors\n");
 }

 #ifdef CONFIG_MODULES
@@ -83,7 +83,7 @@ static void dummy_clock_access(struct timespec64 *ts)
 }

 static clock_access_fn __read_persistent_clock = dummy_clock_access;
-static clock_access_fn __read_boot_clock = dummy_clock_access;;
+static clock_access_fn __read_boot_clock = dummy_clock_access;

 void read_persistent_clock64(struct timespec64 *ts)
 {
@@ -23,7 +23,6 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_platform.h>
-#include <linux/perf/arm_pmu.h>
 #include <linux/regulator/machine.h>

 #include <asm/outercache.h>
@@ -112,37 +111,6 @@ static void ux500_restart(enum reboot_mode mode, const char *cmd)
 	prcmu_system_reset(0);
 }

-/*
- * The PMU IRQ lines of two cores are wired together into a single interrupt.
- * Bounce the interrupt to the other core if it's not ours.
- */
-static irqreturn_t db8500_pmu_handler(int irq, void *dev, irq_handler_t handler)
-{
-	irqreturn_t ret = handler(irq, dev);
-	int other = !smp_processor_id();
-
-	if (ret == IRQ_NONE && cpu_online(other))
-		irq_set_affinity(irq, cpumask_of(other));
-
-	/*
-	 * We should be able to get away with the amount of IRQ_NONEs we give,
-	 * while still having the spurious IRQ detection code kick in if the
-	 * interrupt really starts hitting spuriously.
-	 */
-	return ret;
-}
-
-static struct arm_pmu_platdata db8500_pmu_platdata = {
-	.handle_irq		= db8500_pmu_handler,
-	.irq_flags		= IRQF_NOBALANCING | IRQF_NO_THREAD,
-};
-
 static struct of_dev_auxdata u8500_auxdata_lookup[] __initdata = {
-	/* Requires call-back bindings. */
-	OF_DEV_AUXDATA("arm,cortex-a9-pmu", 0, "arm-pmu", &db8500_pmu_platdata),
-	{},
-};
-
-static struct of_dev_auxdata u8540_auxdata_lookup[] __initdata = {
 	OF_DEV_AUXDATA("stericsson,db8500-prcmu", 0x80157000, "db8500-prcmu", NULL),
 	{},
 };
@@ -165,9 +133,6 @@ static void __init u8500_init_machine(void)
-	if (of_machine_is_compatible("st-ericsson,u8540"))
-		of_platform_populate(NULL, u8500_local_bus_nodes,
-				u8540_auxdata_lookup, NULL);
-	else
 		of_platform_populate(NULL, u8500_local_bus_nodes,
 				u8500_auxdata_lookup, NULL);
 }

 static const char * stericsson_dt_platform_compat[] = {
@@ -20,7 +20,7 @@

 #define MPIDR_UP_BITMASK	(0x1 << 30)
 #define MPIDR_MT_BITMASK	(0x1 << 24)
-#define MPIDR_HWID_BITMASK	0xff00ffffff
+#define MPIDR_HWID_BITMASK	UL(0xff00ffffff)

 #define MPIDR_LEVEL_BITS_SHIFT	3
 #define MPIDR_LEVEL_BITS	(1 << MPIDR_LEVEL_BITS_SHIFT)
@@ -22,7 +22,7 @@

 static inline pte_t huge_ptep_get(pte_t *ptep)
 {
-	return *ptep;
+	return READ_ONCE(*ptep);
 }
@@ -185,42 +185,42 @@ static inline pmd_t kvm_s2pmd_mkexec(pmd_t pmd)
 	return pmd;
 }

-static inline void kvm_set_s2pte_readonly(pte_t *pte)
+static inline void kvm_set_s2pte_readonly(pte_t *ptep)
 {
 	pteval_t old_pteval, pteval;

-	pteval = READ_ONCE(pte_val(*pte));
+	pteval = READ_ONCE(pte_val(*ptep));
 	do {
 		old_pteval = pteval;
 		pteval &= ~PTE_S2_RDWR;
 		pteval |= PTE_S2_RDONLY;
-		pteval = cmpxchg_relaxed(&pte_val(*pte), old_pteval, pteval);
+		pteval = cmpxchg_relaxed(&pte_val(*ptep), old_pteval, pteval);
 	} while (pteval != old_pteval);
 }

-static inline bool kvm_s2pte_readonly(pte_t *pte)
+static inline bool kvm_s2pte_readonly(pte_t *ptep)
 {
-	return (pte_val(*pte) & PTE_S2_RDWR) == PTE_S2_RDONLY;
+	return (READ_ONCE(pte_val(*ptep)) & PTE_S2_RDWR) == PTE_S2_RDONLY;
 }

-static inline bool kvm_s2pte_exec(pte_t *pte)
+static inline bool kvm_s2pte_exec(pte_t *ptep)
 {
-	return !(pte_val(*pte) & PTE_S2_XN);
+	return !(READ_ONCE(pte_val(*ptep)) & PTE_S2_XN);
 }

-static inline void kvm_set_s2pmd_readonly(pmd_t *pmd)
+static inline void kvm_set_s2pmd_readonly(pmd_t *pmdp)
 {
-	kvm_set_s2pte_readonly((pte_t *)pmd);
+	kvm_set_s2pte_readonly((pte_t *)pmdp);
 }

-static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
+static inline bool kvm_s2pmd_readonly(pmd_t *pmdp)
 {
-	return kvm_s2pte_readonly((pte_t *)pmd);
+	return kvm_s2pte_readonly((pte_t *)pmdp);
 }

-static inline bool kvm_s2pmd_exec(pmd_t *pmd)
+static inline bool kvm_s2pmd_exec(pmd_t *pmdp)
 {
-	return !(pmd_val(*pmd) & PMD_S2_XN);
+	return !(READ_ONCE(pmd_val(*pmdp)) & PMD_S2_XN);
 }

 static inline bool kvm_page_empty(void *ptr)
@@ -141,13 +141,13 @@ static inline void cpu_install_idmap(void)
  * Atomically replaces the active TTBR1_EL1 PGD with a new VA-compatible PGD,
  * avoiding the possibility of conflicting TLB entries being allocated.
  */
-static inline void cpu_replace_ttbr1(pgd_t *pgd)
+static inline void cpu_replace_ttbr1(pgd_t *pgdp)
 {
 	typedef void (ttbr_replace_func)(phys_addr_t);
 	extern ttbr_replace_func idmap_cpu_replace_ttbr1;
 	ttbr_replace_func *replace_phys;

-	phys_addr_t pgd_phys = virt_to_phys(pgd);
+	phys_addr_t pgd_phys = virt_to_phys(pgdp);

 	replace_phys = (void *)__pa_symbol(idmap_cpu_replace_ttbr1);
@@ -36,23 +36,23 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 	return (pmd_t *)__get_free_page(PGALLOC_GFP);
 }

-static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmdp)
 {
-	BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
-	free_page((unsigned long)pmd);
+	BUG_ON((unsigned long)pmdp & (PAGE_SIZE-1));
+	free_page((unsigned long)pmdp);
 }

-static inline void __pud_populate(pud_t *pud, phys_addr_t pmd, pudval_t prot)
+static inline void __pud_populate(pud_t *pudp, phys_addr_t pmdp, pudval_t prot)
 {
-	set_pud(pud, __pud(__phys_to_pud_val(pmd) | prot));
+	set_pud(pudp, __pud(__phys_to_pud_val(pmdp) | prot));
 }

-static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+static inline void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmdp)
 {
-	__pud_populate(pud, __pa(pmd), PMD_TYPE_TABLE);
+	__pud_populate(pudp, __pa(pmdp), PMD_TYPE_TABLE);
 }
 #else
-static inline void __pud_populate(pud_t *pud, phys_addr_t pmd, pudval_t prot)
+static inline void __pud_populate(pud_t *pudp, phys_addr_t pmdp, pudval_t prot)
 {
 	BUILD_BUG();
 }
@@ -65,30 +65,30 @@ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 	return (pud_t *)__get_free_page(PGALLOC_GFP);
 }

-static inline void pud_free(struct mm_struct *mm, pud_t *pud)
+static inline void pud_free(struct mm_struct *mm, pud_t *pudp)
 {
-	BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
-	free_page((unsigned long)pud);
+	BUG_ON((unsigned long)pudp & (PAGE_SIZE-1));
+	free_page((unsigned long)pudp);
 }

-static inline void __pgd_populate(pgd_t *pgdp, phys_addr_t pud, pgdval_t prot)
+static inline void __pgd_populate(pgd_t *pgdp, phys_addr_t pudp, pgdval_t prot)
 {
-	set_pgd(pgdp, __pgd(__phys_to_pgd_val(pud) | prot));
+	set_pgd(pgdp, __pgd(__phys_to_pgd_val(pudp) | prot));
 }

-static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
+static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgdp, pud_t *pudp)
 {
-	__pgd_populate(pgd, __pa(pud), PUD_TYPE_TABLE);
+	__pgd_populate(pgdp, __pa(pudp), PUD_TYPE_TABLE);
 }
 #else
-static inline void __pgd_populate(pgd_t *pgdp, phys_addr_t pud, pgdval_t prot)
+static inline void __pgd_populate(pgd_t *pgdp, phys_addr_t pudp, pgdval_t prot)
 {
 	BUILD_BUG();
 }
 #endif	/* CONFIG_PGTABLE_LEVELS > 3 */

 extern pgd_t *pgd_alloc(struct mm_struct *mm);
-extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
+extern void pgd_free(struct mm_struct *mm, pgd_t *pgdp);

 static inline pte_t *
 pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
@@ -114,10 +114,10 @@ pte_alloc_one(struct mm_struct *mm, unsigned long addr)
 /*
  * Free a PTE table.
  */
-static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *ptep)
 {
-	if (pte)
-		free_page((unsigned long)pte);
+	if (ptep)
+		free_page((unsigned long)ptep);
 }

 static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
@@ -126,10 +126,10 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
 	__free_page(pte);
 }

-static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
+static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t ptep,
 				  pmdval_t prot)
 {
-	set_pmd(pmdp, __pmd(__phys_to_pmd_val(pte) | prot));
+	set_pmd(pmdp, __pmd(__phys_to_pmd_val(ptep) | prot));
 }

 /*
@@ -218,7 +218,7 @@ static inline pmd_t pmd_mkcont(pmd_t pmd)

 static inline void set_pte(pte_t *ptep, pte_t pte)
 {
-	*ptep = pte;
+	WRITE_ONCE(*ptep, pte);

 	/*
 	 * Only if the new pte is valid and kernel, otherwise TLB maintenance
@@ -250,6 +250,8 @@ extern void __sync_icache_dcache(pte_t pteval, unsigned long addr);
 static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 			      pte_t *ptep, pte_t pte)
 {
+	pte_t old_pte;
+
 	if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte))
 		__sync_icache_dcache(pte, addr);

@@ -258,14 +260,15 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 	 * hardware updates of the pte (ptep_set_access_flags safely changes
 	 * valid ptes without going through an invalid entry).
 	 */
-	if (IS_ENABLED(CONFIG_DEBUG_VM) && pte_valid(*ptep) && pte_valid(pte) &&
+	old_pte = READ_ONCE(*ptep);
+	if (IS_ENABLED(CONFIG_DEBUG_VM) && pte_valid(old_pte) && pte_valid(pte) &&
 	    (mm == current->active_mm || atomic_read(&mm->mm_users) > 1)) {
 		VM_WARN_ONCE(!pte_young(pte),
 			     "%s: racy access flag clearing: 0x%016llx -> 0x%016llx",
-			     __func__, pte_val(*ptep), pte_val(pte));
-		VM_WARN_ONCE(pte_write(*ptep) && !pte_dirty(pte),
+			     __func__, pte_val(old_pte), pte_val(pte));
+		VM_WARN_ONCE(pte_write(old_pte) && !pte_dirty(pte),
 			     "%s: racy dirty state clearing: 0x%016llx -> 0x%016llx",
-			     __func__, pte_val(*ptep), pte_val(pte));
+			     __func__, pte_val(old_pte), pte_val(pte));
 	}

 	set_pte(ptep, pte);
@@ -431,7 +434,7 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,

 static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
 {
-	*pmdp = pmd;
+	WRITE_ONCE(*pmdp, pmd);
 	dsb(ishst);
 	isb();
 }
@@ -482,7 +485,7 @@ static inline phys_addr_t pmd_page_paddr(pmd_t pmd)

 static inline void set_pud(pud_t *pudp, pud_t pud)
 {
-	*pudp = pud;
+	WRITE_ONCE(*pudp, pud);
 	dsb(ishst);
 	isb();
 }
@@ -500,7 +503,7 @@ static inline phys_addr_t pud_page_paddr(pud_t pud)
 /* Find an entry in the second-level page table. */
 #define pmd_index(addr)		(((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))

-#define pmd_offset_phys(dir, addr)	(pud_page_paddr(*(dir)) + pmd_index(addr) * sizeof(pmd_t))
+#define pmd_offset_phys(dir, addr)	(pud_page_paddr(READ_ONCE(*(dir))) + pmd_index(addr) * sizeof(pmd_t))
 #define pmd_offset(dir, addr)		((pmd_t *)__va(pmd_offset_phys((dir), (addr))))

 #define pmd_set_fixmap(addr)		((pmd_t *)set_fixmap_offset(FIX_PMD, addr))
@@ -535,7 +538,7 @@ static inline phys_addr_t pud_page_paddr(pud_t pud)

 static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
 {
-	*pgdp = pgd;
+	WRITE_ONCE(*pgdp, pgd);
 	dsb(ishst);
 }

@@ -552,7 +555,7 @@ static inline phys_addr_t pgd_page_paddr(pgd_t pgd)
 /* Find an entry in the frst-level page table. */
 #define pud_index(addr)		(((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))

-#define pud_offset_phys(dir, addr)	(pgd_page_paddr(*(dir)) + pud_index(addr) * sizeof(pud_t))
+#define pud_offset_phys(dir, addr)	(pgd_page_paddr(READ_ONCE(*(dir))) + pud_index(addr) * sizeof(pud_t))
 #define pud_offset(dir, addr)		((pud_t *)__va(pud_offset_phys((dir), (addr))))

 #define pud_set_fixmap(addr)		((pud_t *)set_fixmap_offset(FIX_PUD, addr))
@@ -28,7 +28,7 @@ struct stackframe {
 	unsigned long fp;
 	unsigned long pc;
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	unsigned int graph;
+	int graph;
 #endif
 };
@@ -72,15 +72,15 @@ static inline void set_fs(mm_segment_t fs)
  * This is equivalent to the following test:
  * (u65)addr + (u65)size <= (u65)current->addr_limit + 1
  */
-static inline unsigned long __range_ok(unsigned long addr, unsigned long size)
+static inline unsigned long __range_ok(const void __user *addr, unsigned long size)
 {
-	unsigned long limit = current_thread_info()->addr_limit;
+	unsigned long ret, limit = current_thread_info()->addr_limit;

+	__chk_user_ptr(addr);
 	asm volatile(
 	// A + B <= C + 1 for all A,B,C, in four easy steps:
 	// 1: X = A + B; X' = X % 2^64
-	"	adds	%0, %0, %2\n"
+	"	adds	%0, %3, %2\n"
 	// 2: Set C = 0 if X > 2^64, to guarantee X' > C in step 4
 	"	csel	%1, xzr, %1, hi\n"
 	// 3: Set X' = ~0 if X >= 2^64. For X == 2^64, this decrements X'
@@ -92,9 +92,9 @@ static inline unsigned long __range_ok(unsigned long addr, unsigned long size)
 	// testing X' - C == 0, subject to the previous adjustments.
 	"	sbcs	xzr, %0, %1\n"
 	"	cset	%0, ls\n"
-	: "+r" (addr), "+r" (limit) : "Ir" (size) : "cc");
+	: "=&r" (ret), "+r" (limit) : "Ir" (size), "0" (addr) : "cc");

-	return addr;
+	return ret;
 }

 /*
@@ -104,7 +104,7 @@ static inline unsigned long __range_ok(unsigned long addr, unsigned long size)
 */
 #define untagged_addr(addr)		sign_extend64(addr, 55)

-#define access_ok(type, addr, size)	__range_ok((unsigned long)(addr), size)
+#define access_ok(type, addr, size)	__range_ok(addr, size)
 #define user_addr_max			get_fs

 #define _ASM_EXTABLE(from, to)						\
@@ -370,6 +370,7 @@ static unsigned int __kprobes aarch32_check_condition(u32 opcode, u32 psr)
 static int swp_handler(struct pt_regs *regs, u32 instr)
 {
 	u32 destreg, data, type, address = 0;
+	const void __user *user_ptr;
 	int rn, rt2, res = 0;

 	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->pc);
@@ -401,7 +402,8 @@ static int swp_handler(struct pt_regs *regs, u32 instr)
 		aarch32_insn_extract_reg_num(instr, A32_RT2_OFFSET), data);

 	/* Check access in reasonable access range for both SWP and SWPB */
-	if (!access_ok(VERIFY_WRITE, (address & ~3), 4)) {
+	user_ptr = (const void __user *)(unsigned long)(address & ~3);
+	if (!access_ok(VERIFY_WRITE, user_ptr, 4)) {
 		pr_debug("SWP{B} emulation: access to 0x%08x not allowed!\n",
 			address);
 		goto fault;
@@ -406,6 +406,15 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 		.capability = ARM64_HARDEN_BP_POST_GUEST_EXIT,
 		MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
 	},
+	{
+		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+		MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
+		.enable = qcom_enable_link_stack_sanitization,
+	},
+	{
+		.capability = ARM64_HARDEN_BP_POST_GUEST_EXIT,
+		MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
+	},
 	{
 		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
 		MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
@@ -199,9 +199,11 @@ static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = {
 };

 static const struct arm64_ftr_bits ftr_ctr[] = {
-	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 31, 1, 1),		/* RAO */
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 31, 1, 1),		/* RES1 */
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 29, 1, 1),	/* DIC */
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 28, 1, 1),	/* IDC */
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, 24, 4, 0),	/* CWG */
-	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),	/* ERG */
+	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_SAFE, 20, 4, 0),	/* ERG */
 	ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 1),	/* DminLine */
 	/*
 	 * Linux can handle differing I-cache policies. Userspace JITs will
@@ -90,7 +90,7 @@ static int __init set_permissions(pte_t *ptep, pgtable_t token,
 				  unsigned long addr, void *data)
 {
 	efi_memory_desc_t *md = data;
-	pte_t pte = *ptep;
+	pte_t pte = READ_ONCE(*ptep);

 	if (md->attribute & EFI_MEMORY_RO)
 		pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
@@ -202,10 +202,10 @@ static int create_safe_exec_page(void *src_start, size_t length,
 				 gfp_t mask)
 {
 	int rc = 0;
-	pgd_t *pgd;
-	pud_t *pud;
-	pmd_t *pmd;
-	pte_t *pte;
+	pgd_t *pgdp;
+	pud_t *pudp;
+	pmd_t *pmdp;
+	pte_t *ptep;
 	unsigned long dst = (unsigned long)allocator(mask);

 	if (!dst) {
@@ -216,38 +216,38 @@ static int create_safe_exec_page(void *src_start, size_t length,
 	memcpy((void *)dst, src_start, length);
 	flush_icache_range(dst, dst + length);

-	pgd = pgd_offset_raw(allocator(mask), dst_addr);
-	if (pgd_none(*pgd)) {
-		pud = allocator(mask);
-		if (!pud) {
+	pgdp = pgd_offset_raw(allocator(mask), dst_addr);
+	if (pgd_none(READ_ONCE(*pgdp))) {
+		pudp = allocator(mask);
+		if (!pudp) {
 			rc = -ENOMEM;
 			goto out;
 		}
-		pgd_populate(&init_mm, pgd, pud);
+		pgd_populate(&init_mm, pgdp, pudp);
 	}

-	pud = pud_offset(pgd, dst_addr);
-	if (pud_none(*pud)) {
-		pmd = allocator(mask);
-		if (!pmd) {
+	pudp = pud_offset(pgdp, dst_addr);
+	if (pud_none(READ_ONCE(*pudp))) {
+		pmdp = allocator(mask);
+		if (!pmdp) {
 			rc = -ENOMEM;
 			goto out;
 		}
-		pud_populate(&init_mm, pud, pmd);
+		pud_populate(&init_mm, pudp, pmdp);
 	}

-	pmd = pmd_offset(pud, dst_addr);
-	if (pmd_none(*pmd)) {
-		pte = allocator(mask);
-		if (!pte) {
+	pmdp = pmd_offset(pudp, dst_addr);
+	if (pmd_none(READ_ONCE(*pmdp))) {
+		ptep = allocator(mask);
+		if (!ptep) {
 			rc = -ENOMEM;
 			goto out;
 		}
-		pmd_populate_kernel(&init_mm, pmd, pte);
+		pmd_populate_kernel(&init_mm, pmdp, ptep);
 	}

-	pte = pte_offset_kernel(pmd, dst_addr);
-	set_pte(pte, pfn_pte(virt_to_pfn(dst), PAGE_KERNEL_EXEC));
+	ptep = pte_offset_kernel(pmdp, dst_addr);
+	set_pte(ptep, pfn_pte(virt_to_pfn(dst), PAGE_KERNEL_EXEC));

 	/*
 	 * Load our new page tables. A strict BBM approach requires that we
@@ -263,7 +263,7 @@ static int create_safe_exec_page(void *src_start, size_t length,
 	 */
 	cpu_set_reserved_ttbr0();
 	local_flush_tlb_all();
-	write_sysreg(phys_to_ttbr(virt_to_phys(pgd)), ttbr0_el1);
+	write_sysreg(phys_to_ttbr(virt_to_phys(pgdp)), ttbr0_el1);
 	isb();

 	*phys_dst_addr = virt_to_phys((void *)dst);
@@ -320,9 +320,9 @@ int swsusp_arch_suspend(void)
 	return ret;
 }

-static void _copy_pte(pte_t *dst_pte, pte_t *src_pte, unsigned long addr)
+static void _copy_pte(pte_t *dst_ptep, pte_t *src_ptep, unsigned long addr)
 {
-	pte_t pte = *src_pte;
+	pte_t pte = READ_ONCE(*src_ptep);

 	if (pte_valid(pte)) {
 		/*
@@ -330,7 +330,7 @@ static void _copy_pte(pte_t *dst_pte, pte_t *src_pte, unsigned long addr)
 		 * read only (code, rodata). Clear the RDONLY bit from
 		 * the temporary mappings we use during restore.
 		 */
-		set_pte(dst_pte, pte_mkwrite(pte));
+		set_pte(dst_ptep, pte_mkwrite(pte));
 	} else if (debug_pagealloc_enabled() && !pte_none(pte)) {
 		/*
 		 * debug_pagealloc will removed the PTE_VALID bit if
@@ -343,112 +343,116 @@ static void _copy_pte(pte_t *dst_pte, pte_t *src_pte, unsigned long addr)
 		 */
 		BUG_ON(!pfn_valid(pte_pfn(pte)));

-		set_pte(dst_pte, pte_mkpresent(pte_mkwrite(pte)));
+		set_pte(dst_ptep, pte_mkpresent(pte_mkwrite(pte)));
 	}
 }

-static int copy_pte(pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long start,
+static int copy_pte(pmd_t *dst_pmdp, pmd_t *src_pmdp, unsigned long start,
 		    unsigned long end)
 {
-	pte_t *src_pte;
-	pte_t *dst_pte;
+	pte_t *src_ptep;
+	pte_t *dst_ptep;
 	unsigned long addr = start;

-	dst_pte = (pte_t *)get_safe_page(GFP_ATOMIC);
-	if (!dst_pte)
+	dst_ptep = (pte_t *)get_safe_page(GFP_ATOMIC);
+	if (!dst_ptep)
 		return -ENOMEM;
-	pmd_populate_kernel(&init_mm, dst_pmd, dst_pte);
-	dst_pte = pte_offset_kernel(dst_pmd, start);
+	pmd_populate_kernel(&init_mm, dst_pmdp, dst_ptep);
+	dst_ptep = pte_offset_kernel(dst_pmdp, start);

-	src_pte = pte_offset_kernel(src_pmd, start);
+	src_ptep = pte_offset_kernel(src_pmdp, start);
 	do {
-		_copy_pte(dst_pte, src_pte, addr);
-	} while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);
+		_copy_pte(dst_ptep, src_ptep, addr);
+	} while (dst_ptep++, src_ptep++, addr += PAGE_SIZE, addr != end);

 	return 0;
 }

-static int copy_pmd(pud_t *dst_pud, pud_t *src_pud, unsigned long start,
+static int copy_pmd(pud_t *dst_pudp, pud_t *src_pudp, unsigned long start,
 		    unsigned long end)
 {
-	pmd_t *src_pmd;
-	pmd_t *dst_pmd;
+	pmd_t *src_pmdp;
+	pmd_t *dst_pmdp;
 	unsigned long next;
 	unsigned long addr = start;

-	if (pud_none(*dst_pud)) {
-		dst_pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
-		if (!dst_pmd)
+	if (pud_none(READ_ONCE(*dst_pudp))) {
+		dst_pmdp = (pmd_t *)get_safe_page(GFP_ATOMIC);
+		if (!dst_pmdp)
 			return -ENOMEM;
-		pud_populate(&init_mm, dst_pud, dst_pmd);
+		pud_populate(&init_mm, dst_pudp, dst_pmdp);
 	}
-	dst_pmd = pmd_offset(dst_pud, start);
+	dst_pmdp = pmd_offset(dst_pudp, start);

-	src_pmd = pmd_offset(src_pud, start);
+	src_pmdp = pmd_offset(src_pudp, start);
 	do {
+		pmd_t pmd = READ_ONCE(*src_pmdp);
+
 		next = pmd_addr_end(addr, end);
-		if (pmd_none(*src_pmd))
+		if (pmd_none(pmd))
 			continue;
-		if (pmd_table(*src_pmd)) {
-			if (copy_pte(dst_pmd, src_pmd, addr, next))
+		if (pmd_table(pmd)) {
+			if (copy_pte(dst_pmdp, src_pmdp, addr, next))
 				return -ENOMEM;
 		} else {
-			set_pmd(dst_pmd,
-				__pmd(pmd_val(*src_pmd) & ~PMD_SECT_RDONLY));
+			set_pmd(dst_pmdp,
+				__pmd(pmd_val(pmd) & ~PMD_SECT_RDONLY));
 		}
-	} while (dst_pmd++, src_pmd++, addr = next, addr != end);
+	} while (dst_pmdp++, src_pmdp++, addr = next, addr != end);

 	return 0;
 }

-static int copy_pud(pgd_t *dst_pgd, pgd_t *src_pgd, unsigned long start,
+static int copy_pud(pgd_t *dst_pgdp, pgd_t *src_pgdp, unsigned long start,
 		    unsigned long end)
 {
-	pud_t *dst_pud;
-	pud_t *src_pud;
+	pud_t *dst_pudp;
+	pud_t *src_pudp;
 	unsigned long next;
 	unsigned long addr = start;

-	if (pgd_none(*dst_pgd)) {
-		dst_pud = (pud_t *)get_safe_page(GFP_ATOMIC);
-		if (!dst_pud)
+	if (pgd_none(READ_ONCE(*dst_pgdp))) {
+		dst_pudp = (pud_t *)get_safe_page(GFP_ATOMIC);
+		if (!dst_pudp)
 			return -ENOMEM;
-		pgd_populate(&init_mm, dst_pgd, dst_pud);
+		pgd_populate(&init_mm, dst_pgdp, dst_pudp);
 	}
-	dst_pud = pud_offset(dst_pgd, start);
+	dst_pudp = pud_offset(dst_pgdp, start);

-	src_pud = pud_offset(src_pgd, start);
+	src_pudp = pud_offset(src_pgdp, start);
 	do {
+		pud_t pud = READ_ONCE(*src_pudp);
+
 		next = pud_addr_end(addr, end);
-		if (pud_none(*src_pud))
+		if (pud_none(pud))
 			continue;
-		if (pud_table(*(src_pud))) {
-			if (copy_pmd(dst_pud, src_pud, addr, next))
+		if (pud_table(pud)) {
+			if (copy_pmd(dst_pudp, src_pudp, addr, next))
 				return -ENOMEM;
 		} else {
-			set_pud(dst_pud,
-				__pud(pud_val(*src_pud) & ~PMD_SECT_RDONLY));
+			set_pud(dst_pudp,
+				__pud(pud_val(pud) & ~PMD_SECT_RDONLY));
 		}
-	} while (dst_pud++, src_pud++, addr = next, addr != end);
+	} while (dst_pudp++, src_pudp++, addr = next, addr != end);

 	return 0;
 }

-static int copy_page_tables(pgd_t *dst_pgd, unsigned long start,
+static int copy_page_tables(pgd_t *dst_pgdp, unsigned long start,
 			    unsigned long end)
 {
 	unsigned long next;
 	unsigned long addr = start;
-	pgd_t *src_pgd = pgd_offset_k(start);
+	pgd_t *src_pgdp = pgd_offset_k(start);

-	dst_pgd = pgd_offset_raw(dst_pgd, start);
+	dst_pgdp = pgd_offset_raw(dst_pgdp, start);
 	do {
 		next = pgd_addr_end(addr, end);
-		if (pgd_none(*src_pgd))
+		if (pgd_none(READ_ONCE(*src_pgdp)))
 			continue;
-		if (copy_pud(dst_pgd, src_pgd, addr, next))
+		if (copy_pud(dst_pgdp, src_pgdp, addr, next))
 			return -ENOMEM;
-	} while (dst_pgd++, src_pgd++, addr = next, addr != end);
+	} while (dst_pgdp++, src_pgdp++, addr = next, addr != end);

 	return 0;
 }
@@ -908,9 +908,9 @@ static void __armv8pmu_probe_pmu(void *info)
 	int pmuver;

 	dfr0 = read_sysreg(id_aa64dfr0_el1);
-	pmuver = cpuid_feature_extract_signed_field(dfr0,
+	pmuver = cpuid_feature_extract_unsigned_field(dfr0,
 			ID_AA64DFR0_PMUVER_SHIFT);
-	if (pmuver < 1)
+	if (pmuver == 0xf || pmuver == 0)
 		return;

 	probe->present = true;
@@ -220,8 +220,15 @@ void __show_regs(struct pt_regs *regs)

 	show_regs_print_info(KERN_DEFAULT);
 	print_pstate(regs);
-	printk("pc : %pS\n", (void *)regs->pc);
-	printk("lr : %pS\n", (void *)lr);
+
+	if (!user_mode(regs)) {
+		printk("pc : %pS\n", (void *)regs->pc);
+		printk("lr : %pS\n", (void *)lr);
+	} else {
+		printk("pc : %016llx\n", regs->pc);
+		printk("lr : %016llx\n", lr);
+	}
+
 	printk("sp : %016llx\n", sp);

 	i = top_reg;
@@ -1419,7 +1419,7 @@ static int compat_ptrace_hbp_get(unsigned int note_type,
 	u64 addr = 0;
 	u32 ctrl = 0;

-	int err, idx = compat_ptrace_hbp_num_to_idx(num);;
+	int err, idx = compat_ptrace_hbp_num_to_idx(num);

 	if (num & 1) {
 		err = ptrace_hbp_get_addr(note_type, tsk, idx, &addr);
@@ -59,6 +59,11 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	if (tsk->ret_stack &&
 			(frame->pc == (unsigned long)return_to_handler)) {
+		if (WARN_ON_ONCE(frame->graph == -1))
+			return -EINVAL;
+		if (frame->graph < -1)
+			frame->graph += FTRACE_NOTRACE_DEPTH;
+
 		/*
 		 * This is a case where function graph tracer has
 		 * modified a return address (LR) in a stack frame
@@ -57,7 +57,7 @@ do_compat_cache_op(unsigned long start, unsigned long end, int flags)
 	if (end < start || flags)
 		return -EINVAL;

-	if (!access_ok(VERIFY_READ, start, end - start))
+	if (!access_ok(VERIFY_READ, (const void __user *)start, end - start))
 		return -EFAULT;

 	return __do_compat_cache_op(start, end);
@@ -52,7 +52,7 @@ unsigned long profile_pc(struct pt_regs *regs)
 	frame.fp = regs->regs[29];
 	frame.pc = regs->pc;
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	frame.graph = -1; /* no task info */
+	frame.graph = current->curr_ret_stack;
 #endif
 	do {
 		int ret = unwind_frame(NULL, &frame);
@@ -57,7 +57,7 @@ static const char *handler[]= {
 	"Error"
 };
 
-int show_unhandled_signals = 1;
+int show_unhandled_signals = 0;
 
 static void dump_backtrace_entry(unsigned long where)
 {
@@ -526,14 +526,6 @@ asmlinkage long do_ni_syscall(struct pt_regs *regs)
 	}
 #endif
 
-	if (show_unhandled_signals_ratelimited()) {
-		pr_info("%s[%d]: syscall %d\n", current->comm,
-			task_pid_nr(current), regs->syscallno);
-		dump_instr("", regs);
-		if (user_mode(regs))
-			__show_regs(regs);
-	}
-
 	return sys_ni_syscall();
 }
@@ -407,8 +407,10 @@ again:
 		u32 midr = read_cpuid_id();
 
 		/* Apply BTAC predictors mitigation to all Falkor chips */
-		if ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1)
+		if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
+		    ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1)) {
 			__qcom_hyp_sanitize_btac_predictors();
+		}
 	}
 
 	fp_enabled = __fpsimd_enabled();
@@ -286,48 +286,52 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level,
 
 }
 
-static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start)
+static void walk_pte(struct pg_state *st, pmd_t *pmdp, unsigned long start)
 {
-	pte_t *pte = pte_offset_kernel(pmd, 0UL);
+	pte_t *ptep = pte_offset_kernel(pmdp, 0UL);
 	unsigned long addr;
 	unsigned i;
 
-	for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
+	for (i = 0; i < PTRS_PER_PTE; i++, ptep++) {
 		addr = start + i * PAGE_SIZE;
-		note_page(st, addr, 4, pte_val(*pte));
+		note_page(st, addr, 4, READ_ONCE(pte_val(*ptep)));
 	}
 }
 
-static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
+static void walk_pmd(struct pg_state *st, pud_t *pudp, unsigned long start)
 {
-	pmd_t *pmd = pmd_offset(pud, 0UL);
+	pmd_t *pmdp = pmd_offset(pudp, 0UL);
 	unsigned long addr;
 	unsigned i;
 
-	for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
+	for (i = 0; i < PTRS_PER_PMD; i++, pmdp++) {
+		pmd_t pmd = READ_ONCE(*pmdp);
+
 		addr = start + i * PMD_SIZE;
-		if (pmd_none(*pmd) || pmd_sect(*pmd)) {
-			note_page(st, addr, 3, pmd_val(*pmd));
+		if (pmd_none(pmd) || pmd_sect(pmd)) {
+			note_page(st, addr, 3, pmd_val(pmd));
 		} else {
-			BUG_ON(pmd_bad(*pmd));
-			walk_pte(st, pmd, addr);
+			BUG_ON(pmd_bad(pmd));
+			walk_pte(st, pmdp, addr);
 		}
 	}
 }
 
-static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
+static void walk_pud(struct pg_state *st, pgd_t *pgdp, unsigned long start)
 {
-	pud_t *pud = pud_offset(pgd, 0UL);
+	pud_t *pudp = pud_offset(pgdp, 0UL);
 	unsigned long addr;
 	unsigned i;
 
-	for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
+	for (i = 0; i < PTRS_PER_PUD; i++, pudp++) {
+		pud_t pud = READ_ONCE(*pudp);
+
 		addr = start + i * PUD_SIZE;
-		if (pud_none(*pud) || pud_sect(*pud)) {
-			note_page(st, addr, 2, pud_val(*pud));
+		if (pud_none(pud) || pud_sect(pud)) {
+			note_page(st, addr, 2, pud_val(pud));
 		} else {
-			BUG_ON(pud_bad(*pud));
-			walk_pmd(st, pud, addr);
+			BUG_ON(pud_bad(pud));
+			walk_pmd(st, pudp, addr);
 		}
 	}
 }
@@ -335,17 +339,19 @@ static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
 static void walk_pgd(struct pg_state *st, struct mm_struct *mm,
 		unsigned long start)
 {
-	pgd_t *pgd = pgd_offset(mm, 0UL);
+	pgd_t *pgdp = pgd_offset(mm, 0UL);
 	unsigned i;
 	unsigned long addr;
 
-	for (i = 0; i < PTRS_PER_PGD; i++, pgd++) {
+	for (i = 0; i < PTRS_PER_PGD; i++, pgdp++) {
+		pgd_t pgd = READ_ONCE(*pgdp);
+
 		addr = start + i * PGDIR_SIZE;
-		if (pgd_none(*pgd)) {
-			note_page(st, addr, 1, pgd_val(*pgd));
+		if (pgd_none(pgd)) {
+			note_page(st, addr, 1, pgd_val(pgd));
 		} else {
-			BUG_ON(pgd_bad(*pgd));
-			walk_pud(st, pgd, addr);
+			BUG_ON(pgd_bad(pgd));
+			walk_pud(st, pgdp, addr);
 		}
 	}
 }
@@ -130,7 +130,8 @@ static void mem_abort_decode(unsigned int esr)
 void show_pte(unsigned long addr)
 {
 	struct mm_struct *mm;
-	pgd_t *pgd;
+	pgd_t *pgdp;
+	pgd_t pgd;
 
 	if (addr < TASK_SIZE) {
 		/* TTBR0 */
@@ -149,33 +150,37 @@ void show_pte(unsigned long addr)
 		return;
 	}
 
-	pr_alert("%s pgtable: %luk pages, %u-bit VAs, pgd = %p\n",
+	pr_alert("%s pgtable: %luk pages, %u-bit VAs, pgdp = %p\n",
		 mm == &init_mm ? "swapper" : "user", PAGE_SIZE / SZ_1K,
		 VA_BITS, mm->pgd);
-	pgd = pgd_offset(mm, addr);
-	pr_alert("[%016lx] *pgd=%016llx", addr, pgd_val(*pgd));
+	pgdp = pgd_offset(mm, addr);
+	pgd = READ_ONCE(*pgdp);
+	pr_alert("[%016lx] pgd=%016llx", addr, pgd_val(pgd));
 
 	do {
-		pud_t *pud;
-		pmd_t *pmd;
-		pte_t *pte;
+		pud_t *pudp, pud;
+		pmd_t *pmdp, pmd;
+		pte_t *ptep, pte;
 
-		if (pgd_none(*pgd) || pgd_bad(*pgd))
+		if (pgd_none(pgd) || pgd_bad(pgd))
 			break;
 
-		pud = pud_offset(pgd, addr);
-		pr_cont(", *pud=%016llx", pud_val(*pud));
-		if (pud_none(*pud) || pud_bad(*pud))
+		pudp = pud_offset(pgdp, addr);
+		pud = READ_ONCE(*pudp);
+		pr_cont(", pud=%016llx", pud_val(pud));
+		if (pud_none(pud) || pud_bad(pud))
 			break;
 
-		pmd = pmd_offset(pud, addr);
-		pr_cont(", *pmd=%016llx", pmd_val(*pmd));
-		if (pmd_none(*pmd) || pmd_bad(*pmd))
+		pmdp = pmd_offset(pudp, addr);
+		pmd = READ_ONCE(*pmdp);
+		pr_cont(", pmd=%016llx", pmd_val(pmd));
+		if (pmd_none(pmd) || pmd_bad(pmd))
 			break;
 
-		pte = pte_offset_map(pmd, addr);
-		pr_cont(", *pte=%016llx", pte_val(*pte));
-		pte_unmap(pte);
+		ptep = pte_offset_map(pmdp, addr);
+		pte = READ_ONCE(*ptep);
+		pr_cont(", pte=%016llx", pte_val(pte));
+		pte_unmap(ptep);
 	} while(0);
 
 	pr_cont("\n");
@@ -196,8 +201,9 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
			  pte_t entry, int dirty)
 {
 	pteval_t old_pteval, pteval;
+	pte_t pte = READ_ONCE(*ptep);
 
-	if (pte_same(*ptep, entry))
+	if (pte_same(pte, entry))
 		return 0;
 
 	/* only preserve the access flags and write permission */
@@ -210,7 +216,7 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
	 * (calculated as: a & b == ~(~a | ~b)).
	 */
 	pte_val(entry) ^= PTE_RDONLY;
-	pteval = READ_ONCE(pte_val(*ptep));
+	pteval = pte_val(pte);
 	do {
 		old_pteval = pteval;
 		pteval ^= PTE_RDONLY;
@@ -54,14 +54,14 @@ static inline pgprot_t pte_pgprot(pte_t pte)
 static int find_num_contig(struct mm_struct *mm, unsigned long addr,
			   pte_t *ptep, size_t *pgsize)
 {
-	pgd_t *pgd = pgd_offset(mm, addr);
-	pud_t *pud;
-	pmd_t *pmd;
+	pgd_t *pgdp = pgd_offset(mm, addr);
+	pud_t *pudp;
+	pmd_t *pmdp;
 
 	*pgsize = PAGE_SIZE;
-	pud = pud_offset(pgd, addr);
-	pmd = pmd_offset(pud, addr);
-	if ((pte_t *)pmd == ptep) {
+	pudp = pud_offset(pgdp, addr);
+	pmdp = pmd_offset(pudp, addr);
+	if ((pte_t *)pmdp == ptep) {
 		*pgsize = PMD_SIZE;
 		return CONT_PMDS;
 	}
@@ -181,11 +181,8 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
 
 	clear_flush(mm, addr, ptep, pgsize, ncontig);
 
-	for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn) {
-		pr_debug("%s: set pte %p to 0x%llx\n", __func__, ptep,
-			 pte_val(pfn_pte(pfn, hugeprot)));
+	for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
 		set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));
-	}
 }
 
 void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
@@ -203,20 +200,20 @@ void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
 pte_t *huge_pte_alloc(struct mm_struct *mm,
		      unsigned long addr, unsigned long sz)
 {
-	pgd_t *pgd;
-	pud_t *pud;
-	pte_t *pte = NULL;
+	pgd_t *pgdp;
+	pud_t *pudp;
+	pmd_t *pmdp;
+	pte_t *ptep = NULL;
 
-	pr_debug("%s: addr:0x%lx sz:0x%lx\n", __func__, addr, sz);
-	pgd = pgd_offset(mm, addr);
-	pud = pud_alloc(mm, pgd, addr);
-	if (!pud)
+	pgdp = pgd_offset(mm, addr);
+	pudp = pud_alloc(mm, pgdp, addr);
+	if (!pudp)
 		return NULL;
 
 	if (sz == PUD_SIZE) {
-		pte = (pte_t *)pud;
+		ptep = (pte_t *)pudp;
 	} else if (sz == (PAGE_SIZE * CONT_PTES)) {
-		pmd_t *pmd = pmd_alloc(mm, pud, addr);
+		pmdp = pmd_alloc(mm, pudp, addr);
 
 		WARN_ON(addr & (sz - 1));
 		/*
@@ -226,60 +223,55 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
		 * will be no pte_unmap() to correspond with this
		 * pte_alloc_map().
		 */
-		pte = pte_alloc_map(mm, pmd, addr);
+		ptep = pte_alloc_map(mm, pmdp, addr);
 	} else if (sz == PMD_SIZE) {
 		if (IS_ENABLED(CONFIG_ARCH_WANT_HUGE_PMD_SHARE) &&
-		    pud_none(*pud))
-			pte = huge_pmd_share(mm, addr, pud);
+		    pud_none(READ_ONCE(*pudp)))
+			ptep = huge_pmd_share(mm, addr, pudp);
 		else
-			pte = (pte_t *)pmd_alloc(mm, pud, addr);
+			ptep = (pte_t *)pmd_alloc(mm, pudp, addr);
 	} else if (sz == (PMD_SIZE * CONT_PMDS)) {
-		pmd_t *pmd;
-
-		pmd = pmd_alloc(mm, pud, addr);
+		pmdp = pmd_alloc(mm, pudp, addr);
 		WARN_ON(addr & (sz - 1));
-		return (pte_t *)pmd;
+		return (pte_t *)pmdp;
 	}
 
-	pr_debug("%s: addr:0x%lx sz:0x%lx ret pte=%p/0x%llx\n", __func__, addr,
-	       sz, pte, pte_val(*pte));
-	return pte;
+	return ptep;
 }
 
 pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz)
 {
-	pgd_t *pgd;
-	pud_t *pud;
-	pmd_t *pmd;
+	pgd_t *pgdp;
+	pud_t *pudp, pud;
+	pmd_t *pmdp, pmd;
 
-	pgd = pgd_offset(mm, addr);
-	pr_debug("%s: addr:0x%lx pgd:%p\n", __func__, addr, pgd);
-	if (!pgd_present(*pgd))
+	pgdp = pgd_offset(mm, addr);
+	if (!pgd_present(READ_ONCE(*pgdp)))
 		return NULL;
 
-	pud = pud_offset(pgd, addr);
-	if (sz != PUD_SIZE && pud_none(*pud))
+	pudp = pud_offset(pgdp, addr);
+	pud = READ_ONCE(*pudp);
+	if (sz != PUD_SIZE && pud_none(pud))
 		return NULL;
 	/* hugepage or swap? */
-	if (pud_huge(*pud) || !pud_present(*pud))
-		return (pte_t *)pud;
+	if (pud_huge(pud) || !pud_present(pud))
+		return (pte_t *)pudp;
 	/* table; check the next level */
 
 	if (sz == CONT_PMD_SIZE)
 		addr &= CONT_PMD_MASK;
 
-	pmd = pmd_offset(pud, addr);
+	pmdp = pmd_offset(pudp, addr);
+	pmd = READ_ONCE(*pmdp);
 	if (!(sz == PMD_SIZE || sz == CONT_PMD_SIZE) &&
-	    pmd_none(*pmd))
+	    pmd_none(pmd))
 		return NULL;
-	if (pmd_huge(*pmd) || !pmd_present(*pmd))
-		return (pte_t *)pmd;
+	if (pmd_huge(pmd) || !pmd_present(pmd))
		return (pte_t *)pmdp;
 
-	if (sz == CONT_PTE_SIZE) {
-		pte_t *pte = pte_offset_kernel(pmd, (addr & CONT_PTE_MASK));
-		return pte;
-	}
+	if (sz == CONT_PTE_SIZE)
+		return pte_offset_kernel(pmdp, (addr & CONT_PTE_MASK));
 
 	return NULL;
 }
@@ -367,7 +359,7 @@ void huge_ptep_set_wrprotect(struct mm_struct *mm,
 	size_t pgsize;
 	pte_t pte;
 
-	if (!pte_cont(*ptep)) {
+	if (!pte_cont(READ_ONCE(*ptep))) {
 		ptep_set_wrprotect(mm, addr, ptep);
 		return;
 	}
@@ -391,7 +383,7 @@ void huge_ptep_clear_flush(struct vm_area_struct *vma,
 	size_t pgsize;
 	int ncontig;
 
-	if (!pte_cont(*ptep)) {
+	if (!pte_cont(READ_ONCE(*ptep))) {
 		ptep_clear_flush(vma, addr, ptep);
 		return;
 	}
|
||||
return __pa(p);
|
||||
}
|
||||
|
||||
static pte_t *__init kasan_pte_offset(pmd_t *pmd, unsigned long addr, int node,
|
||||
static pte_t *__init kasan_pte_offset(pmd_t *pmdp, unsigned long addr, int node,
|
||||
bool early)
|
||||
{
|
||||
if (pmd_none(*pmd)) {
|
||||
if (pmd_none(READ_ONCE(*pmdp))) {
|
||||
phys_addr_t pte_phys = early ? __pa_symbol(kasan_zero_pte)
|
||||
: kasan_alloc_zeroed_page(node);
|
||||
__pmd_populate(pmd, pte_phys, PMD_TYPE_TABLE);
|
||||
__pmd_populate(pmdp, pte_phys, PMD_TYPE_TABLE);
|
||||
}
|
||||
|
||||
return early ? pte_offset_kimg(pmd, addr)
|
||||
: pte_offset_kernel(pmd, addr);
|
||||
return early ? pte_offset_kimg(pmdp, addr)
|
||||
: pte_offset_kernel(pmdp, addr);
|
||||
}
|
||||
|
||||
static pmd_t *__init kasan_pmd_offset(pud_t *pud, unsigned long addr, int node,
|
||||
static pmd_t *__init kasan_pmd_offset(pud_t *pudp, unsigned long addr, int node,
|
||||
bool early)
|
||||
{
|
||||
if (pud_none(*pud)) {
|
||||
if (pud_none(READ_ONCE(*pudp))) {
|
||||
phys_addr_t pmd_phys = early ? __pa_symbol(kasan_zero_pmd)
|
||||
: kasan_alloc_zeroed_page(node);
|
||||
__pud_populate(pud, pmd_phys, PMD_TYPE_TABLE);
|
||||
__pud_populate(pudp, pmd_phys, PMD_TYPE_TABLE);
|
||||
}
|
||||
|
||||
return early ? pmd_offset_kimg(pud, addr) : pmd_offset(pud, addr);
|
||||
return early ? pmd_offset_kimg(pudp, addr) : pmd_offset(pudp, addr);
|
||||
}
|
||||
|
||||
static pud_t *__init kasan_pud_offset(pgd_t *pgd, unsigned long addr, int node,
|
||||
static pud_t *__init kasan_pud_offset(pgd_t *pgdp, unsigned long addr, int node,
|
||||
bool early)
|
||||
{
|
||||
if (pgd_none(*pgd)) {
|
||||
if (pgd_none(READ_ONCE(*pgdp))) {
|
||||
phys_addr_t pud_phys = early ? __pa_symbol(kasan_zero_pud)
|
||||
: kasan_alloc_zeroed_page(node);
|
||||
__pgd_populate(pgd, pud_phys, PMD_TYPE_TABLE);
|
||||
__pgd_populate(pgdp, pud_phys, PMD_TYPE_TABLE);
|
||||
}
|
||||
|
||||
return early ? pud_offset_kimg(pgd, addr) : pud_offset(pgd, addr);
|
||||
return early ? pud_offset_kimg(pgdp, addr) : pud_offset(pgdp, addr);
|
||||
}
|
||||
|
||||
static void __init kasan_pte_populate(pmd_t *pmd, unsigned long addr,
|
||||
static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr,
|
||||
unsigned long end, int node, bool early)
|
||||
{
|
||||
unsigned long next;
|
||||
pte_t *pte = kasan_pte_offset(pmd, addr, node, early);
|
||||
pte_t *ptep = kasan_pte_offset(pmdp, addr, node, early);
|
||||
|
||||
do {
|
||||
phys_addr_t page_phys = early ? __pa_symbol(kasan_zero_page)
|
||||
: kasan_alloc_zeroed_page(node);
|
||||
next = addr + PAGE_SIZE;
|
||||
set_pte(pte, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL));
|
||||
} while (pte++, addr = next, addr != end && pte_none(*pte));
|
||||
set_pte(ptep, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL));
|
||||
} while (ptep++, addr = next, addr != end && pte_none(READ_ONCE(*ptep)));
|
||||
}
|
||||
|
||||
static void __init kasan_pmd_populate(pud_t *pud, unsigned long addr,
|
||||
static void __init kasan_pmd_populate(pud_t *pudp, unsigned long addr,
|
||||
unsigned long end, int node, bool early)
|
||||
{
|
||||
unsigned long next;
|
||||
pmd_t *pmd = kasan_pmd_offset(pud, addr, node, early);
|
||||
pmd_t *pmdp = kasan_pmd_offset(pudp, addr, node, early);
|
||||
|
||||
do {
|
||||
next = pmd_addr_end(addr, end);
|
||||
kasan_pte_populate(pmd, addr, next, node, early);
|
||||
} while (pmd++, addr = next, addr != end && pmd_none(*pmd));
|
||||
kasan_pte_populate(pmdp, addr, next, node, early);
|
||||
} while (pmdp++, addr = next, addr != end && pmd_none(READ_ONCE(*pmdp)));
|
||||
}
|
||||
|
||||
static void __init kasan_pud_populate(pgd_t *pgd, unsigned long addr,
|
||||
static void __init kasan_pud_populate(pgd_t *pgdp, unsigned long addr,
|
||||
unsigned long end, int node, bool early)
|
||||
{
|
||||
unsigned long next;
|
||||
pud_t *pud = kasan_pud_offset(pgd, addr, node, early);
|
||||
pud_t *pudp = kasan_pud_offset(pgdp, addr, node, early);
|
||||
|
||||
do {
|
||||
next = pud_addr_end(addr, end);
|
||||
kasan_pmd_populate(pud, addr, next, node, early);
|
||||
} while (pud++, addr = next, addr != end && pud_none(*pud));
|
||||
kasan_pmd_populate(pudp, addr, next, node, early);
|
||||
} while (pudp++, addr = next, addr != end && pud_none(READ_ONCE(*pudp)));
|
||||
}
|
||||
|
||||
static void __init kasan_pgd_populate(unsigned long addr, unsigned long end,
|
||||
int node, bool early)
|
||||
{
|
||||
unsigned long next;
|
||||
pgd_t *pgd;
|
||||
pgd_t *pgdp;
|
||||
|
||||
pgd = pgd_offset_k(addr);
|
||||
pgdp = pgd_offset_k(addr);
|
||||
do {
|
||||
next = pgd_addr_end(addr, end);
|
||||
kasan_pud_populate(pgd, addr, next, node, early);
|
||||
} while (pgd++, addr = next, addr != end);
|
||||
kasan_pud_populate(pgdp, addr, next, node, early);
|
||||
} while (pgdp++, addr = next, addr != end);
|
||||
}
|
||||
|
||||
/* The early shadow maps everything to a single page of zeroes */
|
||||
@ -155,14 +155,14 @@ static void __init kasan_map_populate(unsigned long start, unsigned long end,
|
||||
*/
|
||||
void __init kasan_copy_shadow(pgd_t *pgdir)
|
||||
{
|
||||
pgd_t *pgd, *pgd_new, *pgd_end;
|
||||
pgd_t *pgdp, *pgdp_new, *pgdp_end;
|
||||
|
||||
pgd = pgd_offset_k(KASAN_SHADOW_START);
|
||||
pgd_end = pgd_offset_k(KASAN_SHADOW_END);
|
||||
pgd_new = pgd_offset_raw(pgdir, KASAN_SHADOW_START);
|
||||
pgdp = pgd_offset_k(KASAN_SHADOW_START);
|
||||
pgdp_end = pgd_offset_k(KASAN_SHADOW_END);
|
||||
pgdp_new = pgd_offset_raw(pgdir, KASAN_SHADOW_START);
|
||||
do {
|
||||
set_pgd(pgd_new, *pgd);
|
||||
} while (pgd++, pgd_new++, pgd != pgd_end);
|
||||
set_pgd(pgdp_new, READ_ONCE(*pgdp));
|
||||
} while (pgdp++, pgdp_new++, pgdp != pgdp_end);
|
||||
}
|
||||
|
||||
static void __init clear_pgds(unsigned long start,
|
||||
|
@@ -125,45 +125,48 @@ static bool pgattr_change_is_safe(u64 old, u64 new)
 	return ((old ^ new) & ~mask) == 0;
 }
 
-static void init_pte(pmd_t *pmd, unsigned long addr, unsigned long end,
+static void init_pte(pmd_t *pmdp, unsigned long addr, unsigned long end,
		     phys_addr_t phys, pgprot_t prot)
 {
-	pte_t *pte;
+	pte_t *ptep;
 
-	pte = pte_set_fixmap_offset(pmd, addr);
+	ptep = pte_set_fixmap_offset(pmdp, addr);
 	do {
-		pte_t old_pte = *pte;
+		pte_t old_pte = READ_ONCE(*ptep);
 
-		set_pte(pte, pfn_pte(__phys_to_pfn(phys), prot));
+		set_pte(ptep, pfn_pte(__phys_to_pfn(phys), prot));
 
 		/*
		 * After the PTE entry has been populated once, we
		 * only allow updates to the permission attributes.
		 */
-		BUG_ON(!pgattr_change_is_safe(pte_val(old_pte), pte_val(*pte)));
+		BUG_ON(!pgattr_change_is_safe(pte_val(old_pte),
+					      READ_ONCE(pte_val(*ptep))));
 
 		phys += PAGE_SIZE;
-	} while (pte++, addr += PAGE_SIZE, addr != end);
+	} while (ptep++, addr += PAGE_SIZE, addr != end);
 
 	pte_clear_fixmap();
 }
 
-static void alloc_init_cont_pte(pmd_t *pmd, unsigned long addr,
+static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
				unsigned long end, phys_addr_t phys,
				pgprot_t prot,
				phys_addr_t (*pgtable_alloc)(void),
				int flags)
 {
 	unsigned long next;
+	pmd_t pmd = READ_ONCE(*pmdp);
 
-	BUG_ON(pmd_sect(*pmd));
-	if (pmd_none(*pmd)) {
+	BUG_ON(pmd_sect(pmd));
+	if (pmd_none(pmd)) {
 		phys_addr_t pte_phys;
 		BUG_ON(!pgtable_alloc);
 		pte_phys = pgtable_alloc();
-		__pmd_populate(pmd, pte_phys, PMD_TYPE_TABLE);
+		__pmd_populate(pmdp, pte_phys, PMD_TYPE_TABLE);
+		pmd = READ_ONCE(*pmdp);
 	}
-	BUG_ON(pmd_bad(*pmd));
+	BUG_ON(pmd_bad(pmd));
 
 	do {
 		pgprot_t __prot = prot;
@@ -175,67 +178,69 @@ static void alloc_init_cont_pte(pmd_t *pmd, unsigned long addr,
		    (flags & NO_CONT_MAPPINGS) == 0)
			__prot = __pgprot(pgprot_val(prot) | PTE_CONT);
 
-		init_pte(pmd, addr, next, phys, __prot);
+		init_pte(pmdp, addr, next, phys, __prot);
 
 		phys += next - addr;
 	} while (addr = next, addr != end);
 }
 
-static void init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
+static void init_pmd(pud_t *pudp, unsigned long addr, unsigned long end,
		     phys_addr_t phys, pgprot_t prot,
		     phys_addr_t (*pgtable_alloc)(void), int flags)
 {
 	unsigned long next;
-	pmd_t *pmd;
+	pmd_t *pmdp;
 
-	pmd = pmd_set_fixmap_offset(pud, addr);
+	pmdp = pmd_set_fixmap_offset(pudp, addr);
 	do {
-		pmd_t old_pmd = *pmd;
+		pmd_t old_pmd = READ_ONCE(*pmdp);
 
 		next = pmd_addr_end(addr, end);
 
 		/* try section mapping first */
 		if (((addr | next | phys) & ~SECTION_MASK) == 0 &&
		    (flags & NO_BLOCK_MAPPINGS) == 0) {
-			pmd_set_huge(pmd, phys, prot);
+			pmd_set_huge(pmdp, phys, prot);
 
 			/*
			 * After the PMD entry has been populated once, we
			 * only allow updates to the permission attributes.
			 */
			BUG_ON(!pgattr_change_is_safe(pmd_val(old_pmd),
-						      pmd_val(*pmd)));
+						      READ_ONCE(pmd_val(*pmdp))));
 		} else {
-			alloc_init_cont_pte(pmd, addr, next, phys, prot,
+			alloc_init_cont_pte(pmdp, addr, next, phys, prot,
					    pgtable_alloc, flags);
 
			BUG_ON(pmd_val(old_pmd) != 0 &&
-			       pmd_val(old_pmd) != pmd_val(*pmd));
+			       pmd_val(old_pmd) != READ_ONCE(pmd_val(*pmdp)));
 		}
 		phys += next - addr;
-	} while (pmd++, addr = next, addr != end);
+	} while (pmdp++, addr = next, addr != end);
 
 	pmd_clear_fixmap();
 }
 
-static void alloc_init_cont_pmd(pud_t *pud, unsigned long addr,
+static void alloc_init_cont_pmd(pud_t *pudp, unsigned long addr,
				unsigned long end, phys_addr_t phys,
				pgprot_t prot,
				phys_addr_t (*pgtable_alloc)(void), int flags)
 {
 	unsigned long next;
+	pud_t pud = READ_ONCE(*pudp);
 
 	/*
	 * Check for initial section mappings in the pgd/pud.
	 */
-	BUG_ON(pud_sect(*pud));
-	if (pud_none(*pud)) {
+	BUG_ON(pud_sect(pud));
+	if (pud_none(pud)) {
 		phys_addr_t pmd_phys;
 		BUG_ON(!pgtable_alloc);
 		pmd_phys = pgtable_alloc();
-		__pud_populate(pud, pmd_phys, PUD_TYPE_TABLE);
+		__pud_populate(pudp, pmd_phys, PUD_TYPE_TABLE);
+		pud = READ_ONCE(*pudp);
 	}
-	BUG_ON(pud_bad(*pud));
+	BUG_ON(pud_bad(pud));
 
 	do {
 		pgprot_t __prot = prot;
@@ -247,7 +252,7 @@ static void alloc_init_cont_pmd(pud_t *pud, unsigned long addr,
		    (flags & NO_CONT_MAPPINGS) == 0)
			__prot = __pgprot(pgprot_val(prot) | PTE_CONT);
 
-		init_pmd(pud, addr, next, phys, __prot, pgtable_alloc, flags);
+		init_pmd(pudp, addr, next, phys, __prot, pgtable_alloc, flags);
 
 		phys += next - addr;
 	} while (addr = next, addr != end);
@@ -265,25 +270,27 @@ static inline bool use_1G_block(unsigned long addr, unsigned long next,
 	return true;
 }
 
-static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
-			   phys_addr_t phys, pgprot_t prot,
-			   phys_addr_t (*pgtable_alloc)(void),
-			   int flags)
+static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end,
+			   phys_addr_t phys, pgprot_t prot,
+			   phys_addr_t (*pgtable_alloc)(void),
+			   int flags)
 {
-	pud_t *pud;
 	unsigned long next;
+	pud_t *pudp;
+	pgd_t pgd = READ_ONCE(*pgdp);
 
-	if (pgd_none(*pgd)) {
+	if (pgd_none(pgd)) {
 		phys_addr_t pud_phys;
 		BUG_ON(!pgtable_alloc);
 		pud_phys = pgtable_alloc();
-		__pgd_populate(pgd, pud_phys, PUD_TYPE_TABLE);
+		__pgd_populate(pgdp, pud_phys, PUD_TYPE_TABLE);
+		pgd = READ_ONCE(*pgdp);
 	}
-	BUG_ON(pgd_bad(*pgd));
+	BUG_ON(pgd_bad(pgd));
 
-	pud = pud_set_fixmap_offset(pgd, addr);
+	pudp = pud_set_fixmap_offset(pgdp, addr);
 	do {
-		pud_t old_pud = *pud;
+		pud_t old_pud = READ_ONCE(*pudp);
 
 		next = pud_addr_end(addr, end);
 
@@ -292,23 +299,23 @@ static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
		 */
 		if (use_1G_block(addr, next, phys) &&
		    (flags & NO_BLOCK_MAPPINGS) == 0) {
-			pud_set_huge(pud, phys, prot);
+			pud_set_huge(pudp, phys, prot);
 
 			/*
			 * After the PUD entry has been populated once, we
			 * only allow updates to the permission attributes.
			 */
			BUG_ON(!pgattr_change_is_safe(pud_val(old_pud),
-						      pud_val(*pud)));
+						      READ_ONCE(pud_val(*pudp))));
 		} else {
-			alloc_init_cont_pmd(pud, addr, next, phys, prot,
+			alloc_init_cont_pmd(pudp, addr, next, phys, prot,
					    pgtable_alloc, flags);
 
			BUG_ON(pud_val(old_pud) != 0 &&
-			       pud_val(old_pud) != pud_val(*pud));
+			       pud_val(old_pud) != READ_ONCE(pud_val(*pudp)));
 		}
 		phys += next - addr;
-	} while (pud++, addr = next, addr != end);
+	} while (pudp++, addr = next, addr != end);
 
 	pud_clear_fixmap();
 }
@@ -320,7 +327,7 @@ static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
				 int flags)
 {
 	unsigned long addr, length, end, next;
-	pgd_t *pgd = pgd_offset_raw(pgdir, virt);
+	pgd_t *pgdp = pgd_offset_raw(pgdir, virt);
 
 	/*
	 * If the virtual and physical address don't have the same offset
@@ -336,10 +343,10 @@ static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
 	end = addr + length;
 	do {
 		next = pgd_addr_end(addr, end);
-		alloc_init_pud(pgd, addr, next, phys, prot, pgtable_alloc,
+		alloc_init_pud(pgdp, addr, next, phys, prot, pgtable_alloc,
			       flags);
 		phys += next - addr;
-	} while (pgd++, addr = next, addr != end);
+	} while (pgdp++, addr = next, addr != end);
 }
 
 static phys_addr_t pgd_pgtable_alloc(void)
@@ -401,10 +408,10 @@ static void update_mapping_prot(phys_addr_t phys, unsigned long virt,
 	flush_tlb_kernel_range(virt, virt + size);
 }
 
-static void __init __map_memblock(pgd_t *pgd, phys_addr_t start,
+static void __init __map_memblock(pgd_t *pgdp, phys_addr_t start,
				  phys_addr_t end, pgprot_t prot, int flags)
 {
-	__create_pgd_mapping(pgd, start, __phys_to_virt(start), end - start,
+	__create_pgd_mapping(pgdp, start, __phys_to_virt(start), end - start,
			     prot, early_pgtable_alloc, flags);
 }
 
@@ -418,7 +425,7 @@ void __init mark_linear_text_alias_ro(void)
			    PAGE_KERNEL_RO);
 }
 
-static void __init map_mem(pgd_t *pgd)
+static void __init map_mem(pgd_t *pgdp)
 {
 	phys_addr_t kernel_start = __pa_symbol(_text);
 	phys_addr_t kernel_end = __pa_symbol(__init_begin);
@@ -451,7 +458,7 @@ static void __init map_mem(pgd_t *pgd)
 		if (memblock_is_nomap(reg))
 			continue;
 
-		__map_memblock(pgd, start, end, PAGE_KERNEL, flags);
+		__map_memblock(pgdp, start, end, PAGE_KERNEL, flags);
 	}
 
 	/*
@@ -464,7 +471,7 @@ static void __init map_mem(pgd_t *pgd)
	 * Note that contiguous mappings cannot be remapped in this way,
	 * so we should avoid them here.
	 */
-	__map_memblock(pgd, kernel_start, kernel_end,
+	__map_memblock(pgdp, kernel_start, kernel_end,
		       PAGE_KERNEL, NO_CONT_MAPPINGS);
 	memblock_clear_nomap(kernel_start, kernel_end - kernel_start);
 
@@ -475,7 +482,7 @@ static void __init map_mem(pgd_t *pgd)
	 * through /sys/kernel/kexec_crash_size interface.
	 */
 	if (crashk_res.end) {
-		__map_memblock(pgd, crashk_res.start, crashk_res.end + 1,
+		__map_memblock(pgdp, crashk_res.start, crashk_res.end + 1,
			       PAGE_KERNEL,
			       NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);
 		memblock_clear_nomap(crashk_res.start,
@@ -499,7 +506,7 @@ void mark_rodata_ro(void)
 	debug_checkwx();
 }
 
-static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end,
+static void __init map_kernel_segment(pgd_t *pgdp, void *va_start, void *va_end,
				      pgprot_t prot, struct vm_struct *vma,
				      int flags, unsigned long vm_flags)
 {
@@ -509,7 +516,7 @@ static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end,
 	BUG_ON(!PAGE_ALIGNED(pa_start));
 	BUG_ON(!PAGE_ALIGNED(size));
 
-	__create_pgd_mapping(pgd, pa_start, (unsigned long)va_start, size, prot,
+	__create_pgd_mapping(pgdp, pa_start, (unsigned long)va_start, size, prot,
			     early_pgtable_alloc, flags);
 
 	if (!(vm_flags & VM_NO_GUARD))
@@ -562,7 +569,7 @@ core_initcall(map_entry_trampoline);
 /*
 * Create fine-grained mappings for the kernel.
 */
-static void __init map_kernel(pgd_t *pgd)
+static void __init map_kernel(pgd_t *pgdp)
 {
 	static struct vm_struct vmlinux_text, vmlinux_rodata, vmlinux_inittext,
				vmlinux_initdata, vmlinux_data;
@@ -578,24 +585,24 @@ static void __init map_kernel(pgd_t *pgd)
	 * Only rodata will be remapped with different permissions later on,
	 * all other segments are allowed to use contiguous mappings.
	 */
-	map_kernel_segment(pgd, _text, _etext, text_prot, &vmlinux_text, 0,
+	map_kernel_segment(pgdp, _text, _etext, text_prot, &vmlinux_text, 0,
			   VM_NO_GUARD);
-	map_kernel_segment(pgd, __start_rodata, __inittext_begin, PAGE_KERNEL,
+	map_kernel_segment(pgdp, __start_rodata, __inittext_begin, PAGE_KERNEL,
			   &vmlinux_rodata, NO_CONT_MAPPINGS, VM_NO_GUARD);
-	map_kernel_segment(pgd, __inittext_begin, __inittext_end, text_prot,
+	map_kernel_segment(pgdp, __inittext_begin, __inittext_end, text_prot,
			   &vmlinux_inittext, 0, VM_NO_GUARD);
-	map_kernel_segment(pgd, __initdata_begin, __initdata_end, PAGE_KERNEL,
+	map_kernel_segment(pgdp, __initdata_begin, __initdata_end, PAGE_KERNEL,
			   &vmlinux_initdata, 0, VM_NO_GUARD);
-	map_kernel_segment(pgd, _data, _end, PAGE_KERNEL, &vmlinux_data, 0, 0);
+	map_kernel_segment(pgdp, _data, _end, PAGE_KERNEL, &vmlinux_data, 0, 0);
 
-	if (!pgd_val(*pgd_offset_raw(pgd, FIXADDR_START))) {
+	if (!READ_ONCE(pgd_val(*pgd_offset_raw(pgdp, FIXADDR_START)))) {
 		/*
		 * The fixmap falls in a separate pgd to the kernel, and doesn't
		 * live in the carveout for the swapper_pg_dir. We can simply
		 * re-use the existing dir for the fixmap.
		 */
-		set_pgd(pgd_offset_raw(pgd, FIXADDR_START),
-			*pgd_offset_k(FIXADDR_START));
+		set_pgd(pgd_offset_raw(pgdp, FIXADDR_START),
+			READ_ONCE(*pgd_offset_k(FIXADDR_START)));
 	} else if (CONFIG_PGTABLE_LEVELS > 3) {
 		/*
		 * The fixmap shares its top level pgd entry with the kernel
@@ -604,14 +611,15 @@ static void __init map_kernel(pgd_t *pgd)
		 * entry instead.
		 */
 		BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
-		pud_populate(&init_mm, pud_set_fixmap_offset(pgd, FIXADDR_START),
+		pud_populate(&init_mm,
+			     pud_set_fixmap_offset(pgdp, FIXADDR_START),
			     lm_alias(bm_pmd));
 		pud_clear_fixmap();
 	} else {
 		BUG();
 	}
 
-	kasan_copy_shadow(pgd);
+	kasan_copy_shadow(pgdp);
 }
 
 /*
@@ -621,10 +629,10 @@ static void __init map_kernel(pgd_t *pgd)
 void __init paging_init(void)
 {
 	phys_addr_t pgd_phys = early_pgtable_alloc();
-	pgd_t *pgd = pgd_set_fixmap(pgd_phys);
+	pgd_t *pgdp = pgd_set_fixmap(pgd_phys);
 
-	map_kernel(pgd);
-	map_mem(pgd);
+	map_kernel(pgdp);
+	map_mem(pgdp);
 
 	/*
	 * We want to reuse the original swapper_pg_dir so we don't have to
@@ -635,7 +643,7 @@ void __init paging_init(void)
	 * To do this we need to go via a temporary pgd.
	 */
 	cpu_replace_ttbr1(__va(pgd_phys));
-	memcpy(swapper_pg_dir, pgd, PGD_SIZE);
+	memcpy(swapper_pg_dir, pgdp, PGD_SIZE);
 	cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
 
 	pgd_clear_fixmap();
@@ -655,37 +663,40 @@ void __init paging_init(void)
 */
 int kern_addr_valid(unsigned long addr)
 {
-	pgd_t *pgd;
-	pud_t *pud;
-	pmd_t *pmd;
-	pte_t *pte;
+	pgd_t *pgdp;
+	pud_t *pudp, pud;
+	pmd_t *pmdp, pmd;
+	pte_t *ptep, pte;
 
 	if ((((long)addr) >> VA_BITS) != -1UL)
 		return 0;
 
-	pgd = pgd_offset_k(addr);
-	if (pgd_none(*pgd))
+	pgdp = pgd_offset_k(addr);
+	if (pgd_none(READ_ONCE(*pgdp)))
 		return 0;
 
-	pud = pud_offset(pgd, addr);
-	if (pud_none(*pud))
+	pudp = pud_offset(pgdp, addr);
+	pud = READ_ONCE(*pudp);
+	if (pud_none(pud))
 		return 0;
 
-	if (pud_sect(*pud))
-		return pfn_valid(pud_pfn(*pud));
+	if (pud_sect(pud))
+		return pfn_valid(pud_pfn(pud));
 
-	pmd = pmd_offset(pud, addr);
-	if (pmd_none(*pmd))
+	pmdp = pmd_offset(pudp, addr);
+	pmd = READ_ONCE(*pmdp);
+	if (pmd_none(pmd))
 		return 0;
 
-	if (pmd_sect(*pmd))
-		return pfn_valid(pmd_pfn(*pmd));
+	if (pmd_sect(pmd))
+		return pfn_valid(pmd_pfn(pmd));
 
-	pte = pte_offset_kernel(pmd, addr);
-	if (pte_none(*pte))
+	ptep = pte_offset_kernel(pmdp, addr);
+	pte = READ_ONCE(*ptep);
+	if (pte_none(pte))
 		return 0;
 
-	return pfn_valid(pte_pfn(*pte));
+	return pfn_valid(pte_pfn(pte));
 }
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
 #if !ARM64_SWAPPER_USES_SECTION_MAPS
@@ -700,32 +711,32 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
 {
 	unsigned long addr = start;
 	unsigned long next;
-	pgd_t *pgd;
-	pud_t *pud;
-	pmd_t *pmd;
+	pgd_t *pgdp;
+	pud_t *pudp;
+	pmd_t *pmdp;
 
 	do {
 		next = pmd_addr_end(addr, end);
 
-		pgd = vmemmap_pgd_populate(addr, node);
-		if (!pgd)
+		pgdp = vmemmap_pgd_populate(addr, node);
+		if (!pgdp)
 			return -ENOMEM;
 
-		pud = vmemmap_pud_populate(pgd, addr, node);
-		if (!pud)
+		pudp = vmemmap_pud_populate(pgdp, addr, node);
+		if (!pudp)
 			return -ENOMEM;
 
-		pmd = pmd_offset(pud, addr);
-		if (pmd_none(*pmd)) {
+		pmdp = pmd_offset(pudp, addr);
+		if (pmd_none(READ_ONCE(*pmdp))) {
 			void *p = NULL;
 
 			p = vmemmap_alloc_block_buf(PMD_SIZE, node);
 			if (!p)
 				return -ENOMEM;
 
-			pmd_set_huge(pmd, __pa(p), __pgprot(PROT_SECT_NORMAL));
+			pmd_set_huge(pmdp, __pa(p), __pgprot(PROT_SECT_NORMAL));
 		} else
-			vmemmap_verify((pte_t *)pmd, node, addr, next);
+			vmemmap_verify((pte_t *)pmdp, node, addr, next);
 	} while (addr = next, addr != end);
 
 	return 0;
@@ -739,20 +750,22 @@ void vmemmap_free(unsigned long start, unsigned long end,
 
 static inline pud_t * fixmap_pud(unsigned long addr)
 {
-	pgd_t *pgd = pgd_offset_k(addr);
+	pgd_t *pgdp = pgd_offset_k(addr);
+	pgd_t pgd = READ_ONCE(*pgdp);
 
-	BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd));
+	BUG_ON(pgd_none(pgd) || pgd_bad(pgd));
 
-	return pud_offset_kimg(pgd, addr);
+	return pud_offset_kimg(pgdp, addr);
 }
 
 static inline pmd_t * fixmap_pmd(unsigned long addr)
 {
-	pud_t *pud = fixmap_pud(addr);
+	pud_t *pudp = fixmap_pud(addr);
+	pud_t pud = READ_ONCE(*pudp);
 
-	BUG_ON(pud_none(*pud) || pud_bad(*pud));
+	BUG_ON(pud_none(pud) || pud_bad(pud));
 
-	return pmd_offset_kimg(pud, addr);
+	return pmd_offset_kimg(pudp, addr);
 }
 
 static inline pte_t * fixmap_pte(unsigned long addr)
@@ -768,30 +781,31 @@ static inline pte_t * fixmap_pte(unsigned long addr)
 */
 void __init early_fixmap_init(void)
 {
-	pgd_t *pgd;
-	pud_t *pud;
-	pmd_t *pmd;
+	pgd_t *pgdp, pgd;
+	pud_t *pudp;
+	pmd_t *pmdp;
 	unsigned long addr = FIXADDR_START;
 
-	pgd = pgd_offset_k(addr);
+	pgdp = pgd_offset_k(addr);
+	pgd = READ_ONCE(*pgdp);
 	if (CONFIG_PGTABLE_LEVELS > 3 &&
-	    !(pgd_none(*pgd) || pgd_page_paddr(*pgd) == __pa_symbol(bm_pud))) {
+	    !(pgd_none(pgd) || pgd_page_paddr(pgd) == __pa_symbol(bm_pud))) {
 		/*
		 * We only end up here if the kernel mapping and the fixmap
		 * share the top level pgd entry, which should only happen on
		 * 16k/4 levels configurations.
		 */
 		BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
-		pud = pud_offset_kimg(pgd, addr);
+		pudp = pud_offset_kimg(pgdp, addr);
 	} else {
-		if (pgd_none(*pgd))
-			__pgd_populate(pgd, __pa_symbol(bm_pud), PUD_TYPE_TABLE);
-		pud = fixmap_pud(addr);
+		if (pgd_none(pgd))
+			__pgd_populate(pgdp, __pa_symbol(bm_pud), PUD_TYPE_TABLE);
+		pudp = fixmap_pud(addr);
 	}
-	if (pud_none(*pud))
-		__pud_populate(pud, __pa_symbol(bm_pmd), PMD_TYPE_TABLE);
-	pmd = fixmap_pmd(addr);
-	__pmd_populate(pmd, __pa_symbol(bm_pte), PMD_TYPE_TABLE);
+	if (pud_none(READ_ONCE(*pudp)))
+		__pud_populate(pudp, __pa_symbol(bm_pmd), PMD_TYPE_TABLE);
+	pmdp = fixmap_pmd(addr);
+	__pmd_populate(pmdp, __pa_symbol(bm_pte), PMD_TYPE_TABLE);
 
 	/*
	 * The boot-ioremap range spans multiple pmds, for which
@@ -800,11 +814,11 @@ void __init early_fixmap_init(void)
 	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
 
-	if ((pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)))
-	     || pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) {
+	if ((pmdp != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)))
+	     || pmdp != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) {
 		WARN_ON(1);
-		pr_warn("pmd %p != %p, %p\n",
-			pmd, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)),
+		pr_warn("pmdp %p != %p, %p\n",
+			pmdp, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)),
			fixmap_pmd(fix_to_virt(FIX_BTMAP_END)));
 		pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
@@ -824,16 +838,16 @@ void __set_fixmap(enum fixed_addresses idx,
			       phys_addr_t phys, pgprot_t flags)
 {
 	unsigned long addr = __fix_to_virt(idx);
-	pte_t *pte;
+	pte_t *ptep;
 
 	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);
 
-	pte = fixmap_pte(addr);
+	ptep = fixmap_pte(addr);
 
 	if (pgprot_val(flags)) {
-		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
+		set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, flags));
 	} else {
-		pte_clear(&init_mm, addr, pte);
+		pte_clear(&init_mm, addr, ptep);
 		flush_tlb_kernel_range(addr, addr+PAGE_SIZE);
 	}
 }
@@ -915,36 +929,46 @@ int __init arch_ioremap_pmd_supported(void)
 	return 1;
 }
 
-int pud_set_huge(pud_t *pud, phys_addr_t phys, pgprot_t prot)
+int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot)
 {
 	pgprot_t sect_prot = __pgprot(PUD_TYPE_SECT |
					pgprot_val(mk_sect_prot(prot)));
 
+	/* ioremap_page_range doesn't honour BBM */
+	if (pud_present(READ_ONCE(*pudp)))
+		return 0;
+
 	BUG_ON(phys & ~PUD_MASK);
-	set_pud(pud, pfn_pud(__phys_to_pfn(phys), sect_prot));
+	set_pud(pudp, pfn_pud(__phys_to_pfn(phys), sect_prot));
 	return 1;
 }
 
-int pmd_set_huge(pmd_t *pmd, phys_addr_t phys, pgprot_t prot)
+int pmd_set_huge(pmd_t *pmdp, phys_addr_t phys, pgprot_t prot)
 {
 	pgprot_t sect_prot = __pgprot(PMD_TYPE_SECT |
					pgprot_val(mk_sect_prot(prot)));
 
+	/* ioremap_page_range doesn't honour BBM */
+	if (pmd_present(READ_ONCE(*pmdp)))
+		return 0;
+
 	BUG_ON(phys & ~PMD_MASK);
-	set_pmd(pmd, pfn_pmd(__phys_to_pfn(phys), sect_prot));
+	set_pmd(pmdp, pfn_pmd(__phys_to_pfn(phys), sect_prot));
 	return 1;
 }
 
-int pud_clear_huge(pud_t *pud)
+int pud_clear_huge(pud_t *pudp)
 {
-	if (!pud_sect(*pud))
+	if (!pud_sect(READ_ONCE(*pudp)))
 		return 0;
-	pud_clear(pud);
+	pud_clear(pudp);
 	return 1;
 }
 
-int pmd_clear_huge(pmd_t *pmd)
+int pmd_clear_huge(pmd_t *pmdp)
 {
-	if (!pmd_sect(*pmd))
+	if (!pmd_sect(READ_ONCE(*pmdp)))
 		return 0;
-	pmd_clear(pmd);
+	pmd_clear(pmdp);
 	return 1;
 }
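The pud_present()/pmd_present() checks added above enforce break-before-make (BBM): on arm64 a live block entry may not simply be overwritten with a new output address. A sketch of the sequence a safe replacement would need — illustrative only, not an API this diff introduces:

	static void bbm_replace_pmd(pmd_t *pmdp, pmd_t new, unsigned long addr)
	{
		pmd_clear(pmdp);				/* break: invalidate the entry */
		flush_tlb_kernel_range(addr, addr + PMD_SIZE);	/* flush stale translations */
		set_pmd(pmdp, new);				/* make: install the new entry */
	}

Since ioremap_page_range() performs none of this, pud_set_huge()/pmd_set_huge() now simply refuse (return 0) when the entry is already present.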
@@ -29,7 +29,7 @@ static int change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr,
			void *data)
 {
 	struct page_change_data *cdata = data;
-	pte_t pte = *ptep;
+	pte_t pte = READ_ONCE(*ptep);
 
 	pte = clear_pte_bit(pte, cdata->clear_mask);
 	pte = set_pte_bit(pte, cdata->set_mask);
@@ -156,30 +156,32 @@ void __kernel_map_pages(struct page *page, int numpages, int enable)
 */
 bool kernel_page_present(struct page *page)
 {
-	pgd_t *pgd;
-	pud_t *pud;
-	pmd_t *pmd;
-	pte_t *pte;
+	pgd_t *pgdp;
+	pud_t *pudp, pud;
+	pmd_t *pmdp, pmd;
+	pte_t *ptep;
 	unsigned long addr = (unsigned long)page_address(page);
 
-	pgd = pgd_offset_k(addr);
-	if (pgd_none(*pgd))
+	pgdp = pgd_offset_k(addr);
+	if (pgd_none(READ_ONCE(*pgdp)))
 		return false;
 
-	pud = pud_offset(pgd, addr);
-	if (pud_none(*pud))
+	pudp = pud_offset(pgdp, addr);
+	pud = READ_ONCE(*pudp);
+	if (pud_none(pud))
 		return false;
-	if (pud_sect(*pud))
+	if (pud_sect(pud))
 		return true;
 
-	pmd = pmd_offset(pud, addr);
-	if (pmd_none(*pmd))
+	pmdp = pmd_offset(pudp, addr);
+	pmd = READ_ONCE(*pmdp);
+	if (pmd_none(pmd))
 		return false;
-	if (pmd_sect(*pmd))
+	if (pmd_sect(pmd))
 		return true;
 
-	pte = pte_offset_kernel(pmd, addr);
-	return pte_valid(*pte);
+	ptep = pte_offset_kernel(pmdp, addr);
+	return pte_valid(READ_ONCE(*ptep));
 }
 #endif /* CONFIG_HIBERNATION */
 #endif /* CONFIG_DEBUG_PAGEALLOC */
@@ -205,7 +205,8 @@ ENDPROC(idmap_cpu_replace_ttbr1)
	dc	cvac, cur_\()\type\()p		// Ensure any existing dirty
	dmb	sy				// lines are written back before
	ldr	\type, [cur_\()\type\()p]	// loading the entry
-	tbz	\type, #0, next_\()\type	// Skip invalid entries
+	tbz	\type, #0, skip_\()\type	// Skip invalid and
+	tbnz	\type, #11, skip_\()\type	// non-global entries
	.endm
 
	.macro __idmap_kpti_put_pgtable_ent_ng, type
@@ -265,8 +266,9 @@ ENTRY(idmap_kpti_install_ng_mappings)
	add	end_pgdp, cur_pgdp, #(PTRS_PER_PGD * 8)
 do_pgd:	__idmap_kpti_get_pgtable_ent	pgd
	tbnz	pgd, #1, walk_puds
-	__idmap_kpti_put_pgtable_ent_ng	pgd
 next_pgd:
+	__idmap_kpti_put_pgtable_ent_ng	pgd
+skip_pgd:
	add	cur_pgdp, cur_pgdp, #8
	cmp	cur_pgdp, end_pgdp
	b.ne	do_pgd
@@ -294,8 +296,9 @@ walk_puds:
	add	end_pudp, cur_pudp, #(PTRS_PER_PUD * 8)
 do_pud:	__idmap_kpti_get_pgtable_ent	pud
	tbnz	pud, #1, walk_pmds
-	__idmap_kpti_put_pgtable_ent_ng	pud
 next_pud:
+	__idmap_kpti_put_pgtable_ent_ng	pud
+skip_pud:
	add	cur_pudp, cur_pudp, 8
	cmp	cur_pudp, end_pudp
	b.ne	do_pud
@@ -314,8 +317,9 @@ walk_pmds:
	add	end_pmdp, cur_pmdp, #(PTRS_PER_PMD * 8)
 do_pmd:	__idmap_kpti_get_pgtable_ent	pmd
	tbnz	pmd, #1, walk_ptes
-	__idmap_kpti_put_pgtable_ent_ng	pmd
 next_pmd:
+	__idmap_kpti_put_pgtable_ent_ng	pmd
+skip_pmd:
	add	cur_pmdp, cur_pmdp, #8
	cmp	cur_pmdp, end_pmdp
	b.ne	do_pmd
@@ -333,7 +337,7 @@ walk_ptes:
	add	end_ptep, cur_ptep, #(PTRS_PER_PTE * 8)
 do_pte:	__idmap_kpti_get_pgtable_ent	pte
	__idmap_kpti_put_pgtable_ent_ng	pte
-next_pte:
+skip_pte:
	add	cur_ptep, cur_ptep, #8
	cmp	cur_ptep, end_ptep
	b.ne	do_pte
@@ -250,8 +250,9 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
	off = offsetof(struct bpf_array, map.max_entries);
	emit_a64_mov_i64(tmp, off, ctx);
	emit(A64_LDR32(tmp, r2, tmp), ctx);
+	emit(A64_MOV(0, r3, r3), ctx);
	emit(A64_CMP(0, r3, tmp), ctx);
-	emit(A64_B_(A64_COND_GE, jmp_offset), ctx);
+	emit(A64_B_(A64_COND_CS, jmp_offset), ctx);
 
	/* if (tail_call_cnt > MAX_TAIL_CALL_CNT)
	 * goto out;
@@ -259,7 +260,7 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
	 */
	emit_a64_mov_i64(tmp, MAX_TAIL_CALL_CNT, ctx);
	emit(A64_CMP(1, tcc, tmp), ctx);
-	emit(A64_B_(A64_COND_GT, jmp_offset), ctx);
+	emit(A64_B_(A64_COND_HI, jmp_offset), ctx);
	emit(A64_ADD_I(1, tcc, tcc, 1), ctx);
 
	/* prog = array->ptrs[index];
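In C terms, the tail-call bounds checks the JIT emits after the hunks above look like the sketch below. Both comparisons must be unsigned: the index is a 32-bit value, so it is first zero-extended (the added 32-bit A64_MOV), and CS/HI are the unsigned counterparts of the signed GE/GT conditions that an index with the top bit set could otherwise slip past:

	u32 index = (u32)r3;			/* zero-extend: A64_MOV, 32-bit */
	if (index >= array->map.max_entries)	/* unsigned >=  ->  A64_COND_CS */
		goto out;
	if (tail_call_cnt > MAX_TAIL_CALL_CNT)	/* unsigned >   ->  A64_COND_HI */
		goto out;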
@@ -44,18 +44,25 @@ struct bug_frame {
 * not be used like this with newer versions of gcc.
 */
 #define BUG()								\
+do {									\
	__asm__ __volatile__ ("clear.d [" __stringify(BUG_MAGIC) "]\n\t"\
			      "movu.w " __stringify(__LINE__) ",$r0\n\t"\
			      "jump 0f\n\t"				\
			      ".section .rodata\n"			\
			      "0:\t.string \"" __FILE__ "\"\n\t"	\
-			      ".previous")
+			      ".previous");				\
+	unreachable();							\
+} while (0)
 #endif
 
 #else
 
 /* This just causes an oops. */
-#define BUG() (*(int *)0 = 0)
+#define BUG()								\
+do {									\
+	barrier_before_unreachable();					\
+	__builtin_trap();						\
+} while (0)
 
 #endif
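All of the BUG() hunks in this series insert the same call before a trap. Assuming the generic gcc definition — an empty asm statement, which is an assumption here, not something this diff shows — barrier_before_unreachable() pins the preceding diagnostic in place: __builtin_trap() is noreturn, and without such a barrier gcc may merge identical trap sites and lose the per-site printk/pr_crit output:

	/* Assumed definition, for illustration only: an optimization
	 * barrier that gcc cannot move the print across or merge away. */
	#ifndef barrier_before_unreachable
	# define barrier_before_unreachable() asm volatile("")
	#endif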
@@ -4,7 +4,11 @@
 
 #ifdef CONFIG_BUG
 #define ia64_abort()	__builtin_trap()
-#define BUG() do { printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); ia64_abort(); } while (0)
+#define BUG() do {						\
+	printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__);	\
+	barrier_before_unreachable();				\
+	ia64_abort();						\
+} while (0)
 
 /* should this BUG be made generic? */
 #define HAVE_ARCH_BUG
@@ -41,7 +41,6 @@ ifneq ($(CONFIG_IA64_ESI),)
 obj-y				+= esi_stub.o	# must be in kernel proper
 endif
 obj-$(CONFIG_INTEL_IOMMU)	+= pci-dma.o
-obj-$(CONFIG_SWIOTLB)		+= pci-swiotlb.o
 
 obj-$(CONFIG_BINFMT_ELF)	+= elfcore.o
@@ -8,16 +8,19 @@
 #ifndef CONFIG_SUN3
 #define BUG() do { \
	pr_crit("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
+	barrier_before_unreachable(); \
	__builtin_trap(); \
 } while (0)
 #else
 #define BUG() do { \
	pr_crit("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
+	barrier_before_unreachable(); \
	panic("BUG!"); \
 } while (0)
 #endif
 #else
 #define BUG() do { \
+	barrier_before_unreachable(); \
	__builtin_trap(); \
 } while (0)
 #endif
@@ -126,6 +126,7 @@ $(obj)/vmlinux.its.S: $(addprefix $(srctree)/arch/mips/$(PLATFORM)/,$(ITS_INPUTS)
 
 quiet_cmd_cpp_its_S = ITS     $@
       cmd_cpp_its_S = $(CPP) $(cpp_flags) -P -C -o $@ $< \
+			-D__ASSEMBLY__ \
			-DKERNEL_NAME="\"Linux $(KERNELRELEASE)\"" \
			-DVMLINUX_BINARY="\"$(3)\"" \
			-DVMLINUX_COMPRESSION="\"$(2)\"" \
@@ -86,7 +86,6 @@ struct compat_flock {
	compat_off_t	l_len;
	s32		l_sysid;
	compat_pid_t	l_pid;
-	short		__unused;
	s32		pad[4];
 };
@@ -10,6 +10,8 @@
 
 #include <linux/errno.h>
 #include <linux/percpu.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
 #include <linux/spinlock.h>
 
 #include <asm/mips-cps.h>
@@ -22,6 +24,17 @@ static DEFINE_PER_CPU_ALIGNED(unsigned long, cpc_core_lock_flags);
 
 phys_addr_t __weak mips_cpc_default_phys_base(void)
 {
+	struct device_node *cpc_node;
+	struct resource res;
+	int err;
+
+	cpc_node = of_find_compatible_node(of_root, NULL, "mti,mips-cpc");
+	if (cpc_node) {
+		err = of_address_to_resource(cpc_node, 0, &res);
+		if (!err)
+			return res.start;
+	}
+
	return 0;
 }
@@ -375,6 +375,7 @@ static void __init bootmem_init(void)
	unsigned long reserved_end;
	unsigned long mapstart = ~0UL;
	unsigned long bootmap_size;
+	phys_addr_t ramstart = (phys_addr_t)ULLONG_MAX;
	bool bootmap_valid = false;
	int i;
 
@@ -395,7 +396,8 @@ static void __init bootmem_init(void)
	max_low_pfn = 0;
 
	/*
-	 * Find the highest page frame number we have available.
+	 * Find the highest page frame number we have available
+	 * and the lowest used RAM address
	 */
	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long start, end;
@@ -407,6 +409,8 @@ static void __init bootmem_init(void)
		end = PFN_DOWN(boot_mem_map.map[i].addr
				+ boot_mem_map.map[i].size);
 
+		ramstart = min(ramstart, boot_mem_map.map[i].addr);
+
 #ifndef CONFIG_HIGHMEM
		/*
		 * Skip highmem here so we get an accurate max_low_pfn if low
@@ -436,6 +440,13 @@ static void __init bootmem_init(void)
		mapstart = max(reserved_end, start);
	}
 
+	/*
+	 * Reserve any memory between the start of RAM and PHYS_OFFSET
+	 */
+	if (ramstart > PHYS_OFFSET)
+		add_memory_region(PHYS_OFFSET, ramstart - PHYS_OFFSET,
+				  BOOT_MEM_RESERVED);
+
	if (min_low_pfn >= max_low_pfn)
		panic("Incorrect memory mapping !!!");
	if (min_low_pfn > ARCH_PFN_OFFSET) {
@@ -664,9 +675,6 @@ static int __init early_parse_mem(char *p)
 
	add_memory_region(start, size, BOOT_MEM_RAM);
 
-	if (start && start > PHYS_OFFSET)
-		add_memory_region(PHYS_OFFSET, start - PHYS_OFFSET,
-				  BOOT_MEM_RESERVED);
	return 0;
 }
 early_param("mem", early_parse_mem);
@@ -572,7 +572,7 @@ asmlinkage void __weak plat_wired_tlb_setup(void)
	 */
 }
 
-void __init bmips_cpu_setup(void)
+void bmips_cpu_setup(void)
 {
	void __iomem __maybe_unused *cbr = BMIPS_GET_CBR();
	u32 __maybe_unused cfg;
@@ -16,6 +16,7 @@
 #define PGD_INDEX_SIZE	(32 - PGDIR_SHIFT)
 
 #define PMD_CACHE_INDEX	PMD_INDEX_SIZE
+#define PUD_CACHE_INDEX	PUD_INDEX_SIZE
 
 #ifndef __ASSEMBLY__
 #define PTE_TABLE_SIZE	(sizeof(pte_t) << PTE_INDEX_SIZE)
@@ -63,7 +63,8 @@ static inline int hash__hugepd_ok(hugepd_t hpd)
 * keeping the prototype consistent across the two formats.
 */
 static inline unsigned long pte_set_hidx(pte_t *ptep, real_pte_t rpte,
-			unsigned int subpg_index, unsigned long hidx)
+					 unsigned int subpg_index, unsigned long hidx,
+					 int offset)
 {
	return (hidx << H_PAGE_F_GIX_SHIFT) &
		(H_PAGE_F_SECOND | H_PAGE_F_GIX);
@@ -45,7 +45,7 @@
 * generic accessors and iterators here
 */
 #define __real_pte __real_pte
-static inline real_pte_t __real_pte(pte_t pte, pte_t *ptep)
+static inline real_pte_t __real_pte(pte_t pte, pte_t *ptep, int offset)
 {
	real_pte_t rpte;
	unsigned long *hidxp;
@@ -59,7 +59,7 @@ static inline real_pte_t __real_pte(pte_t pte, pte_t *ptep)
	 */
	smp_rmb();
 
-	hidxp = (unsigned long *)(ptep + PTRS_PER_PTE);
+	hidxp = (unsigned long *)(ptep + offset);
	rpte.hidx = *hidxp;
	return rpte;
 }
@@ -86,9 +86,10 @@ static inline unsigned long __rpte_to_hidx(real_pte_t rpte, unsigned long index)
 * expected to modify the PTE bits accordingly and commit the PTE to memory.
 */
 static inline unsigned long pte_set_hidx(pte_t *ptep, real_pte_t rpte,
-		unsigned int subpg_index, unsigned long hidx)
+					 unsigned int subpg_index,
+					 unsigned long hidx, int offset)
 {
-	unsigned long *hidxp = (unsigned long *)(ptep + PTRS_PER_PTE);
+	unsigned long *hidxp = (unsigned long *)(ptep + offset);
 
	rpte.hidx &= ~HIDX_BITS(0xfUL, subpg_index);
	*hidxp = rpte.hidx | HIDX_BITS(HIDX_SHIFT_BY_ONE(hidx), subpg_index);
@@ -140,13 +141,18 @@ static inline int hash__remap_4k_pfn(struct vm_area_struct *vma, unsigned long a
 }
 
 #define H_PTE_TABLE_SIZE	PTE_FRAG_SIZE
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined (CONFIG_HUGETLB_PAGE)
 #define H_PMD_TABLE_SIZE	((sizeof(pmd_t) << PMD_INDEX_SIZE) + \
				 (sizeof(unsigned long) << PMD_INDEX_SIZE))
 #else
 #define H_PMD_TABLE_SIZE	(sizeof(pmd_t) << PMD_INDEX_SIZE)
 #endif
+#ifdef CONFIG_HUGETLB_PAGE
+#define H_PUD_TABLE_SIZE	((sizeof(pud_t) << PUD_INDEX_SIZE) +	\
+				 (sizeof(unsigned long) << PUD_INDEX_SIZE))
+#else
 #define H_PUD_TABLE_SIZE	(sizeof(pud_t) << PUD_INDEX_SIZE)
+#endif
 #define H_PGD_TABLE_SIZE	(sizeof(pgd_t) << PGD_INDEX_SIZE)
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -23,7 +23,8 @@
				 H_PUD_INDEX_SIZE + H_PGD_INDEX_SIZE + PAGE_SHIFT)
 #define H_PGTABLE_RANGE		(ASM_CONST(1) << H_PGTABLE_EADDR_SIZE)
 
-#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_PPC_64K_PAGES)
+#if (defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)) && \
+	defined(CONFIG_PPC_64K_PAGES)
 /*
 * only with hash 64k we need to use the second half of pmd page table
 * to store pointer to deposited pgtable_t
@@ -32,6 +33,16 @@
 #else
 #define H_PMD_CACHE_INDEX	H_PMD_INDEX_SIZE
 #endif
+/*
+ * We store the slot details in the second half of page table.
+ * Increase the pud level table so that hugetlb ptes can be stored
+ * at pud level.
+ */
+#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PPC_64K_PAGES)
+#define H_PUD_CACHE_INDEX	(H_PUD_INDEX_SIZE + 1)
+#else
+#define H_PUD_CACHE_INDEX	(H_PUD_INDEX_SIZE)
+#endif
 /*
 * Define the address range of the kernel non-linear virtual area
 */
@@ -73,10 +73,16 @@ static inline void radix__pgd_free(struct mm_struct *mm, pgd_t *pgd)
 
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
+	pgd_t *pgd;
+
	if (radix_enabled())
		return radix__pgd_alloc(mm);
-	return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
-		pgtable_gfp_flags(mm, GFP_KERNEL));
+
+	pgd = kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
+			       pgtable_gfp_flags(mm, GFP_KERNEL));
+	memset(pgd, 0, PGD_TABLE_SIZE);
+
+	return pgd;
 }
 
 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
@@ -93,13 +99,13 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
 
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE),
+	return kmem_cache_alloc(PGT_CACHE(PUD_CACHE_INDEX),
		pgtable_gfp_flags(mm, GFP_KERNEL));
 }
 
 static inline void pud_free(struct mm_struct *mm, pud_t *pud)
 {
-	kmem_cache_free(PGT_CACHE(PUD_INDEX_SIZE), pud);
+	kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), pud);
 }
 
 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
@@ -115,7 +121,7 @@ static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
	 * ahead and flush the page walk cache
	 */
	flush_tlb_pgtable(tlb, address);
-	pgtable_free_tlb(tlb, pud, PUD_INDEX_SIZE);
+	pgtable_free_tlb(tlb, pud, PUD_CACHE_INDEX);
 }
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
@@ -232,11 +232,13 @@ extern unsigned long __pmd_index_size;
 extern unsigned long __pud_index_size;
 extern unsigned long __pgd_index_size;
 extern unsigned long __pmd_cache_index;
+extern unsigned long __pud_cache_index;
 #define PTE_INDEX_SIZE  __pte_index_size
 #define PMD_INDEX_SIZE  __pmd_index_size
 #define PUD_INDEX_SIZE  __pud_index_size
 #define PGD_INDEX_SIZE  __pgd_index_size
 #define PMD_CACHE_INDEX __pmd_cache_index
+#define PUD_CACHE_INDEX __pud_cache_index
 /*
  * Because of use of pte fragments and THP, size of page table
  * are not always derived out of index size above.
@@ -348,7 +350,7 @@ extern unsigned long pci_io_base;
  */
 #ifndef __real_pte

-#define __real_pte(e,p)		((real_pte_t){(e)})
+#define __real_pte(e, p, o)	((real_pte_t){(e)})
 #define __rpte_to_pte(r)	((r).pte)
 #define __rpte_to_hidx(r,index)	(pte_val(__rpte_to_pte(r)) >> H_PAGE_F_GIX_SHIFT)
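The extra parameter threaded through __real_pte() and pte_set_hidx() in the hunks above and below is an offset, in PTE entries, from a PTE to the hint-index (hidx) word that belongs to it in the second half of the table. A stand-alone sketch of that layout idea, with hypothetical sizes and a hypothetical helper rather than the kernel's own:

#include <stdio.h>

typedef unsigned long pte_t;

/* hypothetical layout: a table of nr ptes followed by nr hint words */
static unsigned long *hidx_slot(pte_t *ptep, unsigned long offset)
{
	/* the hint word sits "offset" entries past its pte */
	return (unsigned long *)(ptep + offset);
}

int main(void)
{
	enum { NR_PTES = 16 };
	pte_t table[NR_PTES * 2] = { 0 };	/* ptes, then hint words */
	pte_t *ptep = &table[3];		/* some pte in the first half */

	*hidx_slot(ptep, NR_PTES) = 0xf;	/* store a hint for pte #3 */
	printf("hint for pte 3: %#lx\n", table[3 + NR_PTES]);
	return 0;
}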
@@ -645,7 +645,7 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
 				      EXC_HV, SOFTEN_TEST_HV, bitmask)

 #define MASKABLE_RELON_EXCEPTION_HV_OOL(vec, label, bitmask)		\
-	MASKABLE_EXCEPTION_PROLOG_1(PACA_EXGEN, SOFTEN_NOTEST_HV, vec, bitmask);\
+	MASKABLE_EXCEPTION_PROLOG_1(PACA_EXGEN, SOFTEN_TEST_HV, vec, bitmask);\
 	EXCEPTION_RELON_PROLOG_PSERIES_1(label, EXC_HV)

 /*
@@ -52,7 +52,7 @@
 #define FW_FEATURE_TYPE1_AFFINITY ASM_CONST(0x0000000100000000)
 #define FW_FEATURE_PRRN		ASM_CONST(0x0000000200000000)
 #define FW_FEATURE_DRMEM_V2	ASM_CONST(0x0000000400000000)
-#define FW_FEATURE_DRC_INFO	ASM_CONST(0x0000000400000000)
+#define FW_FEATURE_DRC_INFO	ASM_CONST(0x0000000800000000)

 #ifndef __ASSEMBLY__
@@ -29,6 +29,16 @@
 #define PACA_IRQ_HMI		0x20
 #define PACA_IRQ_PMI		0x40

+/*
+ * Some soft-masked interrupts must be hard masked until they are replayed
+ * (e.g., because the soft-masked handler does not clear the exception).
+ */
+#ifdef CONFIG_PPC_BOOK3S
+#define PACA_IRQ_MUST_HARD_MASK	(PACA_IRQ_EE|PACA_IRQ_PMI)
+#else
+#define PACA_IRQ_MUST_HARD_MASK	(PACA_IRQ_EE)
+#endif
+
 /*
  * flags for paca->irq_soft_mask
  */
@@ -244,7 +254,7 @@ static inline bool lazy_irq_pending(void)
 static inline void may_hard_irq_enable(void)
 {
 	get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
-	if (!(get_paca()->irq_happened & PACA_IRQ_EE))
+	if (!(get_paca()->irq_happened & PACA_IRQ_MUST_HARD_MASK))
 		__hard_irq_enable();
 }
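Taken together, these two hunks change when the kernel is willing to hard-enable external interrupts: the pending-interrupt test now covers every source in PACA_IRQ_MUST_HARD_MASK rather than just PACA_IRQ_EE. A minimal user-space model of the check, using the flag values visible in this diff for HMI and PMI and an assumed value for EE, shows how a pending PMI now keeps interrupts hard-disabled:

#include <stdio.h>

/* HMI/PMI values as in the diff; EE's value here is an assumption. */
#define PACA_IRQ_HARD_DIS	0x01
#define PACA_IRQ_EE		0x04
#define PACA_IRQ_HMI		0x20
#define PACA_IRQ_PMI		0x40
#define PACA_IRQ_MUST_HARD_MASK	(PACA_IRQ_EE | PACA_IRQ_PMI)	/* Book3S */

static int may_hard_irq_enable(unsigned int irq_happened)
{
	/* old test: only a pending EE blocked re-enabling */
	int old = !(irq_happened & PACA_IRQ_EE);
	/* new test: any must-hard-mask source blocks re-enabling */
	int new = !(irq_happened & PACA_IRQ_MUST_HARD_MASK);

	printf("pending=%#x old-would-enable=%d new-would-enable=%d\n",
	       irq_happened, old, new);
	return new;
}

int main(void)
{
	may_hard_irq_enable(PACA_IRQ_PMI);	/* PMI pending: old 1, new 0 */
	may_hard_irq_enable(PACA_IRQ_EE);	/* EE pending: both 0 */
	return 0;
}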
@@ -140,6 +140,12 @@ static inline bool kdump_in_progress(void)
 	return false;
 }

+static inline void crash_ipi_callback(struct pt_regs *regs) { }
+
+static inline void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
+{
+}
+
 #endif /* CONFIG_KEXEC_CORE */
 #endif /* ! __ASSEMBLY__ */
 #endif /* __KERNEL__ */
@@ -24,6 +24,7 @@ extern int icache_44x_need_flush;
 #define PGD_INDEX_SIZE	(32 - PGDIR_SHIFT)

 #define PMD_CACHE_INDEX	PMD_INDEX_SIZE
+#define PUD_CACHE_INDEX	PUD_INDEX_SIZE

 #ifndef __ASSEMBLY__
 #define PTE_TABLE_SIZE	(sizeof(pte_t) << PTE_INDEX_SIZE)
@@ -27,6 +27,7 @@
 #else
 #define PMD_CACHE_INDEX	PMD_INDEX_SIZE
 #endif
+#define PUD_CACHE_INDEX	PUD_INDEX_SIZE

 /*
  * Define the address range of the kernel non-linear virtual area
@@ -44,6 +44,11 @@ extern int sysfs_add_device_to_node(struct device *dev, int nid);
 extern void sysfs_remove_device_from_node(struct device *dev, int nid);
 extern int numa_update_cpu_topology(bool cpus_locked);

+static inline void update_numa_cpu_lookup_table(unsigned int cpu, int node)
+{
+	numa_cpu_lookup_table[cpu] = node;
+}
+
 static inline int early_cpu_to_node(int cpu)
 {
 	int nid;
@@ -76,12 +81,16 @@ static inline int numa_update_cpu_topology(bool cpus_locked)
 {
 	return 0;
 }

+static inline void update_numa_cpu_lookup_table(unsigned int cpu, int node) {}
+
 #endif /* CONFIG_NUMA */

 #if defined(CONFIG_NUMA) && defined(CONFIG_PPC_SPLPAR)
 extern int start_topology_update(void);
 extern int stop_topology_update(void);
 extern int prrn_is_enabled(void);
+extern int find_and_online_cpu_nid(int cpu);
 #else
 static inline int start_topology_update(void)
 {
@@ -95,6 +104,10 @@ static inline int prrn_is_enabled(void)
 {
 	return 0;
 }
+static inline int find_and_online_cpu_nid(int cpu)
+{
+	return 0;
+}
 #endif /* CONFIG_NUMA && CONFIG_PPC_SPLPAR */

 #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_NEED_MULTIPLE_NODES)
@@ -384,7 +384,8 @@ static void *eeh_report_resume(void *data, void *userdata)
 	eeh_pcid_put(dev);
 	pci_uevent_ers(dev, PCI_ERS_RESULT_RECOVERED);
 #ifdef CONFIG_PCI_IOV
-	eeh_ops->notify_resume(eeh_dev_to_pdn(edev));
+	if (eeh_ops->notify_resume && eeh_dev_to_pdn(edev))
+		eeh_ops->notify_resume(eeh_dev_to_pdn(edev));
 #endif
 	return NULL;
 }
@@ -943,6 +943,8 @@ kernel_dbg_exc:
 /*
  * An interrupt came in while soft-disabled; We mark paca->irq_happened
  * accordingly and if the interrupt is level sensitive, we hard disable
+ * hard disable (full_mask) corresponds to PACA_IRQ_MUST_HARD_MASK, so
+ * keep these in synch.
  */

 .macro masked_interrupt_book3e paca_irq full_mask
@@ -1426,7 +1426,7 @@ EXC_COMMON_BEGIN(soft_nmi_common)
  *   triggered and won't automatically refire.
  * - If it was a HMI we return immediately since we handled it in realmode
  *   and it won't refire.
- * - else we hard disable and return.
+ * - Else it is one of PACA_IRQ_MUST_HARD_MASK, so hard disable and return.
  * This is called with r10 containing the value to OR to the paca field.
  */
 #define MASKED_INTERRUPT(_H)				\
@@ -1441,8 +1441,8 @@ masked_##_H##interrupt:					\
 	ori	r10,r10,0xffff;				\
 	mtspr	SPRN_DEC,r10;				\
 	b	MASKED_DEC_HANDLER_LABEL;		\
-1:	andi.	r10,r10,(PACA_IRQ_DBELL|PACA_IRQ_HMI);	\
-	bne	2f;					\
+1:	andi.	r10,r10,PACA_IRQ_MUST_HARD_MASK;	\
+	beq	2f;					\
 	mfspr	r10,SPRN_##_H##SRR1;			\
 	xori	r10,r10,MSR_EE;	/* clear MSR_EE */	\
 	mtspr	SPRN_##_H##SRR1,r10;			\
@@ -874,7 +874,7 @@ struct ibm_arch_vec __cacheline_aligned ibm_architecture_vec = {
 		.mmu = 0,
 		.hash_ext = 0,
 		.radix_ext = 0,
-		.byte22 = OV5_FEAT(OV5_DRC_INFO),
+		.byte22 = 0,
 	},

 	/* option vector 6: IBM PAPR hints */
@@ -788,7 +788,8 @@ static int register_cpu_online(unsigned int cpu)
 	if (cpu_has_feature(CPU_FTR_PPCAS_ARCH_V2))
 		device_create_file(s, &dev_attr_pir);

-	if (cpu_has_feature(CPU_FTR_ARCH_206))
+	if (cpu_has_feature(CPU_FTR_ARCH_206) &&
+		!firmware_has_feature(FW_FEATURE_LPAR))
 		device_create_file(s, &dev_attr_tscr);
 #endif /* CONFIG_PPC64 */

@@ -873,7 +874,8 @@ static int unregister_cpu_online(unsigned int cpu)
 	if (cpu_has_feature(CPU_FTR_PPCAS_ARCH_V2))
 		device_remove_file(s, &dev_attr_pir);

-	if (cpu_has_feature(CPU_FTR_ARCH_206))
+	if (cpu_has_feature(CPU_FTR_ARCH_206) &&
+		!firmware_has_feature(FW_FEATURE_LPAR))
 		device_remove_file(s, &dev_attr_tscr);
 #endif /* CONFIG_PPC64 */
|
@ -188,7 +188,7 @@ static int xive_provision_queue(struct kvm_vcpu *vcpu, u8 prio)
|
||||
if (!qpage) {
|
||||
pr_err("Failed to allocate queue %d for VCPU %d\n",
|
||||
prio, xc->server_num);
|
||||
return -ENOMEM;;
|
||||
return -ENOMEM;
|
||||
}
|
||||
memset(qpage, 0, 1 << xive->q_order);
|
||||
|
||||
|
@ -98,7 +98,7 @@ static void init_drconf_v2_cell(struct of_drconf_cell_v2 *dr_cell,
|
||||
dr_cell->base_addr = cpu_to_be64(lmb->base_addr);
|
||||
dr_cell->drc_index = cpu_to_be32(lmb->drc_index);
|
||||
dr_cell->aa_index = cpu_to_be32(lmb->aa_index);
|
||||
dr_cell->flags = cpu_to_be32(lmb->flags);
|
||||
dr_cell->flags = cpu_to_be32(drmem_lmb_flags(lmb));
|
||||
}
|
||||
|
||||
static int drmem_update_dt_v2(struct device_node *memory,
|
||||
@ -121,7 +121,7 @@ static int drmem_update_dt_v2(struct device_node *memory,
|
||||
}
|
||||
|
||||
if (prev_lmb->aa_index != lmb->aa_index ||
|
||||
prev_lmb->flags != lmb->flags)
|
||||
drmem_lmb_flags(prev_lmb) != drmem_lmb_flags(lmb))
|
||||
lmb_sets++;
|
||||
|
||||
prev_lmb = lmb;
|
||||
@ -150,7 +150,7 @@ static int drmem_update_dt_v2(struct device_node *memory,
|
||||
}
|
||||
|
||||
if (prev_lmb->aa_index != lmb->aa_index ||
|
||||
prev_lmb->flags != lmb->flags) {
|
||||
drmem_lmb_flags(prev_lmb) != drmem_lmb_flags(lmb)) {
|
||||
/* end of one set, start of another */
|
||||
dr_cell->seq_lmbs = cpu_to_be32(seq_lmbs);
|
||||
dr_cell++;
|
||||
@ -216,6 +216,8 @@ static void __init __walk_drmem_v1_lmbs(const __be32 *prop, const __be32 *usm,
|
||||
u32 i, n_lmbs;
|
||||
|
||||
n_lmbs = of_read_number(prop++, 1);
|
||||
if (n_lmbs == 0)
|
||||
return;
|
||||
|
||||
for (i = 0; i < n_lmbs; i++) {
|
||||
read_drconf_v1_cell(&lmb, &prop);
|
||||
@ -245,6 +247,8 @@ static void __init __walk_drmem_v2_lmbs(const __be32 *prop, const __be32 *usm,
|
||||
u32 i, j, lmb_sets;
|
||||
|
||||
lmb_sets = of_read_number(prop++, 1);
|
||||
if (lmb_sets == 0)
|
||||
return;
|
||||
|
||||
for (i = 0; i < lmb_sets; i++) {
|
||||
read_drconf_v2_cell(&dr_cell, &prop);
|
||||
@ -354,6 +358,8 @@ static void __init init_drmem_v1_lmbs(const __be32 *prop)
|
||||
struct drmem_lmb *lmb;
|
||||
|
||||
drmem_info->n_lmbs = of_read_number(prop++, 1);
|
||||
if (drmem_info->n_lmbs == 0)
|
||||
return;
|
||||
|
||||
drmem_info->lmbs = kcalloc(drmem_info->n_lmbs, sizeof(*lmb),
|
||||
GFP_KERNEL);
|
||||
@ -373,6 +379,8 @@ static void __init init_drmem_v2_lmbs(const __be32 *prop)
|
||||
int lmb_index;
|
||||
|
||||
lmb_sets = of_read_number(prop++, 1);
|
||||
if (lmb_sets == 0)
|
||||
return;
|
||||
|
||||
/* first pass, calculate the number of LMBs */
|
||||
p = prop;
|
||||
|
@@ -55,7 +55,7 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
 	 * need to add in 0x1 if it's a read-only user page
 	 */
 	rflags = htab_convert_pte_flags(new_pte);
-	rpte = __real_pte(__pte(old_pte), ptep);
+	rpte = __real_pte(__pte(old_pte), ptep, PTRS_PER_PTE);

 	if (cpu_has_feature(CPU_FTR_NOEXECUTE) &&
 	    !cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
@@ -117,7 +117,7 @@ repeat:
 			return -1;
 		}
 		new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | H_PAGE_HASHPTE;
-		new_pte |= pte_set_hidx(ptep, rpte, 0, slot);
+		new_pte |= pte_set_hidx(ptep, rpte, 0, slot, PTRS_PER_PTE);
 	}
 	*ptep = __pte(new_pte & ~H_PAGE_BUSY);
 	return 0;
@@ -86,7 +86,7 @@ int __hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,

 	subpg_index = (ea & (PAGE_SIZE - 1)) >> shift;
 	vpn  = hpt_vpn(ea, vsid, ssize);
-	rpte = __real_pte(__pte(old_pte), ptep);
+	rpte = __real_pte(__pte(old_pte), ptep, PTRS_PER_PTE);
 	/*
 	 *None of the sub 4k page is hashed
 	 */
@@ -214,7 +214,7 @@ repeat:
 		return -1;
 	}

-	new_pte |= pte_set_hidx(ptep, rpte, subpg_index, slot);
+	new_pte |= pte_set_hidx(ptep, rpte, subpg_index, slot, PTRS_PER_PTE);
 	new_pte |= H_PAGE_HASHPTE;

 	*ptep = __pte(new_pte & ~H_PAGE_BUSY);
@@ -262,7 +262,7 @@ int __hash_page_64K(unsigned long ea, unsigned long access,
 	} while (!pte_xchg(ptep, __pte(old_pte), __pte(new_pte)));

 	rflags = htab_convert_pte_flags(new_pte);
-	rpte = __real_pte(__pte(old_pte), ptep);
+	rpte = __real_pte(__pte(old_pte), ptep, PTRS_PER_PTE);

 	if (cpu_has_feature(CPU_FTR_NOEXECUTE) &&
 	    !cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
@@ -327,7 +327,7 @@ repeat:
 		}

 		new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | H_PAGE_HASHPTE;
-		new_pte |= pte_set_hidx(ptep, rpte, 0, slot);
+		new_pte |= pte_set_hidx(ptep, rpte, 0, slot, PTRS_PER_PTE);
 	}
 	*ptep = __pte(new_pte & ~H_PAGE_BUSY);
 	return 0;
@@ -1008,6 +1008,7 @@ void __init hash__early_init_mmu(void)
 	__pmd_index_size = H_PMD_INDEX_SIZE;
 	__pud_index_size = H_PUD_INDEX_SIZE;
 	__pgd_index_size = H_PGD_INDEX_SIZE;
+	__pud_cache_index = H_PUD_CACHE_INDEX;
 	__pmd_cache_index = H_PMD_CACHE_INDEX;
 	__pte_table_size = H_PTE_TABLE_SIZE;
 	__pmd_table_size = H_PMD_TABLE_SIZE;
@@ -27,7 +27,7 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
 	unsigned long vpn;
 	unsigned long old_pte, new_pte;
 	unsigned long rflags, pa, sz;
-	long slot;
+	long slot, offset;

 	BUG_ON(shift != mmu_psize_defs[mmu_psize].shift);

@@ -63,7 +63,11 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
 	} while(!pte_xchg(ptep, __pte(old_pte), __pte(new_pte)));

 	rflags = htab_convert_pte_flags(new_pte);
-	rpte = __real_pte(__pte(old_pte), ptep);
+	if (unlikely(mmu_psize == MMU_PAGE_16G))
+		offset = PTRS_PER_PUD;
+	else
+		offset = PTRS_PER_PMD;
+	rpte = __real_pte(__pte(old_pte), ptep, offset);

 	sz = ((1UL) << shift);
 	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
@@ -104,7 +108,7 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
 			return -1;
 		}

-		new_pte |= pte_set_hidx(ptep, rpte, 0, slot);
+		new_pte |= pte_set_hidx(ptep, rpte, 0, slot, offset);
 	}

 	/*
@@ -100,6 +100,6 @@ void pgtable_cache_init(void)
 	 * same size as either the pgd or pmd index except with THP enabled
 	 * on book3s 64
 	 */
-	if (PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE))
-		pgtable_cache_add(PUD_INDEX_SIZE, pud_ctor);
+	if (PUD_CACHE_INDEX && !PGT_CACHE(PUD_CACHE_INDEX))
+		pgtable_cache_add(PUD_CACHE_INDEX, pud_ctor);
 }
@@ -143,11 +143,6 @@ static void reset_numa_cpu_lookup_table(void)
 		numa_cpu_lookup_table[cpu] = -1;
 }

-static void update_numa_cpu_lookup_table(unsigned int cpu, int node)
-{
-	numa_cpu_lookup_table[cpu] = node;
-}
-
 static void map_cpu_to_node(int cpu, int node)
 {
 	update_numa_cpu_lookup_table(cpu, node);
@@ -17,9 +17,11 @@
 #include <linux/of_fdt.h>
 #include <linux/mm.h>
+#include <linux/string_helpers.h>
+#include <linux/stop_machine.h>

 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
 #include <asm/mmu_context.h>
 #include <asm/dma.h>
 #include <asm/machdep.h>
 #include <asm/mmu.h>
@@ -333,6 +335,22 @@ static void __init radix_init_pgtable(void)
 		     "r" (TLBIEL_INVAL_SET_LPID), "r" (0));
 	asm volatile("eieio; tlbsync; ptesync" : : : "memory");
 	trace_tlbie(0, 0, TLBIEL_INVAL_SET_LPID, 0, 2, 1, 1);
+
+	/*
+	 * The init_mm context is given the first available (non-zero) PID,
+	 * which is the "guard PID" and contains no page table. PIDR should
+	 * never be set to zero because that duplicates the kernel address
+	 * space at the 0x0... offset (quadrant 0)!
+	 *
+	 * An arbitrary PID that may later be allocated by the PID allocator
+	 * for userspace processes must not be used either, because that
+	 * would cause stale user mappings for that PID on CPUs outside of
+	 * the TLB invalidation scheme (because it won't be in mm_cpumask).
+	 *
+	 * So permanently carve out one PID for the purpose of a guard PID.
+	 */
+	init_mm.context.id = mmu_base_pid;
+	mmu_base_pid++;
 }

 static void __init radix_init_partition_table(void)
@@ -535,6 +553,7 @@ void __init radix__early_init_mmu(void)
 	__pmd_index_size = RADIX_PMD_INDEX_SIZE;
 	__pud_index_size = RADIX_PUD_INDEX_SIZE;
 	__pgd_index_size = RADIX_PGD_INDEX_SIZE;
+	__pud_cache_index = RADIX_PUD_INDEX_SIZE;
 	__pmd_cache_index = RADIX_PMD_INDEX_SIZE;
 	__pte_table_size = RADIX_PTE_TABLE_SIZE;
 	__pmd_table_size = RADIX_PMD_TABLE_SIZE;
@@ -579,7 +598,8 @@ void __init radix__early_init_mmu(void)

 	radix_init_iamr();
 	radix_init_pgtable();
-
+	/* Switch to the guard PID before turning on MMU */
+	radix__switch_mmu_context(NULL, &init_mm);
 	if (cpu_has_feature(CPU_FTR_HVMODE))
 		tlbiel_all();
 }
@@ -604,6 +624,7 @@ void radix__early_init_mmu_secondary(void)
 	}
 	radix_init_iamr();

+	radix__switch_mmu_context(NULL, &init_mm);
 	if (cpu_has_feature(CPU_FTR_HVMODE))
 		tlbiel_all();
 }
@@ -666,6 +687,30 @@ static void free_pmd_table(pmd_t *pmd_start, pud_t *pud)
 	pud_clear(pud);
 }

+struct change_mapping_params {
+	pte_t *pte;
+	unsigned long start;
+	unsigned long end;
+	unsigned long aligned_start;
+	unsigned long aligned_end;
+};
+
+static int stop_machine_change_mapping(void *data)
+{
+	struct change_mapping_params *params =
+			(struct change_mapping_params *)data;
+
+	if (!data)
+		return -1;
+
+	spin_unlock(&init_mm.page_table_lock);
+	pte_clear(&init_mm, params->aligned_start, params->pte);
+	create_physical_mapping(params->aligned_start, params->start);
+	create_physical_mapping(params->end, params->aligned_end);
+	spin_lock(&init_mm.page_table_lock);
+	return 0;
+}
+
 static void remove_pte_table(pte_t *pte_start, unsigned long addr,
 			     unsigned long end)
 {
@@ -694,6 +739,52 @@ static void remove_pte_table(pte_t *pte_start, unsigned long addr,
 	}
 }

+/*
+ * clear the pte and potentially split the mapping helper
+ */
+static void split_kernel_mapping(unsigned long addr, unsigned long end,
+				unsigned long size, pte_t *pte)
+{
+	unsigned long mask = ~(size - 1);
+	unsigned long aligned_start = addr & mask;
+	unsigned long aligned_end = addr + size;
+	struct change_mapping_params params;
+	bool split_region = false;
+
+	if ((end - addr) < size) {
+		/*
+		 * We're going to clear the PTE, but not flushed
+		 * the mapping, time to remap and flush. The
+		 * effects if visible outside the processor or
+		 * if we are running in code close to the
+		 * mapping we cleared, we are in trouble.
+		 */
+		if (overlaps_kernel_text(aligned_start, addr) ||
+			overlaps_kernel_text(end, aligned_end)) {
+			/*
+			 * Hack, just return, don't pte_clear
+			 */
+			WARN_ONCE(1, "Linear mapping %lx->%lx overlaps kernel "
+				  "text, not splitting\n", addr, end);
+			return;
+		}
+		split_region = true;
+	}
+
+	if (split_region) {
+		params.pte = pte;
+		params.start = addr;
+		params.end = end;
+		params.aligned_start = addr & ~(size - 1);
+		params.aligned_end = min_t(unsigned long, aligned_end,
+				(unsigned long)__va(memblock_end_of_DRAM()));
+		stop_machine(stop_machine_change_mapping, &params, NULL);
+		return;
+	}
+
+	pte_clear(&init_mm, addr, pte);
+}
+
 static void remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
 			     unsigned long end)
 {
@@ -709,13 +800,7 @@ static void remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
 			continue;

 		if (pmd_huge(*pmd)) {
-			if (!IS_ALIGNED(addr, PMD_SIZE) ||
-			    !IS_ALIGNED(next, PMD_SIZE)) {
-				WARN_ONCE(1, "%s: unaligned range\n", __func__);
-				continue;
-			}
-
-			pte_clear(&init_mm, addr, (pte_t *)pmd);
+			split_kernel_mapping(addr, end, PMD_SIZE, (pte_t *)pmd);
 			continue;
 		}

@@ -740,13 +825,7 @@ static void remove_pud_table(pud_t *pud_start, unsigned long addr,
 			continue;

 		if (pud_huge(*pud)) {
-			if (!IS_ALIGNED(addr, PUD_SIZE) ||
-			    !IS_ALIGNED(next, PUD_SIZE)) {
-				WARN_ONCE(1, "%s: unaligned range\n", __func__);
-				continue;
-			}
-
-			pte_clear(&init_mm, addr, (pte_t *)pud);
+			split_kernel_mapping(addr, end, PUD_SIZE, (pte_t *)pud);
 			continue;
 		}

@@ -772,13 +851,7 @@ static void remove_pagetable(unsigned long start, unsigned long end)
 			continue;

 		if (pgd_huge(*pgd)) {
-			if (!IS_ALIGNED(addr, PGDIR_SIZE) ||
-			    !IS_ALIGNED(next, PGDIR_SIZE)) {
-				WARN_ONCE(1, "%s: unaligned range\n", __func__);
-				continue;
-			}
-
-			pte_clear(&init_mm, addr, (pte_t *)pgd);
+			split_kernel_mapping(addr, end, PGDIR_SIZE, (pte_t *)pgd);
 			continue;
 		}
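The arithmetic in split_kernel_mapping() above is worth a worked example: when only part of a huge mapping is being removed, the two flanks [aligned_start, start) and [end, aligned_end) are re-created with smaller pages under stop_machine() while the middle range is dropped. A toy calculation with a 2 MB PMD mapping (addresses chosen arbitrarily for illustration):

#include <stdio.h>

int main(void)
{
	unsigned long size = 1UL << 21;			/* a 2MB PMD mapping */
	unsigned long addr = 0x40234000UL;		/* start of removal */
	unsigned long end  = 0x40300000UL;		/* end of removal */
	unsigned long aligned_start = addr & ~(size - 1);
	unsigned long aligned_end   = addr + size;	/* as in the diff */

	/* (end - addr) < size here, so the mapping gets split */
	printf("remap %#lx..%#lx and %#lx..%#lx, drop %#lx..%#lx\n",
	       aligned_start, addr, end, aligned_end, addr, end);
	return 0;
}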
@@ -82,6 +82,8 @@ unsigned long __pgd_index_size;
 EXPORT_SYMBOL(__pgd_index_size);
 unsigned long __pmd_cache_index;
 EXPORT_SYMBOL(__pmd_cache_index);
+unsigned long __pud_cache_index;
+EXPORT_SYMBOL(__pud_cache_index);
 unsigned long __pte_table_size;
 EXPORT_SYMBOL(__pte_table_size);
 unsigned long __pmd_table_size;
@@ -471,6 +473,8 @@ void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0,
 	if (old & PATB_HR) {
 		asm volatile(PPC_TLBIE_5(%0,%1,2,0,1) : :
 			     "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
+		asm volatile(PPC_TLBIE_5(%0,%1,2,1,1) : :
+			     "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
 		trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 1);
 	} else {
 		asm volatile(PPC_TLBIE_5(%0,%1,2,0,0) : :
@@ -51,7 +51,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
 	unsigned int psize;
 	int ssize;
 	real_pte_t rpte;
-	int i;
+	int i, offset;

 	i = batch->index;

@@ -67,6 +67,10 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
 		psize = get_slice_psize(mm, addr);
 		/* Mask the address for the correct page size */
 		addr &= ~((1UL << mmu_psize_defs[psize].shift) - 1);
+		if (unlikely(psize == MMU_PAGE_16G))
+			offset = PTRS_PER_PUD;
+		else
+			offset = PTRS_PER_PMD;
 #else
 		BUG();
 		psize = pte_pagesize_index(mm, addr, pte); /* shutup gcc */
@@ -78,6 +82,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
 		 * support 64k pages, this might be different from the
 		 * hardware page size encoded in the slice table. */
 		addr &= PAGE_MASK;
+		offset = PTRS_PER_PTE;
 	}

@@ -91,7 +96,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
 	}
 	WARN_ON(vsid == 0);
 	vpn = hpt_vpn(addr, vsid, ssize);
-	rpte = __real_pte(__pte(pte), ptep);
+	rpte = __real_pte(__pte(pte), ptep, offset);

 	/*
 	 * Check if we have an active batch on this CPU. If not, just
@@ -327,6 +327,9 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
 			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
 			PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, len));
 			break;
+		case BPF_LDX | BPF_W | BPF_ABS: /* A = *((u32 *)(seccomp_data + K)); */
+			PPC_LWZ_OFFS(r_A, r_skb, K);
+			break;
 		case BPF_LDX | BPF_W | BPF_LEN: /* X = skb->len; */
 			PPC_LWZ_OFFS(r_X, r_skb, offsetof(struct sk_buff, len));
 			break;
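The new BPF_LDX | BPF_W | BPF_ABS case exists because seccomp rewrites a filter's BPF_LD | BPF_W | BPF_ABS loads into BPF_LDX | BPF_W | BPF_ABS before handing the program to the classic JIT, so K is an offset into struct seccomp_data rather than into a packet. A minimal filter that ends up exercising this path (a sketch; a production filter must also validate the architecture field of seccomp_data):

#include <stddef.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <linux/filter.h>
#include <linux/seccomp.h>

int install_filter(void)
{
	struct sock_filter insns[] = {
		/* A = seccomp_data->nr; becomes LDX|W|ABS inside the kernel */
		BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
			 offsetof(struct seccomp_data, nr)),
		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, __NR_getpid, 0, 1),
		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_TRAP),
		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = sizeof(insns) / sizeof(insns[0]),
		.filter = insns,
	};

	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
		return -1;
	return prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
}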
@@ -199,9 +199,11 @@ static void disable_nest_pmu_counters(void)
 	const struct cpumask *l_cpumask;

 	get_online_cpus();
-	for_each_online_node(nid) {
+	for_each_node_with_cpus(nid) {
 		l_cpumask = cpumask_of_node(nid);
-		cpu = cpumask_first(l_cpumask);
+		cpu = cpumask_first_and(l_cpumask, cpu_online_mask);
+		if (cpu >= nr_cpu_ids)
+			continue;
 		opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST,
 				       get_hard_smp_processor_id(cpu));
 	}
@@ -1854,7 +1854,7 @@ static int pnv_pci_ioda_dma_set_mask(struct pci_dev *pdev, u64 dma_mask)
 	s64 rc;

 	if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
-		return -ENODEV;;
+		return -ENODEV;

 	pe = &phb->ioda.pe_array[pdn->pe_number];
 	if (pe->tce_bypass_enabled) {
@@ -80,6 +80,10 @@ static void pnv_setup_rfi_flush(void)
 		if (np && of_property_read_bool(np, "disabled"))
 			enable--;

+		np = of_get_child_by_name(fw_features, "speculation-policy-favor-security");
+		if (np && of_property_read_bool(np, "disabled"))
+			enable = 0;
+
 		of_node_put(np);
 		of_node_put(fw_features);
 	}
@@ -1063,16 +1063,16 @@ struct vas_window *vas_tx_win_open(int vasid, enum vas_cop_type cop,
 			rc = PTR_ERR(txwin->paste_kaddr);
 			goto free_window;
 		}
+	} else {
+		/*
+		 * A user mapping must ensure that context switch issues
+		 * CP_ABORT for this thread.
+		 */
+		rc = set_thread_uses_vas();
+		if (rc)
+			goto free_window;
 	}

-	/*
-	 * Now that we have a send window, ensure context switch issues
-	 * CP_ABORT for this thread.
-	 */
-	rc = -EINVAL;
-	if (set_thread_uses_vas() < 0)
-		goto free_window;
-
 	set_vinst_win(vinst, txwin);

 	return txwin;
@@ -36,6 +36,7 @@
 #include <asm/xics.h>
 #include <asm/xive.h>
 #include <asm/plpar_wrappers.h>
+#include <asm/topology.h>

 #include "pseries.h"
 #include "offline_states.h"
@@ -331,6 +332,7 @@ static void pseries_remove_processor(struct device_node *np)
 		BUG_ON(cpu_online(cpu));
 		set_cpu_present(cpu, false);
 		set_hard_smp_processor_id(cpu, -1);
+		update_numa_cpu_lookup_table(cpu, -1);
 		break;
 	}
 	if (cpu >= nr_cpu_ids)
@@ -340,8 +342,6 @@ static void pseries_remove_processor(struct device_node *np)
 	cpu_maps_update_done();
 }

-extern int find_and_online_cpu_nid(int cpu);
-
 static int dlpar_online_cpu(struct device_node *dn)
 {
 	int rc = 0;
@@ -48,6 +48,28 @@ static irqreturn_t ras_epow_interrupt(int irq, void *dev_id);
 static irqreturn_t ras_error_interrupt(int irq, void *dev_id);


+/*
+ * Enable the hotplug interrupt late because processing them may touch other
+ * devices or systems (e.g. hugepages) that have not been initialized at the
+ * subsys stage.
+ */
+int __init init_ras_hotplug_IRQ(void)
+{
+	struct device_node *np;
+
+	/* Hotplug Events */
+	np = of_find_node_by_path("/event-sources/hot-plug-events");
+	if (np != NULL) {
+		if (dlpar_workqueue_init() == 0)
+			request_event_sources_irqs(np, ras_hotplug_interrupt,
+						   "RAS_HOTPLUG");
+		of_node_put(np);
+	}
+
+	return 0;
+}
+machine_late_initcall(pseries, init_ras_hotplug_IRQ);
+
 /*
  * Initialize handlers for the set of interrupts caused by hardware errors
  * and power system events.
@@ -66,15 +88,6 @@ static int __init init_ras_IRQ(void)
 		of_node_put(np);
 	}

-	/* Hotplug Events */
-	np = of_find_node_by_path("/event-sources/hot-plug-events");
-	if (np != NULL) {
-		if (dlpar_workqueue_init() == 0)
-			request_event_sources_irqs(np, ras_hotplug_interrupt,
-						   "RAS_HOTPLUG");
-		of_node_put(np);
-	}
-
 	/* EPOW Events */
 	np = of_find_node_by_path("/event-sources/epow-events");
 	if (np != NULL) {
@@ -482,7 +482,8 @@ static void pseries_setup_rfi_flush(void)
 		if (types == L1D_FLUSH_NONE)
 			types = L1D_FLUSH_FALLBACK;

-		if (!(result.behaviour & H_CPU_BEHAV_L1D_FLUSH_PR))
+		if ((!(result.behaviour & H_CPU_BEHAV_L1D_FLUSH_PR)) ||
+		    (!(result.behaviour & H_CPU_BEHAV_FAVOUR_SECURITY)))
 			enable = false;
 	} else {
 		/* Default to fallback if case hcall is not available */
@@ -356,7 +356,8 @@ static int xive_spapr_configure_queue(u32 target, struct xive_q *q, u8 prio,

 	rc = plpar_int_get_queue_info(0, target, prio, &esn_page, &esn_size);
 	if (rc) {
-		pr_err("Error %lld getting queue info prio %d\n", rc, prio);
+		pr_err("Error %lld getting queue info CPU %d prio %d\n", rc,
+		       target, prio);
 		rc = -EIO;
 		goto fail;
 	}
@@ -370,7 +371,8 @@ static int xive_spapr_configure_queue(u32 target, struct xive_q *q, u8 prio,
 	/* Configure and enable the queue in HW */
 	rc = plpar_int_set_queue_config(flags, target, prio, qpage_phys, order);
 	if (rc) {
-		pr_err("Error %lld setting queue for prio %d\n", rc, prio);
+		pr_err("Error %lld setting queue for CPU %d prio %d\n", rc,
+		       target, prio);
 		rc = -EIO;
 	} else {
 		q->qpage = qpage;
@@ -389,8 +391,8 @@ static int xive_spapr_setup_queue(unsigned int cpu, struct xive_cpu *xc,
 	if (IS_ERR(qpage))
 		return PTR_ERR(qpage);

-	return xive_spapr_configure_queue(cpu, q, prio, qpage,
-					  xive_queue_shift);
+	return xive_spapr_configure_queue(get_hard_smp_processor_id(cpu),
+					  q, prio, qpage, xive_queue_shift);
 }

 static void xive_spapr_cleanup_queue(unsigned int cpu, struct xive_cpu *xc,
@@ -399,10 +401,12 @@ static void xive_spapr_cleanup_queue(unsigned int cpu, struct xive_cpu *xc,
 	struct xive_q *q = &xc->queue[prio];
 	unsigned int alloc_order;
 	long rc;
+	int hw_cpu = get_hard_smp_processor_id(cpu);

-	rc = plpar_int_set_queue_config(0, cpu, prio, 0, 0);
+	rc = plpar_int_set_queue_config(0, hw_cpu, prio, 0, 0);
 	if (rc)
-		pr_err("Error %ld setting queue for prio %d\n", rc, prio);
+		pr_err("Error %ld setting queue for CPU %d prio %d\n", rc,
+		       hw_cpu, prio);

 	alloc_order = xive_alloc_order(xive_queue_shift);
 	free_pages((unsigned long)q->qpage, alloc_order);
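The common thread in these xive hunks is that the firmware interface behind plpar_int_* wants hardware CPU numbers, while the callers were passing Linux logical CPU numbers, which only happen to coincide on some systems. A toy model of the distinction, with a hypothetical id table standing in for get_hard_smp_processor_id():

#include <stdio.h>

/* hypothetical logical-to-hardware thread id map */
static const int hard_smp_id[] = { 8, 12, 16, 20 };

static int get_hard_id(unsigned int cpu)
{
	return hard_smp_id[cpu];
}

int main(void)
{
	unsigned int cpu = 2;	/* logical id used by the scheduler */

	/* the hypervisor call must see 16 here, not 2 */
	printf("logical cpu %u -> hardware thread %d\n",
	       cpu, get_hard_id(cpu));
	return 0;
}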
@@ -8,7 +8,6 @@ config RISCV
 	select OF
 	select OF_EARLY_FLATTREE
 	select OF_IRQ
-	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
 	select ARCH_WANT_FRAME_POINTERS
 	select CLONE_BACKWARDS
 	select COMMON_CLK
@@ -20,7 +19,6 @@ config RISCV
 	select GENERIC_STRNLEN_USER
 	select GENERIC_SMP_IDLE_THREAD
 	select GENERIC_ATOMIC64 if !64BIT || !RISCV_ISA_A
-	select ARCH_WANT_OPTIONAL_GPIOLIB
 	select HAVE_MEMBLOCK
 	select HAVE_MEMBLOCK_NODE_MAP
 	select HAVE_DMA_API_DEBUG
@@ -34,7 +32,6 @@ config RISCV
 	select HAVE_ARCH_TRACEHOOK
 	select MODULES_USE_ELF_RELA if MODULES
 	select THREAD_INFO_IN_TASK
-	select RISCV_IRQ_INTC
 	select RISCV_TIMER

 config MMU
@@ -172,6 +172,9 @@ ENTRY(handle_exception)
 	move a1, sp /* pt_regs */
 	tail do_IRQ
 1:
+	/* Exceptions run with interrupts enabled */
+	csrs sstatus, SR_SIE
+
 	/* Handle syscalls */
 	li t0, EXC_SYSCALL
 	beq s4, t0, handle_syscall
@@ -198,8 +201,6 @@ handle_syscall:
 	 */
 	addi s2, s2, 0x4
 	REG_S s2, PT_SEPC(sp)
-	/* System calls run with interrupts enabled */
-	csrs sstatus, SR_SIE
 	/* Trace syscalls, but only if requested by the user. */
 	REG_L t0, TASK_TI_FLAGS(tp)
 	andi t0, t0, _TIF_SYSCALL_TRACE
@@ -64,7 +64,7 @@ ENTRY(_start)
 	/* Start the kernel */
 	mv a0, s0
 	mv a1, s1
-	call sbi_save
+	call parse_dtb
 	tail start_kernel

 relocate: