Linux 5.16-rc4
Merge tag 'v5.16-rc4' into docs-next

I have a couple of fixes for warnings introduced after -rc1; catch up to -rc4
so that the fixes have something to fix.
commit a7fb920b15
.mailmap
@@ -71,6 +71,9 @@ Chao Yu <chao@kernel.org> <chao2.yu@samsung.com>
Chao Yu <chao@kernel.org> <yuchao0@huawei.com>
Chris Chiu <chris.chiu@canonical.com> <chiu@endlessm.com>
Chris Chiu <chris.chiu@canonical.com> <chiu@endlessos.org>
Christian Borntraeger <borntraeger@linux.ibm.com> <borntraeger@de.ibm.com>
Christian Borntraeger <borntraeger@linux.ibm.com> <cborntra@de.ibm.com>
Christian Borntraeger <borntraeger@linux.ibm.com> <borntrae@de.ibm.com>
Christophe Ricard <christophe.ricard@gmail.com>
Christoph Hellwig <hch@lst.de>
Colin Ian King <colin.king@intel.com> <colin.king@canonical.com>
@@ -1520,15 +1520,15 @@ This sysfs attribute controls the keyboard "face" that will be shown on the
Lenovo X1 Carbon 2nd gen (2014)'s adaptive keyboard. The value can be read
and set.

- 1 = Home mode
- 2 = Web-browser mode
- 3 = Web-conference mode
- 4 = Function mode
- 5 = Layflat mode
- 0 = Home mode
- 1 = Web-browser mode
- 2 = Web-conference mode
- 3 = Function mode
- 4 = Layflat mode

For more details about which buttons will appear depending on the mode, please
review the laptop's user guide:
http://www.lenovo.com/shop/americas/content/user_guides/x1carbon_2_ug_en.pdf
https://download.lenovo.com/ibmdl/pub/pc/pccbbs/mobiles_pdf/x1carbon_2_ug_en.pdf

Battery charge control
----------------------
@@ -53,11 +53,10 @@ The number of bits that the PAC occupies in a pointer is 55 minus the
virtual address size configured by the kernel. For example, with a
virtual address size of 48, the PAC is 7 bits wide.

Recent versions of GCC can compile code with APIAKey-based return
address protection when passed the -msign-return-address option. This
uses instructions in the HINT space (unless -march=armv8.3-a or higher
is also passed), and such code can run on systems without the pointer
authentication extension.
When ARM64_PTR_AUTH_KERNEL is selected, the kernel will be compiled
with HINT space pointer authentication instructions protecting
function returns. Kernels built with this option will work on hardware
with or without pointer authentication support.

In addition to exec(), keys can also be reinitialized to random values
using the PR_PAC_RESET_KEYS prctl. A bitmask of PR_PAC_APIAKEY,
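As an illustrative aside (not part of the patch above), resetting every pointer
authentication key from userspace with that prctl might look like the sketch
below; the helper name is made up here and the PR_PAC_* key masks are assumed
to be the usual ones from <linux/prctl.h>::

    #include <sys/prctl.h>
    #include <linux/prctl.h>

    /* Ask the kernel to reinitialise all five PAC keys with fresh random values. */
    static int reset_all_pac_keys(void)
    {
            return prctl(PR_PAC_RESET_KEYS,
                         PR_PAC_APIAKEY | PR_PAC_APIBKEY |
                         PR_PAC_APDAKEY | PR_PAC_APDBKEY | PR_PAC_APGAKEY,
                         0, 0, 0);
    }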
@@ -73,12 +73,12 @@ CPUFREQ_POSTCHANGE.
The third argument is a struct cpufreq_freqs with the following
values:

=====	===========================
cpu	number of the affected CPU
======	======================================
policy	a pointer to the struct cpufreq_policy
old	old frequency
new	new frequency
flags	flags of the cpufreq driver
=====	===========================
======	======================================

3. CPUFreq Table Generation with Operating Performance Point (OPP)
==================================================================
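As an aside (not part of the patch), a minimal transition notifier that
consumes the fields listed in the table above could look like the following
sketch; the demo_* names are invented for illustration::

    #include <linux/cpufreq.h>
    #include <linux/notifier.h>
    #include <linux/printk.h>

    static int demo_cpufreq_notifier(struct notifier_block *nb,
                                     unsigned long event, void *data)
    {
            struct cpufreq_freqs *freqs = data;

            if (event == CPUFREQ_POSTCHANGE)
                    pr_info("cpu%u: %u kHz -> %u kHz (flags %#x)\n",
                            freqs->policy->cpu, freqs->old, freqs->new,
                            freqs->flags);

            return NOTIFY_OK;
    }

    static struct notifier_block demo_nb = {
            .notifier_call = demo_cpufreq_notifier,
    };

    /* Registered elsewhere with:
     * cpufreq_register_notifier(&demo_nb, CPUFREQ_TRANSITION_NOTIFIER);
     */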
@@ -17,9 +17,10 @@ properties:
oneOf:
- enum:
- fsl,imx7ulp-lpi2c
- fsl,imx8qm-lpi2c
- items:
- const: fsl,imx8qxp-lpi2c
- enum:
- fsl,imx8qxp-lpi2c
- fsl,imx8qm-lpi2c
- const: fsl,imx7ulp-lpi2c

reg:
@@ -50,11 +50,11 @@ ksmbd.mountd (user space daemon)
--------------------------------

ksmbd.mountd is userspace process to, transfer user account and password that
are registered using ksmbd.adduser(part of utils for user space). Further it
are registered using ksmbd.adduser (part of utils for user space). Further it
allows sharing information parameters that parsed from smb.conf to ksmbd in
kernel. For the execution part it has a daemon which is continuously running
and connected to the kernel interface using netlink socket, it waits for the
requests(dcerpc and share/user info). It handles RPC calls (at a minimum few
requests (dcerpc and share/user info). It handles RPC calls (at a minimum few
dozen) that are most important for file server from NetShareEnum and
NetServerGetInfo. Complete DCE/RPC response is prepared from the user space
and passed over to the associated kernel thread for the client.
@@ -154,11 +154,11 @@ Each layer
1. Enable all component prints
# sudo ksmbd.control -d "all"

2. Enable one of components(smb, auth, vfs, oplock, ipc, conn, rdma)
2. Enable one of components (smb, auth, vfs, oplock, ipc, conn, rdma)
# sudo ksmbd.control -d "smb"

3. Show what prints are enable.
# cat/sys/class/ksmbd-control/debug
3. Show what prints are enabled.
# cat /sys/class/ksmbd-control/debug
[smb] auth vfs oplock ipc conn [rdma]

4. Disable prints:
@@ -1,7 +1,7 @@
.. SPDX-License-Identifier: GPL-2.0

=================================
NETWORK FILESYSTEM HELPER LIBRARY
Network Filesystem Helper Library
=================================

.. Contents:
@@ -37,22 +37,22 @@ into a common call framework.

The following services are provided:

* Handles transparent huge pages (THPs).
* Handle folios that span multiple pages.

* Insulates the netfs from VM interface changes.
* Insulate the netfs from VM interface changes.

* Allows the netfs to arbitrarily split reads up into pieces, even ones that
don't match page sizes or page alignments and that may cross pages.
* Allow the netfs to arbitrarily split reads up into pieces, even ones that
don't match folio sizes or folio alignments and that may cross folios.

* Allows the netfs to expand a readahead request in both directions to meet
its needs.
* Allow the netfs to expand a readahead request in both directions to meet its
needs.

* Allows the netfs to partially fulfil a read, which will then be resubmitted.
* Allow the netfs to partially fulfil a read, which will then be resubmitted.

* Handles local caching, allowing cached data and server-read data to be
* Handle local caching, allowing cached data and server-read data to be
interleaved for a single request.

* Handles clearing of bufferage that aren't on the server.
* Handle clearing of bufferage that aren't on the server.

* Handle retrying of reads that failed, switching reads from the cache to the
server as necessary.
@@ -70,22 +70,22 @@ Read Helper Functions

Three read helpers are provided::

* void netfs_readahead(struct readahead_control *ractl,
const struct netfs_read_request_ops *ops,
void *netfs_priv);``
* int netfs_readpage(struct file *file,
struct page *page,
const struct netfs_read_request_ops *ops,
void *netfs_priv);
* int netfs_write_begin(struct file *file,
struct address_space *mapping,
loff_t pos,
unsigned int len,
unsigned int flags,
struct page **_page,
void **_fsdata,
const struct netfs_read_request_ops *ops,
void *netfs_priv);
void netfs_readahead(struct readahead_control *ractl,
const struct netfs_read_request_ops *ops,
void *netfs_priv);
int netfs_readpage(struct file *file,
struct folio *folio,
const struct netfs_read_request_ops *ops,
void *netfs_priv);
int netfs_write_begin(struct file *file,
struct address_space *mapping,
loff_t pos,
unsigned int len,
unsigned int flags,
struct folio **_folio,
void **_fsdata,
const struct netfs_read_request_ops *ops,
void *netfs_priv);

Each corresponds to a VM operation, with the addition of a couple of parameters
for the use of the read helpers:
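As an aside (not part of the patch), the netfs_readpage() prototype shown above
is typically called straight from a network filesystem's ->readpage() handler;
in the sketch below the myfs_* names are hypothetical and only the netfs call
itself comes from this document::

    static int myfs_readpage(struct file *file, struct page *page)
    {
            struct folio *folio = page_folio(page);

            /* myfs_req_ops is the filesystem's netfs_read_request_ops table. */
            return netfs_readpage(file, folio, &myfs_req_ops, NULL);
    }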
@@ -103,8 +103,8 @@ Both of these values will be stored into the read request structure.
For ->readahead() and ->readpage(), the network filesystem should just jump
into the corresponding read helper; whereas for ->write_begin(), it may be a
little more complicated as the network filesystem might want to flush
conflicting writes or track dirty data and needs to put the acquired page if an
error occurs after calling the helper.
conflicting writes or track dirty data and needs to put the acquired folio if
an error occurs after calling the helper.

The helpers manage the read request, calling back into the network filesystem
through the suppplied table of operations. Waits will be performed as
@@ -253,7 +253,7 @@ through which it can issue requests and negotiate::
void (*issue_op)(struct netfs_read_subrequest *subreq);
bool (*is_still_valid)(struct netfs_read_request *rreq);
int (*check_write_begin)(struct file *file, loff_t pos, unsigned len,
struct page *page, void **_fsdata);
struct folio *folio, void **_fsdata);
void (*done)(struct netfs_read_request *rreq);
void (*cleanup)(struct address_space *mapping, void *netfs_priv);
};
@@ -313,13 +313,14 @@ The operations are as follows:

There is no return value; the netfs_subreq_terminated() function should be
called to indicate whether or not the operation succeeded and how much data
it transferred. The filesystem also should not deal with setting pages
it transferred. The filesystem also should not deal with setting folios
uptodate, unlocking them or dropping their refs - the helpers need to deal
with this as they have to coordinate with copying to the local cache.

Note that the helpers have the pages locked, but not pinned. It is possible
to use the ITER_XARRAY iov iterator to refer to the range of the inode that
is being operated upon without the need to allocate large bvec tables.
Note that the helpers have the folios locked, but not pinned. It is
possible to use the ITER_XARRAY iov iterator to refer to the range of the
inode that is being operated upon without the need to allocate large bvec
tables.

* ``is_still_valid()``

@@ -330,15 +331,15 @@ The operations are as follows:
* ``check_write_begin()``

[Optional] This is called from the netfs_write_begin() helper once it has
allocated/grabbed the page to be modified to allow the filesystem to flush
allocated/grabbed the folio to be modified to allow the filesystem to flush
conflicting state before allowing it to be modified.

It should return 0 if everything is now fine, -EAGAIN if the page should be
It should return 0 if everything is now fine, -EAGAIN if the folio should be
regrabbed and any other error code to abort the operation.

* ``done``

[Optional] This is called after the pages in the request have all been
[Optional] This is called after the folios in the request have all been
unlocked (and marked uptodate if applicable).

* ``cleanup``
@@ -390,7 +391,7 @@ The read helpers work by the following general procedure:
* If NETFS_SREQ_CLEAR_TAIL was set, a short read will be cleared to the
end of the slice instead of reissuing.

* Once the data is read, the pages that have been fully read/cleared:
* Once the data is read, the folios that have been fully read/cleared:

* Will be marked uptodate.

@@ -398,11 +399,11 @@ The read helpers work by the following general procedure:

* Unlocked

* Any pages that need writing to the cache will then have DIO writes issued.
* Any folios that need writing to the cache will then have DIO writes issued.

* Synchronous operations will wait for reading to be complete.

* Writes to the cache will proceed asynchronously and the pages will have the
* Writes to the cache will proceed asynchronously and the folios will have the
PG_fscache mark removed when that completes.

* The request structures will be cleaned up when everything has completed.
@@ -452,6 +453,9 @@ operation table looks like the following::
netfs_io_terminated_t term_func,
void *term_func_priv);

int (*prepare_write)(struct netfs_cache_resources *cres,
loff_t *_start, size_t *_len, loff_t i_size);

int (*write)(struct netfs_cache_resources *cres,
loff_t start_pos,
struct iov_iter *iter,
@@ -509,6 +513,14 @@ The methods defined in the table are:
indicating whether the termination is definitely happening in the caller's
context.

* ``prepare_write()``

[Required] Called to adjust a write to the cache and check that there is
sufficient space in the cache. The start and length values indicate the
size of the write that netfslib is proposing, and this can be adjusted by
the cache to respect DIO boundaries. The file size is passed for
information.

* ``write()``

[Required] Called to write to the cache. The start file offset is given
@@ -525,4 +537,9 @@ not the read request structure as they could be used in other situations where
there isn't a read request structure as well, such as writing dirty data to the
cache.


API Function Reference
======================

.. kernel-doc:: include/linux/netfs.h
.. kernel-doc:: fs/netfs/read_helper.c
@@ -36,6 +36,8 @@ Key to symbols

=============== =============================================================
S Start condition
Sr Repeated start condition, used to switch from write to
read mode.
P Stop condition
Rd/Wr (1 bit) Read/Write bit. Rd equals 1, Wr equals 0.
A, NA (1 bit) Acknowledge (ACK) and Not Acknowledge (NACK) bit
@@ -100,7 +102,7 @@ Implemented by i2c_smbus_read_byte_data()
This reads a single byte from a device, from a designated register.
The register is specified through the Comm byte::

S Addr Wr [A] Comm [A] S Addr Rd [A] [Data] NA P
S Addr Wr [A] Comm [A] Sr Addr Rd [A] [Data] NA P

Functionality flag: I2C_FUNC_SMBUS_READ_BYTE_DATA

@@ -114,7 +116,7 @@ This operation is very like Read Byte; again, data is read from a
device, from a designated register that is specified through the Comm
byte. But this time, the data is a complete word (16 bits)::

S Addr Wr [A] Comm [A] S Addr Rd [A] [DataLow] A [DataHigh] NA P
S Addr Wr [A] Comm [A] Sr Addr Rd [A] [DataLow] A [DataHigh] NA P

Functionality flag: I2C_FUNC_SMBUS_READ_WORD_DATA

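To make the Read Word Data transaction above concrete (this example is not
part of the patch; the register offset and function name are made up), a
driver would normally issue it through the SMBus helper that the text names::

    #include <linux/i2c.h>

    /* Read a 16-bit register; Comm = 0x2a is an arbitrary example offset. */
    static int demo_read_vendor_reg(struct i2c_client *client)
    {
            s32 val = i2c_smbus_read_word_data(client, 0x2a);

            if (val < 0)
                    return val;     /* negative errno from the adapter */
            return val & 0xffff;    /* the 16-bit word read from the device */
    }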
@@ -164,7 +166,7 @@ This command selects a device register (through the Comm byte), sends
16 bits of data to it, and reads 16 bits of data in return::

S Addr Wr [A] Comm [A] DataLow [A] DataHigh [A]
S Addr Rd [A] [DataLow] A [DataHigh] NA P
Sr Addr Rd [A] [DataLow] A [DataHigh] NA P

Functionality flag: I2C_FUNC_SMBUS_PROC_CALL

@@ -181,7 +183,7 @@ of data is specified by the device in the Count byte.
::

S Addr Wr [A] Comm [A]
S Addr Rd [A] [Count] A [Data] A [Data] A ... A [Data] NA P
Sr Addr Rd [A] [Count] A [Data] A [Data] A ... A [Data] NA P

Functionality flag: I2C_FUNC_SMBUS_READ_BLOCK_DATA

@@ -212,7 +214,7 @@ This command selects a device register (through the Comm byte), sends
1 to 31 bytes of data to it, and reads 1 to 31 bytes of data in return::

S Addr Wr [A] Comm [A] Count [A] Data [A] ...
S Addr Rd [A] [Count] A [Data] ... A P
Sr Addr Rd [A] [Count] A [Data] ... A P

Functionality flag: I2C_FUNC_SMBUS_BLOCK_PROC_CALL

@@ -300,7 +302,7 @@ This command reads a block of bytes from a device, from a
designated register that is specified through the Comm byte::

S Addr Wr [A] Comm [A]
S Addr Rd [A] [Data] A [Data] A ... A [Data] NA P
Sr Addr Rd [A] [Data] A [Data] A ... A [Data] NA P

Functionality flag: I2C_FUNC_SMBUS_READ_I2C_BLOCK

@@ -37,8 +37,7 @@ conn_reuse_mode - INTEGER

0: disable any special handling on port reuse. The new
connection will be delivered to the same real server that was
servicing the previous connection. This will effectively
disable expire_nodest_conn.
servicing the previous connection.

bit 1: enable rescheduling of new connections when it is safe.
That is, whenever expire_nodest_conn and for TCP sockets, when
@@ -486,8 +486,8 @@ of packets.

Drivers are free to use a more permissive configuration than the requested
configuration. It is expected that drivers should only implement directly the
most generic mode that can be supported. For example if the hardware can
support HWTSTAMP_FILTER_V2_EVENT, then it should generally always upscale
HWTSTAMP_FILTER_V2_L2_SYNC_MESSAGE, and so forth, as HWTSTAMP_FILTER_V2_EVENT
support HWTSTAMP_FILTER_PTP_V2_EVENT, then it should generally always upscale
HWTSTAMP_FILTER_PTP_V2_L2_SYNC, and so forth, as HWTSTAMP_FILTER_PTP_V2_EVENT
is more generic (and more useful to applications).

A driver which supports hardware time stamping shall update the struct
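For illustration only (not part of the patch; the interface name and function
name are made up, and error handling is abbreviated), a userspace request for
the PTP v2 event filter discussed above might look like::

    #include <string.h>
    #include <sys/ioctl.h>
    #include <net/if.h>
    #include <linux/net_tstamp.h>
    #include <linux/sockios.h>

    /* Ask the NIC "eth0" to hardware-timestamp all PTPv2 event packets. */
    static int enable_ptp_timestamps(int sock)
    {
            struct hwtstamp_config cfg = {
                    .tx_type   = HWTSTAMP_TX_ON,
                    .rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
            };
            struct ifreq ifr;

            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
            ifr.ifr_data = (void *)&cfg;

            return ioctl(sock, SIOCSHWTSTAMP, &ifr);
    }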
@@ -84,6 +84,16 @@ CONFIG_ENERGY_MODEL must be enabled to use the EM framework.
2.2 Registration of performance domains
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Registration of 'advanced' EM
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The 'advanced' EM gets it's name due to the fact that the driver is allowed
to provide more precised power model. It's not limited to some implemented math
formula in the framework (like it's in 'simple' EM case). It can better reflect
the real power measurements performed for each performance state. Thus, this
registration method should be preferred in case considering EM static power
(leakage) is important.

Drivers are expected to register performance domains into the EM framework by
calling the following API::

@@ -103,6 +113,18 @@ to: return warning/error, stop working or panic.
See Section 3. for an example of driver implementing this
callback, or Section 2.4 for further documentation on this API

Registration of 'simple' EM
~~~~~~~~~~~~~~~~~~~~~~~~~~~

The 'simple' EM is registered using the framework helper function
cpufreq_register_em_with_opp(). It implements a power model which is tight to
math formula::

Power = C * V^2 * f

The EM which is registered using this method might not reflect correctly the
physics of a real device, e.g. when static power (leakage) is important.


2.3 Accessing performance domains
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
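As a hedged illustration (not part of this patch), a driver that is content
with the 'simple' EM usually just points its register_em callback at that
helper; the 'foo' name below follows the fake driver used in Section 3 of the
same document::

    static struct cpufreq_driver foo_cpufreq_driver = {
            /* ... the usual cpufreq callbacks ... */
            .register_em = cpufreq_register_em_with_opp,
    };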
@@ -138,6 +160,10 @@ or in Section 2.4
3. Example driver
-----------------

The CPUFreq framework supports dedicated callback for registering
the EM for a given CPU(s) 'policy' object: cpufreq_driver::register_em().
That callback has to be implemented properly for a given driver,
because the framework would call it at the right time during setup.
This section provides a simple example of a CPUFreq driver registering a
performance domain in the Energy Model framework using the (fake) 'foo'
protocol. The driver implements an est_power() function to be provided to the
@@ -167,25 +193,22 @@ EM framework::
20 return 0;
21 }
22
23 static int foo_cpufreq_init(struct cpufreq_policy *policy)
23 static void foo_cpufreq_register_em(struct cpufreq_policy *policy)
24 {
25 struct em_data_callback em_cb = EM_DATA_CB(est_power);
26 struct device *cpu_dev;
27 int nr_opp, ret;
27 int nr_opp;
28
29 cpu_dev = get_cpu_device(cpumask_first(policy->cpus));
30
31 /* Do the actual CPUFreq init work ... */
32 ret = do_foo_cpufreq_init(policy);
33 if (ret)
34 return ret;
35
36 /* Find the number of OPPs for this policy */
37 nr_opp = foo_get_nr_opp(policy);
31 /* Find the number of OPPs for this policy */
32 nr_opp = foo_get_nr_opp(policy);
33
34 /* And register the new performance domain */
35 em_dev_register_perf_domain(cpu_dev, nr_opp, &em_cb, policy->cpus,
36 true);
37 }
38
39 /* And register the new performance domain */
40 em_dev_register_perf_domain(cpu_dev, nr_opp, &em_cb, policy->cpus,
41 true);
42
43 return 0;
44 }
39 static struct cpufreq_driver foo_cpufreq_driver = {
40 .register_em = foo_cpufreq_register_em,
41 };
MAINTAINERS
@ -2263,6 +2263,15 @@ L: linux-iio@vger.kernel.org
|
||||
S: Maintained
|
||||
F: drivers/counter/microchip-tcb-capture.c
|
||||
|
||||
ARM/MILBEAUT ARCHITECTURE
|
||||
M: Taichi Sugaya <sugaya.taichi@socionext.com>
|
||||
M: Takao Orito <orito.takao@socionext.com>
|
||||
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
|
||||
S: Maintained
|
||||
F: arch/arm/boot/dts/milbeaut*
|
||||
F: arch/arm/mach-milbeaut/
|
||||
N: milbeaut
|
||||
|
||||
ARM/MIOA701 MACHINE SUPPORT
|
||||
M: Robert Jarzmik <robert.jarzmik@free.fr>
|
||||
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
|
||||
@ -2729,10 +2738,11 @@ S: Maintained
|
||||
F: drivers/memory/*emif*
|
||||
|
||||
ARM/TEXAS INSTRUMENT KEYSTONE ARCHITECTURE
|
||||
M: Nishanth Menon <nm@ti.com>
|
||||
M: Santosh Shilimkar <ssantosh@kernel.org>
|
||||
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
|
||||
S: Maintained
|
||||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/ssantosh/linux-keystone.git
|
||||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/ti/linux.git
|
||||
F: arch/arm/boot/dts/keystone-*
|
||||
F: arch/arm/mach-keystone/
|
||||
|
||||
@ -3570,13 +3580,14 @@ L: netdev@vger.kernel.org
|
||||
S: Supported
|
||||
F: drivers/net/ethernet/broadcom/b44.*
|
||||
|
||||
BROADCOM B53 ETHERNET SWITCH DRIVER
|
||||
BROADCOM B53/SF2 ETHERNET SWITCH DRIVER
|
||||
M: Florian Fainelli <f.fainelli@gmail.com>
|
||||
L: netdev@vger.kernel.org
|
||||
L: openwrt-devel@lists.openwrt.org (subscribers-only)
|
||||
S: Supported
|
||||
F: Documentation/devicetree/bindings/net/dsa/brcm,b53.yaml
|
||||
F: drivers/net/dsa/b53/*
|
||||
F: drivers/net/dsa/bcm_sf2*
|
||||
F: include/linux/dsa/brcm.h
|
||||
F: include/linux/platform_data/b53.h
|
||||
|
||||
@ -3733,7 +3744,7 @@ F: drivers/scsi/bnx2i/
|
||||
BROADCOM BNX2X 10 GIGABIT ETHERNET DRIVER
|
||||
M: Ariel Elior <aelior@marvell.com>
|
||||
M: Sudarsana Kalluru <skalluru@marvell.com>
|
||||
M: GR-everest-linux-l2@marvell.com
|
||||
M: Manish Chopra <manishc@marvell.com>
|
||||
L: netdev@vger.kernel.org
|
||||
S: Supported
|
||||
F: drivers/net/ethernet/broadcom/bnx2x/
|
||||
@ -10445,7 +10456,7 @@ F: arch/riscv/include/uapi/asm/kvm*
|
||||
F: arch/riscv/kvm/
|
||||
|
||||
KERNEL VIRTUAL MACHINE for s390 (KVM/s390)
|
||||
M: Christian Borntraeger <borntraeger@de.ibm.com>
|
||||
M: Christian Borntraeger <borntraeger@linux.ibm.com>
|
||||
M: Janosch Frank <frankja@linux.ibm.com>
|
||||
R: David Hildenbrand <david@redhat.com>
|
||||
R: Claudio Imbrenda <imbrenda@linux.ibm.com>
|
||||
@ -15593,7 +15604,7 @@ F: drivers/scsi/qedi/
|
||||
|
||||
QLOGIC QL4xxx ETHERNET DRIVER
|
||||
M: Ariel Elior <aelior@marvell.com>
|
||||
M: GR-everest-linux-l2@marvell.com
|
||||
M: Manish Chopra <manishc@marvell.com>
|
||||
L: netdev@vger.kernel.org
|
||||
S: Supported
|
||||
F: drivers/net/ethernet/qlogic/qed/
|
||||
@ -15968,6 +15979,7 @@ F: arch/mips/generic/board-ranchu.c
|
||||
|
||||
RANDOM NUMBER DRIVER
|
||||
M: "Theodore Ts'o" <tytso@mit.edu>
|
||||
M: Jason A. Donenfeld <Jason@zx2c4.com>
|
||||
S: Maintained
|
||||
F: drivers/char/random.c
|
||||
|
||||
@ -16490,6 +16502,12 @@ T: git git://linuxtv.org/media_tree.git
|
||||
F: Documentation/devicetree/bindings/media/allwinner,sun8i-a83t-de2-rotate.yaml
|
||||
F: drivers/media/platform/sunxi/sun8i-rotate/
|
||||
|
||||
RPMSG TTY DRIVER
|
||||
M: Arnaud Pouliquen <arnaud.pouliquen@foss.st.com>
|
||||
L: linux-remoteproc@vger.kernel.org
|
||||
S: Maintained
|
||||
F: drivers/tty/rpmsg_tty.c
|
||||
|
||||
RTL2830 MEDIA DRIVER
|
||||
M: Antti Palosaari <crope@iki.fi>
|
||||
L: linux-media@vger.kernel.org
|
||||
@ -16573,7 +16591,7 @@ F: drivers/video/fbdev/savage/
|
||||
S390
|
||||
M: Heiko Carstens <hca@linux.ibm.com>
|
||||
M: Vasily Gorbik <gor@linux.ibm.com>
|
||||
M: Christian Borntraeger <borntraeger@de.ibm.com>
|
||||
M: Christian Borntraeger <borntraeger@linux.ibm.com>
|
||||
R: Alexander Gordeev <agordeev@linux.ibm.com>
|
||||
L: linux-s390@vger.kernel.org
|
||||
S: Supported
|
||||
@ -16612,7 +16630,8 @@ F: drivers/iommu/s390-iommu.c
|
||||
|
||||
S390 IUCV NETWORK LAYER
|
||||
M: Julian Wiedmann <jwi@linux.ibm.com>
|
||||
M: Karsten Graul <kgraul@linux.ibm.com>
|
||||
M: Alexandra Winter <wintera@linux.ibm.com>
|
||||
M: Wenjia Zhang <wenjia@linux.ibm.com>
|
||||
L: linux-s390@vger.kernel.org
|
||||
L: netdev@vger.kernel.org
|
||||
S: Supported
|
||||
@ -16623,7 +16642,8 @@ F: net/iucv/
|
||||
|
||||
S390 NETWORK DRIVERS
|
||||
M: Julian Wiedmann <jwi@linux.ibm.com>
|
||||
M: Karsten Graul <kgraul@linux.ibm.com>
|
||||
M: Alexandra Winter <wintera@linux.ibm.com>
|
||||
M: Wenjia Zhang <wenjia@linux.ibm.com>
|
||||
L: linux-s390@vger.kernel.org
|
||||
L: netdev@vger.kernel.org
|
||||
S: Supported
|
||||
@ -18484,6 +18504,7 @@ F: include/uapi/linux/pkt_sched.h
|
||||
F: include/uapi/linux/tc_act/
|
||||
F: include/uapi/linux/tc_ematch/
|
||||
F: net/sched/
|
||||
F: tools/testing/selftests/tc-testing
|
||||
|
||||
TC90522 MEDIA DRIVER
|
||||
M: Akihiro Tsukada <tskd08@gmail.com>
|
||||
@ -19032,11 +19053,12 @@ F: drivers/mmc/host/tifm_sd.c
|
||||
F: include/linux/tifm.h
|
||||
|
||||
TI KEYSTONE MULTICORE NAVIGATOR DRIVERS
|
||||
M: Nishanth Menon <nm@ti.com>
|
||||
M: Santosh Shilimkar <ssantosh@kernel.org>
|
||||
L: linux-kernel@vger.kernel.org
|
||||
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
|
||||
S: Maintained
|
||||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/ssantosh/linux-keystone.git
|
||||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/ti/linux.git
|
||||
F: drivers/soc/ti/*
|
||||
|
||||
TI LM49xxx FAMILY ASoC CODEC DRIVERS
|
||||
@ -20318,7 +20340,8 @@ F: arch/x86/include/asm/vmware.h
|
||||
F: arch/x86/kernel/cpu/vmware.c
|
||||
|
||||
VMWARE PVRDMA DRIVER
|
||||
M: Adit Ranadive <aditr@vmware.com>
|
||||
M: Bryan Tan <bryantan@vmware.com>
|
||||
M: Vishnu Dasa <vdasa@vmware.com>
|
||||
M: VMware PV-Drivers <pv-drivers@vmware.com>
|
||||
L: linux-rdma@vger.kernel.org
|
||||
S: Maintained
|
||||
|
Makefile
@@ -2,8 +2,8 @@
VERSION = 5
PATCHLEVEL = 16
SUBLEVEL = 0
EXTRAVERSION = -rc1
NAME = Trick or Treat
EXTRAVERSION = -rc4
NAME = Gobble Gobble

# *DOCUMENTATION*
# To see a list of typical targets execute "make help"
10
arch/Kconfig
10
arch/Kconfig
@ -991,6 +991,16 @@ config HAVE_ARCH_COMPAT_MMAP_BASES
|
||||
and vice-versa 32-bit applications to call 64-bit mmap().
|
||||
Required for applications doing different bitness syscalls.
|
||||
|
||||
config PAGE_SIZE_LESS_THAN_64KB
|
||||
def_bool y
|
||||
depends on !ARM64_64K_PAGES
|
||||
depends on !IA64_PAGE_SIZE_64KB
|
||||
depends on !PAGE_SIZE_64KB
|
||||
depends on !PARISC_PAGE_SIZE_64KB
|
||||
depends on !PPC_64K_PAGES
|
||||
depends on !PPC_256K_PAGES
|
||||
depends on !PAGE_SIZE_256KB
|
||||
|
||||
# This allows to use a set of generic functions to determine mmap base
|
||||
# address by giving priority to top-down scheme only if the process
|
||||
# is not in legacy mode (compat task, unlimited stack size or
|
||||
|
@ -488,3 +488,4 @@
|
||||
556 common landlock_restrict_self sys_landlock_restrict_self
|
||||
# 557 reserved for memfd_secret
|
||||
558 common process_mrelease sys_process_mrelease
|
||||
559 common futex_waitv sys_futex_waitv
|
||||
|
@ -36,7 +36,6 @@ void __flush_dcache_page(phys_addr_t paddr, unsigned long vaddr);
|
||||
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
|
||||
|
||||
void flush_dcache_page(struct page *page);
|
||||
void flush_dcache_folio(struct folio *folio);
|
||||
|
||||
void dma_cache_wback_inv(phys_addr_t start, unsigned long sz);
|
||||
void dma_cache_inv(phys_addr_t start, unsigned long sz);
|
||||
|
@ -1463,6 +1463,7 @@ config HIGHMEM
|
||||
bool "High Memory Support"
|
||||
depends on MMU
|
||||
select KMAP_LOCAL
|
||||
select KMAP_LOCAL_NON_LINEAR_PTE_ARRAY
|
||||
help
|
||||
The address space of ARM processors is only 4 Gigabytes large
|
||||
and it has to accommodate user address space, kernel address
|
||||
|
@ -506,11 +506,17 @@
|
||||
#address-cells = <3>;
|
||||
#interrupt-cells = <1>;
|
||||
#size-cells = <2>;
|
||||
interrupts = <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>,
|
||||
interrupts = <GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>,
|
||||
<GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
|
||||
interrupt-names = "pcie", "msi";
|
||||
interrupt-map-mask = <0x0 0x0 0x0 0x7>;
|
||||
interrupt-map = <0 0 0 1 &gicv2 GIC_SPI 143
|
||||
IRQ_TYPE_LEVEL_HIGH>,
|
||||
<0 0 0 2 &gicv2 GIC_SPI 144
|
||||
IRQ_TYPE_LEVEL_HIGH>,
|
||||
<0 0 0 3 &gicv2 GIC_SPI 145
|
||||
IRQ_TYPE_LEVEL_HIGH>,
|
||||
<0 0 0 4 &gicv2 GIC_SPI 146
|
||||
IRQ_TYPE_LEVEL_HIGH>;
|
||||
msi-controller;
|
||||
msi-parent = <&pcie0>;
|
||||
|
@ -242,6 +242,8 @@
|
||||
|
||||
gpio-controller;
|
||||
#gpio-cells = <2>;
|
||||
interrupt-controller;
|
||||
#interrupt-cells = <2>;
|
||||
};
|
||||
|
||||
pcie0: pcie@12000 {
|
||||
@ -408,7 +410,7 @@
|
||||
i2c0: i2c@18009000 {
|
||||
compatible = "brcm,iproc-i2c";
|
||||
reg = <0x18009000 0x50>;
|
||||
interrupts = <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>;
|
||||
interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
clock-frequency = <100000>;
|
||||
|
@ -290,7 +290,6 @@ extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr
|
||||
*/
|
||||
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
|
||||
extern void flush_dcache_page(struct page *);
|
||||
void flush_dcache_folio(struct folio *folio);
|
||||
|
||||
#define ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE 1
|
||||
static inline void flush_kernel_vmap_range(void *addr, int size)
|
||||
|
@ -33,7 +33,7 @@ extern void __iomem *sdr_ctl_base_addr;
|
||||
u32 socfpga_sdram_self_refresh(u32 sdr_base);
|
||||
extern unsigned int socfpga_sdram_self_refresh_sz;
|
||||
|
||||
extern char secondary_trampoline, secondary_trampoline_end;
|
||||
extern char secondary_trampoline[], secondary_trampoline_end[];
|
||||
|
||||
extern unsigned long socfpga_cpu1start_addr;
|
||||
|
||||
|
@ -20,14 +20,14 @@
|
||||
|
||||
static int socfpga_boot_secondary(unsigned int cpu, struct task_struct *idle)
|
||||
{
|
||||
int trampoline_size = &secondary_trampoline_end - &secondary_trampoline;
|
||||
int trampoline_size = secondary_trampoline_end - secondary_trampoline;
|
||||
|
||||
if (socfpga_cpu1start_addr) {
|
||||
/* This will put CPU #1 into reset. */
|
||||
writel(RSTMGR_MPUMODRST_CPU1,
|
||||
rst_manager_base_addr + SOCFPGA_RSTMGR_MODMPURST);
|
||||
|
||||
memcpy(phys_to_virt(0), &secondary_trampoline, trampoline_size);
|
||||
memcpy(phys_to_virt(0), secondary_trampoline, trampoline_size);
|
||||
|
||||
writel(__pa_symbol(secondary_startup),
|
||||
sys_manager_base_addr + (socfpga_cpu1start_addr & 0x000000ff));
|
||||
@ -45,12 +45,12 @@ static int socfpga_boot_secondary(unsigned int cpu, struct task_struct *idle)
|
||||
|
||||
static int socfpga_a10_boot_secondary(unsigned int cpu, struct task_struct *idle)
|
||||
{
|
||||
int trampoline_size = &secondary_trampoline_end - &secondary_trampoline;
|
||||
int trampoline_size = secondary_trampoline_end - secondary_trampoline;
|
||||
|
||||
if (socfpga_cpu1start_addr) {
|
||||
writel(RSTMGR_MPUMODRST_CPU1, rst_manager_base_addr +
|
||||
SOCFPGA_A10_RSTMGR_MODMPURST);
|
||||
memcpy(phys_to_virt(0), &secondary_trampoline, trampoline_size);
|
||||
memcpy(phys_to_virt(0), secondary_trampoline, trampoline_size);
|
||||
|
||||
writel(__pa_symbol(secondary_startup),
|
||||
sys_manager_base_addr + (socfpga_cpu1start_addr & 0x00000fff));
|
||||
|
@ -296,8 +296,7 @@
|
||||
pinctrl-0 = <&ufs_rst_n &ufs_refclk_out>;
|
||||
phys = <&ufs_0_phy>;
|
||||
phy-names = "ufs-phy";
|
||||
samsung,sysreg = <&syscon_fsys2>;
|
||||
samsung,ufs-shareability-reg-offset = <0x710>;
|
||||
samsung,sysreg = <&syscon_fsys2 0x710>;
|
||||
status = "disabled";
|
||||
};
|
||||
};
|
||||
|
@ -12,6 +12,17 @@
|
||||
|
||||
#define HAVE_FUNCTION_GRAPH_FP_TEST
|
||||
|
||||
/*
|
||||
* HAVE_FUNCTION_GRAPH_RET_ADDR_PTR means that the architecture can provide a
|
||||
* "return address pointer" which can be used to uniquely identify a return
|
||||
* address which has been overwritten.
|
||||
*
|
||||
* On arm64 we use the address of the caller's frame record, which remains the
|
||||
* same for the lifetime of the instrumented function, unlike the return
|
||||
* address in the LR.
|
||||
*/
|
||||
#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
|
||||
|
||||
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
|
||||
#define ARCH_SUPPORTS_FTRACE_OPS 1
|
||||
#else
|
||||
|
@ -91,7 +91,7 @@
|
||||
#define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)
|
||||
|
||||
/* TCR_EL2 Registers bits */
|
||||
#define TCR_EL2_RES1 ((1 << 31) | (1 << 23))
|
||||
#define TCR_EL2_RES1 ((1U << 31) | (1 << 23))
|
||||
#define TCR_EL2_TBI (1 << 20)
|
||||
#define TCR_EL2_PS_SHIFT 16
|
||||
#define TCR_EL2_PS_MASK (7 << TCR_EL2_PS_SHIFT)
|
||||
@ -276,7 +276,7 @@
|
||||
#define CPTR_EL2_TFP_SHIFT 10
|
||||
|
||||
/* Hyp Coprocessor Trap Register */
|
||||
#define CPTR_EL2_TCPAC (1 << 31)
|
||||
#define CPTR_EL2_TCPAC (1U << 31)
|
||||
#define CPTR_EL2_TAM (1 << 30)
|
||||
#define CPTR_EL2_TTA (1 << 20)
|
||||
#define CPTR_EL2_TFP (1 << CPTR_EL2_TFP_SHIFT)
|
||||
|
@ -76,7 +76,7 @@ static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t ptep,
|
||||
static inline void
|
||||
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
|
||||
{
|
||||
VM_BUG_ON(mm != &init_mm);
|
||||
VM_BUG_ON(mm && mm != &init_mm);
|
||||
__pmd_populate(pmdp, __pa(ptep), PMD_TYPE_TABLE | PMD_TABLE_UXN);
|
||||
}
|
||||
|
||||
|
@ -47,9 +47,6 @@ struct stack_info {
|
||||
* @prev_type: The type of stack this frame record was on, or a synthetic
|
||||
* value of STACK_TYPE_UNKNOWN. This is used to detect a
|
||||
* transition from one stack to another.
|
||||
*
|
||||
* @graph: When FUNCTION_GRAPH_TRACER is selected, holds the index of a
|
||||
* replacement lr value in the ftrace graph stack.
|
||||
*/
|
||||
struct stackframe {
|
||||
unsigned long fp;
|
||||
@ -57,9 +54,6 @@ struct stackframe {
|
||||
DECLARE_BITMAP(stacks_done, __NR_STACK_TYPES);
|
||||
unsigned long prev_fp;
|
||||
enum stack_type prev_type;
|
||||
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
|
||||
int graph;
|
||||
#endif
|
||||
#ifdef CONFIG_KRETPROBES
|
||||
struct llist_node *kr_cur;
|
||||
#endif
|
||||
|
@ -281,12 +281,22 @@ do { \
|
||||
(x) = (__force __typeof__(*(ptr)))__gu_val; \
|
||||
} while (0)
|
||||
|
||||
/*
|
||||
* We must not call into the scheduler between uaccess_ttbr0_enable() and
|
||||
* uaccess_ttbr0_disable(). As `x` and `ptr` could contain blocking functions,
|
||||
* we must evaluate these outside of the critical section.
|
||||
*/
|
||||
#define __raw_get_user(x, ptr, err) \
|
||||
do { \
|
||||
__typeof__(*(ptr)) __user *__rgu_ptr = (ptr); \
|
||||
__typeof__(x) __rgu_val; \
|
||||
__chk_user_ptr(ptr); \
|
||||
\
|
||||
uaccess_ttbr0_enable(); \
|
||||
__raw_get_mem("ldtr", x, ptr, err); \
|
||||
__raw_get_mem("ldtr", __rgu_val, __rgu_ptr, err); \
|
||||
uaccess_ttbr0_disable(); \
|
||||
\
|
||||
(x) = __rgu_val; \
|
||||
} while (0)
|
||||
|
||||
#define __get_user_error(x, ptr, err) \
|
||||
@ -310,14 +320,22 @@ do { \
|
||||
|
||||
#define get_user __get_user
|
||||
|
||||
/*
|
||||
* We must not call into the scheduler between __uaccess_enable_tco_async() and
|
||||
* __uaccess_disable_tco_async(). As `dst` and `src` may contain blocking
|
||||
* functions, we must evaluate these outside of the critical section.
|
||||
*/
|
||||
#define __get_kernel_nofault(dst, src, type, err_label) \
|
||||
do { \
|
||||
__typeof__(dst) __gkn_dst = (dst); \
|
||||
__typeof__(src) __gkn_src = (src); \
|
||||
int __gkn_err = 0; \
|
||||
\
|
||||
__uaccess_enable_tco_async(); \
|
||||
__raw_get_mem("ldr", *((type *)(dst)), \
|
||||
(__force type *)(src), __gkn_err); \
|
||||
__raw_get_mem("ldr", *((type *)(__gkn_dst)), \
|
||||
(__force type *)(__gkn_src), __gkn_err); \
|
||||
__uaccess_disable_tco_async(); \
|
||||
\
|
||||
if (unlikely(__gkn_err)) \
|
||||
goto err_label; \
|
||||
} while (0)
|
||||
@ -351,11 +369,19 @@ do { \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
/*
|
||||
* We must not call into the scheduler between uaccess_ttbr0_enable() and
|
||||
* uaccess_ttbr0_disable(). As `x` and `ptr` could contain blocking functions,
|
||||
* we must evaluate these outside of the critical section.
|
||||
*/
|
||||
#define __raw_put_user(x, ptr, err) \
|
||||
do { \
|
||||
__chk_user_ptr(ptr); \
|
||||
__typeof__(*(ptr)) __user *__rpu_ptr = (ptr); \
|
||||
__typeof__(*(ptr)) __rpu_val = (x); \
|
||||
__chk_user_ptr(__rpu_ptr); \
|
||||
\
|
||||
uaccess_ttbr0_enable(); \
|
||||
__raw_put_mem("sttr", x, ptr, err); \
|
||||
__raw_put_mem("sttr", __rpu_val, __rpu_ptr, err); \
|
||||
uaccess_ttbr0_disable(); \
|
||||
} while (0)
|
||||
|
||||
@ -380,14 +406,22 @@ do { \
|
||||
|
||||
#define put_user __put_user
|
||||
|
||||
/*
|
||||
* We must not call into the scheduler between __uaccess_enable_tco_async() and
|
||||
* __uaccess_disable_tco_async(). As `dst` and `src` may contain blocking
|
||||
* functions, we must evaluate these outside of the critical section.
|
||||
*/
|
||||
#define __put_kernel_nofault(dst, src, type, err_label) \
|
||||
do { \
|
||||
__typeof__(dst) __pkn_dst = (dst); \
|
||||
__typeof__(src) __pkn_src = (src); \
|
||||
int __pkn_err = 0; \
|
||||
\
|
||||
__uaccess_enable_tco_async(); \
|
||||
__raw_put_mem("str", *((type *)(src)), \
|
||||
(__force type *)(dst), __pkn_err); \
|
||||
__raw_put_mem("str", *((type *)(__pkn_src)), \
|
||||
(__force type *)(__pkn_dst), __pkn_err); \
|
||||
__uaccess_disable_tco_async(); \
|
||||
\
|
||||
if (unlikely(__pkn_err)) \
|
||||
goto err_label; \
|
||||
} while(0)
|
||||
|
@ -77,11 +77,17 @@
|
||||
.endm
|
||||
|
||||
SYM_CODE_START(ftrace_regs_caller)
|
||||
#ifdef BTI_C
|
||||
BTI_C
|
||||
#endif
|
||||
ftrace_regs_entry 1
|
||||
b ftrace_common
|
||||
SYM_CODE_END(ftrace_regs_caller)
|
||||
|
||||
SYM_CODE_START(ftrace_caller)
|
||||
#ifdef BTI_C
|
||||
BTI_C
|
||||
#endif
|
||||
ftrace_regs_entry 0
|
||||
b ftrace_common
|
||||
SYM_CODE_END(ftrace_caller)
|
||||
|
@ -244,8 +244,6 @@ void arch_ftrace_update_code(int command)
|
||||
* on the way back to parent. For this purpose, this function is called
|
||||
* in _mcount() or ftrace_caller() to replace return address (*parent) on
|
||||
* the call stack to return_to_handler.
|
||||
*
|
||||
* Note that @frame_pointer is used only for sanity check later.
|
||||
*/
|
||||
void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
|
||||
unsigned long frame_pointer)
|
||||
@ -263,8 +261,10 @@ void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
|
||||
*/
|
||||
old = *parent;
|
||||
|
||||
if (!function_graph_enter(old, self_addr, frame_pointer, NULL))
|
||||
if (!function_graph_enter(old, self_addr, frame_pointer,
|
||||
(void *)frame_pointer)) {
|
||||
*parent = return_hooker;
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef CONFIG_DYNAMIC_FTRACE
|
||||
|
@ -147,7 +147,7 @@ int machine_kexec_post_load(struct kimage *kimage)
|
||||
if (rc)
|
||||
return rc;
|
||||
kimage->arch.ttbr1 = __pa(trans_pgd);
|
||||
kimage->arch.zero_page = __pa(empty_zero_page);
|
||||
kimage->arch.zero_page = __pa_symbol(empty_zero_page);
|
||||
|
||||
reloc_size = __relocate_new_kernel_end - __relocate_new_kernel_start;
|
||||
memcpy(reloc_code, __relocate_new_kernel_start, reloc_size);
|
||||
|
@ -38,9 +38,6 @@ void start_backtrace(struct stackframe *frame, unsigned long fp,
|
||||
{
|
||||
frame->fp = fp;
|
||||
frame->pc = pc;
|
||||
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
|
||||
frame->graph = 0;
|
||||
#endif
|
||||
#ifdef CONFIG_KRETPROBES
|
||||
frame->kr_cur = NULL;
|
||||
#endif
|
||||
@ -116,20 +113,23 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
|
||||
frame->prev_fp = fp;
|
||||
frame->prev_type = info.type;
|
||||
|
||||
frame->pc = ptrauth_strip_insn_pac(frame->pc);
|
||||
|
||||
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
|
||||
if (tsk->ret_stack &&
|
||||
(ptrauth_strip_insn_pac(frame->pc) == (unsigned long)return_to_handler)) {
|
||||
struct ftrace_ret_stack *ret_stack;
|
||||
(frame->pc == (unsigned long)return_to_handler)) {
|
||||
unsigned long orig_pc;
|
||||
/*
|
||||
* This is a case where function graph tracer has
|
||||
* modified a return address (LR) in a stack frame
|
||||
* to hook a function return.
|
||||
* So replace it to an original value.
|
||||
*/
|
||||
ret_stack = ftrace_graph_get_ret_stack(tsk, frame->graph++);
|
||||
if (WARN_ON_ONCE(!ret_stack))
|
||||
orig_pc = ftrace_graph_ret_addr(tsk, NULL, frame->pc,
|
||||
(void *)frame->fp);
|
||||
if (WARN_ON_ONCE(frame->pc == orig_pc))
|
||||
return -EINVAL;
|
||||
frame->pc = ret_stack->ret;
|
||||
frame->pc = orig_pc;
|
||||
}
|
||||
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
|
||||
#ifdef CONFIG_KRETPROBES
|
||||
@ -137,8 +137,6 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
|
||||
frame->pc = kretprobe_find_ret_addr(tsk, (void *)frame->fp, &frame->kr_cur);
|
||||
#endif
|
||||
|
||||
frame->pc = ptrauth_strip_insn_pac(frame->pc);
|
||||
|
||||
return 0;
|
||||
}
|
||||
NOKPROBE_SYMBOL(unwind_frame);
|
||||
|
@ -223,7 +223,14 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
|
||||
r = 1;
|
||||
break;
|
||||
case KVM_CAP_NR_VCPUS:
|
||||
r = num_online_cpus();
|
||||
/*
|
||||
* ARM64 treats KVM_CAP_NR_CPUS differently from all other
|
||||
* architectures, as it does not always bound it to
|
||||
* KVM_CAP_MAX_VCPUS. It should not matter much because
|
||||
* this is just an advisory value.
|
||||
*/
|
||||
r = min_t(unsigned int, num_online_cpus(),
|
||||
kvm_arm_default_max_vcpus());
|
||||
break;
|
||||
case KVM_CAP_MAX_VCPUS:
|
||||
case KVM_CAP_MAX_VCPU_ID:
|
||||
|
@ -403,6 +403,8 @@ typedef bool (*exit_handler_fn)(struct kvm_vcpu *, u64 *);
|
||||
|
||||
static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu);
|
||||
|
||||
static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code);
|
||||
|
||||
/*
|
||||
* Allow the hypervisor to handle the exit with an exit handler if it has one.
|
||||
*
|
||||
@ -429,6 +431,18 @@ static inline bool kvm_hyp_handle_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
|
||||
*/
|
||||
static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
|
||||
{
|
||||
/*
|
||||
* Save PSTATE early so that we can evaluate the vcpu mode
|
||||
* early on.
|
||||
*/
|
||||
vcpu->arch.ctxt.regs.pstate = read_sysreg_el2(SYS_SPSR);
|
||||
|
||||
/*
|
||||
* Check whether we want to repaint the state one way or
|
||||
* another.
|
||||
*/
|
||||
early_exit_filter(vcpu, exit_code);
|
||||
|
||||
if (ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ)
|
||||
vcpu->arch.fault.esr_el2 = read_sysreg_el2(SYS_ESR);
|
||||
|
||||
|
@ -70,7 +70,12 @@ static inline void __sysreg_save_el1_state(struct kvm_cpu_context *ctxt)
|
||||
static inline void __sysreg_save_el2_return_state(struct kvm_cpu_context *ctxt)
|
||||
{
|
||||
ctxt->regs.pc = read_sysreg_el2(SYS_ELR);
|
||||
ctxt->regs.pstate = read_sysreg_el2(SYS_SPSR);
|
||||
/*
|
||||
* Guest PSTATE gets saved at guest fixup time in all
|
||||
* cases. We still need to handle the nVHE host side here.
|
||||
*/
|
||||
if (!has_vhe() && ctxt->__hyp_running_vcpu)
|
||||
ctxt->regs.pstate = read_sysreg_el2(SYS_SPSR);
|
||||
|
||||
if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN))
|
||||
ctxt_sys_reg(ctxt, DISR_EL1) = read_sysreg_s(SYS_VDISR_EL2);
|
||||
|
@ -233,7 +233,7 @@ static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu)
|
||||
* Returns false if the guest ran in AArch32 when it shouldn't have, and
|
||||
* thus should exit to the host, or true if a the guest run loop can continue.
|
||||
*/
|
||||
static bool handle_aarch32_guest(struct kvm_vcpu *vcpu, u64 *exit_code)
|
||||
static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
|
||||
{
|
||||
struct kvm *kvm = kern_hyp_va(vcpu->kvm);
|
||||
|
||||
@ -248,10 +248,7 @@ static bool handle_aarch32_guest(struct kvm_vcpu *vcpu, u64 *exit_code)
|
||||
vcpu->arch.target = -1;
|
||||
*exit_code &= BIT(ARM_EXIT_WITH_SERROR_BIT);
|
||||
*exit_code |= ARM_EXCEPTION_IL;
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/* Switch to the guest for legacy non-VHE systems */
|
||||
@ -316,9 +313,6 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
|
||||
/* Jump in the fire! */
|
||||
exit_code = __guest_enter(vcpu);
|
||||
|
||||
if (unlikely(!handle_aarch32_guest(vcpu, &exit_code)))
|
||||
break;
|
||||
|
||||
/* And we're baaack! */
|
||||
} while (fixup_guest_exit(vcpu, &exit_code));
|
||||
|
||||
|
@ -112,6 +112,10 @@ static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu)
|
||||
return hyp_exit_handlers;
|
||||
}
|
||||
|
||||
static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
|
||||
{
|
||||
}
|
||||
|
||||
/* Switch to the guest for VHE systems running in EL2 */
|
||||
static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
|
@ -1,26 +0,0 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/*
|
||||
* Timer support for Hexagon
|
||||
*
|
||||
* Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
|
||||
*/
|
||||
|
||||
#ifndef _ASM_TIMER_REGS_H
|
||||
#define _ASM_TIMER_REGS_H
|
||||
|
||||
/* This stuff should go into a platform specific file */
|
||||
#define TCX0_CLK_RATE 19200
|
||||
#define TIMER_ENABLE 0
|
||||
#define TIMER_CLR_ON_MATCH 1
|
||||
|
||||
/*
|
||||
* 8x50 HDD Specs 5-8. Simulator co-sim not fixed until
|
||||
* release 1.1, and then it's "adjustable" and probably not defaulted.
|
||||
*/
|
||||
#define RTOS_TIMER_INT 3
|
||||
#ifdef CONFIG_HEXAGON_COMET
|
||||
#define RTOS_TIMER_REGS_ADDR 0xAB000000UL
|
||||
#endif
|
||||
#define SLEEP_CLK_RATE 32000
|
||||
|
||||
#endif
|
@ -7,11 +7,10 @@
|
||||
#define _ASM_TIMEX_H
|
||||
|
||||
#include <asm-generic/timex.h>
|
||||
#include <asm/timer-regs.h>
|
||||
#include <asm/hexagon_vm.h>
|
||||
|
||||
/* Using TCX0 as our clock. CLOCK_TICK_RATE scheduled to be removed. */
|
||||
#define CLOCK_TICK_RATE TCX0_CLK_RATE
|
||||
#define CLOCK_TICK_RATE 19200
|
||||
|
||||
#define ARCH_HAS_READ_CURRENT_TIMER
|
||||
|
||||
|
arch/hexagon/kernel/.gitignore
@@ -0,0 +1 @@
vmlinux.lds
@ -17,9 +17,10 @@
|
||||
#include <linux/of_irq.h>
|
||||
#include <linux/module.h>
|
||||
|
||||
#include <asm/timer-regs.h>
|
||||
#include <asm/hexagon_vm.h>
|
||||
|
||||
#define TIMER_ENABLE BIT(0)
|
||||
|
||||
/*
|
||||
* For the clocksource we need:
|
||||
* pcycle frequency (600MHz)
|
||||
@ -33,6 +34,13 @@ cycles_t pcycle_freq_mhz;
|
||||
cycles_t thread_freq_mhz;
|
||||
cycles_t sleep_clk_freq;
|
||||
|
||||
/*
|
||||
* 8x50 HDD Specs 5-8. Simulator co-sim not fixed until
|
||||
* release 1.1, and then it's "adjustable" and probably not defaulted.
|
||||
*/
|
||||
#define RTOS_TIMER_INT 3
|
||||
#define RTOS_TIMER_REGS_ADDR 0xAB000000UL
|
||||
|
||||
static struct resource rtos_timer_resources[] = {
|
||||
{
|
||||
.start = RTOS_TIMER_REGS_ADDR,
|
||||
@ -80,7 +88,7 @@ static int set_next_event(unsigned long delta, struct clock_event_device *evt)
|
||||
iowrite32(0, &rtos_timer->clear);
|
||||
|
||||
iowrite32(delta, &rtos_timer->match);
|
||||
iowrite32(1 << TIMER_ENABLE, &rtos_timer->enable);
|
||||
iowrite32(TIMER_ENABLE, &rtos_timer->enable);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -27,6 +27,7 @@ void __raw_readsw(const void __iomem *addr, void *data, int len)
|
||||
*dst++ = *src;
|
||||
|
||||
}
|
||||
EXPORT_SYMBOL(__raw_readsw);
|
||||
|
||||
/*
|
||||
* __raw_writesw - read words a short at a time
|
||||
@ -47,6 +48,7 @@ void __raw_writesw(void __iomem *addr, const void *data, int len)
|
||||
|
||||
|
||||
}
|
||||
EXPORT_SYMBOL(__raw_writesw);
|
||||
|
||||
/* Pretty sure len is pre-adjusted for the length of the access already */
|
||||
void __raw_readsl(const void __iomem *addr, void *data, int len)
|
||||
@ -62,6 +64,7 @@ void __raw_readsl(const void __iomem *addr, void *data, int len)
|
||||
|
||||
|
||||
}
|
||||
EXPORT_SYMBOL(__raw_readsl);
|
||||
|
||||
void __raw_writesl(void __iomem *addr, const void *data, int len)
|
||||
{
|
||||
@ -76,3 +79,4 @@ void __raw_writesl(void __iomem *addr, const void *data, int len)
|
||||
|
||||
|
||||
}
|
||||
EXPORT_SYMBOL(__raw_writesl);
|
||||
|
@ -369,3 +369,4 @@
|
||||
446 common landlock_restrict_self sys_landlock_restrict_self
|
||||
# 447 reserved for memfd_secret
|
||||
448 common process_mrelease sys_process_mrelease
|
||||
449 common futex_waitv sys_futex_waitv
|
||||
|
@ -250,7 +250,6 @@ static inline void __flush_page_to_ram(void *vaddr)
|
||||
|
||||
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
|
||||
#define flush_dcache_page(page) __flush_page_to_ram(page_address(page))
|
||||
void flush_dcache_folio(struct folio *folio);
|
||||
#define flush_dcache_mmap_lock(mapping) do { } while (0)
|
||||
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
|
||||
#define flush_icache_page(vma, page) __flush_page_to_ram(page_address(page))
|
||||
|
@ -448,3 +448,4 @@
|
||||
446 common landlock_restrict_self sys_landlock_restrict_self
|
||||
# 447 reserved for memfd_secret
|
||||
448 common process_mrelease sys_process_mrelease
|
||||
449 common futex_waitv sys_futex_waitv
|
||||
|
@ -1145,7 +1145,7 @@ asmlinkage void set_esp0(unsigned long ssp)
|
||||
*/
|
||||
asmlinkage void fpsp040_die(void)
|
||||
{
|
||||
force_fatal_sig(SIGSEGV);
|
||||
force_exit_sig(SIGSEGV);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_M68KFPU_EMU
|
||||
|
@ -454,3 +454,4 @@
|
||||
446 common landlock_restrict_self sys_landlock_restrict_self
|
||||
# 447 reserved for memfd_secret
|
||||
448 common process_mrelease sys_process_mrelease
|
||||
449 common futex_waitv sys_futex_waitv
|
||||
|
@ -3097,7 +3097,7 @@ config STACKTRACE_SUPPORT
|
||||
config PGTABLE_LEVELS
|
||||
int
|
||||
default 4 if PAGE_SIZE_4KB && MIPS_VA_BITS_48
|
||||
default 3 if 64BIT && !PAGE_SIZE_64KB
|
||||
default 3 if 64BIT && (!PAGE_SIZE_64KB || MIPS_VA_BITS_48)
|
||||
default 2
|
||||
|
||||
config MIPS_AUTO_PFN_OFFSET
|
||||
|
@ -381,6 +381,12 @@ void clk_disable(struct clk *clk)
|
||||
|
||||
EXPORT_SYMBOL(clk_disable);
|
||||
|
||||
struct clk *clk_get_parent(struct clk *clk)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
EXPORT_SYMBOL(clk_get_parent);
|
||||
|
||||
unsigned long clk_get_rate(struct clk *clk)
|
||||
{
|
||||
if (!clk)
|
||||
|
@ -52,7 +52,7 @@ endif
|
||||
|
||||
vmlinuzobjs-$(CONFIG_KERNEL_XZ) += $(obj)/ashldi3.o
|
||||
|
||||
vmlinuzobjs-$(CONFIG_KERNEL_ZSTD) += $(obj)/bswapdi.o
|
||||
vmlinuzobjs-$(CONFIG_KERNEL_ZSTD) += $(obj)/bswapdi.o $(obj)/ashldi3.o
|
||||
|
||||
targets := $(notdir $(vmlinuzobjs-y))
|
||||
|
||||
|
@ -75,7 +75,7 @@ static unsigned int __init gen_fdt_mem_array(
|
||||
__init int yamon_dt_append_memory(void *fdt,
|
||||
const struct yamon_mem_region *regions)
|
||||
{
|
||||
unsigned long phys_memsize, memsize;
|
||||
unsigned long phys_memsize = 0, memsize;
|
||||
__be32 mem_array[2 * MAX_MEM_ARRAY_ENTRIES];
|
||||
unsigned int mem_entries;
|
||||
int i, err, mem_off;
|
||||
|
@ -61,8 +61,6 @@ static inline void flush_dcache_page(struct page *page)
|
||||
SetPageDcacheDirty(page);
|
||||
}
|
||||
|
||||
void flush_dcache_folio(struct folio *folio);
|
||||
|
||||
#define flush_dcache_mmap_lock(mapping) do { } while (0)
|
||||
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
|
||||
|
||||
|
@ -1734,8 +1734,6 @@ static inline void decode_cpucfg(struct cpuinfo_mips *c)
|
||||
|
||||
static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
|
||||
{
|
||||
decode_configs(c);
|
||||
|
||||
/* All Loongson processors covered here define ExcCode 16 as GSExc. */
|
||||
c->options |= MIPS_CPU_GSEXCEX;
|
||||
|
||||
@ -1796,6 +1794,8 @@ static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
|
||||
panic("Unknown Loongson Processor ID!");
|
||||
break;
|
||||
}
|
||||
|
||||
decode_configs(c);
|
||||
}
|
||||
#else
|
||||
static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu) { }
|
||||
|
@ -185,7 +185,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
|
||||
seq_puts(m, " tx39_cache");
|
||||
if (cpu_has_octeon_cache)
|
||||
seq_puts(m, " octeon_cache");
|
||||
if (cpu_has_fpu)
|
||||
if (raw_cpu_has_fpu)
|
||||
seq_puts(m, " fpu");
|
||||
if (cpu_has_32fpr)
|
||||
seq_puts(m, " 32fpr");
|
||||
|
@ -387,3 +387,4 @@
|
||||
446 n32 landlock_restrict_self sys_landlock_restrict_self
|
||||
# 447 reserved for memfd_secret
|
||||
448 n32 process_mrelease sys_process_mrelease
|
||||
449 n32 futex_waitv sys_futex_waitv
|
||||
|
@ -363,3 +363,4 @@
|
||||
446 n64 landlock_restrict_self sys_landlock_restrict_self
|
||||
# 447 reserved for memfd_secret
|
||||
448 n64 process_mrelease sys_process_mrelease
|
||||
449 n64 futex_waitv sys_futex_waitv
|
||||
|
@ -436,3 +436,4 @@
|
||||
446 o32 landlock_restrict_self sys_landlock_restrict_self
|
||||
# 447 reserved for memfd_secret
|
||||
448 o32 process_mrelease sys_process_mrelease
|
||||
449 o32 futex_waitv sys_futex_waitv
|
||||
|
@ -1067,7 +1067,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
|
||||
r = 1;
|
||||
break;
|
||||
case KVM_CAP_NR_VCPUS:
|
||||
r = num_online_cpus();
|
||||
r = min_t(unsigned int, num_online_cpus(), KVM_MAX_VCPUS);
|
||||
break;
|
||||
case KVM_CAP_MAX_VCPUS:
|
||||
r = KVM_MAX_VCPUS;
|
||||
|
@ -158,6 +158,12 @@ void clk_deactivate(struct clk *clk)
}
EXPORT_SYMBOL(clk_deactivate);

struct clk *clk_get_parent(struct clk *clk)
{
return NULL;
}
EXPORT_SYMBOL(clk_get_parent);

static inline u32 get_counter_resolution(void)
{
u32 res;

@ -27,7 +27,6 @@ void flush_cache_vunmap(unsigned long start, unsigned long end);

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
void flush_dcache_page(struct page *page);
void flush_dcache_folio(struct folio *folio);
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long vaddr, void *dst, void *src, int len);
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,

@ -29,7 +29,6 @@ extern void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
unsigned long pfn);
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
void flush_dcache_page(struct page *page);
void flush_dcache_folio(struct folio *folio);

extern void flush_icache_range(unsigned long start, unsigned long end);
extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);
@ -15,7 +15,12 @@
# Mike Shaver, Helge Deller and Martin K. Petersen
#

ifdef CONFIG_PARISC_SELF_EXTRACT
boot := arch/parisc/boot
KBUILD_IMAGE := $(boot)/bzImage
else
KBUILD_IMAGE := vmlinuz
endif

NM = sh $(srctree)/arch/parisc/nm
CHECKFLAGS += -D__hppa__=1

@ -231,6 +231,7 @@ CONFIG_CRYPTO_DEFLATE=y
CONFIG_CRC_CCITT=m
CONFIG_CRC_T10DIF=y
CONFIG_FONTS=y
CONFIG_PRINTK_TIME=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
CONFIG_DEBUG_MEMORY_INIT=y
@ -1,7 +1,9 @@
CONFIG_LOCALVERSION="-64bit"
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_KERNEL_LZ4=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_AUDIT=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_TASKSTATS=y

@ -35,6 +37,7 @@ CONFIG_MODVERSIONS=y
CONFIG_BLK_DEV_INTEGRITY=y
CONFIG_BINFMT_MISC=m
# CONFIG_COMPACTION is not set
CONFIG_MEMORY_FAILURE=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y

@ -65,12 +68,15 @@ CONFIG_SCSI_ISCSI_ATTRS=y
CONFIG_SCSI_SRP_ATTRS=y
CONFIG_ISCSI_BOOT_SYSFS=y
CONFIG_SCSI_MPT2SAS=y
CONFIG_SCSI_LASI700=m
CONFIG_SCSI_LASI700=y
CONFIG_SCSI_SYM53C8XX_2=y
CONFIG_SCSI_ZALON=y
CONFIG_SCSI_QLA_ISCSI=m
CONFIG_SCSI_DH=y
CONFIG_ATA=y
CONFIG_SATA_SIL=y
CONFIG_SATA_SIS=y
CONFIG_SATA_VIA=y
CONFIG_PATA_NS87415=y
CONFIG_PATA_SIL680=y
CONFIG_ATA_GENERIC=y

@ -79,6 +85,7 @@ CONFIG_MD_LINEAR=m
CONFIG_BLK_DEV_DM=m
CONFIG_DM_RAID=m
CONFIG_DM_UEVENT=y
CONFIG_DM_AUDIT=y
CONFIG_FUSION=y
CONFIG_FUSION_SPI=y
CONFIG_FUSION_SAS=y

@ -196,10 +203,15 @@ CONFIG_FB_MATROX_G=y
CONFIG_FB_MATROX_I2C=y
CONFIG_FB_MATROX_MAVEN=y
CONFIG_FB_RADEON=y
CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_CLUT224 is not set
CONFIG_HIDRAW=y
CONFIG_HID_PID=y
CONFIG_USB_HIDDEV=y
CONFIG_USB=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_OHCI_HCD=y
CONFIG_USB_OHCI_HCD_PLATFORM=y
CONFIG_UIO=y
CONFIG_UIO_PDRV_GENIRQ=m
CONFIG_UIO_AEC=m
@ -3,38 +3,19 @@
* Copyright (C) 1999 Hewlett-Packard (Frank Rowand)
* Copyright (C) 1999 Philipp Rumpf <prumpf@tux.org>
* Copyright (C) 1999 SuSE GmbH
* Copyright (C) 2021 Helge Deller <deller@gmx.de>
*/

#ifndef _PARISC_ASSEMBLY_H
#define _PARISC_ASSEMBLY_H

#define CALLEE_FLOAT_FRAME_SIZE 80

#ifdef CONFIG_64BIT
#define LDREG ldd
#define STREG std
#define LDREGX ldd,s
#define LDREGM ldd,mb
#define STREGM std,ma
#define SHRREG shrd
#define SHLREG shld
#define ANDCM andcm,*
#define COND(x) * ## x
#define RP_OFFSET 16
#define FRAME_SIZE 128
#define CALLEE_REG_FRAME_SIZE 144
#define REG_SZ 8
#define ASM_ULONG_INSN .dword
#else /* CONFIG_64BIT */
#define LDREG ldw
#define STREG stw
#define LDREGX ldwx,s
#define LDREGM ldwm
#define STREGM stwm
#define SHRREG shr
#define SHLREG shlw
#define ANDCM andcm
#define COND(x) x
#define RP_OFFSET 20
#define FRAME_SIZE 64
#define CALLEE_REG_FRAME_SIZE 128

@ -45,6 +26,7 @@
/* Frame alignment for 32- and 64-bit */
#define FRAME_ALIGN 64

#define CALLEE_FLOAT_FRAME_SIZE 80
#define CALLEE_SAVE_FRAME_SIZE (CALLEE_REG_FRAME_SIZE + CALLEE_FLOAT_FRAME_SIZE)

#ifdef CONFIG_PA20
@ -67,6 +49,28 @@

#ifdef __ASSEMBLY__

#ifdef CONFIG_64BIT
#define LDREG ldd
#define STREG std
#define LDREGX ldd,s
#define LDREGM ldd,mb
#define STREGM std,ma
#define SHRREG shrd
#define SHLREG shld
#define ANDCM andcm,*
#define COND(x) * ## x
#else /* CONFIG_64BIT */
#define LDREG ldw
#define STREG stw
#define LDREGX ldwx,s
#define LDREGM ldwm
#define STREGM stwm
#define SHRREG shr
#define SHLREG shlw
#define ANDCM andcm
#define COND(x) x
#endif

#ifdef CONFIG_64BIT
/* the 64-bit pa gnu assembler unfortunately defaults to .level 1.1 or 2.0 so
* work around that for now... */

@ -143,6 +147,17 @@
extrd,u \r, 63-(\sa), 64-(\sa), \t
.endm

/* Extract unsigned for 32- and 64-bit
* The extru instruction leaves the most significant 32 bits of the
* target register in an undefined state on PA 2.0 systems. */
.macro extru_safe r, p, len, t
#ifdef CONFIG_64BIT
extrd,u \r, 32+(\p), \len, \t
#else
extru \r, \p, \len, \t
#endif
.endm

/* load 32-bit 'value' into 'reg' compensating for the ldil
* sign-extension when running in wide mode.
* WARNING!! neither 'value' nor 'reg' can be expressions
@ -50,7 +50,6 @@ void invalidate_kernel_vmap_range(void *vaddr, int size);

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
void flush_dcache_page(struct page *page);
void flush_dcache_folio(struct folio *folio);

#define flush_dcache_mmap_lock(mapping) xa_lock_irq(&mapping->i_pages)
#define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&mapping->i_pages)

@ -5,6 +5,7 @@
#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/stringify.h>
#include <asm/assembly.h>

#define JUMP_LABEL_NOP_SIZE 4

@ -2,7 +2,7 @@
#ifndef _ASM_PARISC_RT_SIGFRAME_H
#define _ASM_PARISC_RT_SIGFRAME_H

#define SIGRETURN_TRAMP 3
#define SIGRETURN_TRAMP 4
#define SIGRESTARTBLOCK_TRAMP 5
#define TRAMP_SIZE (SIGRETURN_TRAMP + SIGRESTARTBLOCK_TRAMP)

@ -39,6 +39,7 @@ verify "$3"
if [ -n "${INSTALLKERNEL}" ]; then
if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi
if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi
if [ -x /usr/sbin/${INSTALLKERNEL} ]; then exec /usr/sbin/${INSTALLKERNEL} "$@"; fi
fi

# Default install
@ -366,17 +366,9 @@
*/
.macro L2_ptep pmd,pte,index,va,fault
#if CONFIG_PGTABLE_LEVELS == 3
extru \va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
extru_safe \va,31-ASM_PMD_SHIFT,ASM_BITS_PER_PMD,\index
#else
# if defined(CONFIG_64BIT)
extrd,u \va,63-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
#else
# if PAGE_SIZE > 4096
extru \va,31-ASM_PGDIR_SHIFT,32-ASM_PGDIR_SHIFT,\index
# else
extru \va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
# endif
# endif
extru_safe \va,31-ASM_PGDIR_SHIFT,ASM_BITS_PER_PGD,\index
#endif
dep %r0,31,PAGE_SHIFT,\pmd /* clear offset */
#if CONFIG_PGTABLE_LEVELS < 3

@ -386,7 +378,7 @@
bb,>=,n \pmd,_PxD_PRESENT_BIT,\fault
dep %r0,31,PxD_FLAG_SHIFT,\pmd /* clear flags */
SHLREG \pmd,PxD_VALUE_SHIFT,\pmd
extru \va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
extru_safe \va,31-PAGE_SHIFT,ASM_BITS_PER_PTE,\index
dep %r0,31,PAGE_SHIFT,\pmd /* clear offset */
shladd \index,BITS_PER_PTE_ENTRY,\pmd,\pmd /* pmd is now pte */
.endm
@ -288,21 +288,22 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs,
already in userspace. The first words of tramp are used to
save the previous sigrestartblock trampoline that might be
on the stack. We start the sigreturn trampoline at
SIGRESTARTBLOCK_TRAMP. */
SIGRESTARTBLOCK_TRAMP+X. */
err |= __put_user(in_syscall ? INSN_LDI_R25_1 : INSN_LDI_R25_0,
&frame->tramp[SIGRESTARTBLOCK_TRAMP+0]);
err |= __put_user(INSN_BLE_SR2_R0,
err |= __put_user(INSN_LDI_R20,
&frame->tramp[SIGRESTARTBLOCK_TRAMP+1]);
err |= __put_user(INSN_LDI_R20,
err |= __put_user(INSN_BLE_SR2_R0,
&frame->tramp[SIGRESTARTBLOCK_TRAMP+2]);
err |= __put_user(INSN_NOP, &frame->tramp[SIGRESTARTBLOCK_TRAMP+3]);

start = (unsigned long) &frame->tramp[SIGRESTARTBLOCK_TRAMP+0];
end = (unsigned long) &frame->tramp[SIGRESTARTBLOCK_TRAMP+3];
start = (unsigned long) &frame->tramp[0];
end = (unsigned long) &frame->tramp[TRAMP_SIZE];
flush_user_dcache_range_asm(start, end);
flush_user_icache_range_asm(start, end);

/* TRAMP Words 0-4, Length 5 = SIGRESTARTBLOCK_TRAMP
* TRAMP Words 5-7, Length 3 = SIGRETURN_TRAMP
* TRAMP Words 5-9, Length 4 = SIGRETURN_TRAMP
* So the SIGRETURN_TRAMP is at the end of SIGRESTARTBLOCK_TRAMP
*/
rp = (unsigned long) &frame->tramp[SIGRESTARTBLOCK_TRAMP];

@ -36,7 +36,7 @@ struct compat_regfile {
compat_int_t rf_sar;
};

#define COMPAT_SIGRETURN_TRAMP 3
#define COMPAT_SIGRETURN_TRAMP 4
#define COMPAT_SIGRESTARTBLOCK_TRAMP 5
#define COMPAT_TRAMP_SIZE (COMPAT_SIGRETURN_TRAMP + \
COMPAT_SIGRESTARTBLOCK_TRAMP)
@ -566,7 +566,7 @@ lws_compare_and_swap:
ldo R%lws_lock_start(%r20), %r28

/* Extract eight bits from r26 and hash lock (Bits 3-11) */
extru %r26, 28, 8, %r20
extru_safe %r26, 28, 8, %r20

/* Find lock to use, the hash is either one of 0 to
15, multiplied by 16 (keep it 16-byte aligned)

@ -751,7 +751,7 @@ cas2_lock_start:
ldo R%lws_lock_start(%r20), %r28

/* Extract eight bits from r26 and hash lock (Bits 3-11) */
extru %r26, 28, 8, %r20
extru_safe %r26, 28, 8, %r20

/* Find lock to use, the hash is either one of 0 to
15, multiplied by 16 (keep it 16-byte aligned)

@ -446,3 +446,4 @@
446 common landlock_restrict_self sys_landlock_restrict_self
# 447 reserved for memfd_secret
448 common process_mrelease sys_process_mrelease
449 common futex_waitv sys_futex_waitv
@ -249,30 +249,16 @@ void __init time_init(void)
static int __init init_cr16_clocksource(void)
{
/*
* The cr16 interval timers are not syncronized across CPUs on
* different sockets, so mark them unstable and lower rating on
* multi-socket SMP systems.
* The cr16 interval timers are not syncronized across CPUs, even if
* they share the same socket.
*/
if (num_online_cpus() > 1 && !running_on_qemu) {
int cpu;
unsigned long cpu0_loc;
cpu0_loc = per_cpu(cpu_data, 0).cpu_loc;
/* mark sched_clock unstable */
clear_sched_clock_stable();

for_each_online_cpu(cpu) {
if (cpu == 0)
continue;
if ((cpu0_loc != 0) &&
(cpu0_loc == per_cpu(cpu_data, cpu).cpu_loc))
continue;

/* mark sched_clock unstable */
clear_sched_clock_stable();

clocksource_cr16.name = "cr16_unstable";
clocksource_cr16.flags = CLOCK_SOURCE_UNSTABLE;
clocksource_cr16.rating = 0;
break;
}
clocksource_cr16.name = "cr16_unstable";
clocksource_cr16.flags = CLOCK_SOURCE_UNSTABLE;
clocksource_cr16.rating = 0;
}

/* register at clocksource framework */
@ -57,8 +57,6 @@ SECTIONS
{
. = KERNEL_BINARY_TEXT_START;

_stext = .; /* start of kernel text, includes init code & data */

__init_begin = .;
HEAD_TEXT_SECTION
MLONGCALL_DISCARD(INIT_TEXT_SECTION(8))

@ -82,6 +80,7 @@ SECTIONS
/* freed after init ends here */

_text = .; /* Text and read-only data */
_stext = .;
MLONGCALL_KEEP(INIT_TEXT_SECTION(8))
.text ALIGN(PAGE_SIZE) : {
TEXT_TEXT

@ -196,3 +196,6 @@ clean-files := vmlinux.lds
# Force dependency (incbin is bad)
$(obj)/vdso32_wrapper.o : $(obj)/vdso32/vdso32.so.dbg
$(obj)/vdso64_wrapper.o : $(obj)/vdso64/vdso64.so.dbg

# for cleaning
subdir- += vdso32 vdso64
@ -202,11 +202,11 @@ vmap_stack_overflow:
mfspr r1, SPRN_SPRG_THREAD
lwz r1, TASK_CPU - THREAD(r1)
slwi r1, r1, 3
addis r1, r1, emergency_ctx@ha
addis r1, r1, emergency_ctx-PAGE_OFFSET@ha
#else
lis r1, emergency_ctx@ha
lis r1, emergency_ctx-PAGE_OFFSET@ha
#endif
lwz r1, emergency_ctx@l(r1)
lwz r1, emergency_ctx-PAGE_OFFSET@l(r1)
addi r1, r1, THREAD_SIZE - INT_FRAME_SIZE
EXCEPTION_PROLOG_2 0 vmap_stack_overflow
prepare_transfer_to_handler
@ -733,6 +733,7 @@ _GLOBAL(mmu_pin_tlb)
#ifdef CONFIG_PIN_TLB_DATA
LOAD_REG_IMMEDIATE(r6, PAGE_OFFSET)
LOAD_REG_IMMEDIATE(r7, MI_SVALID | MI_PS8MEG | _PMD_ACCESSED)
li r8, 0
#ifdef CONFIG_PIN_TLB_IMMR
li r0, 3
#else

@ -741,26 +742,26 @@ _GLOBAL(mmu_pin_tlb)
mtctr r0
cmpwi r4, 0
beq 4f
LOAD_REG_IMMEDIATE(r8, 0xf0 | _PAGE_RO | _PAGE_SPS | _PAGE_SH | _PAGE_PRESENT)
LOAD_REG_ADDR(r9, _sinittext)

2: ori r0, r6, MD_EVALID
ori r12, r8, 0xf0 | _PAGE_RO | _PAGE_SPS | _PAGE_SH | _PAGE_PRESENT
mtspr SPRN_MD_CTR, r5
mtspr SPRN_MD_EPN, r0
mtspr SPRN_MD_TWC, r7
mtspr SPRN_MD_RPN, r8
mtspr SPRN_MD_RPN, r12
addi r5, r5, 0x100
addis r6, r6, SZ_8M@h
addis r8, r8, SZ_8M@h
cmplw r6, r9
bdnzt lt, 2b

4: LOAD_REG_IMMEDIATE(r8, 0xf0 | _PAGE_DIRTY | _PAGE_SPS | _PAGE_SH | _PAGE_PRESENT)
4:
2: ori r0, r6, MD_EVALID
ori r12, r8, 0xf0 | _PAGE_DIRTY | _PAGE_SPS | _PAGE_SH | _PAGE_PRESENT
mtspr SPRN_MD_CTR, r5
mtspr SPRN_MD_EPN, r0
mtspr SPRN_MD_TWC, r7
mtspr SPRN_MD_RPN, r8
mtspr SPRN_MD_RPN, r12
addi r5, r5, 0x100
addis r6, r6, SZ_8M@h
addis r8, r8, SZ_8M@h

@ -781,7 +782,7 @@ _GLOBAL(mmu_pin_tlb)
#endif
#if defined(CONFIG_PIN_TLB_IMMR) || defined(CONFIG_PIN_TLB_DATA)
lis r0, (MD_RSV4I | MD_TWAM)@h
mtspr SPRN_MI_CTR, r0
mtspr SPRN_MD_CTR, r0
#endif
mtspr SPRN_SRR1, r10
mtspr SPRN_SRR0, r11
@ -25,8 +25,14 @@ static inline int __get_user_sigset(sigset_t *dst, const sigset_t __user *src)

return __get_user(dst->sig[0], (u64 __user *)&src->sig[0]);
}
#define unsafe_get_user_sigset(dst, src, label) \
unsafe_get_user((dst)->sig[0], (u64 __user *)&(src)->sig[0], label)
#define unsafe_get_user_sigset(dst, src, label) do { \
sigset_t *__dst = dst; \
const sigset_t __user *__src = src; \
int i; \
\
for (i = 0; i < _NSIG_WORDS; i++) \
unsafe_get_user(__dst->sig[i], &__src->sig[i], label); \
} while (0)

#ifdef CONFIG_VSX
extern unsigned long copy_vsx_to_user(void __user *to,
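The unsafe_get_user_sigset change above replaces a single 64-bit access with a per-word copy: a sigset is stored as _NSIG_WORDS longs, and one u64 read does not map cleanly onto the two-word layout used on 32-bit builds. A minimal sketch of the per-word copy under an assumed two-word layout (all names here are illustrative, not the kernel's):

#include <stdio.h>

#define NSIG_WORDS 2	/* assumed two-word signal mask, as on a 32-bit build */

struct example_sigset { unsigned long sig[NSIG_WORDS]; };

int main(void)
{
	struct example_sigset src = { { 0x1UL, 0x2UL } };
	struct example_sigset dst = { { 0, 0 } };

	/* copy every word so no part of the mask is lost or reordered */
	for (int i = 0; i < NSIG_WORDS; i++)
		dst.sig[i] = src.sig[i];

	printf("%lx %lx\n", dst.sig[0], dst.sig[1]);	/* prints: 1 2 */
	return 0;
}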
@ -1063,7 +1063,7 @@ SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
* We kill the task with a SIGSEGV in this situation.
*/
if (do_setcontext(new_ctx, regs, 0)) {
force_fatal_sig(SIGSEGV);
force_exit_sig(SIGSEGV);
return -EFAULT;
}

@ -704,7 +704,7 @@ SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
*/

if (__get_user_sigset(&set, &new_ctx->uc_sigmask)) {
force_fatal_sig(SIGSEGV);
force_exit_sig(SIGSEGV);
return -EFAULT;
}
set_current_blocked(&set);

@ -713,7 +713,7 @@ SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
return -EFAULT;
if (__unsafe_restore_sigcontext(current, NULL, 0, &new_ctx->uc_mcontext)) {
user_read_access_end();
force_fatal_sig(SIGSEGV);
force_exit_sig(SIGSEGV);
return -EFAULT;
}
user_read_access_end();

@ -528,3 +528,4 @@
446 common landlock_restrict_self sys_landlock_restrict_self
# 447 reserved for memfd_secret
448 common process_mrelease sys_process_mrelease
449 common futex_waitv sys_futex_waitv
@ -187,6 +187,12 @@ static void watchdog_smp_panic(int cpu, u64 tb)
if (sysctl_hardlockup_all_cpu_backtrace)
trigger_allbutself_cpu_backtrace();

/*
* Force flush any remote buffers that might be stuck in IRQ context
* and therefore could not run their irq_work.
*/
printk_trigger_flush();

if (hardlockup_panic)
nmi_panic(NULL, "Hard LOCKUP");
@ -695,6 +695,7 @@ static void flush_guest_tlb(struct kvm *kvm)
"r" (0) : "memory");
}
asm volatile("ptesync": : :"memory");
// POWER9 congruence-class TLBIEL leaves ERAT. Flush it now.
asm volatile(PPC_RADIX_INVALIDATE_ERAT_GUEST : : :"memory");
} else {
for (set = 0; set < kvm->arch.tlb_sets; ++set) {

@ -705,7 +706,9 @@ static void flush_guest_tlb(struct kvm *kvm)
rb += PPC_BIT(51); /* increment set number */
}
asm volatile("ptesync": : :"memory");
asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT : : :"memory");
// POWER9 congruence-class TLBIEL leaves ERAT. Flush it now.
if (cpu_has_feature(CPU_FTR_ARCH_300))
asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT : : :"memory");
}
}
@ -2005,7 +2005,7 @@ hcall_real_table:
.globl hcall_real_table_end
hcall_real_table_end:

_GLOBAL(kvmppc_h_set_xdabr)
_GLOBAL_TOC(kvmppc_h_set_xdabr)
EXPORT_SYMBOL_GPL(kvmppc_h_set_xdabr)
andi. r0, r5, DABRX_USER | DABRX_KERNEL
beq 6f

@ -2015,7 +2015,7 @@ EXPORT_SYMBOL_GPL(kvmppc_h_set_xdabr)
6: li r3, H_PARAMETER
blr

_GLOBAL(kvmppc_h_set_dabr)
_GLOBAL_TOC(kvmppc_h_set_dabr)
EXPORT_SYMBOL_GPL(kvmppc_h_set_dabr)
li r5, DABRX_USER | DABRX_KERNEL
3:
@ -641,9 +641,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
* implementations just count online CPUs.
*/
if (hv_enabled)
r = num_present_cpus();
r = min_t(unsigned int, num_present_cpus(), KVM_MAX_VCPUS);
else
r = num_online_cpus();
r = min_t(unsigned int, num_online_cpus(), KVM_MAX_VCPUS);
break;
case KVM_CAP_MAX_VCPUS:
r = KVM_MAX_VCPUS;
@ -314,7 +314,7 @@ static unsigned long __init kaslr_choose_location(void *dt_ptr, phys_addr_t size
pr_warn("KASLR: No safe seed for randomizing the kernel base.\n");

ram = min_t(phys_addr_t, __max_low_memory, size);
ram = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM, true, false);
ram = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM, true, true);
linear_sz = min_t(unsigned long, ram, SZ_512M);

/* If the linear size is smaller than 64M, do not randmize */

@ -645,7 +645,7 @@ static void early_init_this_mmu(void)

if (map)
linear_map_top = map_mem_in_cams(linear_map_top,
num_cams, true, true);
num_cams, false, true);
}
#endif

@ -766,7 +766,7 @@ void setup_initial_memory_limit(phys_addr_t first_memblock_base,
num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;

linear_sz = map_mem_in_cams(first_memblock_size, num_cams,
false, true);
true, true);

ppc64_rma_size = min_t(u64, linear_sz, 0x40000000);
} else
@ -376,9 +376,9 @@ static void initialize_form2_numa_distance_lookup_table(void)
{
int i, j;
struct device_node *root;
const __u8 *numa_dist_table;
const __u8 *form2_distances;
const __be32 *numa_lookup_index;
int numa_dist_table_length;
int form2_distances_length;
int max_numa_index, distance_index;

if (firmware_has_feature(FW_FEATURE_OPAL))

@ -392,45 +392,41 @@ static void initialize_form2_numa_distance_lookup_table(void)
max_numa_index = of_read_number(&numa_lookup_index[0], 1);

/* first element of the array is the size and is encode-int */
numa_dist_table = of_get_property(root, "ibm,numa-distance-table", NULL);
numa_dist_table_length = of_read_number((const __be32 *)&numa_dist_table[0], 1);
form2_distances = of_get_property(root, "ibm,numa-distance-table", NULL);
form2_distances_length = of_read_number((const __be32 *)&form2_distances[0], 1);
/* Skip the size which is encoded int */
numa_dist_table += sizeof(__be32);
form2_distances += sizeof(__be32);

pr_debug("numa_dist_table_len = %d, numa_dist_indexes_len = %d\n",
numa_dist_table_length, max_numa_index);
pr_debug("form2_distances_len = %d, numa_dist_indexes_len = %d\n",
form2_distances_length, max_numa_index);

for (i = 0; i < max_numa_index; i++)
/* +1 skip the max_numa_index in the property */
numa_id_index_table[i] = of_read_number(&numa_lookup_index[i + 1], 1);

if (numa_dist_table_length != max_numa_index * max_numa_index) {
if (form2_distances_length != max_numa_index * max_numa_index) {
WARN(1, "Wrong NUMA distance information\n");
/* consider everybody else just remote. */
for (i = 0; i < max_numa_index; i++) {
for (j = 0; j < max_numa_index; j++) {
int nodeA = numa_id_index_table[i];
int nodeB = numa_id_index_table[j];

if (nodeA == nodeB)
numa_distance_table[nodeA][nodeB] = LOCAL_DISTANCE;
else
numa_distance_table[nodeA][nodeB] = REMOTE_DISTANCE;
}
}
form2_distances = NULL; // don't use it
}

distance_index = 0;
for (i = 0; i < max_numa_index; i++) {
for (j = 0; j < max_numa_index; j++) {
int nodeA = numa_id_index_table[i];
int nodeB = numa_id_index_table[j];
int dist;

numa_distance_table[nodeA][nodeB] = numa_dist_table[distance_index++];
pr_debug("dist[%d][%d]=%d ", nodeA, nodeB, numa_distance_table[nodeA][nodeB]);
if (form2_distances)
dist = form2_distances[distance_index++];
else if (nodeA == nodeB)
dist = LOCAL_DISTANCE;
else
dist = REMOTE_DISTANCE;
numa_distance_table[nodeA][nodeB] = dist;
pr_debug("dist[%d][%d]=%d ", nodeA, nodeB, dist);
}
}

of_node_put(root);
}
@ -186,7 +186,6 @@ err:
static int mcu_remove(struct i2c_client *client)
{
struct mcu *mcu = i2c_get_clientdata(client);
int ret;

kthread_stop(shutdown_thread);
@ -1094,15 +1094,6 @@ static phys_addr_t ddw_memory_hotplug_max(void)
phys_addr_t max_addr = memory_hotplug_max();
struct device_node *memory;

/*
* The "ibm,pmemory" can appear anywhere in the address space.
* Assuming it is still backed by page structs, set the upper limit
* for the huge DMA window as MAX_PHYSMEM_BITS.
*/
if (of_find_node_by_type(NULL, "ibm,pmemory"))
return (sizeof(phys_addr_t) * 8 <= MAX_PHYSMEM_BITS) ?
(phys_addr_t) -1 : (1ULL << MAX_PHYSMEM_BITS);

for_each_node_by_type(memory, "memory") {
unsigned long start, size;
int n_mem_addr_cells, n_mem_size_cells, len;

@ -1238,7 +1229,6 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
u32 ddw_avail[DDW_APPLICABLE_SIZE];
struct dma_win *window;
struct property *win64;
bool ddw_enabled = false;
struct failed_ddw_pdn *fpdn;
bool default_win_removed = false, direct_mapping = false;
bool pmem_present;

@ -1253,7 +1243,6 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)

if (find_existing_ddw(pdn, &dev->dev.archdata.dma_offset, &len)) {
direct_mapping = (len >= max_ram_len);
ddw_enabled = true;
goto out_unlock;
}

@ -1367,8 +1356,10 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
len = order_base_2(query.largest_available_block << page_shift);
win_name = DMA64_PROPNAME;
} else {
direct_mapping = true;
win_name = DIRECT64_PROPNAME;
direct_mapping = !default_win_removed ||
(len == MAX_PHYSMEM_BITS) ||
(!pmem_present && (len == max_ram_len));
win_name = direct_mapping ? DIRECT64_PROPNAME : DMA64_PROPNAME;
}

ret = create_ddw(dev, ddw_avail, &create, page_shift, len);

@ -1406,8 +1397,8 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
dev_info(&dev->dev, "failed to map DMA window for %pOF: %d\n",
dn, ret);

/* Make sure to clean DDW if any TCE was set*/
clean_dma_window(pdn, win64->value);
/* Make sure to clean DDW if any TCE was set*/
clean_dma_window(pdn, win64->value);
goto out_del_list;
}
} else {

@ -1454,7 +1445,6 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)
spin_unlock(&dma_win_list_lock);

dev->dev.archdata.dma_offset = win_addr;
ddw_enabled = true;
goto out_unlock;

out_del_list:

@ -1490,10 +1480,10 @@ out_unlock:
* as RAM, then we failed to create a window to cover persistent
* memory and need to set the DMA limit.
*/
if (pmem_present && ddw_enabled && direct_mapping && len == max_ram_len)
if (pmem_present && direct_mapping && len == max_ram_len)
dev->dev.bus_dma_limit = dev->dev.archdata.dma_offset + (1ULL << len);

return ddw_enabled && direct_mapping;
return direct_mapping;
}

static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
@ -3,7 +3,6 @@ config PPC_XIVE
bool
select PPC_SMP_MUXED_IPI
select HARDIRQS_SW_RESEND
select IRQ_DOMAIN_NOMAP

config PPC_XIVE_NATIVE
bool

@ -1443,8 +1443,7 @@ static const struct irq_domain_ops xive_irq_domain_ops = {

static void __init xive_init_host(struct device_node *np)
{
xive_irq_domain = irq_domain_add_nomap(np, XIVE_MAX_IRQ,
&xive_irq_domain_ops, NULL);
xive_irq_domain = irq_domain_add_tree(np, &xive_irq_domain_ops, NULL);
if (WARN_ON(xive_irq_domain == NULL))
return;
irq_set_default_host(xive_irq_domain);
@ -107,11 +107,13 @@ PHONY += vdso_install
vdso_install:
$(Q)$(MAKE) $(build)=arch/riscv/kernel/vdso $@

ifeq ($(KBUILD_EXTMOD),)
ifeq ($(CONFIG_MMU),y)
prepare: vdso_prepare
vdso_prepare: prepare0
$(Q)$(MAKE) $(build)=arch/riscv/kernel/vdso include/generated/vdso-offsets.h
endif
endif

ifneq ($(CONFIG_XIP_KERNEL),y)
ifeq ($(CONFIG_RISCV_M_MODE)$(CONFIG_SOC_CANAAN),yy)
@ -19,6 +19,8 @@ CONFIG_SOC_VIRT=y
CONFIG_SOC_MICROCHIP_POLARFIRE=y
CONFIG_SMP=y
CONFIG_HOTPLUG_CPU=y
CONFIG_VIRTUALIZATION=y
CONFIG_KVM=m
CONFIG_JUMP_LABEL=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y

@ -19,6 +19,8 @@ CONFIG_SOC_VIRT=y
CONFIG_ARCH_RV32I=y
CONFIG_SMP=y
CONFIG_HOTPLUG_CPU=y
CONFIG_VIRTUALIZATION=y
CONFIG_KVM=m
CONFIG_JUMP_LABEL=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
@ -12,14 +12,12 @@
#include <linux/types.h>
#include <linux/kvm.h>
#include <linux/kvm_types.h>
#include <asm/csr.h>
#include <asm/kvm_vcpu_fp.h>
#include <asm/kvm_vcpu_timer.h>

#ifdef CONFIG_64BIT
#define KVM_MAX_VCPUS (1U << 16)
#else
#define KVM_MAX_VCPUS (1U << 9)
#endif
#define KVM_MAX_VCPUS \
((HGATP_VMID_MASK >> HGATP_VMID_SHIFT) + 1)

#define KVM_HALT_POLL_NS_DEFAULT 500000
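The new RISC-V KVM_MAX_VCPUS above is derived from the width of the hgatp VMID field rather than a fixed constant. A small stand-alone sketch of that arithmetic, using an assumed 14-bit VMID field at bit 44 (illustrative values only; the real masks come from asm/csr.h):

#include <stdio.h>

/* Illustrative values only: assume a 14-bit VMID field starting at bit 44. */
#define HGATP_VMID_SHIFT	44
#define HGATP_VMID_MASK		(0x3fffULL << HGATP_VMID_SHIFT)

int main(void)
{
	/* number of distinct VMID values representable in the assumed field */
	unsigned long long max_vcpus = (HGATP_VMID_MASK >> HGATP_VMID_SHIFT) + 1;

	printf("KVM_MAX_VCPUS = %llu\n", max_vcpus);	/* 16384 with these values */
	return 0;
}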
@ -453,6 +453,12 @@ void kvm_arch_flush_shadow_all(struct kvm *kvm)
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
struct kvm_memory_slot *slot)
{
gpa_t gpa = slot->base_gfn << PAGE_SHIFT;
phys_addr_t size = slot->npages << PAGE_SHIFT;

spin_lock(&kvm->mmu_lock);
stage2_unmap_range(kvm, gpa, size, false);
spin_unlock(&kvm->mmu_lock);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
/**
/*
* Copyright (c) 2019 Western Digital Corporation or its affiliates.
*
* Authors: