forked from Minki/linux
asm-generic: merge branch 'master' of torvalds/linux-2.6
Fixes a merge conflict against the x86 tree caused by a fix to atomic.h which I renamed to atomic_long.h. Signed-off-by: Arnd Bergmann <arnd@arndb.de>
This commit is contained in:
commit
5b02ee3d21
@ -60,3 +60,62 @@ Description:
|
||||
Indicates whether the block layer should automatically
|
||||
generate checksums for write requests bound for
|
||||
devices that support receiving integrity metadata.
|
||||
|
||||
What: /sys/block/<disk>/alignment_offset
|
||||
Date: April 2009
|
||||
Contact: Martin K. Petersen <martin.petersen@oracle.com>
|
||||
Description:
|
||||
Storage devices may report a physical block size that is
|
||||
bigger than the logical block size (for instance a drive
|
||||
with 4KB physical sectors exposing 512-byte logical
|
||||
blocks to the operating system). This parameter
|
||||
indicates how many bytes the beginning of the device is
|
||||
offset from the disk's natural alignment.
|
||||
|
||||
What: /sys/block/<disk>/<partition>/alignment_offset
|
||||
Date: April 2009
|
||||
Contact: Martin K. Petersen <martin.petersen@oracle.com>
|
||||
Description:
|
||||
Storage devices may report a physical block size that is
|
||||
bigger than the logical block size (for instance a drive
|
||||
with 4KB physical sectors exposing 512-byte logical
|
||||
blocks to the operating system). This parameter
|
||||
indicates how many bytes the beginning of the partition
|
||||
is offset from the disk's natural alignment.
|
||||
|
||||
What: /sys/block/<disk>/queue/logical_block_size
|
||||
Date: May 2009
|
||||
Contact: Martin K. Petersen <martin.petersen@oracle.com>
|
||||
Description:
|
||||
This is the smallest unit the storage device can
|
||||
address. It is typically 512 bytes.
|
||||
|
||||
What: /sys/block/<disk>/queue/physical_block_size
|
||||
Date: May 2009
|
||||
Contact: Martin K. Petersen <martin.petersen@oracle.com>
|
||||
Description:
|
||||
This is the smallest unit the storage device can write
|
||||
without resorting to read-modify-write operation. It is
|
||||
usually the same as the logical block size but may be
|
||||
bigger. One example is SATA drives with 4KB sectors
|
||||
that expose a 512-byte logical block size to the
|
||||
operating system.
|
||||
|
||||
What: /sys/block/<disk>/queue/minimum_io_size
|
||||
Date: April 2009
|
||||
Contact: Martin K. Petersen <martin.petersen@oracle.com>
|
||||
Description:
|
||||
Storage devices may report a preferred minimum I/O size,
|
||||
which is the smallest request the device can perform
|
||||
without incurring a read-modify-write penalty. For disk
|
||||
drives this is often the physical block size. For RAID
|
||||
arrays it is often the stripe chunk size.
|
||||
|
||||
What: /sys/block/<disk>/queue/optimal_io_size
|
||||
Date: April 2009
|
||||
Contact: Martin K. Petersen <martin.petersen@oracle.com>
|
||||
Description:
|
||||
Storage devices may report an optimal I/O size, which is
|
||||
the device's preferred unit of receiving I/O. This is
|
||||
rarely reported for disk drives. For RAID devices it is
|
||||
usually the stripe width or the internal block size.
|
||||
|
33
Documentation/ABI/testing/sysfs-bus-pci-devices-cciss
Normal file
33
Documentation/ABI/testing/sysfs-bus-pci-devices-cciss
Normal file
@ -0,0 +1,33 @@
|
||||
Where: /sys/bus/pci/devices/<dev>/ccissX/cXdY/model
|
||||
Date: March 2009
|
||||
Kernel Version: 2.6.30
|
||||
Contact: iss_storagedev@hp.com
|
||||
Description: Displays the SCSI INQUIRY page 0 model for logical drive
|
||||
Y of controller X.
|
||||
|
||||
Where: /sys/bus/pci/devices/<dev>/ccissX/cXdY/rev
|
||||
Date: March 2009
|
||||
Kernel Version: 2.6.30
|
||||
Contact: iss_storagedev@hp.com
|
||||
Description: Displays the SCSI INQUIRY page 0 revision for logical
|
||||
drive Y of controller X.
|
||||
|
||||
Where: /sys/bus/pci/devices/<dev>/ccissX/cXdY/unique_id
|
||||
Date: March 2009
|
||||
Kernel Version: 2.6.30
|
||||
Contact: iss_storagedev@hp.com
|
||||
Description: Displays the SCSI INQUIRY page 83 serial number for logical
|
||||
drive Y of controller X.
|
||||
|
||||
Where: /sys/bus/pci/devices/<dev>/ccissX/cXdY/vendor
|
||||
Date: March 2009
|
||||
Kernel Version: 2.6.30
|
||||
Contact: iss_storagedev@hp.com
|
||||
Description: Displays the SCSI INQUIRY page 0 vendor for logical drive
|
||||
Y of controller X.
|
||||
|
||||
Where: /sys/bus/pci/devices/<dev>/ccissX/cXdY/block:cciss!cXdY
|
||||
Date: March 2009
|
||||
Kernel Version: 2.6.30
|
||||
Contact: iss_storagedev@hp.com
|
||||
Description: A symbolic link to /sys/block/cciss!cXdY
|
18
Documentation/ABI/testing/sysfs-devices-cache_disable
Normal file
18
Documentation/ABI/testing/sysfs-devices-cache_disable
Normal file
@ -0,0 +1,18 @@
|
||||
What: /sys/devices/system/cpu/cpu*/cache/index*/cache_disable_X
|
||||
Date: August 2008
|
||||
KernelVersion: 2.6.27
|
||||
Contact: mark.langsdorf@amd.com
|
||||
Description: These files exist in every cpu's cache index directories.
|
||||
There are currently 2 cache_disable_# files in each
|
||||
directory. Reading from these files on a supported
|
||||
processor will return that cache disable index value
|
||||
for that processor and node. Writing to one of these
|
||||
files will cause the specified cache index to be disabled.
|
||||
|
||||
Currently, only AMD Family 10h Processors support cache index
|
||||
disable, and only for their L3 caches. See the BIOS and
|
||||
Kernel Developer's Guide at
|
||||
http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/31116-Public-GH-BKDG_3.20_2-4-09.pdf
|
||||
for formatting information and other details on the
|
||||
cache index disable.
|
||||
Users: joachim.deguara@amd.com
|
@ -704,12 +704,24 @@ this directory the following files can currently be found:
|
||||
The current number of free dma_debug_entries
|
||||
in the allocator.
|
||||
|
||||
dma-api/driver-filter
|
||||
You can write a name of a driver into this file
|
||||
to limit the debug output to requests from that
|
||||
particular driver. Write an empty string to
|
||||
that file to disable the filter and see
|
||||
all errors again.
|
||||
|
||||
If you have this code compiled into your kernel it will be enabled by default.
|
||||
If you want to boot without the bookkeeping anyway you can provide
|
||||
'dma_debug=off' as a boot parameter. This will disable DMA-API debugging.
|
||||
Notice that you can not enable it again at runtime. You have to reboot to do
|
||||
so.
|
||||
|
||||
If you want to see debug messages only for a special device driver you can
|
||||
specify the dma_debug_driver=<drivername> parameter. This will enable the
|
||||
driver filter at boot time. The debug code will only print errors for that
|
||||
driver afterwards. This filter can be disabled or changed later using debugfs.
|
||||
|
||||
When the code disables itself at runtime this is most likely because it ran
|
||||
out of dma_debug_entries. These entries are preallocated at boot. The number
|
||||
of preallocated entries is defined per architecture. If it is too low for you
|
||||
|
@ -13,7 +13,8 @@ DOCBOOKS := z8530book.xml mcabook.xml device-drivers.xml \
|
||||
gadget.xml libata.xml mtdnand.xml librs.xml rapidio.xml \
|
||||
genericirq.xml s390-drivers.xml uio-howto.xml scsi.xml \
|
||||
mac80211.xml debugobjects.xml sh.xml regulator.xml \
|
||||
alsa-driver-api.xml writing-an-alsa-driver.xml
|
||||
alsa-driver-api.xml writing-an-alsa-driver.xml \
|
||||
tracepoint.xml
|
||||
|
||||
###
|
||||
# The build process is as follows (targets):
|
||||
|
89
Documentation/DocBook/tracepoint.tmpl
Normal file
89
Documentation/DocBook/tracepoint.tmpl
Normal file
@ -0,0 +1,89 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
|
||||
"http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
|
||||
|
||||
<book id="Tracepoints">
|
||||
<bookinfo>
|
||||
<title>The Linux Kernel Tracepoint API</title>
|
||||
|
||||
<authorgroup>
|
||||
<author>
|
||||
<firstname>Jason</firstname>
|
||||
<surname>Baron</surname>
|
||||
<affiliation>
|
||||
<address>
|
||||
<email>jbaron@redhat.com</email>
|
||||
</address>
|
||||
</affiliation>
|
||||
</author>
|
||||
</authorgroup>
|
||||
|
||||
<legalnotice>
|
||||
<para>
|
||||
This documentation is free software; you can redistribute
|
||||
it and/or modify it under the terms of the GNU General Public
|
||||
License as published by the Free Software Foundation; either
|
||||
version 2 of the License, or (at your option) any later
|
||||
version.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
This program is distributed in the hope that it will be
|
||||
useful, but WITHOUT ANY WARRANTY; without even the implied
|
||||
warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
|
||||
See the GNU General Public License for more details.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
You should have received a copy of the GNU General Public
|
||||
License along with this program; if not, write to the Free
|
||||
Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
|
||||
MA 02111-1307 USA
|
||||
</para>
|
||||
|
||||
<para>
|
||||
For more details see the file COPYING in the source
|
||||
distribution of Linux.
|
||||
</para>
|
||||
</legalnotice>
|
||||
</bookinfo>
|
||||
|
||||
<toc></toc>
|
||||
<chapter id="intro">
|
||||
<title>Introduction</title>
|
||||
<para>
|
||||
Tracepoints are static probe points that are located in strategic points
|
||||
throughout the kernel. 'Probes' register/unregister with tracepoints
|
||||
via a callback mechanism. The 'probes' are strictly typed functions that
|
||||
are passed a unique set of parameters defined by each tracepoint.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
From this simple callback mechanism, 'probes' can be used to profile, debug,
|
||||
and understand kernel behavior. There are a number of tools that provide a
|
||||
framework for using 'probes'. These tools include Systemtap, ftrace, and
|
||||
LTTng.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
Tracepoints are defined in a number of header files via various macros. Thus,
|
||||
the purpose of this document is to provide a clear accounting of the available
|
||||
tracepoints. The intention is to understand not only what tracepoints are
|
||||
available but also to understand where future tracepoints might be added.
|
||||
</para>
|
||||
|
||||
<para>
|
||||
The API presented has functions of the form:
|
||||
<function>trace_tracepointname(function parameters)</function>. These are the
|
||||
tracepoints callbacks that are found throughout the code. Registering and
|
||||
unregistering probes with these callback sites is covered in the
|
||||
<filename>Documentation/trace/*</filename> directory.
|
||||
</para>
|
||||
</chapter>
|
||||
|
||||
<chapter id="irq">
|
||||
<title>IRQ</title>
|
||||
!Iinclude/trace/events/irq.h
|
||||
</chapter>
|
||||
|
||||
</book>
|
@ -192,23 +192,24 @@ rcu/rcuhier (which displays the struct rcu_node hierarchy).
|
||||
The output of "cat rcu/rcudata" looks as follows:
|
||||
|
||||
rcu:
|
||||
0 c=4011 g=4012 pq=1 pqc=4011 qp=0 rpfq=1 rp=3c2a dt=23301/73 dn=2 df=1882 of=0 ri=2126 ql=2 b=10
|
||||
1 c=4011 g=4012 pq=1 pqc=4011 qp=0 rpfq=3 rp=39a6 dt=78073/1 dn=2 df=1402 of=0 ri=1875 ql=46 b=10
|
||||
2 c=4010 g=4010 pq=1 pqc=4010 qp=0 rpfq=-5 rp=1d12 dt=16646/0 dn=2 df=3140 of=0 ri=2080 ql=0 b=10
|
||||
3 c=4012 g=4013 pq=1 pqc=4012 qp=1 rpfq=3 rp=2b50 dt=21159/1 dn=2 df=2230 of=0 ri=1923 ql=72 b=10
|
||||
4 c=4012 g=4013 pq=1 pqc=4012 qp=1 rpfq=3 rp=1644 dt=5783/1 dn=2 df=3348 of=0 ri=2805 ql=7 b=10
|
||||
5 c=4012 g=4013 pq=0 pqc=4011 qp=1 rpfq=3 rp=1aac dt=5879/1 dn=2 df=3140 of=0 ri=2066 ql=10 b=10
|
||||
6 c=4012 g=4013 pq=1 pqc=4012 qp=1 rpfq=3 rp=ed8 dt=5847/1 dn=2 df=3797 of=0 ri=1266 ql=10 b=10
|
||||
7 c=4012 g=4013 pq=1 pqc=4012 qp=1 rpfq=3 rp=1fa2 dt=6199/1 dn=2 df=2795 of=0 ri=2162 ql=28 b=10
|
||||
rcu:
|
||||
0 c=17829 g=17829 pq=1 pqc=17829 qp=0 dt=10951/1 dn=0 df=1101 of=0 ri=36 ql=0 b=10
|
||||
1 c=17829 g=17829 pq=1 pqc=17829 qp=0 dt=16117/1 dn=0 df=1015 of=0 ri=0 ql=0 b=10
|
||||
2 c=17829 g=17829 pq=1 pqc=17829 qp=0 dt=1445/1 dn=0 df=1839 of=0 ri=0 ql=0 b=10
|
||||
3 c=17829 g=17829 pq=1 pqc=17829 qp=0 dt=6681/1 dn=0 df=1545 of=0 ri=0 ql=0 b=10
|
||||
4 c=17829 g=17829 pq=1 pqc=17829 qp=0 dt=1003/1 dn=0 df=1992 of=0 ri=0 ql=0 b=10
|
||||
5 c=17829 g=17830 pq=1 pqc=17829 qp=1 dt=3887/1 dn=0 df=3331 of=0 ri=4 ql=2 b=10
|
||||
6 c=17829 g=17829 pq=1 pqc=17829 qp=0 dt=859/1 dn=0 df=3224 of=0 ri=0 ql=0 b=10
|
||||
7 c=17829 g=17830 pq=0 pqc=17829 qp=1 dt=3761/1 dn=0 df=1818 of=0 ri=0 ql=2 b=10
|
||||
rcu_bh:
|
||||
0 c=-268 g=-268 pq=1 pqc=-268 qp=0 rpfq=-145 rp=21d6 dt=23301/73 dn=2 df=0 of=0 ri=0 ql=0 b=10
|
||||
1 c=-268 g=-268 pq=1 pqc=-268 qp=1 rpfq=-170 rp=20ce dt=78073/1 dn=2 df=26 of=0 ri=5 ql=0 b=10
|
||||
2 c=-268 g=-268 pq=1 pqc=-268 qp=1 rpfq=-83 rp=fbd dt=16646/0 dn=2 df=28 of=0 ri=4 ql=0 b=10
|
||||
3 c=-268 g=-268 pq=1 pqc=-268 qp=0 rpfq=-105 rp=178c dt=21159/1 dn=2 df=28 of=0 ri=2 ql=0 b=10
|
||||
4 c=-268 g=-268 pq=1 pqc=-268 qp=1 rpfq=-30 rp=b54 dt=5783/1 dn=2 df=32 of=0 ri=0 ql=0 b=10
|
||||
5 c=-268 g=-268 pq=1 pqc=-268 qp=1 rpfq=-29 rp=df5 dt=5879/1 dn=2 df=30 of=0 ri=3 ql=0 b=10
|
||||
6 c=-268 g=-268 pq=1 pqc=-268 qp=1 rpfq=-28 rp=788 dt=5847/1 dn=2 df=32 of=0 ri=0 ql=0 b=10
|
||||
7 c=-268 g=-268 pq=1 pqc=-268 qp=1 rpfq=-53 rp=1098 dt=6199/1 dn=2 df=30 of=0 ri=3 ql=0 b=10
|
||||
0 c=-275 g=-275 pq=1 pqc=-275 qp=0 dt=10951/1 dn=0 df=0 of=0 ri=0 ql=0 b=10
|
||||
1 c=-275 g=-275 pq=1 pqc=-275 qp=0 dt=16117/1 dn=0 df=13 of=0 ri=0 ql=0 b=10
|
||||
2 c=-275 g=-275 pq=1 pqc=-275 qp=0 dt=1445/1 dn=0 df=15 of=0 ri=0 ql=0 b=10
|
||||
3 c=-275 g=-275 pq=1 pqc=-275 qp=0 dt=6681/1 dn=0 df=9 of=0 ri=0 ql=0 b=10
|
||||
4 c=-275 g=-275 pq=1 pqc=-275 qp=0 dt=1003/1 dn=0 df=15 of=0 ri=0 ql=0 b=10
|
||||
5 c=-275 g=-275 pq=1 pqc=-275 qp=0 dt=3887/1 dn=0 df=15 of=0 ri=0 ql=0 b=10
|
||||
6 c=-275 g=-275 pq=1 pqc=-275 qp=0 dt=859/1 dn=0 df=15 of=0 ri=0 ql=0 b=10
|
||||
7 c=-275 g=-275 pq=1 pqc=-275 qp=0 dt=3761/1 dn=0 df=15 of=0 ri=0 ql=0 b=10
|
||||
|
||||
The first section lists the rcu_data structures for rcu, the second for
|
||||
rcu_bh. Each section has one line per CPU, or eight for this 8-CPU system.
|
||||
@ -253,12 +254,6 @@ o "pqc" indicates which grace period the last-observed quiescent
|
||||
o "qp" indicates that RCU still expects a quiescent state from
|
||||
this CPU.
|
||||
|
||||
o "rpfq" is the number of rcu_pending() calls on this CPU required
|
||||
to induce this CPU to invoke force_quiescent_state().
|
||||
|
||||
o "rp" is low-order four hex digits of the count of how many times
|
||||
rcu_pending() has been invoked on this CPU.
|
||||
|
||||
o "dt" is the current value of the dyntick counter that is incremented
|
||||
when entering or leaving dynticks idle state, either by the
|
||||
scheduler or by irq. The number after the "/" is the interrupt
|
||||
@ -305,6 +300,9 @@ o "b" is the batch limit for this CPU. If more than this number
|
||||
of RCU callbacks is ready to invoke, then the remainder will
|
||||
be deferred.
|
||||
|
||||
There is also an rcu/rcudata.csv file with the same information in
|
||||
comma-separated-variable spreadsheet format.
|
||||
|
||||
|
||||
The output of "cat rcu/rcugp" looks as follows:
|
||||
|
||||
@ -411,3 +409,63 @@ o Each element of the form "1/1 0:127 ^0" represents one struct
|
||||
For example, the first entry at the lowest level shows
|
||||
"^0", indicating that it corresponds to bit zero in
|
||||
the first entry at the middle level.
|
||||
|
||||
|
||||
The output of "cat rcu/rcu_pending" looks as follows:
|
||||
|
||||
rcu:
|
||||
0 np=255892 qsp=53936 cbr=0 cng=14417 gpc=10033 gps=24320 nf=6445 nn=146741
|
||||
1 np=261224 qsp=54638 cbr=0 cng=25723 gpc=16310 gps=2849 nf=5912 nn=155792
|
||||
2 np=237496 qsp=49664 cbr=0 cng=2762 gpc=45478 gps=1762 nf=1201 nn=136629
|
||||
3 np=236249 qsp=48766 cbr=0 cng=286 gpc=48049 gps=1218 nf=207 nn=137723
|
||||
4 np=221310 qsp=46850 cbr=0 cng=26 gpc=43161 gps=4634 nf=3529 nn=123110
|
||||
5 np=237332 qsp=48449 cbr=0 cng=54 gpc=47920 gps=3252 nf=201 nn=137456
|
||||
6 np=219995 qsp=46718 cbr=0 cng=50 gpc=42098 gps=6093 nf=4202 nn=120834
|
||||
7 np=249893 qsp=49390 cbr=0 cng=72 gpc=38400 gps=17102 nf=41 nn=144888
|
||||
rcu_bh:
|
||||
0 np=146741 qsp=1419 cbr=0 cng=6 gpc=0 gps=0 nf=2 nn=145314
|
||||
1 np=155792 qsp=12597 cbr=0 cng=0 gpc=4 gps=8 nf=3 nn=143180
|
||||
2 np=136629 qsp=18680 cbr=0 cng=0 gpc=7 gps=6 nf=0 nn=117936
|
||||
3 np=137723 qsp=2843 cbr=0 cng=0 gpc=10 gps=7 nf=0 nn=134863
|
||||
4 np=123110 qsp=12433 cbr=0 cng=0 gpc=4 gps=2 nf=0 nn=110671
|
||||
5 np=137456 qsp=4210 cbr=0 cng=0 gpc=6 gps=5 nf=0 nn=133235
|
||||
6 np=120834 qsp=9902 cbr=0 cng=0 gpc=6 gps=3 nf=2 nn=110921
|
||||
7 np=144888 qsp=26336 cbr=0 cng=0 gpc=8 gps=2 nf=0 nn=118542
|
||||
|
||||
As always, this is once again split into "rcu" and "rcu_bh" portions.
|
||||
The fields are as follows:
|
||||
|
||||
o "np" is the number of times that __rcu_pending() has been invoked
|
||||
for the corresponding flavor of RCU.
|
||||
|
||||
o "qsp" is the number of times that the RCU was waiting for a
|
||||
quiescent state from this CPU.
|
||||
|
||||
o "cbr" is the number of times that this CPU had RCU callbacks
|
||||
that had passed through a grace period, and were thus ready
|
||||
to be invoked.
|
||||
|
||||
o "cng" is the number of times that this CPU needed another
|
||||
grace period while RCU was idle.
|
||||
|
||||
o "gpc" is the number of times that an old grace period had
|
||||
completed, but this CPU was not yet aware of it.
|
||||
|
||||
o "gps" is the number of times that a new grace period had started,
|
||||
but this CPU was not yet aware of it.
|
||||
|
||||
o "nf" is the number of times that this CPU suspected that the
|
||||
current grace period had run for too long, and thus needed to
|
||||
be forced.
|
||||
|
||||
Please note that "forcing" consists of sending resched IPIs
|
||||
to holdout CPUs. If that CPU really still is in an old RCU
|
||||
read-side critical section, then we really do have to wait for it.
|
||||
The assumption behind "forcing" is that the CPU is not still in
|
||||
an old RCU read-side critical section, but has not yet responded
|
||||
for some other reason.
|
||||
|
||||
o "nn" is the number of times that this CPU needed nothing. Alert
|
||||
readers will note that the rcu "nn" number for a given CPU very
|
||||
closely matches the rcu_bh "np" number for that same CPU. This
|
||||
is due to short-circuit evaluation in rcu_pending().
|
||||
|
@ -184,8 +184,9 @@ length. Single character labels using special characters, that being anything
|
||||
other than a letter or digit, are reserved for use by the Smack development
|
||||
team. Smack labels are unstructured, case sensitive, and the only operation
|
||||
ever performed on them is comparison for equality. Smack labels cannot
|
||||
contain unprintable characters or the "/" (slash) character. Smack labels
|
||||
cannot begin with a '-', which is reserved for special options.
|
||||
contain unprintable characters, the "/" (slash), the "\" (backslash), the "'"
|
||||
(quote) and '"' (double-quote) characters.
|
||||
Smack labels cannot begin with a '-', which is reserved for special options.
|
||||
|
||||
There are some predefined labels:
|
||||
|
||||
@ -523,3 +524,18 @@ Smack supports some mount options:
|
||||
|
||||
These mount options apply to all file system types.
|
||||
|
||||
Smack auditing
|
||||
|
||||
If you want Smack auditing of security events, you need to set CONFIG_AUDIT
|
||||
in your kernel configuration.
|
||||
By default, all denied events will be audited. You can change this behavior by
|
||||
writing a single character to the /smack/logging file :
|
||||
0 : no logging
|
||||
1 : log denied (default)
|
||||
2 : log accepted
|
||||
3 : log denied & accepted
|
||||
|
||||
Events are logged as 'key=value' pairs, for each event you at least will get
|
||||
the subject, the object, the rights requested, the action, the kernel function
|
||||
that triggered the event, plus other pairs depending on the type of event
|
||||
audited.
|
||||
|
@ -186,7 +186,7 @@ a virtual address mapping (unlike the earlier scheme of virtual address
|
||||
do not have a corresponding kernel virtual address space mapping) and
|
||||
low-memory pages.
|
||||
|
||||
Note: Please refer to Documentation/PCI/PCI-DMA-mapping.txt for a discussion
|
||||
Note: Please refer to Documentation/DMA-mapping.txt for a discussion
|
||||
on PCI high mem DMA aspects and mapping of scatter gather lists, and support
|
||||
for 64 bit PCI.
|
||||
|
||||
|
@ -60,7 +60,7 @@ go_lock | Called for the first local holder of a lock
|
||||
go_unlock | Called on the final local unlock of a lock
|
||||
go_dump | Called to print content of object for debugfs file, or on
|
||||
| error to dump glock to the log.
|
||||
go_type; | The type of the glock, LM_TYPE_.....
|
||||
go_type | The type of the glock, LM_TYPE_.....
|
||||
go_min_hold_time | The minimum hold time
|
||||
|
||||
The minimum hold time for each lock is the time after a remote lock
|
||||
|
@ -11,18 +11,15 @@ their I/O so file system consistency is maintained. One of the nifty
|
||||
features of GFS is perfect consistency -- changes made to the file system
|
||||
on one machine show up immediately on all other machines in the cluster.
|
||||
|
||||
GFS uses interchangeable inter-node locking mechanisms. Different lock
|
||||
modules can plug into GFS and each file system selects the appropriate
|
||||
lock module at mount time. Lock modules include:
|
||||
GFS uses interchangeable inter-node locking mechanisms, the currently
|
||||
supported mechanisms are:
|
||||
|
||||
lock_nolock -- allows gfs to be used as a local file system
|
||||
|
||||
lock_dlm -- uses a distributed lock manager (dlm) for inter-node locking
|
||||
The dlm is found at linux/fs/dlm/
|
||||
|
||||
In addition to interfacing with an external locking manager, a gfs lock
|
||||
module is responsible for interacting with external cluster management
|
||||
systems. Lock_dlm depends on user space cluster management systems found
|
||||
Lock_dlm depends on user space cluster management systems found
|
||||
at the URL above.
|
||||
|
||||
To use gfs as a local file system, no external clustering systems are
|
||||
@ -31,13 +28,19 @@ needed, simply:
|
||||
$ mkfs -t gfs2 -p lock_nolock -j 1 /dev/block_device
|
||||
$ mount -t gfs2 /dev/block_device /dir
|
||||
|
||||
GFS2 is not on-disk compatible with previous versions of GFS.
|
||||
If you are using Fedora, you need to install the gfs2-utils package
|
||||
and, for lock_dlm, you will also need to install the cman package
|
||||
and write a cluster.conf as per the documentation.
|
||||
|
||||
GFS2 is not on-disk compatible with previous versions of GFS, but it
|
||||
is pretty close.
|
||||
|
||||
The following man pages can be found at the URL above:
|
||||
gfs2_fsck to repair a filesystem
|
||||
fsck.gfs2 to repair a filesystem
|
||||
gfs2_grow to expand a filesystem online
|
||||
gfs2_jadd to add journals to a filesystem online
|
||||
gfs2_tool to manipulate, examine and tune a filesystem
|
||||
gfs2_quota to examine and change quota values in a filesystem
|
||||
gfs2_convert to convert a gfs filesystem to gfs2 in-place
|
||||
mount.gfs2 to help mount(8) mount a filesystem
|
||||
mkfs.gfs2 to make a filesystem
|
||||
|
131
Documentation/futex-requeue-pi.txt
Normal file
131
Documentation/futex-requeue-pi.txt
Normal file
@ -0,0 +1,131 @@
|
||||
Futex Requeue PI
|
||||
----------------
|
||||
|
||||
Requeueing of tasks from a non-PI futex to a PI futex requires
|
||||
special handling in order to ensure the underlying rt_mutex is never
|
||||
left without an owner if it has waiters; doing so would break the PI
|
||||
boosting logic [see rt-mutex-design.txt]. For the purposes of
|
||||
brevity, this action will be referred to as "requeue_pi" throughout
|
||||
this document. Priority inheritance is abbreviated throughout as
|
||||
"PI".
|
||||
|
||||
Motivation
|
||||
----------
|
||||
|
||||
Without requeue_pi, the glibc implementation of
|
||||
pthread_cond_broadcast() must resort to waking all the tasks waiting
|
||||
on a pthread_condvar and letting them try to sort out which task
|
||||
gets to run first in classic thundering-herd formation. An ideal
|
||||
implementation would wake the highest-priority waiter, and leave the
|
||||
rest to the natural wakeup inherent in unlocking the mutex
|
||||
associated with the condvar.
|
||||
|
||||
Consider the simplified glibc calls:
|
||||
|
||||
/* caller must lock mutex */
|
||||
pthread_cond_wait(cond, mutex)
|
||||
{
|
||||
lock(cond->__data.__lock);
|
||||
unlock(mutex);
|
||||
do {
|
||||
unlock(cond->__data.__lock);
|
||||
futex_wait(cond->__data.__futex);
|
||||
lock(cond->__data.__lock);
|
||||
} while(...)
|
||||
unlock(cond->__data.__lock);
|
||||
lock(mutex);
|
||||
}
|
||||
|
||||
pthread_cond_broadcast(cond)
|
||||
{
|
||||
lock(cond->__data.__lock);
|
||||
unlock(cond->__data.__lock);
|
||||
futex_requeue(cond->data.__futex, cond->mutex);
|
||||
}
|
||||
|
||||
Once pthread_cond_broadcast() requeues the tasks, the cond->mutex
|
||||
has waiters. Note that pthread_cond_wait() attempts to lock the
|
||||
mutex only after it has returned to user space. This will leave the
|
||||
underlying rt_mutex with waiters, and no owner, breaking the
|
||||
previously mentioned PI-boosting algorithms.
|
||||
|
||||
In order to support PI-aware pthread_condvar's, the kernel needs to
|
||||
be able to requeue tasks to PI futexes. This support implies that
|
||||
upon a successful futex_wait system call, the caller would return to
|
||||
user space already holding the PI futex. The glibc implementation
|
||||
would be modified as follows:
|
||||
|
||||
|
||||
/* caller must lock mutex */
|
||||
pthread_cond_wait_pi(cond, mutex)
|
||||
{
|
||||
lock(cond->__data.__lock);
|
||||
unlock(mutex);
|
||||
do {
|
||||
unlock(cond->__data.__lock);
|
||||
futex_wait_requeue_pi(cond->__data.__futex);
|
||||
lock(cond->__data.__lock);
|
||||
} while(...)
|
||||
unlock(cond->__data.__lock);
|
||||
/* the kernel acquired the mutex for us */
|
||||
}
|
||||
|
||||
pthread_cond_broadcast_pi(cond)
|
||||
{
|
||||
lock(cond->__data.__lock);
|
||||
unlock(cond->__data.__lock);
|
||||
futex_requeue_pi(cond->data.__futex, cond->mutex);
|
||||
}
|
||||
|
||||
The actual glibc implementation will likely test for PI and make the
|
||||
necessary changes inside the existing calls rather than creating new
|
||||
calls for the PI cases. Similar changes are needed for
|
||||
pthread_cond_timedwait() and pthread_cond_signal().
|
||||
|
||||
Implementation
|
||||
--------------
|
||||
|
||||
In order to ensure the rt_mutex has an owner if it has waiters, it
|
||||
is necessary for both the requeue code, as well as the waiting code,
|
||||
to be able to acquire the rt_mutex before returning to user space.
|
||||
The requeue code cannot simply wake the waiter and leave it to
|
||||
acquire the rt_mutex as it would open a race window between the
|
||||
requeue call returning to user space and the waiter waking and
|
||||
starting to run. This is especially true in the uncontended case.
|
||||
|
||||
The solution involves two new rt_mutex helper routines,
|
||||
rt_mutex_start_proxy_lock() and rt_mutex_finish_proxy_lock(), which
|
||||
allow the requeue code to acquire an uncontended rt_mutex on behalf
|
||||
of the waiter and to enqueue the waiter on a contended rt_mutex.
|
||||
Two new system calls provide the kernel<->user interface to
|
||||
requeue_pi: FUTEX_WAIT_REQUEUE_PI and FUTEX_REQUEUE_CMP_PI.
|
||||
|
||||
FUTEX_WAIT_REQUEUE_PI is called by the waiter (pthread_cond_wait()
|
||||
and pthread_cond_timedwait()) to block on the initial futex and wait
|
||||
to be requeued to a PI-aware futex. The implementation is the
|
||||
result of a high-speed collision between futex_wait() and
|
||||
futex_lock_pi(), with some extra logic to check for the additional
|
||||
wake-up scenarios.
|
||||
|
||||
FUTEX_REQUEUE_CMP_PI is called by the waker
|
||||
(pthread_cond_broadcast() and pthread_cond_signal()) to requeue and
|
||||
possibly wake the waiting tasks. Internally, this system call is
|
||||
still handled by futex_requeue (by passing requeue_pi=1). Before
|
||||
requeueing, futex_requeue() attempts to acquire the requeue target
|
||||
PI futex on behalf of the top waiter. If it can, this waiter is
|
||||
woken. futex_requeue() then proceeds to requeue the remaining
|
||||
nr_wake+nr_requeue tasks to the PI futex, calling
|
||||
rt_mutex_start_proxy_lock() prior to each requeue to prepare the
|
||||
task as a waiter on the underlying rt_mutex. It is possible that
|
||||
the lock can be acquired at this stage as well, if so, the next
|
||||
waiter is woken to finish the acquisition of the lock.
|
||||
|
||||
FUTEX_REQUEUE_PI accepts nr_wake and nr_requeue as arguments, but
|
||||
their sum is all that really matters. futex_requeue() will wake or
|
||||
requeue up to nr_wake + nr_requeue tasks. It will wake only as many
|
||||
tasks as it can acquire the lock for, which in the majority of cases
|
||||
should be 0 as good programming practice dictates that the caller of
|
||||
either pthread_cond_broadcast() or pthread_cond_signal() acquire the
|
||||
mutex prior to making the call. FUTEX_REQUEUE_PI requires that
|
||||
nr_wake=1. nr_requeue should be INT_MAX for broadcast and 0 for
|
||||
signal.
|
@ -56,7 +56,6 @@ parameter is applicable:
|
||||
ISAPNP ISA PnP code is enabled.
|
||||
ISDN Appropriate ISDN support is enabled.
|
||||
JOY Appropriate joystick support is enabled.
|
||||
KMEMTRACE kmemtrace is enabled.
|
||||
LIBATA Libata driver is enabled
|
||||
LP Printer support is enabled.
|
||||
LOOP Loopback device support is enabled.
|
||||
@ -329,11 +328,6 @@ and is between 256 and 4096 characters. It is defined in the file
|
||||
flushed before they will be reused, which
|
||||
is a lot faster
|
||||
|
||||
amd_iommu_size= [HW,X86-64]
|
||||
Define the size of the aperture for the AMD IOMMU
|
||||
driver. Possible values are:
|
||||
'32M', '64M' (default), '128M', '256M', '512M', '1G'
|
||||
|
||||
amijoy.map= [HW,JOY] Amiga joystick support
|
||||
Map of devices attached to JOY0DAT and JOY1DAT
|
||||
Format: <a>,<b>
|
||||
@ -646,6 +640,13 @@ and is between 256 and 4096 characters. It is defined in the file
|
||||
DMA-API debugging code disables itself because the
|
||||
architectural default is too low.
|
||||
|
||||
dma_debug_driver=<driver_name>
|
||||
With this option the DMA-API debugging driver
|
||||
filter feature can be enabled at boot time. Just
|
||||
pass the driver to filter for as the parameter.
|
||||
The filter can be disabled or changed to another
|
||||
driver later using sysfs.
|
||||
|
||||
dscc4.setup= [NET]
|
||||
|
||||
dtc3181e= [HW,SCSI]
|
||||
@ -752,12 +753,25 @@ and is between 256 and 4096 characters. It is defined in the file
|
||||
ia64_pal_cache_flush instead of SAL_CACHE_FLUSH.
|
||||
|
||||
ftrace=[tracer]
|
||||
[ftrace] will set and start the specified tracer
|
||||
[FTRACE] will set and start the specified tracer
|
||||
as early as possible in order to facilitate early
|
||||
boot debugging.
|
||||
|
||||
ftrace_dump_on_oops
|
||||
[ftrace] will dump the trace buffers on oops.
|
||||
[FTRACE] will dump the trace buffers on oops.
|
||||
|
||||
ftrace_filter=[function-list]
|
||||
[FTRACE] Limit the functions traced by the function
|
||||
tracer at boot up. function-list is a comma separated
|
||||
list of functions. This list can be changed at run
|
||||
time by the set_ftrace_filter file in the debugfs
|
||||
tracing directory.
|
||||
|
||||
ftrace_notrace=[function-list]
|
||||
[FTRACE] Do not trace the functions specified in
|
||||
function-list. This list can be changed at run time
|
||||
by the set_ftrace_notrace file in the debugfs
|
||||
tracing directory.
|
||||
|
||||
gamecon.map[2|3]=
|
||||
[HW,JOY] Multisystem joystick and NES/SNES/PSX pad
|
||||
@ -914,6 +928,12 @@ and is between 256 and 4096 characters. It is defined in the file
|
||||
Format: { "sha1" | "md5" }
|
||||
default: "sha1"
|
||||
|
||||
ima_tcb [IMA]
|
||||
Load a policy which meets the needs of the Trusted
|
||||
Computing Base. This means IMA will measure all
|
||||
programs exec'd, files mmap'd for exec, and all files
|
||||
opened for read by uid=0.
|
||||
|
||||
in2000= [HW,SCSI]
|
||||
See header of drivers/scsi/in2000.c.
|
||||
|
||||
@ -1054,15 +1074,6 @@ and is between 256 and 4096 characters. It is defined in the file
|
||||
use the HighMem zone if it exists, and the Normal
|
||||
zone if it does not.
|
||||
|
||||
kmemtrace.enable= [KNL,KMEMTRACE] Format: { yes | no }
|
||||
Controls whether kmemtrace is enabled
|
||||
at boot-time.
|
||||
|
||||
kmemtrace.subbufs=n [KNL,KMEMTRACE] Overrides the number of
|
||||
subbufs kmemtrace's relay channel has. Set this
|
||||
higher than default (KMEMTRACE_N_SUBBUFS in code) if
|
||||
you experience buffer overruns.
|
||||
|
||||
kgdboc= [HW] kgdb over consoles.
|
||||
Requires a tty driver that supports console polling.
|
||||
(only serial supported for now)
|
||||
@ -1072,6 +1083,10 @@ and is between 256 and 4096 characters. It is defined in the file
|
||||
Configure the RouterBoard 532 series on-chip
|
||||
Ethernet adapter MAC address.
|
||||
|
||||
kmemleak= [KNL] Boot-time kmemleak enable/disable
|
||||
Valid arguments: on, off
|
||||
Default: on
|
||||
|
||||
kstack=N [X86] Print N words from the kernel stack
|
||||
in oops dumps.
|
||||
|
||||
@ -1575,6 +1590,9 @@ and is between 256 and 4096 characters. It is defined in the file
|
||||
noinitrd [RAM] Tells the kernel not to load any configured
|
||||
initial RAM disk.
|
||||
|
||||
nointremap [X86-64, Intel-IOMMU] Do not enable interrupt
|
||||
remapping.
|
||||
|
||||
nointroute [IA-64]
|
||||
|
||||
nojitter [IA64] Disables jitter checking for ITC timers.
|
||||
@ -1660,6 +1678,14 @@ and is between 256 and 4096 characters. It is defined in the file
|
||||
oprofile.timer= [HW]
|
||||
Use timer interrupt instead of performance counters
|
||||
|
||||
oprofile.cpu_type= Force an oprofile cpu type
|
||||
This might be useful if you have an older oprofile
|
||||
userland or if you want common events.
|
||||
Format: { archperfmon }
|
||||
archperfmon: [X86] Force use of architectural
|
||||
perfmon on Intel CPUs instead of the
|
||||
CPU specific event set.
|
||||
|
||||
osst= [HW,SCSI] SCSI Tape Driver
|
||||
Format: <buffer_size>,<write_threshold>
|
||||
See also Documentation/scsi/st.txt.
|
||||
|
142
Documentation/kmemleak.txt
Normal file
142
Documentation/kmemleak.txt
Normal file
@ -0,0 +1,142 @@
|
||||
Kernel Memory Leak Detector
|
||||
===========================
|
||||
|
||||
Introduction
|
||||
------------
|
||||
|
||||
Kmemleak provides a way of detecting possible kernel memory leaks in a
|
||||
way similar to a tracing garbage collector
|
||||
(http://en.wikipedia.org/wiki/Garbage_collection_%28computer_science%29#Tracing_garbage_collectors),
|
||||
with the difference that the orphan objects are not freed but only
|
||||
reported via /sys/kernel/debug/kmemleak. A similar method is used by the
|
||||
Valgrind tool (memcheck --leak-check) to detect the memory leaks in
|
||||
user-space applications.
|
||||
|
||||
Usage
|
||||
-----
|
||||
|
||||
CONFIG_DEBUG_KMEMLEAK in "Kernel hacking" has to be enabled. A kernel
|
||||
thread scans the memory every 10 minutes (by default) and prints any new
|
||||
unreferenced objects found. To trigger an intermediate scan and display
|
||||
all the possible memory leaks:
|
||||
|
||||
# mount -t debugfs nodev /sys/kernel/debug/
|
||||
# cat /sys/kernel/debug/kmemleak
|
||||
|
||||
Note that the orphan objects are listed in the order they were allocated
|
||||
and one object at the beginning of the list may cause other subsequent
|
||||
objects to be reported as orphan.
|
||||
|
||||
Memory scanning parameters can be modified at run-time by writing to the
|
||||
/sys/kernel/debug/kmemleak file. The following parameters are supported:
|
||||
|
||||
off - disable kmemleak (irreversible)
|
||||
stack=on - enable the task stacks scanning
|
||||
stack=off - disable the tasks stacks scanning
|
||||
scan=on - start the automatic memory scanning thread
|
||||
scan=off - stop the automatic memory scanning thread
|
||||
scan=<secs> - set the automatic memory scanning period in seconds (0
|
||||
to disable it)
|
||||
|
||||
Kmemleak can also be disabled at boot-time by passing "kmemleak=off" on
|
||||
the kernel command line.
|
||||
|
||||
Basic Algorithm
|
||||
---------------
|
||||
|
||||
The memory allocations via kmalloc, vmalloc, kmem_cache_alloc and
|
||||
friends are traced and the pointers, together with additional
|
||||
information like size and stack trace, are stored in a prio search tree.
|
||||
The corresponding freeing function calls are tracked and the pointers
|
||||
removed from the kmemleak data structures.
|
||||
|
||||
An allocated block of memory is considered orphan if no pointer to its
|
||||
start address or to any location inside the block can be found by
|
||||
scanning the memory (including saved registers). This means that there
|
||||
might be no way for the kernel to pass the address of the allocated
|
||||
block to a freeing function and therefore the block is considered a
|
||||
memory leak.
|
||||
|
||||
The scanning algorithm steps:
|
||||
|
||||
1. mark all objects as white (remaining white objects will later be
|
||||
considered orphan)
|
||||
2. scan the memory starting with the data section and stacks, checking
|
||||
the values against the addresses stored in the prio search tree. If
|
||||
a pointer to a white object is found, the object is added to the
|
||||
gray list
|
||||
3. scan the gray objects for matching addresses (some white objects
|
||||
can become gray and added at the end of the gray list) until the
|
||||
gray set is finished
|
||||
4. the remaining white objects are considered orphan and reported via
|
||||
/sys/kernel/debug/kmemleak
|
||||
|
||||
Some allocated memory blocks have pointers stored in the kernel's
|
||||
internal data structures and they cannot be detected as orphans. To
|
||||
avoid this, kmemleak can also store the number of values pointing to an
|
||||
address inside the block address range that need to be found so that the
|
||||
block is not considered a leak. One example is __vmalloc().
|
||||
|
||||
Kmemleak API
|
||||
------------
|
||||
|
||||
See the include/linux/kmemleak.h header for the functions prototype.
|
||||
|
||||
kmemleak_init - initialize kmemleak
|
||||
kmemleak_alloc - notify of a memory block allocation
|
||||
kmemleak_free - notify of a memory block freeing
|
||||
kmemleak_not_leak - mark an object as not a leak
|
||||
kmemleak_ignore - do not scan or report an object as leak
|
||||
kmemleak_scan_area - add scan areas inside a memory block
|
||||
kmemleak_no_scan - do not scan a memory block
|
||||
kmemleak_erase - erase an old value in a pointer variable
|
||||
kmemleak_alloc_recursive - as kmemleak_alloc but checks the recursiveness
|
||||
kmemleak_free_recursive - as kmemleak_free but checks the recursiveness
|
||||
|
||||
Dealing with false positives/negatives
|
||||
--------------------------------------
|
||||
|
||||
The false negatives are real memory leaks (orphan objects) but not
|
||||
reported by kmemleak because values found during the memory scanning
|
||||
point to such objects. To reduce the number of false negatives, kmemleak
|
||||
provides the kmemleak_ignore, kmemleak_scan_area, kmemleak_no_scan and
|
||||
kmemleak_erase functions (see above). The task stacks also increase the
|
||||
amount of false negatives and their scanning is not enabled by default.
|
||||
|
||||
The false positives are objects wrongly reported as being memory leaks
|
||||
(orphan). For objects known not to be leaks, kmemleak provides the
|
||||
kmemleak_not_leak function. The kmemleak_ignore could also be used if
|
||||
the memory block is known not to contain other pointers and it will no
|
||||
longer be scanned.
|
||||
|
||||
Some of the reported leaks are only transient, especially on SMP
|
||||
systems, because of pointers temporarily stored in CPU registers or
|
||||
stacks. Kmemleak defines MSECS_MIN_AGE (defaulting to 1000) representing
|
||||
the minimum age of an object to be reported as a memory leak.
|
||||
|
||||
Limitations and Drawbacks
|
||||
-------------------------
|
||||
|
||||
The main drawback is the reduced performance of memory allocation and
|
||||
freeing. To avoid other penalties, the memory scanning is only performed
|
||||
when the /sys/kernel/debug/kmemleak file is read. Anyway, this tool is
|
||||
intended for debugging purposes where the performance might not be the
|
||||
most important requirement.
|
||||
|
||||
To keep the algorithm simple, kmemleak scans for values pointing to any
|
||||
address inside a block's address range. This may lead to an increased
|
||||
number of false negatives. However, it is likely that a real memory leak
|
||||
will eventually become visible.
|
||||
|
||||
Another source of false negatives is the data stored in non-pointer
|
||||
values. In a future version, kmemleak could only scan the pointer
|
||||
members in the allocated structures. This feature would solve many of
|
||||
the false negative cases described above.
|
||||
|
||||
The tool can report false positives. These are cases where an allocated
|
||||
block doesn't need to be freed (some cases in the init_call functions),
|
||||
the pointer is calculated by other methods than the usual container_of
|
||||
macro or the pointer is stored in a location not scanned by kmemleak.
|
||||
|
||||
Page allocations and ioremap are not tracked. Only the ARM and x86
|
||||
architectures are currently supported.
|
@ -31,6 +31,7 @@ Contents:
|
||||
|
||||
- Locking functions.
|
||||
- Interrupt disabling functions.
|
||||
- Sleep and wake-up functions.
|
||||
- Miscellaneous functions.
|
||||
|
||||
(*) Inter-CPU locking barrier effects.
|
||||
@ -1217,6 +1218,132 @@ barriers are required in such a situation, they must be provided from some
|
||||
other means.
|
||||
|
||||
|
||||
SLEEP AND WAKE-UP FUNCTIONS
|
||||
---------------------------
|
||||
|
||||
Sleeping and waking on an event flagged in global data can be viewed as an
|
||||
interaction between two pieces of data: the task state of the task waiting for
|
||||
the event and the global data used to indicate the event. To make sure that
|
||||
these appear to happen in the right order, the primitives to begin the process
|
||||
of going to sleep, and the primitives to initiate a wake up imply certain
|
||||
barriers.
|
||||
|
||||
Firstly, the sleeper normally follows something like this sequence of events:
|
||||
|
||||
for (;;) {
|
||||
set_current_state(TASK_UNINTERRUPTIBLE);
|
||||
if (event_indicated)
|
||||
break;
|
||||
schedule();
|
||||
}
|
||||
|
||||
A general memory barrier is interpolated automatically by set_current_state()
|
||||
after it has altered the task state:
|
||||
|
||||
CPU 1
|
||||
===============================
|
||||
set_current_state();
|
||||
set_mb();
|
||||
STORE current->state
|
||||
<general barrier>
|
||||
LOAD event_indicated
|
||||
|
||||
set_current_state() may be wrapped by:
|
||||
|
||||
prepare_to_wait();
|
||||
prepare_to_wait_exclusive();
|
||||
|
||||
which therefore also imply a general memory barrier after setting the state.
|
||||
The whole sequence above is available in various canned forms, all of which
|
||||
interpolate the memory barrier in the right place:
|
||||
|
||||
wait_event();
|
||||
wait_event_interruptible();
|
||||
wait_event_interruptible_exclusive();
|
||||
wait_event_interruptible_timeout();
|
||||
wait_event_killable();
|
||||
wait_event_timeout();
|
||||
wait_on_bit();
|
||||
wait_on_bit_lock();
|
||||
|
||||
|
||||
Secondly, code that performs a wake up normally follows something like this:
|
||||
|
||||
event_indicated = 1;
|
||||
wake_up(&event_wait_queue);
|
||||
|
||||
or:
|
||||
|
||||
event_indicated = 1;
|
||||
wake_up_process(event_daemon);
|
||||
|
||||
A write memory barrier is implied by wake_up() and co. if and only if they wake
|
||||
something up. The barrier occurs before the task state is cleared, and so sits
|
||||
between the STORE to indicate the event and the STORE to set TASK_RUNNING:
|
||||
|
||||
CPU 1 CPU 2
|
||||
=============================== ===============================
|
||||
set_current_state(); STORE event_indicated
|
||||
set_mb(); wake_up();
|
||||
STORE current->state <write barrier>
|
||||
<general barrier> STORE current->state
|
||||
LOAD event_indicated
|
||||
|
||||
The available waker functions include:
|
||||
|
||||
complete();
|
||||
wake_up();
|
||||
wake_up_all();
|
||||
wake_up_bit();
|
||||
wake_up_interruptible();
|
||||
wake_up_interruptible_all();
|
||||
wake_up_interruptible_nr();
|
||||
wake_up_interruptible_poll();
|
||||
wake_up_interruptible_sync();
|
||||
wake_up_interruptible_sync_poll();
|
||||
wake_up_locked();
|
||||
wake_up_locked_poll();
|
||||
wake_up_nr();
|
||||
wake_up_poll();
|
||||
wake_up_process();
|
||||
|
||||
|
||||
[!] Note that the memory barriers implied by the sleeper and the waker do _not_
|
||||
order multiple stores before the wake-up with respect to loads of those stored
|
||||
values after the sleeper has called set_current_state(). For instance, if the
|
||||
sleeper does:
|
||||
|
||||
set_current_state(TASK_INTERRUPTIBLE);
|
||||
if (event_indicated)
|
||||
break;
|
||||
__set_current_state(TASK_RUNNING);
|
||||
do_something(my_data);
|
||||
|
||||
and the waker does:
|
||||
|
||||
my_data = value;
|
||||
event_indicated = 1;
|
||||
wake_up(&event_wait_queue);
|
||||
|
||||
there's no guarantee that the change to event_indicated will be perceived by
|
||||
the sleeper as coming after the change to my_data. In such a circumstance, the
|
||||
code on both sides must interpolate its own memory barriers between the
|
||||
separate data accesses. Thus the above sleeper ought to do:
|
||||
|
||||
set_current_state(TASK_INTERRUPTIBLE);
|
||||
if (event_indicated) {
|
||||
smp_rmb();
|
||||
do_something(my_data);
|
||||
}
|
||||
|
||||
and the waker should do:
|
||||
|
||||
my_data = value;
|
||||
smp_wmb();
|
||||
event_indicated = 1;
|
||||
wake_up(&event_wait_queue);
|
||||
|
||||
|
||||
MISCELLANEOUS FUNCTIONS
|
||||
-----------------------
|
||||
|
||||
@ -1366,7 +1493,7 @@ WHERE ARE MEMORY BARRIERS NEEDED?
|
||||
|
||||
Under normal operation, memory operation reordering is generally not going to
|
||||
be a problem as a single-threaded linear piece of code will still appear to
|
||||
work correctly, even if it's in an SMP kernel. There are, however, three
|
||||
work correctly, even if it's in an SMP kernel. There are, however, four
|
||||
circumstances in which reordering definitely _could_ be a problem:
|
||||
|
||||
(*) Interprocessor interaction.
|
||||
|
@ -4,6 +4,7 @@
|
||||
CONTENTS
|
||||
========
|
||||
|
||||
0. WARNING
|
||||
1. Overview
|
||||
1.1 The problem
|
||||
1.2 The solution
|
||||
@ -14,6 +15,23 @@ CONTENTS
|
||||
3. Future plans
|
||||
|
||||
|
||||
0. WARNING
|
||||
==========
|
||||
|
||||
Fiddling with these settings can result in an unstable system, the knobs are
|
||||
root only and assumes root knows what he is doing.
|
||||
|
||||
Most notable:
|
||||
|
||||
* very small values in sched_rt_period_us can result in an unstable
|
||||
system when the period is smaller than either the available hrtimer
|
||||
resolution, or the time it takes to handle the budget refresh itself.
|
||||
|
||||
* very small values in sched_rt_runtime_us can result in an unstable
|
||||
system when the runtime is so small the system has difficulty making
|
||||
forward progress (NOTE: the migration thread and kstopmachine both
|
||||
are real-time processes).
|
||||
|
||||
1. Overview
|
||||
===========
|
||||
|
||||
@ -169,7 +187,7 @@ get their allocated time.
|
||||
|
||||
Implementing SCHED_EDF might take a while to complete. Priority Inheritance is
|
||||
the biggest challenge as the current linux PI infrastructure is geared towards
|
||||
the limited static priority levels 0-139. With deadline scheduling you need to
|
||||
the limited static priority levels 0-99. With deadline scheduling you need to
|
||||
do deadline inheritance (since priority is inversely proportional to the
|
||||
deadline delta (deadline - now).
|
||||
|
||||
|
@ -32,6 +32,7 @@ show up in /proc/sys/kernel:
|
||||
- kstack_depth_to_print [ X86 only ]
|
||||
- l2cr [ PPC only ]
|
||||
- modprobe ==> Documentation/debugging-modules.txt
|
||||
- modules_disabled
|
||||
- msgmax
|
||||
- msgmnb
|
||||
- msgmni
|
||||
@ -184,6 +185,16 @@ kernel stack.
|
||||
|
||||
==============================================================
|
||||
|
||||
modules_disabled:
|
||||
|
||||
A toggle value indicating if modules are allowed to be loaded
|
||||
in an otherwise modular kernel. This toggle defaults to off
|
||||
(0), but can be set true (1). Once true, modules can be
|
||||
neither loaded nor unloaded, and the toggle cannot be set back
|
||||
to false.
|
||||
|
||||
==============================================================
|
||||
|
||||
osrelease, ostype & version:
|
||||
|
||||
# cat osrelease
|
||||
|
90
Documentation/trace/events.txt
Normal file
90
Documentation/trace/events.txt
Normal file
@ -0,0 +1,90 @@
|
||||
Event Tracing
|
||||
|
||||
Documentation written by Theodore Ts'o
|
||||
Updated by Li Zefan
|
||||
|
||||
1. Introduction
|
||||
===============
|
||||
|
||||
Tracepoints (see Documentation/trace/tracepoints.txt) can be used
|
||||
without creating custom kernel modules to register probe functions
|
||||
using the event tracing infrastructure.
|
||||
|
||||
Not all tracepoints can be traced using the event tracing system;
|
||||
the kernel developer must provide code snippets which define how the
|
||||
tracing information is saved into the tracing buffer, and how the
|
||||
tracing information should be printed.
|
||||
|
||||
2. Using Event Tracing
|
||||
======================
|
||||
|
||||
2.1 Via the 'set_event' interface
|
||||
---------------------------------
|
||||
|
||||
The events which are available for tracing can be found in the file
|
||||
/debug/tracing/available_events.
|
||||
|
||||
To enable a particular event, such as 'sched_wakeup', simply echo it
|
||||
to /debug/tracing/set_event. For example:
|
||||
|
||||
# echo sched_wakeup >> /debug/tracing/set_event
|
||||
|
||||
[ Note: '>>' is necessary, otherwise it will first disable
|
||||
all the events. ]
|
||||
|
||||
To disable an event, echo the event name to the set_event file prefixed
|
||||
with an exclamation point:
|
||||
|
||||
# echo '!sched_wakeup' >> /debug/tracing/set_event
|
||||
|
||||
To disable all events, echo an empty line to the set_event file:
|
||||
|
||||
# echo > /debug/tracing/set_event
|
||||
|
||||
To enable all events, echo '*:*' or '*:' to the set_event file:
|
||||
|
||||
# echo *:* > /debug/tracing/set_event
|
||||
|
||||
The events are organized into subsystems, such as ext4, irq, sched,
|
||||
etc., and a full event name looks like this: <subsystem>:<event>. The
|
||||
subsystem name is optional, but it is displayed in the available_events
|
||||
file. All of the events in a subsystem can be specified via the syntax
|
||||
"<subsystem>:*"; for example, to enable all irq events, you can use the
|
||||
command:
|
||||
|
||||
# echo 'irq:*' > /debug/tracing/set_event
|
||||
|
||||
2.2 Via the 'enable' toggle
|
||||
---------------------------
|
||||
|
||||
The events available are also listed in /debug/tracing/events/ hierarchy
|
||||
of directories.
|
||||
|
||||
To enable event 'sched_wakeup':
|
||||
|
||||
# echo 1 > /debug/tracing/events/sched/sched_wakeup/enable
|
||||
|
||||
To disable it:
|
||||
|
||||
# echo 0 > /debug/tracing/events/sched/sched_wakeup/enable
|
||||
|
||||
To enable all events in sched subsystem:
|
||||
|
||||
# echo 1 > /debug/tracing/events/sched/enable
|
||||
|
||||
To eanble all events:
|
||||
|
||||
# echo 1 > /debug/tracing/events/enable
|
||||
|
||||
When reading one of these enable files, there are four results:
|
||||
|
||||
0 - all events this file affects are disabled
|
||||
1 - all events this file affects are enabled
|
||||
X - there is a mixture of events enabled and disabled
|
||||
? - this file does not affect any event
|
||||
|
||||
3. Defining an event-enabled tracepoint
|
||||
=======================================
|
||||
|
||||
See The example provided in samples/trace_events
|
||||
|
@ -179,7 +179,7 @@ Here is the list of current tracers that may be configured.
|
||||
|
||||
Function call tracer to trace all kernel functions.
|
||||
|
||||
"function_graph_tracer"
|
||||
"function_graph"
|
||||
|
||||
Similar to the function tracer except that the
|
||||
function tracer probes the functions on their entry
|
||||
@ -518,9 +518,18 @@ priority with zero (0) being the highest priority and the nice
|
||||
values starting at 100 (nice -20). Below is a quick chart to map
|
||||
the kernel priority to user land priorities.
|
||||
|
||||
Kernel priority: 0 to 99 ==> user RT priority 99 to 0
|
||||
Kernel priority: 100 to 139 ==> user nice -20 to 19
|
||||
Kernel priority: 140 ==> idle task priority
|
||||
Kernel Space User Space
|
||||
===============================================================
|
||||
0(high) to 98(low) user RT priority 99(high) to 1(low)
|
||||
with SCHED_RR or SCHED_FIFO
|
||||
---------------------------------------------------------------
|
||||
99 sched_priority is not used in scheduling
|
||||
decisions(it must be specified as 0)
|
||||
---------------------------------------------------------------
|
||||
100(high) to 139(low) user nice -20(high) to 19(low)
|
||||
---------------------------------------------------------------
|
||||
140 idle task priority
|
||||
---------------------------------------------------------------
|
||||
|
||||
The task states are:
|
||||
|
||||
|
17
Documentation/trace/power.txt
Normal file
17
Documentation/trace/power.txt
Normal file
@ -0,0 +1,17 @@
|
||||
The power tracer collects detailed information about C-state and P-state
|
||||
transitions, instead of just looking at the high-level "average"
|
||||
information.
|
||||
|
||||
There is a helper script found in scripts/tracing/power.pl in the kernel
|
||||
sources which can be used to parse this information and create a
|
||||
Scalable Vector Graphics (SVG) picture from the trace data.
|
||||
|
||||
To use this tracer:
|
||||
|
||||
echo 0 > /sys/kernel/debug/tracing/tracing_enabled
|
||||
echo power > /sys/kernel/debug/tracing/current_tracer
|
||||
echo 1 > /sys/kernel/debug/tracing/tracing_enabled
|
||||
sleep 1
|
||||
echo 0 > /sys/kernel/debug/tracing/tracing_enabled
|
||||
cat /sys/kernel/debug/tracing/trace | \
|
||||
perl scripts/tracing/power.pl > out.svg
|
@ -50,6 +50,10 @@ Protocol 2.08: (Kernel 2.6.26) Added crc32 checksum and ELF format
|
||||
Protocol 2.09: (Kernel 2.6.26) Added a field of 64-bit physical
|
||||
pointer to single linked list of struct setup_data.
|
||||
|
||||
Protocol 2.10: (Kernel 2.6.31) Added a protocol for relaxed alignment
|
||||
beyond the kernel_alignment added, new init_size and
|
||||
pref_address fields. Added extended boot loader IDs.
|
||||
|
||||
**** MEMORY LAYOUT
|
||||
|
||||
The traditional memory map for the kernel loader, used for Image or
|
||||
@ -168,12 +172,13 @@ Offset Proto Name Meaning
|
||||
021C/4 2.00+ ramdisk_size initrd size (set by boot loader)
|
||||
0220/4 2.00+ bootsect_kludge DO NOT USE - for bootsect.S use only
|
||||
0224/2 2.01+ heap_end_ptr Free memory after setup end
|
||||
0226/2 N/A pad1 Unused
|
||||
0226/1 2.02+(3 ext_loader_ver Extended boot loader version
|
||||
0227/1 2.02+(3 ext_loader_type Extended boot loader ID
|
||||
0228/4 2.02+ cmd_line_ptr 32-bit pointer to the kernel command line
|
||||
022C/4 2.03+ ramdisk_max Highest legal initrd address
|
||||
0230/4 2.05+ kernel_alignment Physical addr alignment required for kernel
|
||||
0234/1 2.05+ relocatable_kernel Whether kernel is relocatable or not
|
||||
0235/1 N/A pad2 Unused
|
||||
0235/1 2.10+ min_alignment Minimum alignment, as a power of two
|
||||
0236/2 N/A pad3 Unused
|
||||
0238/4 2.06+ cmdline_size Maximum size of the kernel command line
|
||||
023C/4 2.07+ hardware_subarch Hardware subarchitecture
|
||||
@ -182,6 +187,8 @@ Offset Proto Name Meaning
|
||||
024C/4 2.08+ payload_length Length of kernel payload
|
||||
0250/8 2.09+ setup_data 64-bit physical pointer to linked list
|
||||
of struct setup_data
|
||||
0258/8 2.10+ pref_address Preferred loading address
|
||||
0260/4 2.10+ init_size Linear memory required during initialization
|
||||
|
||||
(1) For backwards compatibility, if the setup_sects field contains 0, the
|
||||
real value is 4.
|
||||
@ -190,6 +197,8 @@ Offset Proto Name Meaning
|
||||
field are unusable, which means the size of a bzImage kernel
|
||||
cannot be determined.
|
||||
|
||||
(3) Ignored, but safe to set, for boot protocols 2.02-2.09.
|
||||
|
||||
If the "HdrS" (0x53726448) magic number is not found at offset 0x202,
|
||||
the boot protocol version is "old". Loading an old kernel, the
|
||||
following parameters should be assumed:
|
||||
@ -343,18 +352,32 @@ Protocol: 2.00+
|
||||
0xTV here, where T is an identifier for the boot loader and V is
|
||||
a version number. Otherwise, enter 0xFF here.
|
||||
|
||||
For boot loader IDs above T = 0xD, write T = 0xE to this field and
|
||||
write the extended ID minus 0x10 to the ext_loader_type field.
|
||||
Similarly, the ext_loader_ver field can be used to provide more than
|
||||
four bits for the bootloader version.
|
||||
|
||||
For example, for T = 0x15, V = 0x234, write:
|
||||
|
||||
type_of_loader <- 0xE4
|
||||
ext_loader_type <- 0x05
|
||||
ext_loader_ver <- 0x23
|
||||
|
||||
Assigned boot loader ids:
|
||||
0 LILO (0x00 reserved for pre-2.00 bootloader)
|
||||
1 Loadlin
|
||||
2 bootsect-loader (0x20, all other values reserved)
|
||||
3 SYSLINUX
|
||||
4 EtherBoot
|
||||
3 Syslinux
|
||||
4 Etherboot/gPXE
|
||||
5 ELILO
|
||||
7 GRUB
|
||||
8 U-BOOT
|
||||
8 U-Boot
|
||||
9 Xen
|
||||
A Gujin
|
||||
B Qemu
|
||||
C Arcturus Networks uCbootloader
|
||||
E Extended (see ext_loader_type)
|
||||
F Special (0xFF = undefined)
|
||||
|
||||
Please contact <hpa@zytor.com> if you need a bootloader ID
|
||||
value assigned.
|
||||
@ -453,6 +476,35 @@ Protocol: 2.01+
|
||||
Set this field to the offset (from the beginning of the real-mode
|
||||
code) of the end of the setup stack/heap, minus 0x0200.
|
||||
|
||||
Field name: ext_loader_ver
|
||||
Type: write (optional)
|
||||
Offset/size: 0x226/1
|
||||
Protocol: 2.02+
|
||||
|
||||
This field is used as an extension of the version number in the
|
||||
type_of_loader field. The total version number is considered to be
|
||||
(type_of_loader & 0x0f) + (ext_loader_ver << 4).
|
||||
|
||||
The use of this field is boot loader specific. If not written, it
|
||||
is zero.
|
||||
|
||||
Kernels prior to 2.6.31 did not recognize this field, but it is safe
|
||||
to write for protocol version 2.02 or higher.
|
||||
|
||||
Field name: ext_loader_type
|
||||
Type: write (obligatory if (type_of_loader & 0xf0) == 0xe0)
|
||||
Offset/size: 0x227/1
|
||||
Protocol: 2.02+
|
||||
|
||||
This field is used as an extension of the type number in
|
||||
type_of_loader field. If the type in type_of_loader is 0xE, then
|
||||
the actual type is (ext_loader_type + 0x10).
|
||||
|
||||
This field is ignored if the type in type_of_loader is not 0xE.
|
||||
|
||||
Kernels prior to 2.6.31 did not recognize this field, but it is safe
|
||||
to write for protocol version 2.02 or higher.
|
||||
|
||||
Field name: cmd_line_ptr
|
||||
Type: write (obligatory)
|
||||
Offset/size: 0x228/4
|
||||
@ -482,11 +534,19 @@ Protocol: 2.03+
|
||||
0x37FFFFFF, you can start your ramdisk at 0x37FE0000.)
|
||||
|
||||
Field name: kernel_alignment
|
||||
Type: read (reloc)
|
||||
Type: read/modify (reloc)
|
||||
Offset/size: 0x230/4
|
||||
Protocol: 2.05+
|
||||
Protocol: 2.05+ (read), 2.10+ (modify)
|
||||
|
||||
Alignment unit required by the kernel (if relocatable_kernel is true.)
|
||||
Alignment unit required by the kernel (if relocatable_kernel is
|
||||
true.) A relocatable kernel that is loaded at an alignment
|
||||
incompatible with the value in this field will be realigned during
|
||||
kernel initialization.
|
||||
|
||||
Starting with protocol version 2.10, this reflects the kernel
|
||||
alignment preferred for optimal performance; it is possible for the
|
||||
loader to modify this field to permit a lesser alignment. See the
|
||||
min_alignment and pref_address field below.
|
||||
|
||||
Field name: relocatable_kernel
|
||||
Type: read (reloc)
|
||||
@ -498,6 +558,22 @@ Protocol: 2.05+
|
||||
After loading, the boot loader must set the code32_start field to
|
||||
point to the loaded code, or to a boot loader hook.
|
||||
|
||||
Field name: min_alignment
|
||||
Type: read (reloc)
|
||||
Offset/size: 0x235/1
|
||||
Protocol: 2.10+
|
||||
|
||||
This field, if nonzero, indicates as a power of two the minimum
|
||||
alignment required, as opposed to preferred, by the kernel to boot.
|
||||
If a boot loader makes use of this field, it should update the
|
||||
kernel_alignment field with the alignment unit desired; typically:
|
||||
|
||||
kernel_alignment = 1 << min_alignment
|
||||
|
||||
There may be a considerable performance cost with an excessively
|
||||
misaligned kernel. Therefore, a loader should typically try each
|
||||
power-of-two alignment from kernel_alignment down to this alignment.
|
||||
|
||||
Field name: cmdline_size
|
||||
Type: read
|
||||
Offset/size: 0x238/4
|
||||
@ -582,6 +658,36 @@ Protocol: 2.09+
|
||||
sure to consider the case where the linked list already contains
|
||||
entries.
|
||||
|
||||
Field name: pref_address
|
||||
Type: read (reloc)
|
||||
Offset/size: 0x258/8
|
||||
Protocol: 2.10+
|
||||
|
||||
This field, if nonzero, represents a preferred load address for the
|
||||
kernel. A relocating bootloader should attempt to load at this
|
||||
address if possible.
|
||||
|
||||
A non-relocatable kernel will unconditionally move itself to run
|
||||
at this address.
|
||||
|
||||
Field name: init_size
|
||||
Type: read
|
||||
Offset/size: 0x25c/4
|
||||
|
||||
This field indicates the amount of linear contiguous memory starting
|
||||
at the kernel runtime start address that the kernel needs before it
|
||||
is capable of examining its memory map. This is not the same thing
|
||||
as the total amount of memory the kernel needs to boot, but it can
|
||||
be used by a relocating boot loader to help select a safe load
|
||||
address for the kernel.
|
||||
|
||||
The kernel runtime start address is determined by the following algorithm:
|
||||
|
||||
if (relocatable_kernel)
|
||||
runtime_start = align_up(load_address, kernel_alignment)
|
||||
else
|
||||
runtime_start = pref_address
|
||||
|
||||
|
||||
**** THE IMAGE CHECKSUM
|
||||
|
||||
|
@ -150,11 +150,6 @@ NUMA
|
||||
Otherwise, the remaining system RAM is allocated to an
|
||||
additional node.
|
||||
|
||||
numa=hotadd=percent
|
||||
Only allow hotadd memory to preallocate page structures up to
|
||||
percent of already available memory.
|
||||
numa=hotadd=0 will disable hotadd memory.
|
||||
|
||||
ACPI
|
||||
|
||||
acpi=off Don't enable ACPI
|
||||
|
@ -6,10 +6,11 @@ Virtual memory map with 4 level page tables:
|
||||
0000000000000000 - 00007fffffffffff (=47 bits) user space, different per mm
|
||||
hole caused by [48:63] sign extension
|
||||
ffff800000000000 - ffff80ffffffffff (=40 bits) guard hole
|
||||
ffff880000000000 - ffffc0ffffffffff (=57 TB) direct mapping of all phys. memory
|
||||
ffffc10000000000 - ffffc1ffffffffff (=40 bits) hole
|
||||
ffffc20000000000 - ffffe1ffffffffff (=45 bits) vmalloc/ioremap space
|
||||
ffffe20000000000 - ffffe2ffffffffff (=40 bits) virtual memory map (1TB)
|
||||
ffff880000000000 - ffffc7ffffffffff (=64 TB) direct mapping of all phys. memory
|
||||
ffffc80000000000 - ffffc8ffffffffff (=40 bits) hole
|
||||
ffffc90000000000 - ffffe8ffffffffff (=45 bits) vmalloc/ioremap space
|
||||
ffffe90000000000 - ffffe9ffffffffff (=40 bits) hole
|
||||
ffffea0000000000 - ffffeaffffffffff (=40 bits) virtual memory map (1TB)
|
||||
... unused hole ...
|
||||
ffffffff80000000 - ffffffffa0000000 (=512 MB) kernel text mapping, from phys 0
|
||||
ffffffffa0000000 - fffffffffff00000 (=1536 MB) module mapping space
|
||||
|
40
MAINTAINERS
40
MAINTAINERS
@ -71,7 +71,7 @@ P: Person
|
||||
M: Mail patches to
|
||||
L: Mailing list that is relevant to this area
|
||||
W: Web-page with status/info
|
||||
T: SCM tree type and location. Type is one of: git, hg, quilt.
|
||||
T: SCM tree type and location. Type is one of: git, hg, quilt, stgit.
|
||||
S: Status, one of the following:
|
||||
|
||||
Supported: Someone is actually paid to look after this.
|
||||
@ -159,7 +159,8 @@ F: drivers/net/r8169.c
|
||||
8250/16?50 (AND CLONE UARTS) SERIAL DRIVER
|
||||
L: linux-serial@vger.kernel.org
|
||||
W: http://serial.sourceforge.net
|
||||
S: Orphan
|
||||
M: alan@lxorguk.ukuu.org.uk
|
||||
S: Odd Fixes
|
||||
F: drivers/serial/8250*
|
||||
F: include/linux/serial_8250.h
|
||||
|
||||
@ -1801,10 +1802,10 @@ F: drivers/char/epca*
|
||||
F: drivers/char/digi*
|
||||
|
||||
DIRECTORY NOTIFICATION (DNOTIFY)
|
||||
P: Stephen Rothwell
|
||||
M: sfr@canb.auug.org.au
|
||||
P: Eric Paris
|
||||
M: eparis@parisplace.org
|
||||
L: linux-kernel@vger.kernel.org
|
||||
S: Supported
|
||||
S: Maintained
|
||||
F: Documentation/filesystems/dnotify.txt
|
||||
F: fs/notify/dnotify/
|
||||
F: include/linux/dnotify.h
|
||||
@ -1978,6 +1979,16 @@ F: Documentation/edac.txt
|
||||
F: drivers/edac/edac_*
|
||||
F: include/linux/edac.h
|
||||
|
||||
EDAC-AMD64
|
||||
P: Doug Thompson
|
||||
M: dougthompson@xmission.com
|
||||
P: Borislav Petkov
|
||||
M: borislav.petkov@amd.com
|
||||
L: bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers)
|
||||
W: bluesmoke.sourceforge.net
|
||||
S: Supported
|
||||
F: drivers/edac/amd64_edac*
|
||||
|
||||
EDAC-E752X
|
||||
P: Mark Gross
|
||||
M: mark.gross@intel.com
|
||||
@ -2847,6 +2858,8 @@ P: John McCutchan
|
||||
M: john@johnmccutchan.com
|
||||
P: Robert Love
|
||||
M: rlove@rlove.org
|
||||
P: Eric Paris
|
||||
M: eparis@parisplace.org
|
||||
L: linux-kernel@vger.kernel.org
|
||||
S: Maintained
|
||||
F: Documentation/filesystems/inotify.txt
|
||||
@ -3359,6 +3372,12 @@ F: Documentation/trace/kmemtrace.txt
|
||||
F: include/trace/kmemtrace.h
|
||||
F: kernel/trace/kmemtrace.c
|
||||
|
||||
KMEMLEAK
|
||||
P: Catalin Marinas
|
||||
M: catalin.marinas@arm.com
|
||||
L: linux-kernel@vger.kernel.org
|
||||
S: Maintained
|
||||
|
||||
KPROBES
|
||||
P: Ananth N Mavinakayanahalli
|
||||
M: ananth@in.ibm.com
|
||||
@ -4392,6 +4411,16 @@ S: Maintained
|
||||
F: include/linux/delayacct.h
|
||||
F: kernel/delayacct.c
|
||||
|
||||
PERFORMANCE COUNTER SUBSYSTEM
|
||||
P: Peter Zijlstra
|
||||
M: a.p.zijlstra@chello.nl
|
||||
P: Paul Mackerras
|
||||
M: paulus@samba.org
|
||||
P: Ingo Molnar
|
||||
M: mingo@elte.hu
|
||||
L: linux-kernel@vger.kernel.org
|
||||
S: Supported
|
||||
|
||||
PERSONALITY HANDLING
|
||||
P: Christoph Hellwig
|
||||
M: hch@infradead.org
|
||||
@ -5629,6 +5658,7 @@ P: Alan Cox
|
||||
M: alan@lxorguk.ukuu.org.uk
|
||||
L: linux-kernel@vger.kernel.org
|
||||
S: Maintained
|
||||
T: stgit http://zeniv.linux.org.uk/~alan/ttydev/
|
||||
|
||||
TULIP NETWORK DRIVERS
|
||||
P: Grant Grundler
|
||||
|
@ -371,8 +371,6 @@ SYSCALL_DEFINE4(osf_mount, unsigned long, typenr, char __user *, path,
|
||||
int retval = -EINVAL;
|
||||
char *name;
|
||||
|
||||
lock_kernel();
|
||||
|
||||
name = getname(path);
|
||||
retval = PTR_ERR(name);
|
||||
if (IS_ERR(name))
|
||||
@ -392,7 +390,6 @@ SYSCALL_DEFINE4(osf_mount, unsigned long, typenr, char __user *, path,
|
||||
}
|
||||
putname(name);
|
||||
out:
|
||||
unlock_kernel();
|
||||
return retval;
|
||||
}
|
||||
|
||||
|
@ -176,22 +176,26 @@ cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity)
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
static int
|
||||
dp264_set_affinity(unsigned int irq, const struct cpumask *affinity)
|
||||
{
|
||||
spin_lock(&dp264_irq_lock);
|
||||
cpu_set_irq_affinity(irq, *affinity);
|
||||
tsunami_update_irq_hw(cached_irq_mask);
|
||||
spin_unlock(&dp264_irq_lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void
|
||||
static int
|
||||
clipper_set_affinity(unsigned int irq, const struct cpumask *affinity)
|
||||
{
|
||||
spin_lock(&dp264_irq_lock);
|
||||
cpu_set_irq_affinity(irq - 16, *affinity);
|
||||
tsunami_update_irq_hw(cached_irq_mask);
|
||||
spin_unlock(&dp264_irq_lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct hw_interrupt_type dp264_irq_type = {
|
||||
|
@ -157,13 +157,15 @@ titan_cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity)
|
||||
|
||||
}
|
||||
|
||||
static void
|
||||
static int
|
||||
titan_set_irq_affinity(unsigned int irq, const struct cpumask *affinity)
|
||||
{
|
||||
spin_lock(&titan_irq_lock);
|
||||
titan_cpu_set_irq_affinity(irq - 16, *affinity);
|
||||
titan_update_irq_hw(titan_cached_irq_mask);
|
||||
spin_unlock(&titan_irq_lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void
|
||||
|
@ -109,7 +109,7 @@ static void gic_unmask_irq(unsigned int irq)
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
static void gic_set_cpu(unsigned int irq, const struct cpumask *mask_val)
|
||||
static int gic_set_cpu(unsigned int irq, const struct cpumask *mask_val)
|
||||
{
|
||||
void __iomem *reg = gic_dist_base(irq) + GIC_DIST_TARGET + (gic_irq(irq) & ~3);
|
||||
unsigned int shift = (irq % 4) * 8;
|
||||
@ -122,6 +122,8 @@ static void gic_set_cpu(unsigned int irq, const struct cpumask *mask_val)
|
||||
val |= 1 << (cpu + shift);
|
||||
writel(val, reg);
|
||||
spin_unlock(&irq_controller_lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
|
@ -20,11 +20,16 @@
|
||||
#define ASMARM_ARCH_UART_H
|
||||
|
||||
#define IMXUART_HAVE_RTSCTS (1<<0)
|
||||
#define IMXUART_IRDA (1<<1)
|
||||
|
||||
struct imxuart_platform_data {
|
||||
int (*init)(struct platform_device *pdev);
|
||||
int (*exit)(struct platform_device *pdev);
|
||||
unsigned int flags;
|
||||
void (*irda_enable)(int enable);
|
||||
unsigned int irda_inv_rx:1;
|
||||
unsigned int irda_inv_tx:1;
|
||||
unsigned short transceiver_delay;
|
||||
};
|
||||
|
||||
#endif
|
||||
|
@ -147,24 +147,40 @@ static int __mbox_msg_send(struct omap_mbox *mbox, mbox_msg_t msg, void *arg)
|
||||
return ret;
|
||||
}
|
||||
|
||||
struct omap_msg_tx_data {
|
||||
mbox_msg_t msg;
|
||||
void *arg;
|
||||
};
|
||||
|
||||
static void omap_msg_tx_end_io(struct request *rq, int error)
|
||||
{
|
||||
kfree(rq->special);
|
||||
__blk_put_request(rq->q, rq);
|
||||
}
|
||||
|
||||
int omap_mbox_msg_send(struct omap_mbox *mbox, mbox_msg_t msg, void* arg)
|
||||
{
|
||||
struct omap_msg_tx_data *tx_data;
|
||||
struct request *rq;
|
||||
struct request_queue *q = mbox->txq->queue;
|
||||
int ret = 0;
|
||||
|
||||
tx_data = kmalloc(sizeof(*tx_data), GFP_ATOMIC);
|
||||
if (unlikely(!tx_data))
|
||||
return -ENOMEM;
|
||||
|
||||
rq = blk_get_request(q, WRITE, GFP_ATOMIC);
|
||||
if (unlikely(!rq)) {
|
||||
ret = -ENOMEM;
|
||||
goto fail;
|
||||
kfree(tx_data);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
rq->data = (void *)msg;
|
||||
blk_insert_request(q, rq, 0, arg);
|
||||
tx_data->msg = msg;
|
||||
tx_data->arg = arg;
|
||||
rq->end_io = omap_msg_tx_end_io;
|
||||
blk_insert_request(q, rq, 0, tx_data);
|
||||
|
||||
schedule_work(&mbox->txq->work);
|
||||
fail:
|
||||
return ret;
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(omap_mbox_msg_send);
|
||||
|
||||
@ -178,22 +194,28 @@ static void mbox_tx_work(struct work_struct *work)
|
||||
struct request_queue *q = mbox->txq->queue;
|
||||
|
||||
while (1) {
|
||||
struct omap_msg_tx_data *tx_data;
|
||||
|
||||
spin_lock(q->queue_lock);
|
||||
rq = elv_next_request(q);
|
||||
rq = blk_fetch_request(q);
|
||||
spin_unlock(q->queue_lock);
|
||||
|
||||
if (!rq)
|
||||
break;
|
||||
|
||||
ret = __mbox_msg_send(mbox, (mbox_msg_t) rq->data, rq->special);
|
||||
tx_data = rq->special;
|
||||
|
||||
ret = __mbox_msg_send(mbox, tx_data->msg, tx_data->arg);
|
||||
if (ret) {
|
||||
enable_mbox_irq(mbox, IRQ_TX);
|
||||
spin_lock(q->queue_lock);
|
||||
blk_requeue_request(q, rq);
|
||||
spin_unlock(q->queue_lock);
|
||||
return;
|
||||
}
|
||||
|
||||
spin_lock(q->queue_lock);
|
||||
if (__blk_end_request(rq, 0, 0))
|
||||
BUG();
|
||||
__blk_end_request_all(rq, 0);
|
||||
spin_unlock(q->queue_lock);
|
||||
}
|
||||
}
|
||||
@ -218,16 +240,13 @@ static void mbox_rx_work(struct work_struct *work)
|
||||
|
||||
while (1) {
|
||||
spin_lock_irqsave(q->queue_lock, flags);
|
||||
rq = elv_next_request(q);
|
||||
rq = blk_fetch_request(q);
|
||||
spin_unlock_irqrestore(q->queue_lock, flags);
|
||||
if (!rq)
|
||||
break;
|
||||
|
||||
msg = (mbox_msg_t) rq->data;
|
||||
|
||||
if (blk_end_request(rq, 0, 0))
|
||||
BUG();
|
||||
|
||||
msg = (mbox_msg_t)rq->special;
|
||||
blk_end_request_all(rq, 0);
|
||||
mbox->rxq->callback((void *)msg);
|
||||
}
|
||||
}
|
||||
@ -264,7 +283,6 @@ static void __mbox_rx_interrupt(struct omap_mbox *mbox)
|
||||
goto nomem;
|
||||
|
||||
msg = mbox_fifo_read(mbox);
|
||||
rq->data = (void *)msg;
|
||||
|
||||
if (unlikely(mbox_seq_test(mbox, msg))) {
|
||||
pr_info("mbox: Illegal seq bit!(%08x)\n", msg);
|
||||
@ -272,7 +290,7 @@ static void __mbox_rx_interrupt(struct omap_mbox *mbox)
|
||||
mbox->err_notify();
|
||||
}
|
||||
|
||||
blk_insert_request(q, rq, 0, NULL);
|
||||
blk_insert_request(q, rq, 0, (void *)msg);
|
||||
if (mbox->ops->type == OMAP_MBOX_TYPE1)
|
||||
break;
|
||||
}
|
||||
@ -329,16 +347,15 @@ omap_mbox_read(struct device *dev, struct device_attribute *attr, char *buf)
|
||||
|
||||
while (1) {
|
||||
spin_lock_irqsave(q->queue_lock, flags);
|
||||
rq = elv_next_request(q);
|
||||
rq = blk_fetch_request(q);
|
||||
spin_unlock_irqrestore(q->queue_lock, flags);
|
||||
|
||||
if (!rq)
|
||||
break;
|
||||
|
||||
*p = (mbox_msg_t) rq->data;
|
||||
*p = (mbox_msg_t)rq->special;
|
||||
|
||||
if (blk_end_request(rq, 0, 0))
|
||||
BUG();
|
||||
blk_end_request_all(rq, 0);
|
||||
|
||||
if (unlikely(mbox_seq_test(mbox, *p))) {
|
||||
pr_info("mbox: Illegal seq bit!(%08x) ignored\n", *p);
|
||||
|
@ -325,12 +325,14 @@ static void end_crisv32_irq(unsigned int irq)
|
||||
{
|
||||
}
|
||||
|
||||
void set_affinity_crisv32_irq(unsigned int irq, const struct cpumask *dest)
|
||||
int set_affinity_crisv32_irq(unsigned int irq, const struct cpumask *dest)
|
||||
{
|
||||
unsigned long flags;
|
||||
spin_lock_irqsave(&irq_lock, flags);
|
||||
irq_allocations[irq - FIRST_IRQ].mask = *dest;
|
||||
spin_unlock_irqrestore(&irq_lock, flags);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct irq_chip crisv32_irq_type = {
|
||||
|
@ -6,6 +6,7 @@ config FRV
|
||||
bool
|
||||
default y
|
||||
select HAVE_IDE
|
||||
select HAVE_ARCH_TRACEHOOK
|
||||
|
||||
config ZONE_DMA
|
||||
bool
|
||||
|
@ -112,7 +112,7 @@ extern unsigned long atomic_test_and_XOR_mask(unsigned long mask, volatile unsig
|
||||
#define atomic_clear_mask(mask, v) atomic_test_and_ANDNOT_mask((mask), (v))
|
||||
#define atomic_set_mask(mask, v) atomic_test_and_OR_mask((mask), (v))
|
||||
|
||||
static inline int test_and_clear_bit(int nr, volatile void *addr)
|
||||
static inline int test_and_clear_bit(unsigned long nr, volatile void *addr)
|
||||
{
|
||||
volatile unsigned long *ptr = addr;
|
||||
unsigned long mask = 1UL << (nr & 31);
|
||||
@ -120,7 +120,7 @@ static inline int test_and_clear_bit(int nr, volatile void *addr)
|
||||
return (atomic_test_and_ANDNOT_mask(mask, ptr) & mask) != 0;
|
||||
}
|
||||
|
||||
static inline int test_and_set_bit(int nr, volatile void *addr)
|
||||
static inline int test_and_set_bit(unsigned long nr, volatile void *addr)
|
||||
{
|
||||
volatile unsigned long *ptr = addr;
|
||||
unsigned long mask = 1UL << (nr & 31);
|
||||
@ -128,7 +128,7 @@ static inline int test_and_set_bit(int nr, volatile void *addr)
|
||||
return (atomic_test_and_OR_mask(mask, ptr) & mask) != 0;
|
||||
}
|
||||
|
||||
static inline int test_and_change_bit(int nr, volatile void *addr)
|
||||
static inline int test_and_change_bit(unsigned long nr, volatile void *addr)
|
||||
{
|
||||
volatile unsigned long *ptr = addr;
|
||||
unsigned long mask = 1UL << (nr & 31);
|
||||
@ -136,22 +136,22 @@ static inline int test_and_change_bit(int nr, volatile void *addr)
|
||||
return (atomic_test_and_XOR_mask(mask, ptr) & mask) != 0;
|
||||
}
|
||||
|
||||
static inline void clear_bit(int nr, volatile void *addr)
|
||||
static inline void clear_bit(unsigned long nr, volatile void *addr)
|
||||
{
|
||||
test_and_clear_bit(nr, addr);
|
||||
}
|
||||
|
||||
static inline void set_bit(int nr, volatile void *addr)
|
||||
static inline void set_bit(unsigned long nr, volatile void *addr)
|
||||
{
|
||||
test_and_set_bit(nr, addr);
|
||||
}
|
||||
|
||||
static inline void change_bit(int nr, volatile void * addr)
|
||||
static inline void change_bit(unsigned long nr, volatile void *addr)
|
||||
{
|
||||
test_and_change_bit(nr, addr);
|
||||
}
|
||||
|
||||
static inline void __clear_bit(int nr, volatile void * addr)
|
||||
static inline void __clear_bit(unsigned long nr, volatile void *addr)
|
||||
{
|
||||
volatile unsigned long *a = addr;
|
||||
int mask;
|
||||
@ -161,7 +161,7 @@ static inline void __clear_bit(int nr, volatile void * addr)
|
||||
*a &= ~mask;
|
||||
}
|
||||
|
||||
static inline void __set_bit(int nr, volatile void * addr)
|
||||
static inline void __set_bit(unsigned long nr, volatile void *addr)
|
||||
{
|
||||
volatile unsigned long *a = addr;
|
||||
int mask;
|
||||
@ -171,7 +171,7 @@ static inline void __set_bit(int nr, volatile void * addr)
|
||||
*a |= mask;
|
||||
}
|
||||
|
||||
static inline void __change_bit(int nr, volatile void *addr)
|
||||
static inline void __change_bit(unsigned long nr, volatile void *addr)
|
||||
{
|
||||
volatile unsigned long *a = addr;
|
||||
int mask;
|
||||
@ -181,7 +181,7 @@ static inline void __change_bit(int nr, volatile void *addr)
|
||||
*a ^= mask;
|
||||
}
|
||||
|
||||
static inline int __test_and_clear_bit(int nr, volatile void * addr)
|
||||
static inline int __test_and_clear_bit(unsigned long nr, volatile void *addr)
|
||||
{
|
||||
volatile unsigned long *a = addr;
|
||||
int mask, retval;
|
||||
@ -193,7 +193,7 @@ static inline int __test_and_clear_bit(int nr, volatile void * addr)
|
||||
return retval;
|
||||
}
|
||||
|
||||
static inline int __test_and_set_bit(int nr, volatile void * addr)
|
||||
static inline int __test_and_set_bit(unsigned long nr, volatile void *addr)
|
||||
{
|
||||
volatile unsigned long *a = addr;
|
||||
int mask, retval;
|
||||
@ -205,7 +205,7 @@ static inline int __test_and_set_bit(int nr, volatile void * addr)
|
||||
return retval;
|
||||
}
|
||||
|
||||
static inline int __test_and_change_bit(int nr, volatile void * addr)
|
||||
static inline int __test_and_change_bit(unsigned long nr, volatile void *addr)
|
||||
{
|
||||
volatile unsigned long *a = addr;
|
||||
int mask, retval;
|
||||
@ -220,12 +220,13 @@ static inline int __test_and_change_bit(int nr, volatile void * addr)
|
||||
/*
|
||||
* This routine doesn't need to be atomic.
|
||||
*/
|
||||
static inline int __constant_test_bit(int nr, const volatile void * addr)
|
||||
static inline int
|
||||
__constant_test_bit(unsigned long nr, const volatile void *addr)
|
||||
{
|
||||
return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
|
||||
}
|
||||
|
||||
static inline int __test_bit(int nr, const volatile void * addr)
|
||||
static inline int __test_bit(unsigned long nr, const volatile void *addr)
|
||||
{
|
||||
int * a = (int *) addr;
|
||||
int mask;
|
||||
|
@ -116,6 +116,7 @@ do { \
|
||||
} while(0)
|
||||
|
||||
#define USE_ELF_CORE_DUMP
|
||||
#define CORE_DUMP_USE_REGSET
|
||||
#define ELF_FDPIC_CORE_EFLAGS EF_FRV_FDPIC
|
||||
#define ELF_EXEC_PAGESIZE 16384
|
||||
|
||||
|
@ -81,8 +81,7 @@ static inline void pci_dma_sync_single(struct pci_dev *hwdev,
|
||||
dma_addr_t dma_handle,
|
||||
size_t size, int direction)
|
||||
{
|
||||
if (direction == PCI_DMA_NONE)
|
||||
BUG();
|
||||
BUG_ON(direction == PCI_DMA_NONE);
|
||||
|
||||
frv_cache_wback_inv((unsigned long)bus_to_virt(dma_handle),
|
||||
(unsigned long)bus_to_virt(dma_handle) + size);
|
||||
@ -99,9 +98,7 @@ static inline void pci_dma_sync_sg(struct pci_dev *hwdev,
|
||||
int nelems, int direction)
|
||||
{
|
||||
int i;
|
||||
|
||||
if (direction == PCI_DMA_NONE)
|
||||
BUG();
|
||||
BUG_ON(direction == PCI_DMA_NONE);
|
||||
|
||||
for (i = 0; i < nelems; i++)
|
||||
frv_cache_wback_inv(sg_dma_address(&sg[i]),
|
||||
|
@ -65,6 +65,8 @@
|
||||
#ifdef __KERNEL__
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
struct task_struct;
|
||||
|
||||
/*
|
||||
* we dedicate GR28 to keeping a pointer to the current exception frame
|
||||
* - gr28 is destroyed on entry to the kernel from userspace
|
||||
@ -73,11 +75,18 @@ register struct pt_regs *__frame asm("gr28");
|
||||
|
||||
#define user_mode(regs) (!((regs)->psr & PSR_S))
|
||||
#define instruction_pointer(regs) ((regs)->pc)
|
||||
#define user_stack_pointer(regs) ((regs)->sp)
|
||||
|
||||
extern unsigned long user_stack(const struct pt_regs *);
|
||||
extern void show_regs(struct pt_regs *);
|
||||
#define profile_pc(regs) ((regs)->pc)
|
||||
#endif
|
||||
|
||||
#define task_pt_regs(task) ((task)->thread.frame0)
|
||||
|
||||
#define arch_has_single_step() (1)
|
||||
extern void user_enable_single_step(struct task_struct *);
|
||||
extern void user_disable_single_step(struct task_struct *);
|
||||
|
||||
#endif /* !__ASSEMBLY__ */
|
||||
#endif /* __KERNEL__ */
|
||||
#endif /* _ASM_PTRACE_H */
|
||||
|
123
arch/frv/include/asm/syscall.h
Normal file
123
arch/frv/include/asm/syscall.h
Normal file
@ -0,0 +1,123 @@
|
||||
/* syscall parameter access functions
|
||||
*
|
||||
* Copyright (C) 2009 Red Hat, Inc. All Rights Reserved.
|
||||
* Written by David Howells (dhowells@redhat.com)
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public Licence
|
||||
* as published by the Free Software Foundation; either version
|
||||
* 2 of the Licence, or (at your option) any later version.
|
||||
*/
|
||||
|
||||
#ifndef _ASM_SYSCALL_H
|
||||
#define _ASM_SYSCALL_H
|
||||
|
||||
#include <linux/err.h>
|
||||
#include <asm/ptrace.h>
|
||||
|
||||
/*
|
||||
* Get the system call number or -1
|
||||
*/
|
||||
static inline long syscall_get_nr(struct task_struct *task,
|
||||
struct pt_regs *regs)
|
||||
{
|
||||
return regs->syscallno;
|
||||
}
|
||||
|
||||
/*
|
||||
* Restore the clobbered GR8 register
|
||||
* (1st syscall arg was overwritten with syscall return or error)
|
||||
*/
|
||||
static inline void syscall_rollback(struct task_struct *task,
|
||||
struct pt_regs *regs)
|
||||
{
|
||||
regs->gr8 = regs->orig_gr8;
|
||||
}
|
||||
|
||||
/*
|
||||
* See if the syscall return value is an error, returning it if it is and 0 if
|
||||
* not
|
||||
*/
|
||||
static inline long syscall_get_error(struct task_struct *task,
|
||||
struct pt_regs *regs)
|
||||
{
|
||||
return IS_ERR_VALUE(regs->gr8) ? regs->gr8 : 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Get the syscall return value
|
||||
*/
|
||||
static inline long syscall_get_return_value(struct task_struct *task,
|
||||
struct pt_regs *regs)
|
||||
{
|
||||
return regs->gr8;
|
||||
}
|
||||
|
||||
/*
|
||||
* Set the syscall return value
|
||||
*/
|
||||
static inline void syscall_set_return_value(struct task_struct *task,
|
||||
struct pt_regs *regs,
|
||||
int error, long val)
|
||||
{
|
||||
if (error)
|
||||
regs->gr8 = -error;
|
||||
else
|
||||
regs->gr8 = val;
|
||||
}
|
||||
|
||||
/*
|
||||
* Retrieve the system call arguments
|
||||
*/
|
||||
static inline void syscall_get_arguments(struct task_struct *task,
|
||||
struct pt_regs *regs,
|
||||
unsigned int i, unsigned int n,
|
||||
unsigned long *args)
|
||||
{
|
||||
/*
|
||||
* Do this simply for now. If we need to start supporting
|
||||
* fetching arguments from arbitrary indices, this will need some
|
||||
* extra logic. Presently there are no in-tree users that depend
|
||||
* on this behaviour.
|
||||
*/
|
||||
BUG_ON(i);
|
||||
|
||||
/* Argument pattern is: GR8, GR9, GR10, GR11, GR12, GR13 */
|
||||
switch (n) {
|
||||
case 6: args[5] = regs->gr13;
|
||||
case 5: args[4] = regs->gr12;
|
||||
case 4: args[3] = regs->gr11;
|
||||
case 3: args[2] = regs->gr10;
|
||||
case 2: args[1] = regs->gr9;
|
||||
case 1: args[0] = regs->gr8;
|
||||
break;
|
||||
default:
|
||||
BUG();
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Alter the system call arguments
|
||||
*/
|
||||
static inline void syscall_set_arguments(struct task_struct *task,
|
||||
struct pt_regs *regs,
|
||||
unsigned int i, unsigned int n,
|
||||
const unsigned long *args)
|
||||
{
|
||||
/* Same note as above applies */
|
||||
BUG_ON(i);
|
||||
|
||||
switch (n) {
|
||||
case 6: regs->gr13 = args[5];
|
||||
case 5: regs->gr12 = args[4];
|
||||
case 4: regs->gr11 = args[3];
|
||||
case 3: regs->gr10 = args[2];
|
||||
case 2: regs->gr9 = args[1];
|
||||
case 1: regs->gr8 = args[0];
|
||||
break;
|
||||
default:
|
||||
BUG();
|
||||
}
|
||||
}
|
||||
|
||||
#endif /* _ASM_SYSCALL_H */
|
@ -109,20 +109,20 @@ register struct thread_info *__current_thread_info asm("gr15");
|
||||
* - other flags in MSW
|
||||
*/
|
||||
#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
|
||||
#define TIF_SIGPENDING 1 /* signal pending */
|
||||
#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
|
||||
#define TIF_SINGLESTEP 3 /* restore singlestep on return to user mode */
|
||||
#define TIF_IRET 4 /* return with iret */
|
||||
#define TIF_NOTIFY_RESUME 1 /* callback before returning to user */
|
||||
#define TIF_SIGPENDING 2 /* signal pending */
|
||||
#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
|
||||
#define TIF_SINGLESTEP 4 /* restore singlestep on return to user mode */
|
||||
#define TIF_RESTORE_SIGMASK 5 /* restore signal mask in do_signal() */
|
||||
#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */
|
||||
#define TIF_MEMDIE 17 /* OOM killer killed process */
|
||||
#define TIF_FREEZE 18 /* freezing for suspend */
|
||||
|
||||
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
|
||||
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
|
||||
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
|
||||
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
|
||||
#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP)
|
||||
#define _TIF_IRET (1 << TIF_IRET)
|
||||
#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
|
||||
#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
|
||||
#define _TIF_FREEZE (1 << TIF_FREEZE)
|
||||
|
@ -886,7 +886,6 @@ system_call:
|
||||
bnc icc0,#0,__syscall_badsys
|
||||
|
||||
ldi @(gr15,#TI_FLAGS),gr4
|
||||
ori gr4,#_TIF_SYSCALL_TRACE,gr4
|
||||
andicc gr4,#_TIF_SYSCALL_TRACE,gr0,icc0
|
||||
bne icc0,#0,__syscall_trace_entry
|
||||
|
||||
@ -1150,11 +1149,10 @@ __entry_work_notifysig:
|
||||
# perform syscall entry tracing
|
||||
__syscall_trace_entry:
|
||||
LEDS 0x6320
|
||||
setlos.p #0,gr8
|
||||
call do_syscall_trace
|
||||
call syscall_trace_entry
|
||||
|
||||
ldi @(gr28,#REG_SYSCALLNO),gr7
|
||||
lddi @(gr28,#REG_GR(8)) ,gr8
|
||||
lddi.p @(gr28,#REG_GR(8)) ,gr8
|
||||
ori gr8,#0,gr7 ; syscall_trace_entry() returned new syscallno
|
||||
lddi @(gr28,#REG_GR(10)),gr10
|
||||
lddi.p @(gr28,#REG_GR(12)),gr12
|
||||
|
||||
@ -1169,11 +1167,10 @@ __syscall_exit_work:
|
||||
beq icc0,#1,__entry_work_pending
|
||||
|
||||
movsg psr,gr23
|
||||
andi gr23,#~PSR_PIL,gr23 ; could let do_syscall_trace() call schedule()
|
||||
andi gr23,#~PSR_PIL,gr23 ; could let syscall_trace_exit() call schedule()
|
||||
movgs gr23,psr
|
||||
|
||||
setlos.p #1,gr8
|
||||
call do_syscall_trace
|
||||
call syscall_trace_exit
|
||||
bra __entry_resume_userspace
|
||||
|
||||
__syscall_badsys:
|
||||
|
@ -19,6 +19,9 @@
|
||||
#include <linux/user.h>
|
||||
#include <linux/security.h>
|
||||
#include <linux/signal.h>
|
||||
#include <linux/regset.h>
|
||||
#include <linux/elf.h>
|
||||
#include <linux/tracehook.h>
|
||||
|
||||
#include <asm/uaccess.h>
|
||||
#include <asm/page.h>
|
||||
@ -32,6 +35,169 @@
|
||||
* in exit.c or in signal.c.
|
||||
*/
|
||||
|
||||
/*
|
||||
* retrieve the contents of FRV userspace general registers
|
||||
*/
|
||||
static int genregs_get(struct task_struct *target,
|
||||
const struct user_regset *regset,
|
||||
unsigned int pos, unsigned int count,
|
||||
void *kbuf, void __user *ubuf)
|
||||
{
|
||||
const struct user_int_regs *iregs = &target->thread.user->i;
|
||||
int ret;
|
||||
|
||||
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
|
||||
iregs, 0, sizeof(*iregs));
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
|
||||
sizeof(*iregs), -1);
|
||||
}
|
||||
|
||||
/*
|
||||
* update the contents of the FRV userspace general registers
|
||||
*/
|
||||
static int genregs_set(struct task_struct *target,
|
||||
const struct user_regset *regset,
|
||||
unsigned int pos, unsigned int count,
|
||||
const void *kbuf, const void __user *ubuf)
|
||||
{
|
||||
struct user_int_regs *iregs = &target->thread.user->i;
|
||||
unsigned int offs_gr0, offs_gr1;
|
||||
int ret;
|
||||
|
||||
/* not allowed to set PSR or __status */
|
||||
if (pos < offsetof(struct user_int_regs, psr) + sizeof(long) &&
|
||||
pos + count > offsetof(struct user_int_regs, psr))
|
||||
return -EIO;
|
||||
|
||||
if (pos < offsetof(struct user_int_regs, __status) + sizeof(long) &&
|
||||
pos + count > offsetof(struct user_int_regs, __status))
|
||||
return -EIO;
|
||||
|
||||
/* set the control regs */
|
||||
offs_gr0 = offsetof(struct user_int_regs, gr[0]);
|
||||
offs_gr1 = offsetof(struct user_int_regs, gr[1]);
|
||||
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
|
||||
iregs, 0, offs_gr0);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
/* skip GR0/TBR */
|
||||
ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
|
||||
offs_gr0, offs_gr1);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
/* set the general regs */
|
||||
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
|
||||
&iregs->gr[1], offs_gr1, sizeof(*iregs));
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
|
||||
sizeof(*iregs), -1);
|
||||
}
|
||||
|
||||
/*
|
||||
* retrieve the contents of FRV userspace FP/Media registers
|
||||
*/
|
||||
static int fpmregs_get(struct task_struct *target,
|
||||
const struct user_regset *regset,
|
||||
unsigned int pos, unsigned int count,
|
||||
void *kbuf, void __user *ubuf)
|
||||
{
|
||||
const struct user_fpmedia_regs *fpregs = &target->thread.user->f;
|
||||
int ret;
|
||||
|
||||
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
|
||||
fpregs, 0, sizeof(*fpregs));
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
|
||||
sizeof(*fpregs), -1);
|
||||
}
|
||||
|
||||
/*
|
||||
* update the contents of the FRV userspace FP/Media registers
|
||||
*/
|
||||
static int fpmregs_set(struct task_struct *target,
|
||||
const struct user_regset *regset,
|
||||
unsigned int pos, unsigned int count,
|
||||
const void *kbuf, const void __user *ubuf)
|
||||
{
|
||||
struct user_fpmedia_regs *fpregs = &target->thread.user->f;
|
||||
int ret;
|
||||
|
||||
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
|
||||
fpregs, 0, sizeof(*fpregs));
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
|
||||
sizeof(*fpregs), -1);
|
||||
}
|
||||
|
||||
/*
|
||||
* determine if the FP/Media registers have actually been used
|
||||
*/
|
||||
static int fpmregs_active(struct task_struct *target,
|
||||
const struct user_regset *regset)
|
||||
{
|
||||
return tsk_used_math(target) ? regset->n : 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Define the register sets available on the FRV under Linux
|
||||
*/
|
||||
enum frv_regset {
|
||||
REGSET_GENERAL,
|
||||
REGSET_FPMEDIA,
|
||||
};
|
||||
|
||||
static const struct user_regset frv_regsets[] = {
|
||||
/*
|
||||
* General register format is:
|
||||
* PSR, ISR, CCR, CCCR, LR, LCR, PC, (STATUS), SYSCALLNO, ORIG_G8
|
||||
* GNER0-1, IACC0, TBR, GR1-63
|
||||
*/
|
||||
[REGSET_GENERAL] = {
|
||||
.core_note_type = NT_PRSTATUS,
|
||||
.n = ELF_NGREG,
|
||||
.size = sizeof(long),
|
||||
.align = sizeof(long),
|
||||
.get = genregs_get,
|
||||
.set = genregs_set,
|
||||
},
|
||||
/*
|
||||
* FPU/Media register format is:
|
||||
* FR0-63, FNER0-1, MSR0-1, ACC0-7, ACCG0-8, FSR
|
||||
*/
|
||||
[REGSET_FPMEDIA] = {
|
||||
.core_note_type = NT_PRFPREG,
|
||||
.n = sizeof(struct user_fpmedia_regs) / sizeof(long),
|
||||
.size = sizeof(long),
|
||||
.align = sizeof(long),
|
||||
.get = fpmregs_get,
|
||||
.set = fpmregs_set,
|
||||
.active = fpmregs_active,
|
||||
},
|
||||
};
|
||||
|
||||
static const struct user_regset_view user_frv_native_view = {
|
||||
.name = "frv",
|
||||
.e_machine = EM_FRV,
|
||||
.regsets = frv_regsets,
|
||||
.n = ARRAY_SIZE(frv_regsets),
|
||||
};
|
||||
|
||||
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
|
||||
{
|
||||
return &user_frv_native_view;
|
||||
}
|
||||
|
||||
/*
|
||||
* Get contents of register REGNO in task TASK.
|
||||
*/
|
||||
@ -68,41 +234,24 @@ static inline int put_reg(struct task_struct *task, int regno,
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* check that an address falls within the bounds of the target process's memory
|
||||
* mappings
|
||||
*/
|
||||
static inline int is_user_addr_valid(struct task_struct *child,
|
||||
unsigned long start, unsigned long len)
|
||||
{
|
||||
#ifdef CONFIG_MMU
|
||||
if (start >= PAGE_OFFSET || len > PAGE_OFFSET - start)
|
||||
return -EIO;
|
||||
return 0;
|
||||
#else
|
||||
struct vm_area_struct *vma;
|
||||
|
||||
vma = find_vma(child->mm, start);
|
||||
if (vma && start >= vma->vm_start && start + len <= vma->vm_end)
|
||||
return 0;
|
||||
|
||||
return -EIO;
|
||||
#endif
|
||||
}
|
||||
|
||||
/*
|
||||
* Called by kernel/ptrace.c when detaching..
|
||||
*
|
||||
* Control h/w single stepping
|
||||
*/
|
||||
void ptrace_disable(struct task_struct *child)
|
||||
void user_enable_single_step(struct task_struct *child)
|
||||
{
|
||||
child->thread.frame0->__status |= REG__STATUS_STEP;
|
||||
}
|
||||
|
||||
void user_disable_single_step(struct task_struct *child)
|
||||
{
|
||||
child->thread.frame0->__status &= ~REG__STATUS_STEP;
|
||||
}
|
||||
|
||||
void ptrace_enable(struct task_struct *child)
|
||||
void ptrace_disable(struct task_struct *child)
|
||||
{
|
||||
child->thread.frame0->__status |= REG__STATUS_STEP;
|
||||
user_disable_single_step(child);
|
||||
}
|
||||
|
||||
long arch_ptrace(struct task_struct *child, long request, long addr, long data)
|
||||
@ -111,15 +260,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
|
||||
int ret;
|
||||
|
||||
switch (request) {
|
||||
/* when I and D space are separate, these will need to be fixed. */
|
||||
case PTRACE_PEEKTEXT: /* read word at location addr. */
|
||||
case PTRACE_PEEKDATA:
|
||||
ret = -EIO;
|
||||
if (is_user_addr_valid(child, addr, sizeof(tmp)) < 0)
|
||||
break;
|
||||
ret = generic_ptrace_peekdata(child, addr, data);
|
||||
break;
|
||||
|
||||
/* read the word at location addr in the USER area. */
|
||||
case PTRACE_PEEKUSR: {
|
||||
tmp = 0;
|
||||
@ -163,15 +303,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
|
||||
break;
|
||||
}
|
||||
|
||||
/* when I and D space are separate, this will have to be fixed. */
|
||||
case PTRACE_POKETEXT: /* write the word at location addr. */
|
||||
case PTRACE_POKEDATA:
|
||||
ret = -EIO;
|
||||
if (is_user_addr_valid(child, addr, sizeof(tmp)) < 0)
|
||||
break;
|
||||
ret = generic_ptrace_pokedata(child, addr, data);
|
||||
break;
|
||||
|
||||
case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
|
||||
ret = -EIO;
|
||||
if ((addr & 3) || addr < 0)
|
||||
@ -179,7 +310,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
|
||||
|
||||
ret = 0;
|
||||
switch (addr >> 2) {
|
||||
case 0 ... PT__END-1:
|
||||
case 0 ... PT__END - 1:
|
||||
ret = put_reg(child, addr >> 2, data);
|
||||
break;
|
||||
|
||||
@ -189,95 +320,29 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
|
||||
}
|
||||
break;
|
||||
|
||||
case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
|
||||
case PTRACE_CONT: /* restart after signal. */
|
||||
ret = -EIO;
|
||||
if (!valid_signal(data))
|
||||
break;
|
||||
if (request == PTRACE_SYSCALL)
|
||||
set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
|
||||
else
|
||||
clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
|
||||
child->exit_code = data;
|
||||
ptrace_disable(child);
|
||||
wake_up_process(child);
|
||||
ret = 0;
|
||||
break;
|
||||
case PTRACE_GETREGS: /* Get all integer regs from the child. */
|
||||
return copy_regset_to_user(child, &user_frv_native_view,
|
||||
REGSET_GENERAL,
|
||||
0, sizeof(child->thread.user->i),
|
||||
(void __user *)data);
|
||||
|
||||
/* make the child exit. Best I can do is send it a sigkill.
|
||||
* perhaps it should be put in the status that it wants to
|
||||
* exit.
|
||||
*/
|
||||
case PTRACE_KILL:
|
||||
ret = 0;
|
||||
if (child->exit_state == EXIT_ZOMBIE) /* already dead */
|
||||
break;
|
||||
child->exit_code = SIGKILL;
|
||||
clear_tsk_thread_flag(child, TIF_SINGLESTEP);
|
||||
ptrace_disable(child);
|
||||
wake_up_process(child);
|
||||
break;
|
||||
case PTRACE_SETREGS: /* Set all integer regs in the child. */
|
||||
return copy_regset_from_user(child, &user_frv_native_view,
|
||||
REGSET_GENERAL,
|
||||
0, sizeof(child->thread.user->i),
|
||||
(const void __user *)data);
|
||||
|
||||
case PTRACE_SINGLESTEP: /* set the trap flag. */
|
||||
ret = -EIO;
|
||||
if (!valid_signal(data))
|
||||
break;
|
||||
clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
|
||||
ptrace_enable(child);
|
||||
child->exit_code = data;
|
||||
wake_up_process(child);
|
||||
ret = 0;
|
||||
break;
|
||||
case PTRACE_GETFPREGS: /* Get the child FP/Media state. */
|
||||
return copy_regset_to_user(child, &user_frv_native_view,
|
||||
REGSET_FPMEDIA,
|
||||
0, sizeof(child->thread.user->f),
|
||||
(void __user *)data);
|
||||
|
||||
case PTRACE_DETACH: /* detach a process that was attached. */
|
||||
ret = ptrace_detach(child, data);
|
||||
break;
|
||||
|
||||
case PTRACE_GETREGS: { /* Get all integer regs from the child. */
|
||||
int i;
|
||||
for (i = 0; i < PT__GPEND; i++) {
|
||||
tmp = get_reg(child, i);
|
||||
if (put_user(tmp, (unsigned long *) data)) {
|
||||
ret = -EFAULT;
|
||||
break;
|
||||
}
|
||||
data += sizeof(long);
|
||||
}
|
||||
ret = 0;
|
||||
break;
|
||||
}
|
||||
|
||||
case PTRACE_SETREGS: { /* Set all integer regs in the child. */
|
||||
int i;
|
||||
for (i = 0; i < PT__GPEND; i++) {
|
||||
if (get_user(tmp, (unsigned long *) data)) {
|
||||
ret = -EFAULT;
|
||||
break;
|
||||
}
|
||||
put_reg(child, i, tmp);
|
||||
data += sizeof(long);
|
||||
}
|
||||
ret = 0;
|
||||
break;
|
||||
}
|
||||
|
||||
case PTRACE_GETFPREGS: { /* Get the child FP/Media state. */
|
||||
ret = 0;
|
||||
if (copy_to_user((void *) data,
|
||||
&child->thread.user->f,
|
||||
sizeof(child->thread.user->f)))
|
||||
ret = -EFAULT;
|
||||
break;
|
||||
}
|
||||
|
||||
case PTRACE_SETFPREGS: { /* Set the child FP/Media state. */
|
||||
ret = 0;
|
||||
if (copy_from_user(&child->thread.user->f,
|
||||
(void *) data,
|
||||
sizeof(child->thread.user->f)))
|
||||
ret = -EFAULT;
|
||||
break;
|
||||
}
|
||||
case PTRACE_SETFPREGS: /* Set the child FP/Media state. */
|
||||
return copy_regset_from_user(child, &user_frv_native_view,
|
||||
REGSET_FPMEDIA,
|
||||
0, sizeof(child->thread.user->f),
|
||||
(const void __user *)data);
|
||||
|
||||
case PTRACE_GETFDPIC:
|
||||
tmp = 0;
|
||||
@ -300,414 +365,36 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
|
||||
break;
|
||||
|
||||
default:
|
||||
ret = -EIO;
|
||||
ret = ptrace_request(child, request, addr, data);
|
||||
break;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
int __nongprelbss kstrace;
|
||||
|
||||
static const struct {
|
||||
const char *name;
|
||||
unsigned argmask;
|
||||
} __syscall_name_table[NR_syscalls] = {
|
||||
[0] = { "restart_syscall" },
|
||||
[1] = { "exit", 0x000001 },
|
||||
[2] = { "fork", 0xffffff },
|
||||
[3] = { "read", 0x000141 },
|
||||
[4] = { "write", 0x000141 },
|
||||
[5] = { "open", 0x000235 },
|
||||
[6] = { "close", 0x000001 },
|
||||
[7] = { "waitpid", 0x000141 },
|
||||
[8] = { "creat", 0x000025 },
|
||||
[9] = { "link", 0x000055 },
|
||||
[10] = { "unlink", 0x000005 },
|
||||
[11] = { "execve", 0x000445 },
|
||||
[12] = { "chdir", 0x000005 },
|
||||
[13] = { "time", 0x000004 },
|
||||
[14] = { "mknod", 0x000325 },
|
||||
[15] = { "chmod", 0x000025 },
|
||||
[16] = { "lchown", 0x000025 },
|
||||
[17] = { "break" },
|
||||
[18] = { "oldstat", 0x000045 },
|
||||
[19] = { "lseek", 0x000131 },
|
||||
[20] = { "getpid", 0xffffff },
|
||||
[21] = { "mount", 0x043555 },
|
||||
[22] = { "umount", 0x000005 },
|
||||
[23] = { "setuid", 0x000001 },
|
||||
[24] = { "getuid", 0xffffff },
|
||||
[25] = { "stime", 0x000004 },
|
||||
[26] = { "ptrace", 0x004413 },
|
||||
[27] = { "alarm", 0x000001 },
|
||||
[28] = { "oldfstat", 0x000041 },
|
||||
[29] = { "pause", 0xffffff },
|
||||
[30] = { "utime", 0x000045 },
|
||||
[31] = { "stty" },
|
||||
[32] = { "gtty" },
|
||||
[33] = { "access", 0x000025 },
|
||||
[34] = { "nice", 0x000001 },
|
||||
[35] = { "ftime" },
|
||||
[36] = { "sync", 0xffffff },
|
||||
[37] = { "kill", 0x000011 },
|
||||
[38] = { "rename", 0x000055 },
|
||||
[39] = { "mkdir", 0x000025 },
|
||||
[40] = { "rmdir", 0x000005 },
|
||||
[41] = { "dup", 0x000001 },
|
||||
[42] = { "pipe", 0x000004 },
|
||||
[43] = { "times", 0x000004 },
|
||||
[44] = { "prof" },
|
||||
[45] = { "brk", 0x000004 },
|
||||
[46] = { "setgid", 0x000001 },
|
||||
[47] = { "getgid", 0xffffff },
|
||||
[48] = { "signal", 0x000041 },
|
||||
[49] = { "geteuid", 0xffffff },
|
||||
[50] = { "getegid", 0xffffff },
|
||||
[51] = { "acct", 0x000005 },
|
||||
[52] = { "umount2", 0x000035 },
|
||||
[53] = { "lock" },
|
||||
[54] = { "ioctl", 0x000331 },
|
||||
[55] = { "fcntl", 0x000331 },
|
||||
[56] = { "mpx" },
|
||||
[57] = { "setpgid", 0x000011 },
|
||||
[58] = { "ulimit" },
|
||||
[60] = { "umask", 0x000002 },
|
||||
[61] = { "chroot", 0x000005 },
|
||||
[62] = { "ustat", 0x000043 },
|
||||
[63] = { "dup2", 0x000011 },
|
||||
[64] = { "getppid", 0xffffff },
|
||||
[65] = { "getpgrp", 0xffffff },
|
||||
[66] = { "setsid", 0xffffff },
|
||||
[67] = { "sigaction" },
|
||||
[68] = { "sgetmask" },
|
||||
[69] = { "ssetmask" },
|
||||
[70] = { "setreuid" },
|
||||
[71] = { "setregid" },
|
||||
[72] = { "sigsuspend" },
|
||||
[73] = { "sigpending" },
|
||||
[74] = { "sethostname" },
|
||||
[75] = { "setrlimit" },
|
||||
[76] = { "getrlimit" },
|
||||
[77] = { "getrusage" },
|
||||
[78] = { "gettimeofday" },
|
||||
[79] = { "settimeofday" },
|
||||
[80] = { "getgroups" },
|
||||
[81] = { "setgroups" },
|
||||
[82] = { "select" },
|
||||
[83] = { "symlink" },
|
||||
[84] = { "oldlstat" },
|
||||
[85] = { "readlink" },
|
||||
[86] = { "uselib" },
|
||||
[87] = { "swapon" },
|
||||
[88] = { "reboot" },
|
||||
[89] = { "readdir" },
|
||||
[91] = { "munmap", 0x000034 },
|
||||
[92] = { "truncate" },
|
||||
[93] = { "ftruncate" },
|
||||
[94] = { "fchmod" },
|
||||
[95] = { "fchown" },
|
||||
[96] = { "getpriority" },
|
||||
[97] = { "setpriority" },
|
||||
[99] = { "statfs" },
|
||||
[100] = { "fstatfs" },
|
||||
[102] = { "socketcall" },
|
||||
[103] = { "syslog" },
|
||||
[104] = { "setitimer" },
|
||||
[105] = { "getitimer" },
|
||||
[106] = { "stat" },
|
||||
[107] = { "lstat" },
|
||||
[108] = { "fstat" },
|
||||
[111] = { "vhangup" },
|
||||
[114] = { "wait4" },
|
||||
[115] = { "swapoff" },
|
||||
[116] = { "sysinfo" },
|
||||
[117] = { "ipc" },
|
||||
[118] = { "fsync" },
|
||||
[119] = { "sigreturn" },
|
||||
[120] = { "clone" },
|
||||
[121] = { "setdomainname" },
|
||||
[122] = { "uname" },
|
||||
[123] = { "modify_ldt" },
|
||||
[123] = { "cacheflush" },
|
||||
[124] = { "adjtimex" },
|
||||
[125] = { "mprotect" },
|
||||
[126] = { "sigprocmask" },
|
||||
[127] = { "create_module" },
|
||||
[128] = { "init_module" },
|
||||
[129] = { "delete_module" },
|
||||
[130] = { "get_kernel_syms" },
|
||||
[131] = { "quotactl" },
|
||||
[132] = { "getpgid" },
|
||||
[133] = { "fchdir" },
|
||||
[134] = { "bdflush" },
|
||||
[135] = { "sysfs" },
|
||||
[136] = { "personality" },
|
||||
[137] = { "afs_syscall" },
|
||||
[138] = { "setfsuid" },
|
||||
[139] = { "setfsgid" },
|
||||
[140] = { "_llseek", 0x014331 },
|
||||
[141] = { "getdents" },
|
||||
[142] = { "_newselect", 0x000141 },
|
||||
[143] = { "flock" },
|
||||
[144] = { "msync" },
|
||||
[145] = { "readv" },
|
||||
[146] = { "writev" },
|
||||
[147] = { "getsid", 0x000001 },
|
||||
[148] = { "fdatasync", 0x000001 },
|
||||
[149] = { "_sysctl", 0x000004 },
|
||||
[150] = { "mlock" },
|
||||
[151] = { "munlock" },
|
||||
[152] = { "mlockall" },
|
||||
[153] = { "munlockall" },
|
||||
[154] = { "sched_setparam" },
|
||||
[155] = { "sched_getparam" },
|
||||
[156] = { "sched_setscheduler" },
|
||||
[157] = { "sched_getscheduler" },
|
||||
[158] = { "sched_yield" },
|
||||
[159] = { "sched_get_priority_max" },
|
||||
[160] = { "sched_get_priority_min" },
|
||||
[161] = { "sched_rr_get_interval" },
|
||||
[162] = { "nanosleep", 0x000044 },
|
||||
[163] = { "mremap" },
|
||||
[164] = { "setresuid" },
|
||||
[165] = { "getresuid" },
|
||||
[166] = { "vm86" },
|
||||
[167] = { "query_module" },
|
||||
[168] = { "poll" },
|
||||
[169] = { "nfsservctl" },
|
||||
[170] = { "setresgid" },
|
||||
[171] = { "getresgid" },
|
||||
[172] = { "prctl", 0x333331 },
|
||||
[173] = { "rt_sigreturn", 0xffffff },
|
||||
[174] = { "rt_sigaction", 0x001441 },
|
||||
[175] = { "rt_sigprocmask", 0x001441 },
|
||||
[176] = { "rt_sigpending", 0x000014 },
|
||||
[177] = { "rt_sigtimedwait", 0x001444 },
|
||||
[178] = { "rt_sigqueueinfo", 0x000411 },
|
||||
[179] = { "rt_sigsuspend", 0x000014 },
|
||||
[180] = { "pread", 0x003341 },
|
||||
[181] = { "pwrite", 0x003341 },
|
||||
[182] = { "chown", 0x000115 },
|
||||
[183] = { "getcwd" },
|
||||
[184] = { "capget" },
|
||||
[185] = { "capset" },
|
||||
[186] = { "sigaltstack" },
|
||||
[187] = { "sendfile" },
|
||||
[188] = { "getpmsg" },
|
||||
[189] = { "putpmsg" },
|
||||
[190] = { "vfork", 0xffffff },
|
||||
[191] = { "ugetrlimit" },
|
||||
[192] = { "mmap2", 0x313314 },
|
||||
[193] = { "truncate64" },
|
||||
[194] = { "ftruncate64" },
|
||||
[195] = { "stat64", 0x000045 },
|
||||
[196] = { "lstat64", 0x000045 },
|
||||
[197] = { "fstat64", 0x000041 },
|
||||
[198] = { "lchown32" },
|
||||
[199] = { "getuid32", 0xffffff },
|
||||
[200] = { "getgid32", 0xffffff },
|
||||
[201] = { "geteuid32", 0xffffff },
|
||||
[202] = { "getegid32", 0xffffff },
|
||||
[203] = { "setreuid32" },
|
||||
[204] = { "setregid32" },
|
||||
[205] = { "getgroups32" },
|
||||
[206] = { "setgroups32" },
|
||||
[207] = { "fchown32" },
|
||||
[208] = { "setresuid32" },
|
||||
[209] = { "getresuid32" },
|
||||
[210] = { "setresgid32" },
|
||||
[211] = { "getresgid32" },
|
||||
[212] = { "chown32" },
|
||||
[213] = { "setuid32" },
|
||||
[214] = { "setgid32" },
|
||||
[215] = { "setfsuid32" },
|
||||
[216] = { "setfsgid32" },
|
||||
[217] = { "pivot_root" },
|
||||
[218] = { "mincore" },
|
||||
[219] = { "madvise" },
|
||||
[220] = { "getdents64" },
|
||||
[221] = { "fcntl64" },
|
||||
[223] = { "security" },
|
||||
[224] = { "gettid" },
|
||||
[225] = { "readahead" },
|
||||
[226] = { "setxattr" },
|
||||
[227] = { "lsetxattr" },
|
||||
[228] = { "fsetxattr" },
|
||||
[229] = { "getxattr" },
|
||||
[230] = { "lgetxattr" },
|
||||
[231] = { "fgetxattr" },
|
||||
[232] = { "listxattr" },
|
||||
[233] = { "llistxattr" },
|
||||
[234] = { "flistxattr" },
|
||||
[235] = { "removexattr" },
|
||||
[236] = { "lremovexattr" },
|
||||
[237] = { "fremovexattr" },
|
||||
[238] = { "tkill" },
|
||||
[239] = { "sendfile64" },
|
||||
[240] = { "futex" },
|
||||
[241] = { "sched_setaffinity" },
|
||||
[242] = { "sched_getaffinity" },
|
||||
[243] = { "set_thread_area" },
|
||||
[244] = { "get_thread_area" },
|
||||
[245] = { "io_setup" },
|
||||
[246] = { "io_destroy" },
|
||||
[247] = { "io_getevents" },
|
||||
[248] = { "io_submit" },
|
||||
[249] = { "io_cancel" },
|
||||
[250] = { "fadvise64" },
|
||||
[252] = { "exit_group", 0x000001 },
|
||||
[253] = { "lookup_dcookie" },
|
||||
[254] = { "epoll_create" },
|
||||
[255] = { "epoll_ctl" },
|
||||
[256] = { "epoll_wait" },
|
||||
[257] = { "remap_file_pages" },
|
||||
[258] = { "set_tid_address" },
|
||||
[259] = { "timer_create" },
|
||||
[260] = { "timer_settime" },
|
||||
[261] = { "timer_gettime" },
|
||||
[262] = { "timer_getoverrun" },
|
||||
[263] = { "timer_delete" },
|
||||
[264] = { "clock_settime" },
|
||||
[265] = { "clock_gettime" },
|
||||
[266] = { "clock_getres" },
|
||||
[267] = { "clock_nanosleep" },
|
||||
[268] = { "statfs64" },
|
||||
[269] = { "fstatfs64" },
|
||||
[270] = { "tgkill" },
|
||||
[271] = { "utimes" },
|
||||
[272] = { "fadvise64_64" },
|
||||
[273] = { "vserver" },
|
||||
[274] = { "mbind" },
|
||||
[275] = { "get_mempolicy" },
|
||||
[276] = { "set_mempolicy" },
|
||||
[277] = { "mq_open" },
|
||||
[278] = { "mq_unlink" },
|
||||
[279] = { "mq_timedsend" },
|
||||
[280] = { "mq_timedreceive" },
|
||||
[281] = { "mq_notify" },
|
||||
[282] = { "mq_getsetattr" },
|
||||
[283] = { "sys_kexec_load" },
|
||||
};
|
||||
|
||||
asmlinkage void do_syscall_trace(int leaving)
|
||||
/*
|
||||
* handle tracing of system call entry
|
||||
* - return the revised system call number or ULONG_MAX to cause ENOSYS
|
||||
*/
|
||||
asmlinkage unsigned long syscall_trace_entry(void)
|
||||
{
|
||||
#if 0
|
||||
unsigned long *argp;
|
||||
const char *name;
|
||||
unsigned argmask;
|
||||
char buffer[16];
|
||||
|
||||
if (!kstrace)
|
||||
return;
|
||||
|
||||
if (!current->mm)
|
||||
return;
|
||||
|
||||
if (__frame->gr7 == __NR_close)
|
||||
return;
|
||||
|
||||
#if 0
|
||||
if (__frame->gr7 != __NR_mmap2 &&
|
||||
__frame->gr7 != __NR_vfork &&
|
||||
__frame->gr7 != __NR_execve &&
|
||||
__frame->gr7 != __NR_exit)
|
||||
return;
|
||||
#endif
|
||||
|
||||
argmask = 0;
|
||||
name = NULL;
|
||||
if (__frame->gr7 < NR_syscalls) {
|
||||
name = __syscall_name_table[__frame->gr7].name;
|
||||
argmask = __syscall_name_table[__frame->gr7].argmask;
|
||||
}
|
||||
if (!name) {
|
||||
sprintf(buffer, "sys_%lx", __frame->gr7);
|
||||
name = buffer;
|
||||
__frame->__status |= REG__STATUS_SYSC_ENTRY;
|
||||
if (tracehook_report_syscall_entry(__frame)) {
|
||||
/* tracing decided this syscall should not happen, so
|
||||
* We'll return a bogus call number to get an ENOSYS
|
||||
* error, but leave the original number in
|
||||
* __frame->syscallno
|
||||
*/
|
||||
return ULONG_MAX;
|
||||
}
|
||||
|
||||
if (!leaving) {
|
||||
if (!argmask) {
|
||||
printk(KERN_CRIT "[%d] %s(%lx,%lx,%lx,%lx,%lx,%lx)\n",
|
||||
current->pid,
|
||||
name,
|
||||
__frame->gr8,
|
||||
__frame->gr9,
|
||||
__frame->gr10,
|
||||
__frame->gr11,
|
||||
__frame->gr12,
|
||||
__frame->gr13);
|
||||
}
|
||||
else if (argmask == 0xffffff) {
|
||||
printk(KERN_CRIT "[%d] %s()\n",
|
||||
current->pid,
|
||||
name);
|
||||
}
|
||||
else {
|
||||
printk(KERN_CRIT "[%d] %s(",
|
||||
current->pid,
|
||||
name);
|
||||
|
||||
argp = &__frame->gr8;
|
||||
|
||||
do {
|
||||
switch (argmask & 0xf) {
|
||||
case 1:
|
||||
printk("%ld", (long) *argp);
|
||||
break;
|
||||
case 2:
|
||||
printk("%lo", *argp);
|
||||
break;
|
||||
case 3:
|
||||
printk("%lx", *argp);
|
||||
break;
|
||||
case 4:
|
||||
printk("%p", (void *) *argp);
|
||||
break;
|
||||
case 5:
|
||||
printk("\"%s\"", (char *) *argp);
|
||||
break;
|
||||
}
|
||||
|
||||
argp++;
|
||||
argmask >>= 4;
|
||||
if (argmask)
|
||||
printk(",");
|
||||
|
||||
} while (argmask);
|
||||
|
||||
printk(")\n");
|
||||
}
|
||||
}
|
||||
else {
|
||||
if ((int)__frame->gr8 > -4096 && (int)__frame->gr8 < 4096)
|
||||
printk(KERN_CRIT "[%d] %s() = %ld\n", current->pid, name, __frame->gr8);
|
||||
else
|
||||
printk(KERN_CRIT "[%d] %s() = %lx\n", current->pid, name, __frame->gr8);
|
||||
}
|
||||
return;
|
||||
#endif
|
||||
|
||||
if (!test_thread_flag(TIF_SYSCALL_TRACE))
|
||||
return;
|
||||
|
||||
if (!(current->ptrace & PT_PTRACED))
|
||||
return;
|
||||
|
||||
/* we need to indicate entry or exit to strace */
|
||||
if (leaving)
|
||||
__frame->__status |= REG__STATUS_SYSC_EXIT;
|
||||
else
|
||||
__frame->__status |= REG__STATUS_SYSC_ENTRY;
|
||||
|
||||
ptrace_notify(SIGTRAP);
|
||||
|
||||
/*
|
||||
* this isn't the same as continuing with a signal, but it will do
|
||||
* for normal use. strace only continues with a signal if the
|
||||
* stopping signal is not SIGTRAP. -brl
|
||||
*/
|
||||
if (current->exit_code) {
|
||||
send_sig(current->exit_code, current, 1);
|
||||
current->exit_code = 0;
|
||||
}
|
||||
return __frame->syscallno;
|
||||
}
|
||||
|
||||
/*
|
||||
* handle tracing of system call exit
|
||||
*/
|
||||
asmlinkage void syscall_trace_exit(void)
|
||||
{
|
||||
__frame->__status |= REG__STATUS_SYSC_EXIT;
|
||||
tracehook_report_syscall_exit(__frame, 0);
|
||||
}
|
||||
|
@ -21,6 +21,7 @@
|
||||
#include <linux/unistd.h>
|
||||
#include <linux/personality.h>
|
||||
#include <linux/freezer.h>
|
||||
#include <linux/tracehook.h>
|
||||
#include <asm/ucontext.h>
|
||||
#include <asm/uaccess.h>
|
||||
#include <asm/cacheflush.h>
|
||||
@ -516,6 +517,9 @@ static void do_signal(void)
|
||||
* clear the TIF_RESTORE_SIGMASK flag */
|
||||
if (test_thread_flag(TIF_RESTORE_SIGMASK))
|
||||
clear_thread_flag(TIF_RESTORE_SIGMASK);
|
||||
|
||||
tracehook_signal_handler(signr, &info, &ka, __frame,
|
||||
test_thread_flag(TIF_SINGLESTEP));
|
||||
}
|
||||
|
||||
return;
|
||||
@ -564,4 +568,10 @@ asmlinkage void do_notify_resume(__u32 thread_info_flags)
|
||||
if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
|
||||
do_signal();
|
||||
|
||||
/* deal with notification on about to resume userspace execution */
|
||||
if (thread_info_flags & _TIF_NOTIFY_RESUME) {
|
||||
clear_thread_flag(TIF_NOTIFY_RESUME);
|
||||
tracehook_notify_resume(__frame);
|
||||
}
|
||||
|
||||
} /* end do_notify_resume() */
|
||||
|
@ -23,8 +23,7 @@ long strncpy_from_user(char *dst, const char __user *src, long count)
|
||||
char *p, ch;
|
||||
long err = -EFAULT;
|
||||
|
||||
if (count < 0)
|
||||
BUG();
|
||||
BUG_ON(count < 0);
|
||||
|
||||
p = dst;
|
||||
|
||||
@ -76,8 +75,7 @@ long strnlen_user(const char __user *src, long count)
|
||||
long err = 0;
|
||||
char ch;
|
||||
|
||||
if (count < 0)
|
||||
BUG();
|
||||
BUG_ON(count < 0);
|
||||
|
||||
#ifndef CONFIG_MMU
|
||||
if ((unsigned long) src < memory_start)
|
||||
|
@ -116,8 +116,7 @@ EXPORT_SYMBOL(dma_free_coherent);
|
||||
dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
|
||||
enum dma_data_direction direction)
|
||||
{
|
||||
if (direction == DMA_NONE)
|
||||
BUG();
|
||||
BUG_ON(direction == DMA_NONE);
|
||||
|
||||
frv_cache_wback_inv((unsigned long) ptr, (unsigned long) ptr + size);
|
||||
|
||||
@ -151,8 +150,7 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
|
||||
frv_cache_wback_inv(sg_dma_address(&sg[i]),
|
||||
sg_dma_address(&sg[i]) + sg_dma_len(&sg[i]));
|
||||
|
||||
if (direction == DMA_NONE)
|
||||
BUG();
|
||||
BUG_ON(direction == DMA_NONE);
|
||||
|
||||
return nents;
|
||||
}
|
||||
|
@ -48,8 +48,7 @@ EXPORT_SYMBOL(dma_free_coherent);
|
||||
dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
|
||||
enum dma_data_direction direction)
|
||||
{
|
||||
if (direction == DMA_NONE)
|
||||
BUG();
|
||||
BUG_ON(direction == DMA_NONE);
|
||||
|
||||
frv_cache_wback_inv((unsigned long) ptr, (unsigned long) ptr + size);
|
||||
|
||||
@ -81,8 +80,7 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
|
||||
void *vaddr;
|
||||
int i;
|
||||
|
||||
if (direction == DMA_NONE)
|
||||
BUG();
|
||||
BUG_ON(direction == DMA_NONE);
|
||||
|
||||
dampr2 = __get_DAMPR(2);
|
||||
|
||||
|
@ -21,9 +21,10 @@ hpsim_irq_noop (unsigned int irq)
|
||||
{
|
||||
}
|
||||
|
||||
static void
|
||||
static int
|
||||
hpsim_set_affinity_noop(unsigned int a, const struct cpumask *b)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct hw_interrupt_type irq_type_hp_sim = {
|
||||
|
@ -371,6 +371,7 @@ struct kvm_vcpu_arch {
|
||||
int last_run_cpu;
|
||||
int vmm_tr_slot;
|
||||
int vm_tr_slot;
|
||||
int sn_rtc_tr_slot;
|
||||
|
||||
#define KVM_MP_STATE_RUNNABLE 0
|
||||
#define KVM_MP_STATE_UNINITIALIZED 1
|
||||
@ -465,6 +466,7 @@ struct kvm_arch {
|
||||
unsigned long vmm_init_rr;
|
||||
|
||||
int online_vcpus;
|
||||
int is_sn2;
|
||||
|
||||
struct kvm_ioapic *vioapic;
|
||||
struct kvm_vm_stat stat;
|
||||
@ -472,6 +474,7 @@ struct kvm_arch {
|
||||
|
||||
struct list_head assigned_dev_head;
|
||||
struct iommu_domain *iommu_domain;
|
||||
int iommu_flags;
|
||||
struct hlist_head irq_ack_notifier_list;
|
||||
|
||||
unsigned long irq_sources_bitmap;
|
||||
@ -578,6 +581,8 @@ struct kvm_vmm_info{
|
||||
kvm_vmm_entry *vmm_entry;
|
||||
kvm_tramp_entry *tramp_entry;
|
||||
unsigned long vmm_ivt;
|
||||
unsigned long patch_mov_ar;
|
||||
unsigned long patch_mov_ar_sn2;
|
||||
};
|
||||
|
||||
int kvm_highest_pending_irq(struct kvm_vcpu *vcpu);
|
||||
@ -585,7 +590,6 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu);
|
||||
int kvm_pal_emul(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
|
||||
void kvm_sal_emul(struct kvm_vcpu *vcpu);
|
||||
|
||||
static inline void kvm_inject_nmi(struct kvm_vcpu *vcpu) {}
|
||||
#endif /* __ASSEMBLY__*/
|
||||
|
||||
#endif
|
||||
|
@ -146,6 +146,8 @@
|
||||
#define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
|
||||
#define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
|
||||
#define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
|
||||
#define PAGE_KERNEL_UC __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX | \
|
||||
_PAGE_MA_UC)
|
||||
|
||||
# ifndef __ASSEMBLY__
|
||||
|
||||
|
@ -636,7 +636,7 @@ void __init acpi_numa_arch_fixup(void)
|
||||
* success: return IRQ number (>=0)
|
||||
* failure: return < 0
|
||||
*/
|
||||
int acpi_register_gsi(u32 gsi, int triggering, int polarity)
|
||||
int acpi_register_gsi(struct device *dev, u32 gsi, int triggering, int polarity)
|
||||
{
|
||||
if (acpi_irq_model == ACPI_IRQ_MODEL_PLATFORM)
|
||||
return gsi;
|
||||
@ -678,7 +678,8 @@ static int __init acpi_parse_fadt(struct acpi_table_header *table)
|
||||
|
||||
fadt = (struct acpi_table_fadt *)fadt_header;
|
||||
|
||||
acpi_register_gsi(fadt->sci_interrupt, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW);
|
||||
acpi_register_gsi(NULL, fadt->sci_interrupt, ACPI_LEVEL_SENSITIVE,
|
||||
ACPI_ACTIVE_LOW);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -329,7 +329,7 @@ unmask_irq (unsigned int irq)
|
||||
}
|
||||
|
||||
|
||||
static void
|
||||
static int
|
||||
iosapic_set_affinity(unsigned int irq, const struct cpumask *mask)
|
||||
{
|
||||
#ifdef CONFIG_SMP
|
||||
@ -343,15 +343,15 @@ iosapic_set_affinity(unsigned int irq, const struct cpumask *mask)
|
||||
|
||||
cpu = cpumask_first_and(cpu_online_mask, mask);
|
||||
if (cpu >= nr_cpu_ids)
|
||||
return;
|
||||
return -1;
|
||||
|
||||
if (irq_prepare_move(irq, cpu))
|
||||
return;
|
||||
return -1;
|
||||
|
||||
dest = cpu_physical_id(cpu);
|
||||
|
||||
if (!iosapic_intr_info[irq].count)
|
||||
return; /* not an IOSAPIC interrupt */
|
||||
return -1; /* not an IOSAPIC interrupt */
|
||||
|
||||
set_irq_affinity_info(irq, dest, redir);
|
||||
|
||||
@ -376,7 +376,9 @@ iosapic_set_affinity(unsigned int irq, const struct cpumask *mask)
|
||||
iosapic_write(iosapic, IOSAPIC_RTE_HIGH(rte_index), high32);
|
||||
iosapic_write(iosapic, IOSAPIC_RTE_LOW(rte_index), low32);
|
||||
}
|
||||
|
||||
#endif
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -610,6 +610,9 @@ static struct irqaction ipi_irqaction = {
|
||||
.name = "IPI"
|
||||
};
|
||||
|
||||
/*
|
||||
* KVM uses this interrupt to force a cpu out of guest mode
|
||||
*/
|
||||
static struct irqaction resched_irqaction = {
|
||||
.handler = dummy_handler,
|
||||
.flags = IRQF_DISABLED,
|
||||
|
@ -12,7 +12,7 @@
|
||||
static struct irq_chip ia64_msi_chip;
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
static void ia64_set_msi_irq_affinity(unsigned int irq,
|
||||
static int ia64_set_msi_irq_affinity(unsigned int irq,
|
||||
const cpumask_t *cpu_mask)
|
||||
{
|
||||
struct msi_msg msg;
|
||||
@ -20,10 +20,10 @@ static void ia64_set_msi_irq_affinity(unsigned int irq,
|
||||
int cpu = first_cpu(*cpu_mask);
|
||||
|
||||
if (!cpu_online(cpu))
|
||||
return;
|
||||
return -1;
|
||||
|
||||
if (irq_prepare_move(irq, cpu))
|
||||
return;
|
||||
return -1;
|
||||
|
||||
read_msi_msg(irq, &msg);
|
||||
|
||||
@ -39,6 +39,8 @@ static void ia64_set_msi_irq_affinity(unsigned int irq,
|
||||
|
||||
write_msi_msg(irq, &msg);
|
||||
cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu));
|
||||
|
||||
return 0;
|
||||
}
|
||||
#endif /* CONFIG_SMP */
|
||||
|
||||
@ -130,17 +132,17 @@ void arch_teardown_msi_irq(unsigned int irq)
|
||||
|
||||
#ifdef CONFIG_DMAR
|
||||
#ifdef CONFIG_SMP
|
||||
static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
|
||||
static int dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
|
||||
{
|
||||
struct irq_cfg *cfg = irq_cfg + irq;
|
||||
struct msi_msg msg;
|
||||
int cpu = cpumask_first(mask);
|
||||
|
||||
if (!cpu_online(cpu))
|
||||
return;
|
||||
return -1;
|
||||
|
||||
if (irq_prepare_move(irq, cpu))
|
||||
return;
|
||||
return -1;
|
||||
|
||||
dmar_msi_read(irq, &msg);
|
||||
|
||||
@ -151,6 +153,8 @@ static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
|
||||
|
||||
dmar_msi_write(irq, &msg);
|
||||
cpumask_copy(irq_desc[irq].affinity, mask);
|
||||
|
||||
return 0;
|
||||
}
|
||||
#endif /* CONFIG_SMP */
|
||||
|
||||
|
@ -23,7 +23,7 @@ if VIRTUALIZATION
|
||||
|
||||
config KVM
|
||||
tristate "Kernel-based Virtual Machine (KVM) support"
|
||||
depends on HAVE_KVM && EXPERIMENTAL
|
||||
depends on HAVE_KVM && MODULES && EXPERIMENTAL
|
||||
# for device assignment:
|
||||
depends on PCI
|
||||
select PREEMPT_NOTIFIERS
|
||||
|
@ -41,6 +41,9 @@
|
||||
#include <asm/div64.h>
|
||||
#include <asm/tlb.h>
|
||||
#include <asm/elf.h>
|
||||
#include <asm/sn/addrs.h>
|
||||
#include <asm/sn/clksupport.h>
|
||||
#include <asm/sn/shub_mmr.h>
|
||||
|
||||
#include "misc.h"
|
||||
#include "vti.h"
|
||||
@ -65,6 +68,16 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
|
||||
{ NULL }
|
||||
};
|
||||
|
||||
static unsigned long kvm_get_itc(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
#if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC)
|
||||
if (vcpu->kvm->arch.is_sn2)
|
||||
return rtc_time();
|
||||
else
|
||||
#endif
|
||||
return ia64_getreg(_IA64_REG_AR_ITC);
|
||||
}
|
||||
|
||||
static void kvm_flush_icache(unsigned long start, unsigned long len)
|
||||
{
|
||||
int l;
|
||||
@ -119,8 +132,7 @@ void kvm_arch_hardware_enable(void *garbage)
|
||||
unsigned long saved_psr;
|
||||
int slot;
|
||||
|
||||
pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base),
|
||||
PAGE_KERNEL));
|
||||
pte = pte_val(mk_pte_phys(__pa(kvm_vmm_base), PAGE_KERNEL));
|
||||
local_irq_save(saved_psr);
|
||||
slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
|
||||
local_irq_restore(saved_psr);
|
||||
@ -283,6 +295,18 @@ static int handle_sal_call(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
|
||||
|
||||
}
|
||||
|
||||
static int __apic_accept_irq(struct kvm_vcpu *vcpu, uint64_t vector)
|
||||
{
|
||||
struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
|
||||
|
||||
if (!test_and_set_bit(vector, &vpd->irr[0])) {
|
||||
vcpu->arch.irq_new_pending = 1;
|
||||
kvm_vcpu_kick(vcpu);
|
||||
return 1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* offset: address offset to IPI space.
|
||||
* value: deliver value.
|
||||
@ -292,20 +316,20 @@ static void vcpu_deliver_ipi(struct kvm_vcpu *vcpu, uint64_t dm,
|
||||
{
|
||||
switch (dm) {
|
||||
case SAPIC_FIXED:
|
||||
kvm_apic_set_irq(vcpu, vector, 0);
|
||||
break;
|
||||
case SAPIC_NMI:
|
||||
kvm_apic_set_irq(vcpu, 2, 0);
|
||||
vector = 2;
|
||||
break;
|
||||
case SAPIC_EXTINT:
|
||||
kvm_apic_set_irq(vcpu, 0, 0);
|
||||
vector = 0;
|
||||
break;
|
||||
case SAPIC_INIT:
|
||||
case SAPIC_PMI:
|
||||
default:
|
||||
printk(KERN_ERR"kvm: Unimplemented Deliver reserved IPI!\n");
|
||||
break;
|
||||
return;
|
||||
}
|
||||
__apic_accept_irq(vcpu, vector);
|
||||
}
|
||||
|
||||
static struct kvm_vcpu *lid_to_vcpu(struct kvm *kvm, unsigned long id,
|
||||
@ -413,6 +437,23 @@ static int handle_switch_rr6(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
|
||||
return 1;
|
||||
}
|
||||
|
||||
static int kvm_sn2_setup_mappings(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
unsigned long pte, rtc_phys_addr, map_addr;
|
||||
int slot;
|
||||
|
||||
map_addr = KVM_VMM_BASE + (1UL << KVM_VMM_SHIFT);
|
||||
rtc_phys_addr = LOCAL_MMR_OFFSET | SH_RTC;
|
||||
pte = pte_val(mk_pte_phys(rtc_phys_addr, PAGE_KERNEL_UC));
|
||||
slot = ia64_itr_entry(0x3, map_addr, pte, PAGE_SHIFT);
|
||||
vcpu->arch.sn_rtc_tr_slot = slot;
|
||||
if (slot < 0) {
|
||||
printk(KERN_ERR "Mayday mayday! RTC mapping failed!\n");
|
||||
slot = 0;
|
||||
}
|
||||
return slot;
|
||||
}
|
||||
|
||||
int kvm_emulate_halt(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
|
||||
@ -426,7 +467,7 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
|
||||
|
||||
if (irqchip_in_kernel(vcpu->kvm)) {
|
||||
|
||||
vcpu_now_itc = ia64_getreg(_IA64_REG_AR_ITC) + vcpu->arch.itc_offset;
|
||||
vcpu_now_itc = kvm_get_itc(vcpu) + vcpu->arch.itc_offset;
|
||||
|
||||
if (time_after(vcpu_now_itc, vpd->itm)) {
|
||||
vcpu->arch.timer_check = 1;
|
||||
@ -447,10 +488,10 @@ int kvm_emulate_halt(struct kvm_vcpu *vcpu)
|
||||
hrtimer_cancel(p_ht);
|
||||
vcpu->arch.ht_active = 0;
|
||||
|
||||
if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests))
|
||||
if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests) ||
|
||||
kvm_cpu_has_pending_timer(vcpu))
|
||||
if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
|
||||
vcpu->arch.mp_state =
|
||||
KVM_MP_STATE_RUNNABLE;
|
||||
vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
|
||||
|
||||
if (vcpu->arch.mp_state != KVM_MP_STATE_RUNNABLE)
|
||||
return -EINTR;
|
||||
@ -551,22 +592,35 @@ static int kvm_insert_vmm_mapping(struct kvm_vcpu *vcpu)
|
||||
if (r < 0)
|
||||
goto out;
|
||||
vcpu->arch.vm_tr_slot = r;
|
||||
|
||||
#if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC)
|
||||
if (kvm->arch.is_sn2) {
|
||||
r = kvm_sn2_setup_mappings(vcpu);
|
||||
if (r < 0)
|
||||
goto out;
|
||||
}
|
||||
#endif
|
||||
|
||||
r = 0;
|
||||
out:
|
||||
return r;
|
||||
|
||||
}
|
||||
|
||||
static void kvm_purge_vmm_mapping(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
|
||||
struct kvm *kvm = vcpu->kvm;
|
||||
ia64_ptr_entry(0x3, vcpu->arch.vmm_tr_slot);
|
||||
ia64_ptr_entry(0x3, vcpu->arch.vm_tr_slot);
|
||||
|
||||
#if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC)
|
||||
if (kvm->arch.is_sn2)
|
||||
ia64_ptr_entry(0x3, vcpu->arch.sn_rtc_tr_slot);
|
||||
#endif
|
||||
}
|
||||
|
||||
static int kvm_vcpu_pre_transition(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
unsigned long psr;
|
||||
int r;
|
||||
int cpu = smp_processor_id();
|
||||
|
||||
if (vcpu->arch.last_run_cpu != cpu ||
|
||||
@ -578,36 +632,27 @@ static int kvm_vcpu_pre_transition(struct kvm_vcpu *vcpu)
|
||||
|
||||
vcpu->arch.host_rr6 = ia64_get_rr(RR6);
|
||||
vti_set_rr6(vcpu->arch.vmm_rr);
|
||||
return kvm_insert_vmm_mapping(vcpu);
|
||||
local_irq_save(psr);
|
||||
r = kvm_insert_vmm_mapping(vcpu);
|
||||
local_irq_restore(psr);
|
||||
return r;
|
||||
}
|
||||
|
||||
static void kvm_vcpu_post_transition(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
kvm_purge_vmm_mapping(vcpu);
|
||||
vti_set_rr6(vcpu->arch.host_rr6);
|
||||
}
|
||||
|
||||
static int vti_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
|
||||
static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
|
||||
{
|
||||
union context *host_ctx, *guest_ctx;
|
||||
int r;
|
||||
|
||||
/*Get host and guest context with guest address space.*/
|
||||
host_ctx = kvm_get_host_context(vcpu);
|
||||
guest_ctx = kvm_get_guest_context(vcpu);
|
||||
|
||||
r = kvm_vcpu_pre_transition(vcpu);
|
||||
if (r < 0)
|
||||
goto out;
|
||||
kvm_vmm_info->tramp_entry(host_ctx, guest_ctx);
|
||||
kvm_vcpu_post_transition(vcpu);
|
||||
r = 0;
|
||||
out:
|
||||
return r;
|
||||
}
|
||||
|
||||
static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
|
||||
{
|
||||
int r;
|
||||
/*
|
||||
* down_read() may sleep and return with interrupts enabled
|
||||
*/
|
||||
down_read(&vcpu->kvm->slots_lock);
|
||||
|
||||
again:
|
||||
if (signal_pending(current)) {
|
||||
@ -616,26 +661,31 @@ again:
|
||||
goto out;
|
||||
}
|
||||
|
||||
/*
|
||||
* down_read() may sleep and return with interrupts enabled
|
||||
*/
|
||||
down_read(&vcpu->kvm->slots_lock);
|
||||
|
||||
preempt_disable();
|
||||
local_irq_disable();
|
||||
|
||||
vcpu->guest_mode = 1;
|
||||
/*Get host and guest context with guest address space.*/
|
||||
host_ctx = kvm_get_host_context(vcpu);
|
||||
guest_ctx = kvm_get_guest_context(vcpu);
|
||||
|
||||
clear_bit(KVM_REQ_KICK, &vcpu->requests);
|
||||
|
||||
r = kvm_vcpu_pre_transition(vcpu);
|
||||
if (r < 0)
|
||||
goto vcpu_run_fail;
|
||||
|
||||
up_read(&vcpu->kvm->slots_lock);
|
||||
kvm_guest_enter();
|
||||
r = vti_vcpu_run(vcpu, kvm_run);
|
||||
if (r < 0) {
|
||||
local_irq_enable();
|
||||
preempt_enable();
|
||||
kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/*
|
||||
* Transition to the guest
|
||||
*/
|
||||
kvm_vmm_info->tramp_entry(host_ctx, guest_ctx);
|
||||
|
||||
kvm_vcpu_post_transition(vcpu);
|
||||
|
||||
vcpu->arch.launched = 1;
|
||||
vcpu->guest_mode = 0;
|
||||
set_bit(KVM_REQ_KICK, &vcpu->requests);
|
||||
local_irq_enable();
|
||||
|
||||
/*
|
||||
@ -646,9 +696,10 @@ again:
|
||||
*/
|
||||
barrier();
|
||||
kvm_guest_exit();
|
||||
up_read(&vcpu->kvm->slots_lock);
|
||||
preempt_enable();
|
||||
|
||||
down_read(&vcpu->kvm->slots_lock);
|
||||
|
||||
r = kvm_handle_exit(kvm_run, vcpu);
|
||||
|
||||
if (r > 0) {
|
||||
@ -657,12 +708,20 @@ again:
|
||||
}
|
||||
|
||||
out:
|
||||
up_read(&vcpu->kvm->slots_lock);
|
||||
if (r > 0) {
|
||||
kvm_resched(vcpu);
|
||||
down_read(&vcpu->kvm->slots_lock);
|
||||
goto again;
|
||||
}
|
||||
|
||||
return r;
|
||||
|
||||
vcpu_run_fail:
|
||||
local_irq_enable();
|
||||
preempt_enable();
|
||||
kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
|
||||
goto out;
|
||||
}
|
||||
|
||||
static void kvm_set_mmio_data(struct kvm_vcpu *vcpu)
|
||||
@ -788,6 +847,9 @@ struct kvm *kvm_arch_create_vm(void)
|
||||
|
||||
if (IS_ERR(kvm))
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
kvm->arch.is_sn2 = ia64_platform_is("sn2");
|
||||
|
||||
kvm_init_vm(kvm);
|
||||
|
||||
kvm->arch.online_vcpus = 0;
|
||||
@ -884,7 +946,7 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
|
||||
RESTORE_REGS(saved_gp);
|
||||
|
||||
vcpu->arch.irq_new_pending = 1;
|
||||
vcpu->arch.itc_offset = regs->saved_itc - ia64_getreg(_IA64_REG_AR_ITC);
|
||||
vcpu->arch.itc_offset = regs->saved_itc - kvm_get_itc(vcpu);
|
||||
set_bit(KVM_REQ_RESUME, &vcpu->requests);
|
||||
|
||||
vcpu_put(vcpu);
|
||||
@ -1043,10 +1105,6 @@ static void kvm_free_vmm_area(void)
|
||||
}
|
||||
}
|
||||
|
||||
static void vti_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
|
||||
{
|
||||
}
|
||||
|
||||
static int vti_init_vpd(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
int i;
|
||||
@ -1165,7 +1223,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
|
||||
regs->cr_iip = PALE_RESET_ENTRY;
|
||||
|
||||
/*Initialize itc offset for vcpus*/
|
||||
itc_offset = 0UL - ia64_getreg(_IA64_REG_AR_ITC);
|
||||
itc_offset = 0UL - kvm_get_itc(vcpu);
|
||||
for (i = 0; i < kvm->arch.online_vcpus; i++) {
|
||||
v = (struct kvm_vcpu *)((char *)vcpu +
|
||||
sizeof(struct kvm_vcpu_data) * i);
|
||||
@ -1237,6 +1295,7 @@ static int vti_vcpu_setup(struct kvm_vcpu *vcpu, int id)
|
||||
|
||||
local_irq_save(psr);
|
||||
r = kvm_insert_vmm_mapping(vcpu);
|
||||
local_irq_restore(psr);
|
||||
if (r)
|
||||
goto fail;
|
||||
r = kvm_vcpu_init(vcpu, vcpu->kvm, id);
|
||||
@ -1254,13 +1313,11 @@ static int vti_vcpu_setup(struct kvm_vcpu *vcpu, int id)
|
||||
goto uninit;
|
||||
|
||||
kvm_purge_vmm_mapping(vcpu);
|
||||
local_irq_restore(psr);
|
||||
|
||||
return 0;
|
||||
uninit:
|
||||
kvm_vcpu_uninit(vcpu);
|
||||
fail:
|
||||
local_irq_restore(psr);
|
||||
return r;
|
||||
}
|
||||
|
||||
@ -1291,7 +1348,6 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
|
||||
vcpu->kvm = kvm;
|
||||
|
||||
cpu = get_cpu();
|
||||
vti_vcpu_load(vcpu, cpu);
|
||||
r = vti_vcpu_setup(vcpu, id);
|
||||
put_cpu();
|
||||
|
||||
@ -1427,7 +1483,7 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
|
||||
}
|
||||
for (i = 0; i < 4; i++)
|
||||
regs->insvc[i] = vcpu->arch.insvc[i];
|
||||
regs->saved_itc = vcpu->arch.itc_offset + ia64_getreg(_IA64_REG_AR_ITC);
|
||||
regs->saved_itc = vcpu->arch.itc_offset + kvm_get_itc(vcpu);
|
||||
SAVE_REGS(xtp);
|
||||
SAVE_REGS(metaphysical_rr0);
|
||||
SAVE_REGS(metaphysical_rr4);
|
||||
@ -1574,6 +1630,7 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
|
||||
|
||||
void kvm_arch_flush_shadow(struct kvm *kvm)
|
||||
{
|
||||
kvm_flush_remote_tlbs(kvm);
|
||||
}
|
||||
|
||||
long kvm_arch_dev_ioctl(struct file *filp,
|
||||
@ -1616,8 +1673,37 @@ out:
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* On SN2, the ITC isn't stable, so copy in fast path code to use the
|
||||
* SN2 RTC, replacing the ITC based default verion.
|
||||
*/
|
||||
static void kvm_patch_vmm(struct kvm_vmm_info *vmm_info,
|
||||
struct module *module)
|
||||
{
|
||||
unsigned long new_ar, new_ar_sn2;
|
||||
unsigned long module_base;
|
||||
|
||||
if (!ia64_platform_is("sn2"))
|
||||
return;
|
||||
|
||||
module_base = (unsigned long)module->module_core;
|
||||
|
||||
new_ar = kvm_vmm_base + vmm_info->patch_mov_ar - module_base;
|
||||
new_ar_sn2 = kvm_vmm_base + vmm_info->patch_mov_ar_sn2 - module_base;
|
||||
|
||||
printk(KERN_INFO "kvm: Patching ITC emulation to use SGI SN2 RTC "
|
||||
"as source\n");
|
||||
|
||||
/*
|
||||
* Copy the SN2 version of mov_ar into place. They are both
|
||||
* the same size, so 6 bundles is sufficient (6 * 0x10).
|
||||
*/
|
||||
memcpy((void *)new_ar, (void *)new_ar_sn2, 0x60);
|
||||
}
|
||||
|
||||
static int kvm_relocate_vmm(struct kvm_vmm_info *vmm_info,
|
||||
struct module *module)
|
||||
struct module *module)
|
||||
{
|
||||
unsigned long module_base;
|
||||
unsigned long vmm_size;
|
||||
@ -1639,6 +1725,7 @@ static int kvm_relocate_vmm(struct kvm_vmm_info *vmm_info,
|
||||
return -EFAULT;
|
||||
|
||||
memcpy((void *)kvm_vmm_base, (void *)module_base, vmm_size);
|
||||
kvm_patch_vmm(vmm_info, module);
|
||||
kvm_flush_icache(kvm_vmm_base, vmm_size);
|
||||
|
||||
/*Recalculate kvm_vmm_info based on new VMM*/
|
||||
@ -1792,38 +1879,24 @@ void kvm_arch_hardware_unsetup(void)
|
||||
{
|
||||
}
|
||||
|
||||
static void vcpu_kick_intr(void *info)
|
||||
{
|
||||
#ifdef DEBUG
|
||||
struct kvm_vcpu *vcpu = (struct kvm_vcpu *)info;
|
||||
printk(KERN_DEBUG"vcpu_kick_intr %p \n", vcpu);
|
||||
#endif
|
||||
}
|
||||
|
||||
void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
int ipi_pcpu = vcpu->cpu;
|
||||
int cpu = get_cpu();
|
||||
int me;
|
||||
int cpu = vcpu->cpu;
|
||||
|
||||
if (waitqueue_active(&vcpu->wq))
|
||||
wake_up_interruptible(&vcpu->wq);
|
||||
|
||||
if (vcpu->guest_mode && cpu != ipi_pcpu)
|
||||
smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0);
|
||||
me = get_cpu();
|
||||
if (cpu != me && (unsigned) cpu < nr_cpu_ids && cpu_online(cpu))
|
||||
if (!test_and_set_bit(KVM_REQ_KICK, &vcpu->requests))
|
||||
smp_send_reschedule(cpu);
|
||||
put_cpu();
|
||||
}
|
||||
|
||||
int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 trig)
|
||||
int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq)
|
||||
{
|
||||
|
||||
struct vpd *vpd = to_host(vcpu->kvm, vcpu->arch.vpd);
|
||||
|
||||
if (!test_and_set_bit(vec, &vpd->irr[0])) {
|
||||
vcpu->arch.irq_new_pending = 1;
|
||||
kvm_vcpu_kick(vcpu);
|
||||
return 1;
|
||||
}
|
||||
return 0;
|
||||
return __apic_accept_irq(vcpu, irq->vector);
|
||||
}
|
||||
|
||||
int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest)
|
||||
@ -1836,20 +1909,18 @@ int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda)
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct kvm_vcpu *kvm_get_lowest_prio_vcpu(struct kvm *kvm, u8 vector,
|
||||
unsigned long bitmap)
|
||||
int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
|
||||
{
|
||||
struct kvm_vcpu *lvcpu = kvm->vcpus[0];
|
||||
int i;
|
||||
return vcpu1->arch.xtp - vcpu2->arch.xtp;
|
||||
}
|
||||
|
||||
for (i = 1; i < kvm->arch.online_vcpus; i++) {
|
||||
if (!kvm->vcpus[i])
|
||||
continue;
|
||||
if (lvcpu->arch.xtp > kvm->vcpus[i]->arch.xtp)
|
||||
lvcpu = kvm->vcpus[i];
|
||||
}
|
||||
|
||||
return lvcpu;
|
||||
int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
|
||||
int short_hand, int dest, int dest_mode)
|
||||
{
|
||||
struct kvm_lapic *target = vcpu->arch.apic;
|
||||
return (dest_mode == 0) ?
|
||||
kvm_apic_match_physical_addr(target, dest) :
|
||||
kvm_apic_match_logical_addr(target, dest);
|
||||
}
|
||||
|
||||
static int find_highest_bits(int *dat)
|
||||
@ -1888,6 +1959,12 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
|
||||
return 0;
|
||||
}
|
||||
|
||||
int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
/* do real check here */
|
||||
return 1;
|
||||
}
|
||||
|
||||
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return vcpu->arch.timer_fired;
|
||||
@ -1918,6 +1995,7 @@ static int vcpu_reset(struct kvm_vcpu *vcpu)
|
||||
long psr;
|
||||
local_irq_save(psr);
|
||||
r = kvm_insert_vmm_mapping(vcpu);
|
||||
local_irq_restore(psr);
|
||||
if (r)
|
||||
goto fail;
|
||||
|
||||
@ -1930,7 +2008,6 @@ static int vcpu_reset(struct kvm_vcpu *vcpu)
|
||||
kvm_purge_vmm_mapping(vcpu);
|
||||
r = 0;
|
||||
fail:
|
||||
local_irq_restore(psr);
|
||||
return r;
|
||||
}
|
||||
|
||||
|
@ -21,6 +21,9 @@
|
||||
|
||||
#include <linux/kvm_host.h>
|
||||
#include <linux/smp.h>
|
||||
#include <asm/sn/addrs.h>
|
||||
#include <asm/sn/clksupport.h>
|
||||
#include <asm/sn/shub_mmr.h>
|
||||
|
||||
#include "vti.h"
|
||||
#include "misc.h"
|
||||
@ -188,12 +191,35 @@ static struct ia64_pal_retval pal_freq_base(struct kvm_vcpu *vcpu)
|
||||
return result;
|
||||
}
|
||||
|
||||
/*
|
||||
* On the SGI SN2, the ITC isn't stable. Emulation backed by the SN2
|
||||
* RTC is used instead. This function patches the ratios from SAL
|
||||
* to match the RTC before providing them to the guest.
|
||||
*/
|
||||
static void sn2_patch_itc_freq_ratios(struct ia64_pal_retval *result)
|
||||
{
|
||||
struct pal_freq_ratio *ratio;
|
||||
unsigned long sal_freq, sal_drift, factor;
|
||||
|
||||
result->status = ia64_sal_freq_base(SAL_FREQ_BASE_PLATFORM,
|
||||
&sal_freq, &sal_drift);
|
||||
ratio = (struct pal_freq_ratio *)&result->v2;
|
||||
factor = ((sal_freq * 3) + (sn_rtc_cycles_per_second / 2)) /
|
||||
sn_rtc_cycles_per_second;
|
||||
|
||||
ratio->num = 3;
|
||||
ratio->den = factor;
|
||||
}
|
||||
|
||||
static struct ia64_pal_retval pal_freq_ratios(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
|
||||
struct ia64_pal_retval result;
|
||||
|
||||
PAL_CALL(result, PAL_FREQ_RATIOS, 0, 0, 0);
|
||||
|
||||
if (vcpu->kvm->arch.is_sn2)
|
||||
sn2_patch_itc_freq_ratios(&result);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
|
@ -20,6 +20,10 @@ void kvm_free_lapic(struct kvm_vcpu *vcpu);
|
||||
|
||||
int kvm_apic_match_physical_addr(struct kvm_lapic *apic, u16 dest);
|
||||
int kvm_apic_match_logical_addr(struct kvm_lapic *apic, u8 mda);
|
||||
int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 trig);
|
||||
int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
|
||||
int short_hand, int dest, int dest_mode);
|
||||
int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2);
|
||||
int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq);
|
||||
#define kvm_apic_present(x) (true)
|
||||
|
||||
#endif
|
||||
|
@ -11,6 +11,7 @@
|
||||
|
||||
#include <asm/asmmacro.h>
|
||||
#include <asm/processor.h>
|
||||
#include <asm/kvm_host.h>
|
||||
|
||||
#include "vti.h"
|
||||
#include "asm-offsets.h"
|
||||
@ -140,6 +141,35 @@ GLOBAL_ENTRY(kvm_asm_mov_from_ar)
|
||||
;;
|
||||
END(kvm_asm_mov_from_ar)
|
||||
|
||||
/*
|
||||
* Special SGI SN2 optimized version of mov_from_ar using the SN2 RTC
|
||||
* clock as it's source for emulating the ITC. This version will be
|
||||
* copied on top of the original version if the host is determined to
|
||||
* be an SN2.
|
||||
*/
|
||||
GLOBAL_ENTRY(kvm_asm_mov_from_ar_sn2)
|
||||
add r18=VMM_VCPU_ITC_OFS_OFFSET, r21
|
||||
movl r19 = (KVM_VMM_BASE+(1<<KVM_VMM_SHIFT))
|
||||
|
||||
add r16=VMM_VCPU_LAST_ITC_OFFSET,r21
|
||||
extr.u r17=r25,6,7
|
||||
mov r24=b0
|
||||
;;
|
||||
ld8 r18=[r18]
|
||||
ld8 r19=[r19]
|
||||
addl r20=@gprel(asm_mov_to_reg),gp
|
||||
;;
|
||||
add r19=r19,r18
|
||||
shladd r17=r17,4,r20
|
||||
;;
|
||||
adds r30=kvm_resume_to_guest-asm_mov_to_reg,r20
|
||||
st8 [r16] = r19
|
||||
mov b0=r17
|
||||
br.sptk.few b0
|
||||
;;
|
||||
END(kvm_asm_mov_from_ar_sn2)
|
||||
|
||||
|
||||
|
||||
// mov r1=rr[r3]
|
||||
GLOBAL_ENTRY(kvm_asm_mov_from_rr)
|
||||
|
@ -652,20 +652,25 @@ void kvm_ia64_handle_break(unsigned long ifa, struct kvm_pt_regs *regs,
|
||||
unsigned long isr, unsigned long iim)
|
||||
{
|
||||
struct kvm_vcpu *v = current_vcpu;
|
||||
long psr;
|
||||
|
||||
if (ia64_psr(regs)->cpl == 0) {
|
||||
/* Allow hypercalls only when cpl = 0. */
|
||||
if (iim == DOMN_PAL_REQUEST) {
|
||||
local_irq_save(psr);
|
||||
set_pal_call_data(v);
|
||||
vmm_transition(v);
|
||||
get_pal_call_result(v);
|
||||
vcpu_increment_iip(v);
|
||||
local_irq_restore(psr);
|
||||
return;
|
||||
} else if (iim == DOMN_SAL_REQUEST) {
|
||||
local_irq_save(psr);
|
||||
set_sal_call_data(v);
|
||||
vmm_transition(v);
|
||||
get_sal_call_result(v);
|
||||
vcpu_increment_iip(v);
|
||||
local_irq_restore(psr);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
@ -788,13 +788,29 @@ void vcpu_set_fpreg(struct kvm_vcpu *vcpu, unsigned long reg,
|
||||
setfpreg(reg, val, regs); /* FIXME: handle NATs later*/
|
||||
}
|
||||
|
||||
/*
|
||||
* The Altix RTC is mapped specially here for the vmm module
|
||||
*/
|
||||
#define SN_RTC_BASE (u64 *)(KVM_VMM_BASE+(1UL<<KVM_VMM_SHIFT))
|
||||
static long kvm_get_itc(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
#if defined(CONFIG_IA64_SGI_SN2) || defined(CONFIG_IA64_GENERIC)
|
||||
struct kvm *kvm = (struct kvm *)KVM_VM_BASE;
|
||||
|
||||
if (kvm->arch.is_sn2)
|
||||
return (*SN_RTC_BASE);
|
||||
else
|
||||
#endif
|
||||
return ia64_getreg(_IA64_REG_AR_ITC);
|
||||
}
|
||||
|
||||
/************************************************************************
|
||||
* lsapic timer
|
||||
***********************************************************************/
|
||||
u64 vcpu_get_itc(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
unsigned long guest_itc;
|
||||
guest_itc = VMX(vcpu, itc_offset) + ia64_getreg(_IA64_REG_AR_ITC);
|
||||
guest_itc = VMX(vcpu, itc_offset) + kvm_get_itc(vcpu);
|
||||
|
||||
if (guest_itc >= VMX(vcpu, last_itc)) {
|
||||
VMX(vcpu, last_itc) = guest_itc;
|
||||
@ -809,7 +825,7 @@ static void vcpu_set_itc(struct kvm_vcpu *vcpu, u64 val)
|
||||
struct kvm_vcpu *v;
|
||||
struct kvm *kvm;
|
||||
int i;
|
||||
long itc_offset = val - ia64_getreg(_IA64_REG_AR_ITC);
|
||||
long itc_offset = val - kvm_get_itc(vcpu);
|
||||
unsigned long vitv = VCPU(vcpu, itv);
|
||||
|
||||
kvm = (struct kvm *)KVM_VM_BASE;
|
||||
|
@ -30,15 +30,19 @@ MODULE_AUTHOR("Intel");
|
||||
MODULE_LICENSE("GPL");
|
||||
|
||||
extern char kvm_ia64_ivt;
|
||||
extern char kvm_asm_mov_from_ar;
|
||||
extern char kvm_asm_mov_from_ar_sn2;
|
||||
extern fpswa_interface_t *vmm_fpswa_interface;
|
||||
|
||||
long vmm_sanity = 1;
|
||||
|
||||
struct kvm_vmm_info vmm_info = {
|
||||
.module = THIS_MODULE,
|
||||
.vmm_entry = vmm_entry,
|
||||
.tramp_entry = vmm_trampoline,
|
||||
.vmm_ivt = (unsigned long)&kvm_ia64_ivt,
|
||||
.module = THIS_MODULE,
|
||||
.vmm_entry = vmm_entry,
|
||||
.tramp_entry = vmm_trampoline,
|
||||
.vmm_ivt = (unsigned long)&kvm_ia64_ivt,
|
||||
.patch_mov_ar = (unsigned long)&kvm_asm_mov_from_ar,
|
||||
.patch_mov_ar_sn2 = (unsigned long)&kvm_asm_mov_from_ar_sn2,
|
||||
};
|
||||
|
||||
static int __init kvm_vmm_init(void)
|
||||
|
@ -95,7 +95,7 @@ GLOBAL_ENTRY(kvm_vmm_panic)
|
||||
;;
|
||||
srlz.i // guarantee that interruption collection is on
|
||||
;;
|
||||
//(p15) ssm psr.i // restore psr.i
|
||||
(p15) ssm psr.i // restore psr.
|
||||
addl r14=@gprel(ia64_leave_hypervisor),gp
|
||||
;;
|
||||
KVM_SAVE_REST
|
||||
@ -249,7 +249,7 @@ ENTRY(kvm_break_fault)
|
||||
;;
|
||||
srlz.i // guarantee that interruption collection is on
|
||||
;;
|
||||
//(p15)ssm psr.i // restore psr.i
|
||||
(p15)ssm psr.i // restore psr.i
|
||||
addl r14=@gprel(ia64_leave_hypervisor),gp
|
||||
;;
|
||||
KVM_SAVE_REST
|
||||
@ -439,7 +439,7 @@ kvm_dispatch_vexirq:
|
||||
;;
|
||||
srlz.i // guarantee that interruption collection is on
|
||||
;;
|
||||
//(p15) ssm psr.i // restore psr.i
|
||||
(p15) ssm psr.i // restore psr.i
|
||||
adds r3=8,r2 // set up second base pointer
|
||||
;;
|
||||
KVM_SAVE_REST
|
||||
@ -819,7 +819,7 @@ ENTRY(kvm_dtlb_miss_dispatch)
|
||||
;;
|
||||
srlz.i // guarantee that interruption collection is on
|
||||
;;
|
||||
//(p15) ssm psr.i // restore psr.i
|
||||
(p15) ssm psr.i // restore psr.i
|
||||
addl r14=@gprel(ia64_leave_hypervisor_prepare),gp
|
||||
;;
|
||||
KVM_SAVE_REST
|
||||
@ -842,7 +842,7 @@ ENTRY(kvm_itlb_miss_dispatch)
|
||||
;;
|
||||
srlz.i // guarantee that interruption collection is on
|
||||
;;
|
||||
//(p15) ssm psr.i // restore psr.i
|
||||
(p15) ssm psr.i // restore psr.i
|
||||
addl r14=@gprel(ia64_leave_hypervisor),gp
|
||||
;;
|
||||
KVM_SAVE_REST
|
||||
@ -871,7 +871,7 @@ ENTRY(kvm_dispatch_reflection)
|
||||
;;
|
||||
srlz.i // guarantee that interruption collection is on
|
||||
;;
|
||||
//(p15) ssm psr.i // restore psr.i
|
||||
(p15) ssm psr.i // restore psr.i
|
||||
addl r14=@gprel(ia64_leave_hypervisor),gp
|
||||
;;
|
||||
KVM_SAVE_REST
|
||||
@ -898,7 +898,7 @@ ENTRY(kvm_dispatch_virtualization_fault)
|
||||
;;
|
||||
srlz.i // guarantee that interruption collection is on
|
||||
;;
|
||||
//(p15) ssm psr.i // restore psr.i
|
||||
(p15) ssm psr.i // restore psr.i
|
||||
addl r14=@gprel(ia64_leave_hypervisor_prepare),gp
|
||||
;;
|
||||
KVM_SAVE_REST
|
||||
@ -920,7 +920,7 @@ ENTRY(kvm_dispatch_interrupt)
|
||||
;;
|
||||
srlz.i
|
||||
;;
|
||||
//(p15) ssm psr.i
|
||||
(p15) ssm psr.i
|
||||
addl r14=@gprel(ia64_leave_hypervisor),gp
|
||||
;;
|
||||
KVM_SAVE_REST
|
||||
@ -1333,7 +1333,7 @@ hostret = r24
|
||||
;;
|
||||
(p7) srlz.i
|
||||
;;
|
||||
//(p6) ssm psr.i
|
||||
(p6) ssm psr.i
|
||||
;;
|
||||
mov rp=rpsave
|
||||
mov ar.pfs=pfssave
|
||||
|
@ -254,7 +254,8 @@ u64 guest_vhpt_lookup(u64 iha, u64 *pte)
|
||||
"(p7) st8 [%2]=r9;;"
|
||||
"ssm psr.ic;;"
|
||||
"srlz.d;;"
|
||||
/* "ssm psr.i;;" Once interrupts in vmm open, need fix*/
|
||||
"ssm psr.i;;"
|
||||
"srlz.d;;"
|
||||
: "=r"(ret) : "r"(iha), "r"(pte):"memory");
|
||||
|
||||
return ret;
|
||||
|
@ -227,7 +227,7 @@ finish_up:
|
||||
return new_irq_info;
|
||||
}
|
||||
|
||||
static void sn_set_affinity_irq(unsigned int irq, const struct cpumask *mask)
|
||||
static int sn_set_affinity_irq(unsigned int irq, const struct cpumask *mask)
|
||||
{
|
||||
struct sn_irq_info *sn_irq_info, *sn_irq_info_safe;
|
||||
nasid_t nasid;
|
||||
@ -239,6 +239,8 @@ static void sn_set_affinity_irq(unsigned int irq, const struct cpumask *mask)
|
||||
list_for_each_entry_safe(sn_irq_info, sn_irq_info_safe,
|
||||
sn_irq_lh[irq], list)
|
||||
(void)sn_retarget_vector(sn_irq_info, nasid, slice);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
|
@ -151,7 +151,7 @@ int sn_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *entry)
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
static void sn_set_msi_irq_affinity(unsigned int irq,
|
||||
static int sn_set_msi_irq_affinity(unsigned int irq,
|
||||
const struct cpumask *cpu_mask)
|
||||
{
|
||||
struct msi_msg msg;
|
||||
@ -168,7 +168,7 @@ static void sn_set_msi_irq_affinity(unsigned int irq,
|
||||
cpu = cpumask_first(cpu_mask);
|
||||
sn_irq_info = sn_msi_info[irq].sn_irq_info;
|
||||
if (sn_irq_info == NULL || sn_irq_info->irq_int_bit >= 0)
|
||||
return;
|
||||
return -1;
|
||||
|
||||
/*
|
||||
* Release XIO resources for the old MSI PCI address
|
||||
@ -189,7 +189,7 @@ static void sn_set_msi_irq_affinity(unsigned int irq,
|
||||
new_irq_info = sn_retarget_vector(sn_irq_info, nasid, slice);
|
||||
sn_msi_info[irq].sn_irq_info = new_irq_info;
|
||||
if (new_irq_info == NULL)
|
||||
return;
|
||||
return -1;
|
||||
|
||||
/*
|
||||
* Map the xio address into bus space
|
||||
@ -206,6 +206,8 @@ static void sn_set_msi_irq_affinity(unsigned int irq,
|
||||
|
||||
write_msi_msg(irq, &msg);
|
||||
cpumask_copy(irq_desc[irq].affinity, cpu_mask);
|
||||
|
||||
return 0;
|
||||
}
|
||||
#endif /* CONFIG_SMP */
|
||||
|
||||
|
@ -59,5 +59,14 @@
|
||||
#define MCFPIT_IMR MCFINTC_IMRL
|
||||
#define MCFPIT_IMR_IBIT (1 << MCFINT_PIT1)
|
||||
|
||||
/*
|
||||
* Reset Controll Unit.
|
||||
*/
|
||||
#define MCF_RCR 0xFC0A0000
|
||||
#define MCF_RSR 0xFC0A0001
|
||||
|
||||
#define MCF_RCR_SWRESET 0x80 /* Software reset bit */
|
||||
#define MCF_RCR_FRCSTOUT 0x40 /* Force external reset */
|
||||
|
||||
/****************************************************************************/
|
||||
#endif /* m520xsim_h */
|
||||
|
@ -41,5 +41,14 @@
|
||||
#define MCFSIM_DACR1 0x50 /* SDRAM base address 1 */
|
||||
#define MCFSIM_DMR1 0x54 /* SDRAM address mask 1 */
|
||||
|
||||
/*
|
||||
* Reset Controll Unit (relative to IPSBAR).
|
||||
*/
|
||||
#define MCF_RCR 0x110000
|
||||
#define MCF_RSR 0x110001
|
||||
|
||||
#define MCF_RCR_SWRESET 0x80 /* Software reset bit */
|
||||
#define MCF_RCR_FRCSTOUT 0x40 /* Force external reset */
|
||||
|
||||
/****************************************************************************/
|
||||
#endif /* m523xsim_h */
|
||||
|
@ -70,5 +70,14 @@
|
||||
#define UART2_ENABLE_MASK 0x3f00
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Reset Controll Unit (relative to IPSBAR).
|
||||
*/
|
||||
#define MCF_RCR 0x110000
|
||||
#define MCF_RSR 0x110001
|
||||
|
||||
#define MCF_RCR_SWRESET 0x80 /* Software reset bit */
|
||||
#define MCF_RCR_FRCSTOUT 0x40 /* Force external reset */
|
||||
|
||||
/****************************************************************************/
|
||||
#endif /* m527xsim_h */
|
||||
|
@ -56,6 +56,14 @@
|
||||
#define MCF5282_INTC0_ICR17 (volatile u8 *) (MCF_IPSBAR + 0x0C51)
|
||||
|
||||
|
||||
/*
|
||||
* Reset Control Unit (relative to IPSBAR).
|
||||
*/
|
||||
#define MCF_RCR 0x110000
|
||||
#define MCF_RSR 0x110001
|
||||
|
||||
#define MCF_RCR_SWRESET 0x80 /* Software reset bit */
|
||||
#define MCF_RCR_FRCSTOUT 0x40 /* Force external reset */
|
||||
|
||||
/*********************************************************************
|
||||
*
|
||||
|
@ -125,6 +125,18 @@
|
||||
#define ACR_CM_OFF_IMP (3<<5)
|
||||
#define ACR_WPROTECT (1<<2)
|
||||
|
||||
/*********************************************************************
|
||||
*
|
||||
* Reset Controller Module
|
||||
*
|
||||
*********************************************************************/
|
||||
|
||||
#define MCF_RCR 0xFC0A0000
|
||||
#define MCF_RSR 0xFC0A0001
|
||||
|
||||
#define MCF_RCR_SWRESET 0x80 /* Software reset bit */
|
||||
#define MCF_RCR_FRCSTOUT 0x40 /* Force external reset */
|
||||
|
||||
/*********************************************************************
|
||||
*
|
||||
* Inter-IC (I2C) Module
|
||||
|
@ -72,10 +72,10 @@ struct thread_struct {
|
||||
unsigned char fpstate[FPSTATESIZE]; /* floating point state */
|
||||
};
|
||||
|
||||
#define INIT_THREAD { \
|
||||
sizeof(init_stack) + (unsigned long) init_stack, 0, \
|
||||
PS_S, __KERNEL_DS, \
|
||||
{0, 0}, 0, {0,}, {0, 0, 0}, {0,}, \
|
||||
#define INIT_THREAD { \
|
||||
.ksp = sizeof(init_stack) + (unsigned long) init_stack, \
|
||||
.sr = PS_S, \
|
||||
.fs = __KERNEL_DS, \
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -1,7 +1,7 @@
|
||||
#ifndef _M68K_SWAB_H
|
||||
#define _M68K_SWAB_H
|
||||
|
||||
#include <asm/types.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/compiler.h>
|
||||
|
||||
#define __SWAB_64_THRU_32__
|
||||
|
@ -203,113 +203,6 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
|
||||
#include <asm-generic/cmpxchg.h>
|
||||
#endif
|
||||
|
||||
#if defined( CONFIG_M68328 ) || defined( CONFIG_M68EZ328 ) || \
|
||||
defined (CONFIG_M68360) || defined( CONFIG_M68VZ328 )
|
||||
#define HARD_RESET_NOW() ({ \
|
||||
local_irq_disable(); \
|
||||
asm(" \
|
||||
moveal #0x10c00000, %a0; \
|
||||
moveb #0, 0xFFFFF300; \
|
||||
moveal 0(%a0), %sp; \
|
||||
moveal 4(%a0), %a0; \
|
||||
jmp (%a0); \
|
||||
"); \
|
||||
})
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_COLDFIRE
|
||||
#if defined(CONFIG_M5272) && defined(CONFIG_NETtel)
|
||||
/*
|
||||
* Need to account for broken early mask of 5272 silicon. So don't
|
||||
* jump through the original start address. Jump strait into the
|
||||
* known start of the FLASH code.
|
||||
*/
|
||||
#define HARD_RESET_NOW() ({ \
|
||||
asm(" \
|
||||
movew #0x2700, %sr; \
|
||||
jmp 0xf0000400; \
|
||||
"); \
|
||||
})
|
||||
#elif defined(CONFIG_NETtel) || \
|
||||
defined(CONFIG_SECUREEDGEMP3) || defined(CONFIG_CLEOPATRA)
|
||||
#define HARD_RESET_NOW() ({ \
|
||||
asm(" \
|
||||
movew #0x2700, %sr; \
|
||||
moveal #0x10000044, %a0; \
|
||||
movel #0xffffffff, (%a0); \
|
||||
moveal #0x10000001, %a0; \
|
||||
moveb #0x00, (%a0); \
|
||||
moveal #0xf0000004, %a0; \
|
||||
moveal (%a0), %a0; \
|
||||
jmp (%a0); \
|
||||
"); \
|
||||
})
|
||||
#elif defined(CONFIG_M5272)
|
||||
/*
|
||||
* Retrieve the boot address in flash using CSBR0 and CSOR0
|
||||
* find the reset vector at flash_address + 4 (e.g. 0x400)
|
||||
* remap it in the flash's current location (e.g. 0xf0000400)
|
||||
* and jump there.
|
||||
*/
|
||||
#define HARD_RESET_NOW() ({ \
|
||||
asm(" \
|
||||
movew #0x2700, %%sr; \
|
||||
move.l %0+0x40,%%d0; \
|
||||
and.l %0+0x44,%%d0; \
|
||||
andi.l #0xfffff000,%%d0; \
|
||||
mov.l %%d0,%%a0; \
|
||||
or.l 4(%%a0),%%d0; \
|
||||
mov.l %%d0,%%a0; \
|
||||
jmp (%%a0);" \
|
||||
: /* No output */ \
|
||||
: "o" (*(char *)MCF_MBAR) ); \
|
||||
})
|
||||
#elif defined(CONFIG_M528x)
|
||||
/*
|
||||
* The MCF528x has a bit (SOFTRST) in memory (Reset Control Register RCR),
|
||||
* that when set, resets the MCF528x.
|
||||
*/
|
||||
#define HARD_RESET_NOW() \
|
||||
({ \
|
||||
unsigned char volatile *reset; \
|
||||
asm("move.w #0x2700, %sr"); \
|
||||
reset = ((volatile unsigned char *)(MCF_IPSBAR + 0x110000)); \
|
||||
while(1) \
|
||||
*reset |= (0x01 << 7);\
|
||||
})
|
||||
#elif defined(CONFIG_M523x)
|
||||
#define HARD_RESET_NOW() ({ \
|
||||
asm(" \
|
||||
movew #0x2700, %sr; \
|
||||
movel #0x01000000, %sp; \
|
||||
moveal #0x40110000, %a0; \
|
||||
moveb #0x80, (%a0); \
|
||||
"); \
|
||||
})
|
||||
#elif defined(CONFIG_M520x)
|
||||
/*
|
||||
* The MCF5208 has a bit (SOFTRST) in memory (Reset Control Register
|
||||
* RCR), that when set, resets the MCF5208.
|
||||
*/
|
||||
#define HARD_RESET_NOW() \
|
||||
({ \
|
||||
unsigned char volatile *reset; \
|
||||
asm("move.w #0x2700, %sr"); \
|
||||
reset = ((volatile unsigned char *)(MCF_IPSBAR + 0xA0000)); \
|
||||
while(1) \
|
||||
*reset |= 0x80; \
|
||||
})
|
||||
#else
|
||||
#define HARD_RESET_NOW() ({ \
|
||||
asm(" \
|
||||
movew #0x2700, %sr; \
|
||||
moveal #0x4, %a0; \
|
||||
moveal (%a0), %a0; \
|
||||
jmp (%a0); \
|
||||
"); \
|
||||
})
|
||||
#endif
|
||||
#endif
|
||||
#define arch_align_stack(x) (x)
|
||||
|
||||
|
||||
|
@ -26,7 +26,6 @@
|
||||
|
||||
#include <linux/sys.h>
|
||||
#include <linux/linkage.h>
|
||||
#include <asm/thread_info.h>
|
||||
#include <asm/errno.h>
|
||||
#include <asm/setup.h>
|
||||
#include <asm/segment.h>
|
||||
|
@ -166,15 +166,13 @@ void __init setup_arch(char **cmdline_p)
|
||||
printk(KERN_INFO "Motorola M5235EVB support (C)2005 Syn-tech Systems, Inc. (Jate Sujjavanich)\n");
|
||||
#endif
|
||||
|
||||
#ifdef DEBUG
|
||||
printk(KERN_DEBUG "KERNEL -> TEXT=0x%06x-0x%06x DATA=0x%06x-0x%06x "
|
||||
"BSS=0x%06x-0x%06x\n", (int) &_stext, (int) &_etext,
|
||||
(int) &_sdata, (int) &_edata,
|
||||
(int) &_sbss, (int) &_ebss);
|
||||
printk(KERN_DEBUG "MEMORY -> ROMFS=0x%06x-0x%06x MEM=0x%06x-0x%06x\n ",
|
||||
(int) &_ebss, (int) memory_start,
|
||||
(int) memory_start, (int) memory_end);
|
||||
#endif
|
||||
pr_debug("KERNEL -> TEXT=0x%06x-0x%06x DATA=0x%06x-0x%06x "
|
||||
"BSS=0x%06x-0x%06x\n", (int) &_stext, (int) &_etext,
|
||||
(int) &_sdata, (int) &_edata,
|
||||
(int) &_sbss, (int) &_ebss);
|
||||
pr_debug("MEMORY -> ROMFS=0x%06x-0x%06x MEM=0x%06x-0x%06x\n ",
|
||||
(int) &_ebss, (int) memory_start,
|
||||
(int) memory_start, (int) memory_end);
|
||||
|
||||
/* Keep a copy of command line */
|
||||
*cmdline_p = &command_line[0];
|
||||
|
@ -126,9 +126,7 @@ void __init mem_init(void)
|
||||
unsigned long start_mem = memory_start; /* DAVIDM - these must start at end of kernel */
|
||||
unsigned long end_mem = memory_end; /* DAVIDM - this must not include kernel stack at top */
|
||||
|
||||
#ifdef DEBUG
|
||||
printk(KERN_DEBUG "Mem_init: start=%lx, end=%lx\n", start_mem, end_mem);
|
||||
#endif
|
||||
pr_debug("Mem_init: start=%lx, end=%lx\n", start_mem, end_mem);
|
||||
|
||||
end_mem &= PAGE_MASK;
|
||||
high_memory = (void *) end_mem;
|
||||
|
@ -12,7 +12,6 @@
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/param.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/io.h>
|
||||
#include <asm/machdep.h>
|
||||
#include <asm/coldfire.h>
|
||||
@ -21,10 +20,6 @@
|
||||
|
||||
/***************************************************************************/
|
||||
|
||||
void coldfire_reset(void);
|
||||
|
||||
/***************************************************************************/
|
||||
|
||||
static struct mcf_platform_uart m5206_uart_platform[] = {
|
||||
{
|
||||
.mapbase = MCF_MBAR + MCFUART_BASE1,
|
||||
@ -109,10 +104,21 @@ void mcf_settimericr(unsigned int timer, unsigned int level)
|
||||
|
||||
/***************************************************************************/
|
||||
|
||||
void m5206_cpu_reset(void)
|
||||
{
|
||||
local_irq_disable();
|
||||
/* Set watchdog to soft reset, and enabled */
|
||||
__raw_writeb(0xc0, MCF_MBAR + MCFSIM_SYPCR);
|
||||
for (;;)
|
||||
/* wait for watchdog to timeout */;
|
||||
}
|
||||
|
||||
/***************************************************************************/
|
||||
|
||||
void __init config_BSP(char *commandp, int size)
|
||||
{
|
||||
mcf_setimr(MCFSIM_IMR_MASKALL);
|
||||
mach_reset = coldfire_reset;
|
||||
mach_reset = m5206_cpu_reset;
|
||||
}
|
||||
|
||||
/***************************************************************************/
|
||||
|
@ -11,7 +11,6 @@
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/param.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/io.h>
|
||||
#include <asm/machdep.h>
|
||||
#include <asm/coldfire.h>
|
||||
@ -21,10 +20,6 @@
|
||||
|
||||
/***************************************************************************/
|
||||
|
||||
void coldfire_reset(void);
|
||||
|
||||
/***************************************************************************/
|
||||
|
||||
static struct mcf_platform_uart m5206e_uart_platform[] = {
|
||||
{
|
||||
.mapbase = MCF_MBAR + MCFUART_BASE1,
|
||||
@ -109,6 +104,17 @@ void mcf_settimericr(unsigned int timer, unsigned int level)
|
||||
|
||||
/***************************************************************************/
|
||||
|
||||
void m5206e_cpu_reset(void)
|
||||
{
|
||||
local_irq_disable();
|
||||
/* Set watchdog to soft reset, and enabled */
|
||||
__raw_writeb(0xc0, MCF_MBAR + MCFSIM_SYPCR);
|
||||
for (;;)
|
||||
/* wait for watchdog to timeout */;
|
||||
}
|
||||
|
||||
/***************************************************************************/
|
||||
|
||||
void __init config_BSP(char *commandp, int size)
|
||||
{
|
||||
mcf_setimr(MCFSIM_IMR_MASKALL);
|
||||
@ -119,7 +125,7 @@ void __init config_BSP(char *commandp, int size)
|
||||
commandp[size-1] = 0;
|
||||
#endif /* CONFIG_NETtel */
|
||||
|
||||
mach_reset = coldfire_reset;
|
||||
mach_reset = m5206e_cpu_reset;
|
||||
}
|
||||
|
||||
/***************************************************************************/
|
||||
|
@ -14,7 +14,6 @@
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/param.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/io.h>
|
||||
#include <asm/machdep.h>
|
||||
#include <asm/coldfire.h>
|
||||
@ -23,10 +22,6 @@
|
||||
|
||||
/***************************************************************************/
|
||||
|
||||
void coldfire_reset(void);
|
||||
|
||||
/***************************************************************************/
|
||||
|
||||
static struct mcf_platform_uart m520x_uart_platform[] = {
|
||||
{
|
||||
.mapbase = MCF_MBAR + MCFUART_BASE1,
|
||||
@ -169,9 +164,17 @@ void mcf_autovector(unsigned int vec)
|
||||
|
||||
/***************************************************************************/
|
||||
|
||||
static void m520x_cpu_reset(void)
|
||||
{
|
||||
local_irq_disable();
|
||||
__raw_writeb(MCF_RCR_SWRESET, MCF_RCR);
|
||||
}
|
||||
|
||||
/***************************************************************************/
|
||||
|
||||
void __init config_BSP(char *commandp, int size)
|
||||
{
|
||||
mach_reset = coldfire_reset;
|
||||
mach_reset = m520x_cpu_reset;
|
||||
m520x_uarts_init();
|
||||
m520x_fec_init();
|
||||
}
|
||||
|
@ -15,7 +15,6 @@
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/param.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/io.h>
|
||||
#include <asm/machdep.h>
|
||||
#include <asm/coldfire.h>
|
||||
@ -24,10 +23,6 @@
|
||||
|
||||
/***************************************************************************/
|
||||
|
||||
void coldfire_reset(void);
|
||||
|
||||
/***************************************************************************/
|
||||
|
||||
static struct mcf_platform_uart m523x_uart_platform[] = {
|
||||
{
|
||||
.mapbase = MCF_MBAR + MCFUART_BASE1,
|
||||
@ -145,13 +140,20 @@ void mcf_autovector(unsigned int vec)
|
||||
{
|
||||
/* Everything is auto-vectored on the 523x */
|
||||
}
|
||||
/***************************************************************************/
|
||||
|
||||
static void m523x_cpu_reset(void)
|
||||
{
|
||||
local_irq_disable();
|
||||
__raw_writeb(MCF_RCR_SWRESET, MCF_IPSBAR + MCF_RCR);
|
||||
}
|
||||
|
||||
/***************************************************************************/
|
||||
|
||||
void __init config_BSP(char *commandp, int size)
|
||||
{
|
||||
mcf_disableall();
|
||||
mach_reset = coldfire_reset;
|
||||
mach_reset = m523x_cpu_reset;
|
||||
m523x_uarts_init();
|
||||
m523x_fec_init();
|
||||
}
|
||||
|
@ -11,7 +11,6 @@
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/param.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/io.h>
|
||||
#include <asm/machdep.h>
|
||||
#include <asm/coldfire.h>
|
||||
@ -20,10 +19,6 @@
|
||||
|
||||
/***************************************************************************/
|
||||
|
||||
void coldfire_reset(void);
|
||||
|
||||
/***************************************************************************/
|
||||
|
||||
static struct mcf_platform_uart m5249_uart_platform[] = {
|
||||
{
|
||||
.mapbase = MCF_MBAR + MCFUART_BASE1,
|
||||
@ -106,10 +101,21 @@ void mcf_settimericr(unsigned int timer, unsigned int level)
|
||||
|
||||
/***************************************************************************/
|
||||
|
||||
void m5249_cpu_reset(void)
|
||||
{
|
||||
local_irq_disable();
|
||||
/* Set watchdog to soft reset, and enabled */
|
||||
__raw_writeb(0xc0, MCF_MBAR + MCFSIM_SYPCR);
|
||||
for (;;)
|
||||
/* wait for watchdog to timeout */;
|
||||
}
|
||||
|
||||
/***************************************************************************/
|
||||
|
||||
void __init config_BSP(char *commandp, int size)
|
||||
{
|
||||
mcf_setimr(MCFSIM_IMR_MASKALL);
|
||||
mach_reset = coldfire_reset;
|
||||
mach_reset = m5249_cpu_reset;
|
||||
}
|
||||
|
||||
/***************************************************************************/
|
||||
|
@ -12,7 +12,6 @@
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/param.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/io.h>
|
||||
#include <asm/machdep.h>
|
||||
#include <asm/coldfire.h>
|
||||
@ -21,8 +20,6 @@
|
||||
|
||||
/***************************************************************************/
|
||||
|
||||
void coldfire_reset(void);
|
||||
|
||||
extern unsigned int mcf_timervector;
|
||||
extern unsigned int mcf_profilevector;
|
||||
extern unsigned int mcf_timerlevel;
|
||||
@ -170,6 +167,19 @@ void mcf_settimericr(int timer, int level)
|
||||
|
||||
/***************************************************************************/
|
||||
|
||||
static void m5272_cpu_reset(void)
|
||||
{
|
||||
local_irq_disable();
|
||||
/* Set watchdog to reset, and enabled */
|
||||
__raw_writew(0, MCF_MBAR + MCFSIM_WIRR);
|
||||
__raw_writew(1, MCF_MBAR + MCFSIM_WRRR);
|
||||
__raw_writew(0, MCF_MBAR + MCFSIM_WCR);
|
||||
for (;;)
|
||||
/* wait for watchdog to timeout */;
|
||||
}
|
||||
|
||||
/***************************************************************************/
|
||||
|
||||
void __init config_BSP(char *commandp, int size)
|
||||
{
|
||||
#if defined (CONFIG_MOD5272)
|
||||
@ -194,7 +204,7 @@ void __init config_BSP(char *commandp, int size)
|
||||
|
||||
mcf_timervector = 69;
|
||||
mcf_profilevector = 70;
|
||||
mach_reset = coldfire_reset;
|
||||
mach_reset = m5272_cpu_reset;
|
||||
}
|
||||
|
||||
/***************************************************************************/
|
||||
|
@ -15,7 +15,6 @@
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/param.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/io.h>
|
||||
#include <asm/machdep.h>
|
||||
#include <asm/coldfire.h>
|
||||
@ -24,10 +23,6 @@
|
||||
|
||||
/***************************************************************************/
|
||||
|
||||
void coldfire_reset(void);
|
||||
|
||||
/***************************************************************************/
|
||||
|
||||
static struct mcf_platform_uart m527x_uart_platform[] = {
|
||||
{
|
||||
.mapbase = MCF_MBAR + MCFUART_BASE1,
|
||||
@ -227,10 +222,18 @@ void mcf_autovector(unsigned int vec)
|
||||
|
||||
/***************************************************************************/
|
||||
|
||||
static void m527x_cpu_reset(void)
|
||||
{
|
||||
local_irq_disable();
|
||||
__raw_writeb(MCF_RCR_SWRESET, MCF_IPSBAR + MCF_RCR);
|
||||
}
|
||||
|
||||
/***************************************************************************/
|
||||
|
||||
void __init config_BSP(char *commandp, int size)
|
||||
{
|
||||
mcf_disableall();
|
||||
mach_reset = coldfire_reset;
|
||||
mach_reset = m527x_cpu_reset;
|
||||
m527x_uarts_init();
|
||||
m527x_fec_init();
|
||||
}
|
||||
|
@ -31,10 +31,6 @@
|
||||
|
||||
/***************************************************************************/
|
||||
|
||||
void coldfire_reset(void);
|
||||
|
||||
/***************************************************************************/
|
||||
|
||||
static struct mcf_platform_uart m528x_uart_platform[] = {
|
||||
{
|
||||
.mapbase = MCF_MBAR + MCFUART_BASE1,
|
||||
@ -171,6 +167,14 @@ void mcf_autovector(unsigned int vec)
|
||||
|
||||
/***************************************************************************/
|
||||
|
||||
static void m528x_cpu_reset(void)
|
||||
{
|
||||
local_irq_disable();
|
||||
__raw_writeb(MCF_RCR_SWRESET, MCF_IPSBAR + MCF_RCR);
|
||||
}
|
||||
|
||||
/***************************************************************************/
|
||||
|
||||
#ifdef CONFIG_WILDFIRE
|
||||
void wildfire_halt(void)
|
||||
{
|
||||
@ -214,6 +218,7 @@ void __init config_BSP(char *commandp, int size)
|
||||
|
||||
static int __init init_BSP(void)
|
||||
{
|
||||
mach_reset = m528x_cpu_reset;
|
||||
m528x_uarts_init();
|
||||
m528x_fec_init();
|
||||
platform_add_devices(m528x_devices, ARRAY_SIZE(m528x_devices));
|
||||
|
@ -12,7 +12,6 @@
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/param.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/io.h>
|
||||
#include <asm/machdep.h>
|
||||
#include <asm/coldfire.h>
|
||||
@ -22,8 +21,6 @@
|
||||
|
||||
/***************************************************************************/
|
||||
|
||||
void coldfire_reset(void);
|
||||
|
||||
extern unsigned int mcf_timervector;
|
||||
extern unsigned int mcf_profilevector;
|
||||
extern unsigned int mcf_timerlevel;
|
||||
@ -119,6 +116,17 @@ void mcf_settimericr(unsigned int timer, unsigned int level)
|
||||
|
||||
/***************************************************************************/
|
||||
|
||||
void m5307_cpu_reset(void)
|
||||
{
|
||||
local_irq_disable();
|
||||
/* Set watchdog to soft reset, and enabled */
|
||||
__raw_writeb(0xc0, MCF_MBAR + MCFSIM_SYPCR);
|
||||
for (;;)
|
||||
/* wait for watchdog to timeout */;
|
||||
}
|
||||
|
||||
/***************************************************************************/
|
||||
|
||||
void __init config_BSP(char *commandp, int size)
|
||||
{
|
||||
mcf_setimr(MCFSIM_IMR_MASKALL);
|
||||
@ -134,7 +142,7 @@ void __init config_BSP(char *commandp, int size)
|
||||
mcf_timerlevel = 6;
|
||||
#endif
|
||||
|
||||
mach_reset = coldfire_reset;
|
||||
mach_reset = m5307_cpu_reset;
|
||||
|
||||
#ifdef CONFIG_BDM_DISABLE
|
||||
/*
|
||||
|
@ -31,8 +31,6 @@
|
||||
|
||||
/***************************************************************************/
|
||||
|
||||
void coldfire_reset(void);
|
||||
|
||||
extern unsigned int mcf_timervector;
|
||||
extern unsigned int mcf_profilevector;
|
||||
extern unsigned int mcf_timerlevel;
|
||||
@ -164,6 +162,14 @@ void mcf_settimericr(unsigned int timer, unsigned int level)
|
||||
|
||||
/***************************************************************************/
|
||||
|
||||
static void m532x_cpu_reset(void)
|
||||
{
|
||||
local_irq_disable();
|
||||
__raw_writeb(MCF_RCR_SWRESET, MCF_RCR);
|
||||
}
|
||||
|
||||
/***************************************************************************/
|
||||
|
||||
void __init config_BSP(char *commandp, int size)
|
||||
{
|
||||
mcf_setimr(MCFSIM_IMR_MASKALL);
|
||||
@ -181,7 +187,7 @@ void __init config_BSP(char *commandp, int size)
|
||||
|
||||
mcf_timervector = 64+32;
|
||||
mcf_profilevector = 64+33;
|
||||
mach_reset = coldfire_reset;
|
||||
mach_reset = m532x_cpu_reset;
|
||||
|
||||
#ifdef CONFIG_BDM_DISABLE
|
||||
/*
|
||||
|
@ -12,7 +12,6 @@
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/param.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/io.h>
|
||||
#include <asm/machdep.h>
|
||||
#include <asm/coldfire.h>
|
||||
@ -21,8 +20,6 @@
|
||||
|
||||
/***************************************************************************/
|
||||
|
||||
void coldfire_reset(void);
|
||||
|
||||
extern unsigned int mcf_timervector;
|
||||
extern unsigned int mcf_profilevector;
|
||||
extern unsigned int mcf_timerlevel;
|
||||
@ -110,6 +107,17 @@ void mcf_settimericr(unsigned int timer, unsigned int level)
|
||||
|
||||
/***************************************************************************/
|
||||
|
||||
void m5407_cpu_reset(void)
|
||||
{
|
||||
local_irq_disable();
|
||||
/* set watchdog to soft reset, and enabled */
|
||||
__raw_writeb(0xc0, MCF_MBAR + MCFSIM_SYPCR);
|
||||
for (;;)
|
||||
/* wait for watchdog to timeout */;
|
||||
}
|
||||
|
||||
/***************************************************************************/
|
||||
|
||||
void __init config_BSP(char *commandp, int size)
|
||||
{
|
||||
mcf_setimr(MCFSIM_IMR_MASKALL);
|
||||
@ -121,7 +129,7 @@ void __init config_BSP(char *commandp, int size)
|
||||
mcf_timerlevel = 6;
|
||||
#endif
|
||||
|
||||
mach_reset = coldfire_reset;
|
||||
mach_reset = m5407_cpu_reset;
|
||||
}
|
||||
|
||||
/***************************************************************************/
|
||||
|
@ -96,10 +96,3 @@ void ack_vector(unsigned int irq)
|
||||
}
|
||||
|
||||
/***************************************************************************/
|
||||
|
||||
void coldfire_reset(void)
|
||||
{
|
||||
HARD_RESET_NOW();
|
||||
}
|
||||
|
||||
/***************************************************************************/
|
||||
|
@ -177,7 +177,7 @@ static void octeon_irq_ciu0_disable(unsigned int irq)
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
static void octeon_irq_ciu0_set_affinity(unsigned int irq, const struct cpumask *dest)
|
||||
static int octeon_irq_ciu0_set_affinity(unsigned int irq, const struct cpumask *dest)
|
||||
{
|
||||
int cpu;
|
||||
int bit = irq - OCTEON_IRQ_WORKQ0; /* Bit 0-63 of EN0 */
|
||||
@ -199,6 +199,8 @@ static void octeon_irq_ciu0_set_affinity(unsigned int irq, const struct cpumask
|
||||
*/
|
||||
cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
|
||||
write_unlock(&octeon_irq_ciu0_rwlock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
@ -292,7 +294,7 @@ static void octeon_irq_ciu1_disable(unsigned int irq)
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
static void octeon_irq_ciu1_set_affinity(unsigned int irq, const struct cpumask *dest)
|
||||
static int octeon_irq_ciu1_set_affinity(unsigned int irq, const struct cpumask *dest)
|
||||
{
|
||||
int cpu;
|
||||
int bit = irq - OCTEON_IRQ_WDOG0; /* Bit 0-63 of EN1 */
|
||||
@ -315,6 +317,8 @@ static void octeon_irq_ciu1_set_affinity(unsigned int irq, const struct cpumask
|
||||
*/
|
||||
cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
|
||||
write_unlock(&octeon_irq_ciu1_rwlock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
|
@ -49,7 +49,7 @@ static inline void smtc_im_ack_irq(unsigned int irq)
|
||||
#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
|
||||
#include <linux/cpumask.h>
|
||||
|
||||
extern void plat_set_irq_affinity(unsigned int irq,
|
||||
extern int plat_set_irq_affinity(unsigned int irq,
|
||||
const struct cpumask *affinity);
|
||||
extern void smtc_forward_irq(unsigned int irq);
|
||||
|
||||
|
@ -155,7 +155,7 @@ static void gic_unmask_irq(unsigned int irq)
|
||||
|
||||
static DEFINE_SPINLOCK(gic_lock);
|
||||
|
||||
static void gic_set_affinity(unsigned int irq, const struct cpumask *cpumask)
|
||||
static int gic_set_affinity(unsigned int irq, const struct cpumask *cpumask)
|
||||
{
|
||||
cpumask_t tmp = CPU_MASK_NONE;
|
||||
unsigned long flags;
|
||||
@ -166,7 +166,7 @@ static void gic_set_affinity(unsigned int irq, const struct cpumask *cpumask)
|
||||
|
||||
cpumask_and(&tmp, cpumask, cpu_online_mask);
|
||||
if (cpus_empty(tmp))
|
||||
return;
|
||||
return -1;
|
||||
|
||||
/* Assumption : cpumask refers to a single CPU */
|
||||
spin_lock_irqsave(&gic_lock, flags);
|
||||
@ -190,6 +190,7 @@ static void gic_set_affinity(unsigned int irq, const struct cpumask *cpumask)
|
||||
cpumask_copy(irq_desc[irq].affinity, cpumask);
|
||||
spin_unlock_irqrestore(&gic_lock, flags);
|
||||
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
|
@ -114,7 +114,7 @@ struct plat_smp_ops msmtc_smp_ops = {
|
||||
*/
|
||||
|
||||
|
||||
void plat_set_irq_affinity(unsigned int irq, const struct cpumask *affinity)
|
||||
int plat_set_irq_affinity(unsigned int irq, const struct cpumask *affinity)
|
||||
{
|
||||
cpumask_t tmask;
|
||||
int cpu = 0;
|
||||
@ -156,5 +156,7 @@ void plat_set_irq_affinity(unsigned int irq, const struct cpumask *affinity)
|
||||
|
||||
/* Do any generic SMTC IRQ affinity setup */
|
||||
smtc_set_irq_affinity(irq, tmask);
|
||||
|
||||
return 0;
|
||||
}
|
||||
#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
|
||||
|
@ -50,7 +50,7 @@ static void enable_bcm1480_irq(unsigned int irq);
|
||||
static void disable_bcm1480_irq(unsigned int irq);
|
||||
static void ack_bcm1480_irq(unsigned int irq);
|
||||
#ifdef CONFIG_SMP
|
||||
static void bcm1480_set_affinity(unsigned int irq, const struct cpumask *mask);
|
||||
static int bcm1480_set_affinity(unsigned int irq, const struct cpumask *mask);
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_PCI
|
||||
@ -109,7 +109,7 @@ void bcm1480_unmask_irq(int cpu, int irq)
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
static void bcm1480_set_affinity(unsigned int irq, const struct cpumask *mask)
|
||||
static int bcm1480_set_affinity(unsigned int irq, const struct cpumask *mask)
|
||||
{
|
||||
int i = 0, old_cpu, cpu, int_on, k;
|
||||
u64 cur_ints;
|
||||
@ -118,7 +118,7 @@ static void bcm1480_set_affinity(unsigned int irq, const struct cpumask *mask)
|
||||
|
||||
if (cpumask_weight(mask) != 1) {
|
||||
printk("attempted to set irq affinity for irq %d to multiple CPUs\n", irq);
|
||||
return;
|
||||
return -1;
|
||||
}
|
||||
i = cpumask_first(mask);
|
||||
|
||||
@ -152,6 +152,8 @@ static void bcm1480_set_affinity(unsigned int irq, const struct cpumask *mask)
|
||||
}
|
||||
}
|
||||
spin_unlock_irqrestore(&bcm1480_imr_lock, flags);
|
||||
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
|
@ -50,7 +50,7 @@ static void enable_sb1250_irq(unsigned int irq);
|
||||
static void disable_sb1250_irq(unsigned int irq);
|
||||
static void ack_sb1250_irq(unsigned int irq);
|
||||
#ifdef CONFIG_SMP
|
||||
static void sb1250_set_affinity(unsigned int irq, const struct cpumask *mask);
|
||||
static int sb1250_set_affinity(unsigned int irq, const struct cpumask *mask);
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_SIBYTE_HAS_LDT
|
||||
@ -103,7 +103,7 @@ void sb1250_unmask_irq(int cpu, int irq)
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
static void sb1250_set_affinity(unsigned int irq, const struct cpumask *mask)
|
||||
static int sb1250_set_affinity(unsigned int irq, const struct cpumask *mask)
|
||||
{
|
||||
int i = 0, old_cpu, cpu, int_on;
|
||||
u64 cur_ints;
|
||||
@ -113,7 +113,7 @@ static void sb1250_set_affinity(unsigned int irq, const struct cpumask *mask)
|
||||
|
||||
if (cpumask_weight(mask) > 1) {
|
||||
printk("attempted to set irq affinity for irq %d to multiple CPUs\n", irq);
|
||||
return;
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* Convert logical CPU to physical CPU */
|
||||
@ -143,6 +143,8 @@ static void sb1250_set_affinity(unsigned int irq, const struct cpumask *mask)
|
||||
R_IMR_INTERRUPT_MASK));
|
||||
}
|
||||
spin_unlock_irqrestore(&sb1250_imr_lock, flags);
|
||||
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
|
@ -8,6 +8,7 @@ mainmenu "Linux Kernel Configuration"
|
||||
config MN10300
|
||||
def_bool y
|
||||
select HAVE_OPROFILE
|
||||
select HAVE_ARCH_TRACEHOOK
|
||||
|
||||
config AM33
|
||||
def_bool y
|
||||
|
@ -34,7 +34,7 @@
|
||||
*/
|
||||
typedef unsigned long elf_greg_t;
|
||||
|
||||
#define ELF_NGREG (sizeof (struct pt_regs) / sizeof(elf_greg_t))
|
||||
#define ELF_NGREG ((sizeof(struct pt_regs) / sizeof(elf_greg_t)) - 1)
|
||||
typedef elf_greg_t elf_gregset_t[ELF_NGREG];
|
||||
|
||||
#define ELF_NFPREG 32
|
||||
@ -76,6 +76,7 @@ do { \
|
||||
} while (0)
|
||||
|
||||
#define USE_ELF_CORE_DUMP
|
||||
#define CORE_DUMP_USE_REGSET
|
||||
#define ELF_EXEC_PAGESIZE 4096
|
||||
|
||||
/*
|
||||
|
@ -143,13 +143,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
|
||||
|
||||
unsigned long get_wchan(struct task_struct *p);
|
||||
|
||||
#define task_pt_regs(task) \
|
||||
({ \
|
||||
struct pt_regs *__regs__; \
|
||||
__regs__ = (struct pt_regs *) (KSTK_TOP(task_stack_page(task)) - 8); \
|
||||
__regs__ - 1; \
|
||||
})
|
||||
|
||||
#define task_pt_regs(task) ((task)->thread.uregs)
|
||||
#define KSTK_EIP(task) (task_pt_regs(task)->pc)
|
||||
#define KSTK_ESP(task) (task_pt_regs(task)->sp)
|
||||
|
||||
|
@ -91,9 +91,17 @@ extern struct pt_regs *__frame; /* current frame pointer */
|
||||
#if defined(__KERNEL__)
|
||||
|
||||
#if !defined(__ASSEMBLY__)
|
||||
struct task_struct;
|
||||
|
||||
#define user_mode(regs) (((regs)->epsw & EPSW_nSL) == EPSW_nSL)
|
||||
#define instruction_pointer(regs) ((regs)->pc)
|
||||
#define user_stack_pointer(regs) ((regs)->sp)
|
||||
extern void show_regs(struct pt_regs *);
|
||||
|
||||
#define arch_has_single_step() (1)
|
||||
extern void user_enable_single_step(struct task_struct *);
|
||||
extern void user_disable_single_step(struct task_struct *);
|
||||
|
||||
#endif /* !__ASSEMBLY */
|
||||
|
||||
#define profile_pc(regs) ((regs)->pc)
|
||||
|
@ -76,7 +76,7 @@ ENTRY(system_call)
|
||||
cmp nr_syscalls,d0
|
||||
bcc syscall_badsys
|
||||
btst _TIF_SYSCALL_TRACE,(TI_flags,a2)
|
||||
bne syscall_trace_entry
|
||||
bne syscall_entry_trace
|
||||
syscall_call:
|
||||
add d0,d0,a1
|
||||
add a1,a1
|
||||
@ -104,11 +104,10 @@ restore_all:
|
||||
syscall_exit_work:
|
||||
btst _TIF_SYSCALL_TRACE,d2
|
||||
beq work_pending
|
||||
__sti # could let do_syscall_trace() call
|
||||
__sti # could let syscall_trace_exit() call
|
||||
# schedule() instead
|
||||
mov fp,d0
|
||||
mov 1,d1
|
||||
call do_syscall_trace[],0 # do_syscall_trace(regs,entryexit)
|
||||
call syscall_trace_exit[],0 # do_syscall_trace(regs)
|
||||
jmp resume_userspace
|
||||
|
||||
ALIGN
|
||||
@ -138,13 +137,11 @@ work_notifysig:
|
||||
jmp resume_userspace
|
||||
|
||||
# perform syscall entry tracing
|
||||
syscall_trace_entry:
|
||||
syscall_entry_trace:
|
||||
mov -ENOSYS,d0
|
||||
mov d0,(REG_D0,fp)
|
||||
mov fp,d0
|
||||
clr d1
|
||||
call do_syscall_trace[],0
|
||||
mov (REG_ORIG_D0,fp),d0
|
||||
call syscall_trace_entry[],0 # returns the syscall number to actually use
|
||||
mov (REG_D1,fp),d1
|
||||
cmp nr_syscalls,d0
|
||||
bcs syscall_call
|
||||
|
@ -17,6 +17,9 @@
|
||||
#include <linux/errno.h>
|
||||
#include <linux/ptrace.h>
|
||||
#include <linux/user.h>
|
||||
#include <linux/regset.h>
|
||||
#include <linux/elf.h>
|
||||
#include <linux/tracehook.h>
|
||||
#include <asm/uaccess.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/system.h>
|
||||
@ -64,12 +67,6 @@ static inline int get_stack_long(struct task_struct *task, int offset)
|
||||
((unsigned long) task->thread.uregs + offset);
|
||||
}
|
||||
|
||||
/*
|
||||
* this routine will put a word on the processes privileged stack.
|
||||
* the offset is how far from the base addr as stored in the TSS.
|
||||
* this routine assumes that all the privileged stacks are in our
|
||||
* data space.
|
||||
*/
|
||||
static inline
|
||||
int put_stack_long(struct task_struct *task, int offset, unsigned long data)
|
||||
{
|
||||
@ -80,44 +77,191 @@ int put_stack_long(struct task_struct *task, int offset, unsigned long data)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline unsigned long get_fpregs(struct fpu_state_struct *buf,
|
||||
struct task_struct *tsk)
|
||||
/*
|
||||
* retrieve the contents of MN10300 userspace general registers
|
||||
*/
|
||||
static int genregs_get(struct task_struct *target,
|
||||
const struct user_regset *regset,
|
||||
unsigned int pos, unsigned int count,
|
||||
void *kbuf, void __user *ubuf)
|
||||
{
|
||||
return __copy_to_user(buf, &tsk->thread.fpu_state,
|
||||
sizeof(struct fpu_state_struct));
|
||||
}
|
||||
const struct pt_regs *regs = task_pt_regs(target);
|
||||
int ret;
|
||||
|
||||
static inline unsigned long set_fpregs(struct task_struct *tsk,
|
||||
struct fpu_state_struct *buf)
|
||||
{
|
||||
return __copy_from_user(&tsk->thread.fpu_state, buf,
|
||||
sizeof(struct fpu_state_struct));
|
||||
}
|
||||
/* we need to skip regs->next */
|
||||
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
|
||||
regs, 0, PT_ORIG_D0 * sizeof(long));
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
static inline void fpsave_init(struct task_struct *task)
|
||||
{
|
||||
memset(&task->thread.fpu_state, 0, sizeof(struct fpu_state_struct));
|
||||
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
|
||||
®s->orig_d0, PT_ORIG_D0 * sizeof(long),
|
||||
NR_PTREGS * sizeof(long));
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
|
||||
NR_PTREGS * sizeof(long), -1);
|
||||
}
|
||||
|
||||
/*
|
||||
* make sure the single step bit is not set
|
||||
* update the contents of the MN10300 userspace general registers
|
||||
*/
|
||||
void ptrace_disable(struct task_struct *child)
|
||||
static int genregs_set(struct task_struct *target,
|
||||
const struct user_regset *regset,
|
||||
unsigned int pos, unsigned int count,
|
||||
const void *kbuf, const void __user *ubuf)
|
||||
{
|
||||
#ifndef CONFIG_MN10300_USING_JTAG
|
||||
struct user *dummy = NULL;
|
||||
long tmp;
|
||||
struct pt_regs *regs = task_pt_regs(target);
|
||||
unsigned long tmp;
|
||||
int ret;
|
||||
|
||||
tmp = get_stack_long(child, (unsigned long) &dummy->regs.epsw);
|
||||
tmp &= ~EPSW_T;
|
||||
put_stack_long(child, (unsigned long) &dummy->regs.epsw, tmp);
|
||||
#endif
|
||||
/* we need to skip regs->next */
|
||||
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
|
||||
regs, 0, PT_ORIG_D0 * sizeof(long));
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
|
||||
®s->orig_d0, PT_ORIG_D0 * sizeof(long),
|
||||
PT_EPSW * sizeof(long));
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
/* we need to mask off changes to EPSW */
|
||||
tmp = regs->epsw;
|
||||
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
|
||||
&tmp, PT_EPSW * sizeof(long),
|
||||
PT_PC * sizeof(long));
|
||||
tmp &= EPSW_FLAG_V | EPSW_FLAG_C | EPSW_FLAG_N | EPSW_FLAG_Z;
|
||||
tmp |= regs->epsw & ~(EPSW_FLAG_V | EPSW_FLAG_C | EPSW_FLAG_N |
|
||||
EPSW_FLAG_Z);
|
||||
regs->epsw = tmp;
|
||||
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
/* and finally load the PC */
|
||||
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
|
||||
®s->pc, PT_PC * sizeof(long),
|
||||
NR_PTREGS * sizeof(long));
|
||||
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
|
||||
NR_PTREGS * sizeof(long), -1);
|
||||
}
|
||||
|
||||
/*
|
||||
* set the single step bit
|
||||
* retrieve the contents of MN10300 userspace FPU registers
|
||||
*/
|
||||
void ptrace_enable(struct task_struct *child)
|
||||
static int fpuregs_get(struct task_struct *target,
|
||||
const struct user_regset *regset,
|
||||
unsigned int pos, unsigned int count,
|
||||
void *kbuf, void __user *ubuf)
|
||||
{
|
||||
const struct fpu_state_struct *fpregs = &target->thread.fpu_state;
|
||||
int ret;
|
||||
|
||||
unlazy_fpu(target);
|
||||
|
||||
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
|
||||
fpregs, 0, sizeof(*fpregs));
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
|
||||
sizeof(*fpregs), -1);
|
||||
}
|
||||
|
||||
/*
|
||||
* update the contents of the MN10300 userspace FPU registers
|
||||
*/
|
||||
static int fpuregs_set(struct task_struct *target,
|
||||
const struct user_regset *regset,
|
||||
unsigned int pos, unsigned int count,
|
||||
const void *kbuf, const void __user *ubuf)
|
||||
{
|
||||
struct fpu_state_struct fpu_state = target->thread.fpu_state;
|
||||
int ret;
|
||||
|
||||
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
|
||||
&fpu_state, 0, sizeof(fpu_state));
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
fpu_kill_state(target);
|
||||
target->thread.fpu_state = fpu_state;
|
||||
set_using_fpu(target);
|
||||
|
||||
return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
|
||||
sizeof(fpu_state), -1);
|
||||
}
|
||||
|
||||
/*
|
||||
* determine if the FPU registers have actually been used
|
||||
*/
|
||||
static int fpuregs_active(struct task_struct *target,
|
||||
const struct user_regset *regset)
|
||||
{
|
||||
return is_using_fpu(target) ? regset->n : 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Define the register sets available on the MN10300 under Linux
|
||||
*/
|
||||
enum mn10300_regset {
|
||||
REGSET_GENERAL,
|
||||
REGSET_FPU,
|
||||
};
|
||||
|
||||
static const struct user_regset mn10300_regsets[] = {
|
||||
/*
|
||||
* General register format is:
|
||||
* A3, A2, D3, D2, MCVF, MCRL, MCRH, MDRQ
|
||||
* E1, E0, E7...E2, SP, LAR, LIR, MDR
|
||||
* A1, A0, D1, D0, ORIG_D0, EPSW, PC
|
||||
*/
|
||||
[REGSET_GENERAL] = {
|
||||
.core_note_type = NT_PRSTATUS,
|
||||
.n = ELF_NGREG,
|
||||
.size = sizeof(long),
|
||||
.align = sizeof(long),
|
||||
.get = genregs_get,
|
||||
.set = genregs_set,
|
||||
},
|
||||
/*
|
||||
* FPU register format is:
|
||||
* FS0-31, FPCR
|
||||
*/
|
||||
[REGSET_FPU] = {
|
||||
.core_note_type = NT_PRFPREG,
|
||||
.n = sizeof(struct fpu_state_struct) / sizeof(long),
|
||||
.size = sizeof(long),
|
||||
.align = sizeof(long),
|
||||
.get = fpuregs_get,
|
||||
.set = fpuregs_set,
|
||||
.active = fpuregs_active,
|
||||
},
|
||||
};
|
||||
|
||||
static const struct user_regset_view user_mn10300_native_view = {
|
||||
.name = "mn10300",
|
||||
.e_machine = EM_MN10300,
|
||||
.regsets = mn10300_regsets,
|
||||
.n = ARRAY_SIZE(mn10300_regsets),
|
||||
};
|
||||
|
||||
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
|
||||
{
|
||||
return &user_mn10300_native_view;
|
||||
}
|
||||
|
||||
/*
|
||||
* set the single-step bit
|
||||
*/
|
||||
void user_enable_single_step(struct task_struct *child)
|
||||
{
|
||||
#ifndef CONFIG_MN10300_USING_JTAG
|
||||
struct user *dummy = NULL;
|
||||
@ -129,45 +273,37 @@ void ptrace_enable(struct task_struct *child)
|
||||
#endif
|
||||
}
|
||||
|
||||
/*
|
||||
* make sure the single-step bit is not set
|
||||
*/
|
||||
void user_disable_single_step(struct task_struct *child)
|
||||
{
|
||||
#ifndef CONFIG_MN10300_USING_JTAG
|
||||
struct user *dummy = NULL;
|
||||
long tmp;
|
||||
|
||||
tmp = get_stack_long(child, (unsigned long) &dummy->regs.epsw);
|
||||
tmp &= ~EPSW_T;
|
||||
put_stack_long(child, (unsigned long) &dummy->regs.epsw, tmp);
|
||||
#endif
|
||||
}
|
||||
|
||||
void ptrace_disable(struct task_struct *child)
|
||||
{
|
||||
user_disable_single_step(child);
|
||||
}
|
||||
|
||||
/*
|
||||
* handle the arch-specific side of process tracing
|
||||
*/
|
||||
long arch_ptrace(struct task_struct *child, long request, long addr, long data)
|
||||
{
|
||||
struct fpu_state_struct fpu_state;
|
||||
int i, ret;
|
||||
unsigned long tmp;
|
||||
int ret;
|
||||
|
||||
switch (request) {
|
||||
/* read the word at location addr. */
|
||||
case PTRACE_PEEKTEXT: {
|
||||
unsigned long tmp;
|
||||
int copied;
|
||||
|
||||
copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
|
||||
ret = -EIO;
|
||||
if (copied != sizeof(tmp))
|
||||
break;
|
||||
ret = put_user(tmp, (unsigned long *) data);
|
||||
break;
|
||||
}
|
||||
|
||||
/* read the word at location addr. */
|
||||
case PTRACE_PEEKDATA: {
|
||||
unsigned long tmp;
|
||||
int copied;
|
||||
|
||||
copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
|
||||
ret = -EIO;
|
||||
if (copied != sizeof(tmp))
|
||||
break;
|
||||
ret = put_user(tmp, (unsigned long *) data);
|
||||
break;
|
||||
}
|
||||
|
||||
/* read the word at location addr in the USER area. */
|
||||
case PTRACE_PEEKUSR: {
|
||||
unsigned long tmp;
|
||||
|
||||
case PTRACE_PEEKUSR:
|
||||
ret = -EIO;
|
||||
if ((addr & 3) || addr < 0 ||
|
||||
addr > sizeof(struct user) - 3)
|
||||
@ -179,17 +315,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
|
||||
ptrace_regid_to_frame[addr]);
|
||||
ret = put_user(tmp, (unsigned long *) data);
|
||||
break;
|
||||
}
|
||||
|
||||
/* write the word at location addr. */
|
||||
case PTRACE_POKETEXT:
|
||||
case PTRACE_POKEDATA:
|
||||
if (access_process_vm(child, addr, &data, sizeof(data), 1) ==
|
||||
sizeof(data))
|
||||
ret = 0;
|
||||
else
|
||||
ret = -EIO;
|
||||
break;
|
||||
|
||||
/* write the word at location addr in the USER area */
|
||||
case PTRACE_POKEUSR:
|
||||
@ -204,132 +329,32 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
|
||||
data);
|
||||
break;
|
||||
|
||||
/* continue and stop at next (return from) syscall */
|
||||
case PTRACE_SYSCALL:
|
||||
/* restart after signal. */
|
||||
case PTRACE_CONT:
|
||||
ret = -EIO;
|
||||
if ((unsigned long) data > _NSIG)
|
||||
break;
|
||||
if (request == PTRACE_SYSCALL)
|
||||
set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
|
||||
else
|
||||
clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
|
||||
child->exit_code = data;
|
||||
ptrace_disable(child);
|
||||
wake_up_process(child);
|
||||
ret = 0;
|
||||
break;
|
||||
case PTRACE_GETREGS: /* Get all integer regs from the child. */
|
||||
return copy_regset_to_user(child, &user_mn10300_native_view,
|
||||
REGSET_GENERAL,
|
||||
0, NR_PTREGS * sizeof(long),
|
||||
(void __user *)data);
|
||||
|
||||
/*
|
||||
* make the child exit
|
||||
* - the best I can do is send it a sigkill
|
||||
* - perhaps it should be put in the status that it wants to
|
||||
* exit
|
||||
*/
|
||||
case PTRACE_KILL:
|
||||
ret = 0;
|
||||
if (child->exit_state == EXIT_ZOMBIE) /* already dead */
|
||||
break;
|
||||
child->exit_code = SIGKILL;
|
||||
clear_tsk_thread_flag(child, TIF_SINGLESTEP);
|
||||
ptrace_disable(child);
|
||||
wake_up_process(child);
|
||||
break;
|
||||
case PTRACE_SETREGS: /* Set all integer regs in the child. */
|
||||
return copy_regset_from_user(child, &user_mn10300_native_view,
|
||||
REGSET_GENERAL,
|
||||
0, NR_PTREGS * sizeof(long),
|
||||
(const void __user *)data);
|
||||
|
||||
case PTRACE_SINGLESTEP: /* set the trap flag. */
|
||||
#ifndef CONFIG_MN10300_USING_JTAG
|
||||
ret = -EIO;
|
||||
if ((unsigned long) data > _NSIG)
|
||||
break;
|
||||
clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
|
||||
ptrace_enable(child);
|
||||
child->exit_code = data;
|
||||
wake_up_process(child);
|
||||
ret = 0;
|
||||
#else
|
||||
ret = -EINVAL;
|
||||
#endif
|
||||
break;
|
||||
case PTRACE_GETFPREGS: /* Get the child FPU state. */
|
||||
return copy_regset_to_user(child, &user_mn10300_native_view,
|
||||
REGSET_FPU,
|
||||
0, sizeof(struct fpu_state_struct),
|
||||
(void __user *)data);
|
||||
|
||||
case PTRACE_DETACH: /* detach a process that was attached. */
|
||||
ret = ptrace_detach(child, data);
|
||||
break;
|
||||
|
||||
/* Get all gp regs from the child. */
|
||||
case PTRACE_GETREGS: {
|
||||
unsigned long tmp;
|
||||
|
||||
if (!access_ok(VERIFY_WRITE, (unsigned *) data, NR_PTREGS << 2)) {
|
||||
ret = -EIO;
|
||||
break;
|
||||
}
|
||||
|
||||
for (i = 0; i < NR_PTREGS << 2; i += 4) {
|
||||
tmp = get_stack_long(child, ptrace_regid_to_frame[i]);
|
||||
__put_user(tmp, (unsigned long *) data);
|
||||
data += sizeof(tmp);
|
||||
}
|
||||
ret = 0;
|
||||
break;
|
||||
}
|
||||
|
||||
case PTRACE_SETREGS: { /* Set all gp regs in the child. */
|
||||
unsigned long tmp;
|
||||
|
||||
if (!access_ok(VERIFY_READ, (unsigned long *)data,
|
||||
sizeof(struct pt_regs))) {
|
||||
ret = -EIO;
|
||||
break;
|
||||
}
|
||||
|
||||
for (i = 0; i < NR_PTREGS << 2; i += 4) {
|
||||
__get_user(tmp, (unsigned long *) data);
|
||||
put_stack_long(child, ptrace_regid_to_frame[i], tmp);
|
||||
data += sizeof(tmp);
|
||||
}
|
||||
ret = 0;
|
||||
break;
|
||||
}
|
||||
|
||||
case PTRACE_GETFPREGS: { /* Get the child FPU state. */
|
||||
if (is_using_fpu(child)) {
|
||||
unlazy_fpu(child);
|
||||
fpu_state = child->thread.fpu_state;
|
||||
} else {
|
||||
memset(&fpu_state, 0, sizeof(fpu_state));
|
||||
}
|
||||
|
||||
ret = -EIO;
|
||||
if (copy_to_user((void *) data, &fpu_state,
|
||||
sizeof(fpu_state)) == 0)
|
||||
ret = 0;
|
||||
break;
|
||||
}
|
||||
|
||||
case PTRACE_SETFPREGS: { /* Set the child FPU state. */
|
||||
ret = -EFAULT;
|
||||
if (copy_from_user(&fpu_state, (const void *) data,
|
||||
sizeof(fpu_state)) == 0) {
|
||||
fpu_kill_state(child);
|
||||
child->thread.fpu_state = fpu_state;
|
||||
set_using_fpu(child);
|
||||
ret = 0;
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
case PTRACE_SETOPTIONS: {
|
||||
if (data & PTRACE_O_TRACESYSGOOD)
|
||||
child->ptrace |= PT_TRACESYSGOOD;
|
||||
else
|
||||
child->ptrace &= ~PT_TRACESYSGOOD;
|
||||
ret = 0;
|
||||
break;
|
||||
}
|
||||
case PTRACE_SETFPREGS: /* Set the child FPU state. */
|
||||
return copy_regset_from_user(child, &user_mn10300_native_view,
|
||||
REGSET_FPU,
|
||||
0, sizeof(struct fpu_state_struct),
|
||||
(const void __user *)data);
|
||||
|
||||
default:
|
||||
ret = -EIO;
|
||||
ret = ptrace_request(child, request, addr, data);
|
||||
break;
|
||||
}
|
||||
|
||||
@ -337,43 +362,26 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
|
||||
}
|
||||
|
||||
/*
|
||||
* notification of system call entry/exit
|
||||
* - triggered by current->work.syscall_trace
|
||||
* handle tracing of system call entry
|
||||
* - return the revised system call number or ULONG_MAX to cause ENOSYS
|
||||
*/
|
||||
asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit)
|
||||
asmlinkage unsigned long syscall_trace_entry(struct pt_regs *regs)
|
||||
{
|
||||
#if 0
|
||||
/* just in case... */
|
||||
printk(KERN_DEBUG "[%d] syscall_%lu(%lx,%lx,%lx,%lx) = %lx\n",
|
||||
current->pid,
|
||||
regs->orig_d0,
|
||||
regs->a0,
|
||||
regs->d1,
|
||||
regs->a3,
|
||||
regs->a2,
|
||||
regs->d0);
|
||||
return;
|
||||
#endif
|
||||
if (tracehook_report_syscall_entry(regs))
|
||||
/* tracing decided this syscall should not happen, so
|
||||
* We'll return a bogus call number to get an ENOSYS
|
||||
* error, but leave the original number in
|
||||
* regs->orig_d0
|
||||
*/
|
||||
return ULONG_MAX;
|
||||
|
||||
if (!test_thread_flag(TIF_SYSCALL_TRACE) &&
|
||||
!test_thread_flag(TIF_SINGLESTEP))
|
||||
return;
|
||||
if (!(current->ptrace & PT_PTRACED))
|
||||
return;
|
||||
|
||||
/* the 0x80 provides a way for the tracing parent to distinguish
|
||||
between a syscall stop and SIGTRAP delivery */
|
||||
ptrace_notify(SIGTRAP |
|
||||
((current->ptrace & PT_TRACESYSGOOD) &&
|
||||
!test_thread_flag(TIF_SINGLESTEP) ? 0x80 : 0));
|
||||
|
||||
/*
|
||||
* this isn't the same as continuing with a signal, but it will do
|
||||
* for normal use. strace only continues with a signal if the
|
||||
* stopping signal is not SIGTRAP. -brl
|
||||
*/
|
||||
if (current->exit_code) {
|
||||
send_sig(current->exit_code, current, 1);
|
||||
current->exit_code = 0;
|
||||
}
|
||||
return regs->orig_d0;
|
||||
}
|
||||
|
||||
/*
|
||||
* handle tracing of system call exit
|
||||
*/
|
||||
asmlinkage void syscall_trace_exit(struct pt_regs *regs)
|
||||
{
|
||||
tracehook_report_syscall_exit(regs, 0);
|
||||
}
|
||||
|
@ -23,6 +23,7 @@
|
||||
#include <linux/tty.h>
|
||||
#include <linux/personality.h>
|
||||
#include <linux/suspend.h>
|
||||
#include <linux/tracehook.h>
|
||||
#include <asm/cacheflush.h>
|
||||
#include <asm/ucontext.h>
|
||||
#include <asm/uaccess.h>
|
||||
@ -511,6 +512,9 @@ static void do_signal(struct pt_regs *regs)
|
||||
* clear the TIF_RESTORE_SIGMASK flag */
|
||||
if (test_thread_flag(TIF_RESTORE_SIGMASK))
|
||||
clear_thread_flag(TIF_RESTORE_SIGMASK);
|
||||
|
||||
tracehook_signal_handler(signr, &info, &ka, regs,
|
||||
test_thread_flag(TIF_SINGLESTEP));
|
||||
}
|
||||
|
||||
return;
|
||||
@ -561,4 +565,9 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, u32 thread_info_flags)
|
||||
/* deal with pending signal delivery */
|
||||
if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
|
||||
do_signal(regs);
|
||||
|
||||
if (thread_info_flags & _TIF_NOTIFY_RESUME) {
|
||||
clear_thread_flag(TIF_NOTIFY_RESUME);
|
||||
tracehook_notify_resume(__frame);
|
||||
}
|
||||
}
|
||||
|
@ -165,24 +165,6 @@ ENTRY(itlb_aerror)
|
||||
ENTRY(dtlb_aerror)
|
||||
and ~EPSW_NMID,epsw
|
||||
add -4,sp
|
||||
mov d1,(sp)
|
||||
|
||||
movhu (MMUFCR_DFC),d1 # is it the initial valid write
|
||||
# to this page?
|
||||
and MMUFCR_xFC_INITWR,d1
|
||||
beq dtlb_pagefault # jump if not
|
||||
|
||||
mov (DPTEL),d1 # set the dirty bit
|
||||
# (don't replace with BSET!)
|
||||
or _PAGE_DIRTY,d1
|
||||
mov d1,(DPTEL)
|
||||
mov (sp),d1
|
||||
add 4,sp
|
||||
rti
|
||||
|
||||
ALIGN
|
||||
dtlb_pagefault:
|
||||
mov (sp),d1
|
||||
SAVE_ALL
|
||||
add -4,sp # need to pass three params
|
||||
|
||||
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user