Merge remote-tracking branch 'airlied/drm-next' into drm-intel-next-queued
Pull in Dave's drm-next pull request to have a clean base for 4.6. Also, we
need the various atomic state extensions Maarten recently created. Conflicts
are just adjacent changes that all resolve to nothing in git diff.

Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
@@ -0,0 +1,54 @@
Etnaviv DRM master device
=========================

The Etnaviv DRM master device is a virtual device needed to list all
Vivante GPU cores that comprise the GPU subsystem.

Required properties:
- compatible: Should be one of
    "fsl,imx-gpu-subsystem"
    "marvell,dove-gpu-subsystem"
- cores: Should contain a list of phandles pointing to Vivante GPU devices

example:

gpu-subsystem {
    compatible = "fsl,imx-gpu-subsystem";
    cores = <&gpu_2d>, <&gpu_3d>;
};


Vivante GPU core devices
========================

Required properties:
- compatible: Should be "vivante,gc"
  A more specific compatible is not needed, as the cores contain chip
  identification registers at fixed locations, which provide all the
  necessary information to the driver.
- reg: should be register base and length as documented in the
  datasheet
- interrupts: Should contain the cores interrupt line
- clocks: should contain one clock for entry in clock-names
  see Documentation/devicetree/bindings/clock/clock-bindings.txt
- clock-names:
  - "bus": AXI/register clock
  - "core": GPU core clock
  - "shader": Shader clock (only required if GPU has feature PIPE_3D)

Optional properties:
- power-domains: a power domain consumer specifier according to
  Documentation/devicetree/bindings/power/power_domain.txt

example:

gpu_3d: gpu@00130000 {
    compatible = "vivante,gc";
    reg = <0x00130000 0x4000>;
    interrupts = <0 9 IRQ_TYPE_LEVEL_HIGH>;
    clocks = <&clks IMX6QDL_CLK_GPU3D_AXI>,
             <&clks IMX6QDL_CLK_GPU3D_CORE>,
             <&clks IMX6QDL_CLK_GPU3D_SHADER>;
    clock-names = "bus", "core", "shader";
    power-domains = <&gpc 1>;
};
@@ -1,3 +1,20 @@
Device-Tree bindings for Samsung Exynos Embedded DisplayPort Transmitter(eDP)

DisplayPort is industry standard to accommodate the growing board adoption
of digital display technology within the PC and CE industries.
It consolidates the internal and external connection methods to reduce device
complexity and cost. It also supports necessary features for important cross
industry applications and provides performance scalability to enable the next
generation of displays that feature higher color depths, refresh rates, and
display resolutions.

eDP (embedded display port) device is compliant with Embedded DisplayPort
standard as follows,
- DisplayPort standard 1.1a for Exynos5250 and Exynos5260.
- DisplayPort standard 1.3 for Exynos5422s and Exynos5800.

eDP resides between FIMD and panel or FIMD and bridge such as LVDS.

The Exynos display port interface should be configured based on
the type of panel connected to it.

@@ -66,8 +83,15 @@ Optional properties for dp-controller:
Hotplug detect GPIO.
Indicates which GPIO should be used for hotplug
detection
-video interfaces: Device node can contain video interface port
nodes according to [1].
Video interfaces:
Device node can contain video interface port nodes according to [1].
The following are properties specific to those nodes:

endpoint node connected to bridge or panel node:
- remote-endpoint: specifies the endpoint in panel or bridge node.
This node is required in all kinds of exynos dp
to represent the connection between dp and bridge
or dp and panel.

[1]: Documentation/devicetree/bindings/media/video-interfaces.txt

@@ -111,9 +135,18 @@ Board Specific portion:
};

ports {
    port@0 {
    port {
        dp_out: endpoint {
            remote-endpoint = <&bridge_in>;
            remote-endpoint = <&dp_in>;
        };
    };
};

panel {
    ...
    port {
        dp_in: endpoint {
            remote-endpoint = <&dp_out>;
        };
    };
};
@@ -14,17 +14,20 @@ Required properties:
- clocks: device clocks
  See Documentation/devicetree/bindings/clocks/clock-bindings.txt for details.
- clock-names: the following clocks are required:
  * "bus_clk"
  * "byte_clk"
  * "core_clk"
  * "core_mmss_clk"
  * "iface_clk"
  * "mdp_core_clk"
  * "iface_clk"
  * "bus_clk"
  * "core_mmss_clk"
  * "byte_clk"
  * "pixel_clk"
  * "core_clk"
  For DSIv2, we need an additional clock:
  * "src_clk"
- vdd-supply: phandle to vdd regulator device node
- vddio-supply: phandle to vdd-io regulator device node
- vdda-supply: phandle to vdda regulator device node
- qcom,dsi-phy: phandle to DSI PHY device node
- syscon-sfpb: A phandle to mmss_sfpb syscon node (only for DSIv2)

Optional properties:
- panel@0: Node of panel connected to this DSI controller.
@@ -51,6 +54,7 @@ Required properties:
  * "qcom,dsi-phy-28nm-hpm"
  * "qcom,dsi-phy-28nm-lp"
  * "qcom,dsi-phy-20nm"
  * "qcom,dsi-phy-28nm-8960"
- reg: Physical base address and length of the registers of PLL, PHY and PHY
  regulator
- reg-names: The names of register regions. The following regions are required:
@@ -2,18 +2,28 @@ Qualcomm adreno/snapdragon display controller

Required properties:
- compatible:
  * "qcom,mdp" - mdp4
  * "qcom,mdp4" - mdp4
  * "qcom,mdp5" - mdp5
- reg: Physical base address and length of the controller's registers.
- interrupts: The interrupt signal from the display controller.
- connectors: array of phandles for output device(s)
- clocks: device clocks
  See ../clocks/clock-bindings.txt for details.
- clock-names: the following clocks are required:
  * "core_clk"
  * "iface_clk"
  * "src_clk"
  * "hdmi_clk"
  * "mpd_clk"
- clock-names: the following clocks are required.
  For MDP4:
  * "core_clk"
  * "iface_clk"
  * "lut_clk"
  * "src_clk"
  * "hdmi_clk"
  * "mdp_clk"
  For MDP5:
  * "bus_clk"
  * "iface_clk"
  * "core_clk_src"
  * "core_clk"
  * "lut_clk" (some MDP5 versions may not need this)
  * "vsync_clk"

Optional properties:
- gpus: phandle for gpu device
@@ -26,7 +36,7 @@ Example:
...

mdp: qcom,mdp@5100000 {
    compatible = "qcom,mdp";
    compatible = "qcom,mdp4";
    reg = <0x05100000 0xf0000>;
    interrupts = <GIC_SPI 75 0>;
    connectors = <&hdmi>;
@@ -0,0 +1,7 @@
Boe Corporation 8.0" WUXGA TFT LCD panel

Required properties:
- compatible: should be "boe,tv080wum-nl0"

This binding is compatible with the simple-panel binding, which is specified
in simple-panel.txt in this directory.
@@ -0,0 +1,7 @@
Innolux Corporation 12.1" G121X1-L03 XGA (1024x768) TFT LCD panel

Required properties:
- compatible: should be "innolux,g121x1-l03"

This binding is compatible with the simple-panel binding, which is specified
in simple-panel.txt in this directory.
@@ -0,0 +1,7 @@
Kyocera Corporation 12.1" XGA (1024x768) TFT LCD panel

Required properties:
- compatible: should be "kyo,tcg121xglp"

This binding is compatible with the simple-panel binding, which is specified
in simple-panel.txt in this directory.
@@ -0,0 +1,20 @@
Panasonic 10" WUXGA TFT LCD panel

Required properties:
- compatible: should be "panasonic,vvx10f034n00"
- reg: DSI virtual channel of the peripheral
- power-supply: phandle of the regulator that provides the supply voltage

Optional properties:
- backlight: phandle of the backlight device attached to the panel

Example:

mdss_dsi@fd922800 {
    panel@0 {
        compatible = "panasonic,vvx10f034n00";
        reg = <0>;
        power-supply = <&vreg_vsp>;
        backlight = <&lp8566_wled>;
    };
};
@@ -0,0 +1,7 @@
QiaoDian XianShi Corporation 4"3 TFT LCD panel

Required properties:
- compatible: should be "qiaodian,qd43003c0-40"

This binding is compatible with the simple-panel binding, which is specified
in simple-panel.txt in this directory.
@@ -0,0 +1,22 @@
Sharp Microelectronics 4.3" qHD TFT LCD panel

Required properties:
- compatible: should be "sharp,ls043t1le01-qhd"
- reg: DSI virtual channel of the peripheral
- power-supply: phandle of the regulator that provides the supply voltage

Optional properties:
- backlight: phandle of the backlight device attached to the panel
- reset-gpios: a GPIO spec for the reset pin

Example:

mdss_dsi@fd922800 {
    panel@0 {
        compatible = "sharp,ls043t1le01-qhd";
        reg = <0>;
        avdd-supply = <&pm8941_l22>;
        backlight = <&pm8941_wled>;
        reset-gpios = <&pm8941_gpios 19 GPIO_ACTIVE_HIGH>;
    };
};
@@ -0,0 +1,60 @@
Rockchip specific extensions to the Synopsys Designware MIPI DSI
================================

Required properties:
- #address-cells: Should be <1>.
- #size-cells: Should be <0>.
- compatible: "rockchip,rk3288-mipi-dsi", "snps,dw-mipi-dsi".
- reg: Represent the physical address range of the controller.
- interrupts: Represent the controller's interrupt to the CPU(s).
- clocks, clock-names: Phandles to the controller's pll reference
  clock(ref) and APB clock(pclk), as described in [1].
- rockchip,grf: this soc should set GRF regs to mux vopl/vopb.
- ports: contain a port node with endpoint definitions as defined in [2].
  For vopb,set the reg = <0> and set the reg = <1> for vopl.

[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
[2] Documentation/devicetree/bindings/media/video-interfaces.txt

Example:
mipi_dsi: mipi@ff960000 {
    #address-cells = <1>;
    #size-cells = <0>;
    compatible = "rockchip,rk3288-mipi-dsi", "snps,dw-mipi-dsi";
    reg = <0xff960000 0x4000>;
    interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>;
    clocks = <&cru SCLK_MIPI_24M>, <&cru PCLK_MIPI_DSI0>;
    clock-names = "ref", "pclk";
    rockchip,grf = <&grf>;
    status = "okay";

    ports {
        #address-cells = <1>;
        #size-cells = <0>;
        reg = <1>;

        mipi_in: port {
            #address-cells = <1>;
            #size-cells = <0>;
            mipi_in_vopb: endpoint@0 {
                reg = <0>;
                remote-endpoint = <&vopb_out_mipi>;
            };
            mipi_in_vopl: endpoint@1 {
                reg = <1>;
                remote-endpoint = <&vopl_out_mipi>;
            };
        };
    };

    panel {
        compatible ="boe,tv080wum-nl0";
        reg = <0>;

        enable-gpios = <&gpio7 3 GPIO_ACTIVE_HIGH>;
        pinctrl-names = "default";
        pinctrl-0 = <&lcd_en>;
        backlight = <&backlight>;
        status = "okay";
    };
};
@@ -7,6 +7,7 @@ buffer to an external LCD interface.

Required properties:
- compatible: value should be one of the following
  "rockchip,rk3288-vop";
  "rockchip,rk3036-vop";

- interrupts: should contain a list of all VOP IP block interrupts in the
  order: VSYNC, LCD_SYSTEM. The interrupt specifier
@@ -22,8 +22,7 @@ Required properties:

Optional properties:
- ti,hwmods: Name of the hwmods associated to the eDMA CC
- ti,edma-memcpy-channels: List of channels allocated to be used for memcpy, iow
  these channels will be SW triggered channels. The list must
  contain 16 bits numbers, see example.
  these channels will be SW triggered channels. See example.
- ti,edma-reserved-slot-ranges: PaRAM slot ranges which should not be used by
  the driver, they are allocated to be used by for example the
  DSP. See example.
@@ -56,10 +55,9 @@ edma: edma@49000000 {
    ti,tptcs = <&edma_tptc0 7>, <&edma_tptc1 7>, <&edma_tptc2 0>;

    /* Channel 20 and 21 is allocated for memcpy */
    ti,edma-memcpy-channels = /bits/ 16 <20 21>;
    /* The following PaRAM slots are reserved: 35-45 and 100-110 */
    ti,edma-reserved-slot-ranges = /bits/ 16 <35 10>,
                                   /bits/ 16 <100 10>;
    ti,edma-memcpy-channels = <20 21>;
    /* The following PaRAM slots are reserved: 35-44 and 100-109 */
    ti,edma-reserved-slot-ranges = <35 10>, <100 10>;
};

edma_tptc0: tptc@49800000 {
@@ -11,6 +11,10 @@ Required properties:
  0 = active high
  1 = active low

Optional properties:
- little-endian : GPIO registers are used as little endian. If not
  present registers are used as big endian by default.

Example:

gpio0: gpio@1100 {
@@ -12,7 +12,7 @@ Each key is represented as a sub-node of "allwinner,sun4i-a10-lradc-keys":

Required subnode-properties:
- label: Descriptive name of the key.
- linux,code: Keycode to emit.
- channel: Channel this key is attached to, mut be 0 or 1.
- channel: Channel this key is attached to, must be 0 or 1.
- voltage: Voltage in µV at lradc input when this key is pressed.

Example:
@@ -7,6 +7,10 @@ Required properties:
- reg: should contain G-Scaler physical address location and length.
- interrupts: should contain G-Scaler interrupt number

Optional properties:
- samsung,sysreg: handle to syscon used to control the system registers to
  set writeback input and destination

Example:

gsc_0: gsc@0x13e00000 {
@@ -6,7 +6,9 @@ used for what purposes, but which don't use an on-flash partition table such
as RedBoot.

The partition table should be a subnode of the mtd node and should be named
'partitions'. Partitions are defined in subnodes of the partitions node.
'partitions'. This node should have the following property:
- compatible : (required) must be "fixed-partitions"
Partitions are then defined in subnodes of the partitions node.

For backwards compatibility partitions as direct subnodes of the mtd device are
supported. This use is discouraged.
@@ -36,6 +38,7 @@ Examples:

flash@0 {
    partitions {
        compatible = "fixed-partitions";
        #address-cells = <1>;
        #size-cells = <1>;

@@ -53,6 +56,7 @@ flash@0 {

flash@1 {
    partitions {
        compatible = "fixed-partitions";
        #address-cells = <1>;
        #size-cells = <2>;

@@ -66,6 +70,7 @@ flash@1 {

flash@2 {
    partitions {
        compatible = "fixed-partitions";
        #address-cells = <2>;
        #size-cells = <2>;
@@ -33,6 +33,7 @@ auo AU Optronics Corporation
avago Avago Technologies
avic Shanghai AVIC Optoelectronics Co., Ltd.
axis Axis Communications AB
boe BOE Technology Group Co., Ltd.
bosch Bosch Sensortec GmbH
boundary Boundary Devices Inc.
brcm Broadcom Corporation
@@ -123,6 +124,7 @@ jedec JEDEC Solid State Technology Association
karo Ka-Ro electronics GmbH
keymile Keymile GmbH
kinetic Kinetic Technologies
kyo Kyocera Corporation
lacie LaCie
lantiq Lantiq Semiconductor
lenovo Lenovo Group Ltd.
@@ -180,6 +182,7 @@ qca Qualcomm Atheros, Inc.
qcom Qualcomm Technologies, Inc
qemu QEMU, a generic and open source machine emulator and virtualizer
qi Qi Hardware
qiaodian QiaoDian XianShi Corporation
qnap QNAP Systems, Inc.
radxa Radxa
raidsonic RaidSonic Technology GmbH
@@ -238,6 +241,7 @@ v3 V3 Semiconductor
variscite Variscite Ltd.
via VIA Technologies, Inc.
virtio Virtual I/O Device Specification, developed by the OASIS consortium
vivante Vivante Corporation
voipac Voipac Technologies s.r.o.
wexler Wexler
winbond Winbond Electronics corp.
@@ -181,17 +181,3 @@ For general information, go to the Intel support website at:

If an issue is identified with the released source code on the supported
kernel with a supported adapter, email the specific information related to the
issue to e1000-devel@lists.sourceforge.net.

License
=======

This software program is released under the terms of a license agreement
between you ('Licensee') and Intel. Do not use or load this software or any
associated materials (collectively, the 'Software') until you have carefully
read the full terms and conditions of the file COPYING located in this software
package. By loading or using the Software, you agree to the terms of this
Agreement. If you do not agree with the terms of this Agreement, do not install
or use the Software.

* Other names and brands may be claimed as the property of others.
MAINTAINERS
@@ -2975,6 +2975,7 @@ F: kernel/cpuset.c

CONTROL GROUP - MEMORY RESOURCE CONTROLLER (MEMCG)
M: Johannes Weiner <hannes@cmpxchg.org>
M: Michal Hocko <mhocko@kernel.org>
M: Vladimir Davydov <vdavydov@virtuozzo.com>
L: cgroups@vger.kernel.org
L: linux-mm@kvack.org
S: Maintained
@@ -3743,6 +3744,15 @@ S: Maintained
F: drivers/gpu/drm/sti
F: Documentation/devicetree/bindings/display/st,stih4xx.txt

DRM DRIVERS FOR VIVANTE GPU IP
M: Lucas Stach <l.stach@pengutronix.de>
R: Russell King <linux+etnaviv@arm.linux.org.uk>
R: Christian Gmeiner <christian.gmeiner@gmail.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
F: drivers/gpu/drm/etnaviv
F: Documentation/devicetree/bindings/display/etnaviv

DSBR100 USB FM RADIO DRIVER
M: Alexey Klimov <klimov.linux@gmail.com>
L: linux-media@vger.kernel.org
@@ -5577,7 +5587,7 @@ R: Jesse Brandeburg <jesse.brandeburg@intel.com>
R: Shannon Nelson <shannon.nelson@intel.com>
R: Carolyn Wyborny <carolyn.wyborny@intel.com>
R: Don Skidmore <donald.c.skidmore@intel.com>
R: Matthew Vick <matthew.vick@intel.com>
R: Bruce Allan <bruce.w.allan@intel.com>
R: John Ronciak <john.ronciak@intel.com>
R: Mitch Williams <mitch.a.williams@intel.com>
L: intel-wired-lan@lists.osuosl.org
@@ -8286,7 +8296,7 @@ F: include/linux/delayacct.h
F: kernel/delayacct.c

PERFORMANCE EVENTS SUBSYSTEM
M: Peter Zijlstra <a.p.zijlstra@chello.nl>
M: Peter Zijlstra <peterz@infradead.org>
M: Ingo Molnar <mingo@redhat.com>
M: Arnaldo Carvalho de Melo <acme@kernel.org>
L: linux-kernel@vger.kernel.org
@@ -8379,6 +8389,14 @@ L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
S: Maintained
F: drivers/pinctrl/samsung/

PIN CONTROLLER - SINGLE
M: Tony Lindgren <tony@atomide.com>
M: Haojian Zhuang <haojian.zhuang@linaro.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-omap@vger.kernel.org
S: Maintained
F: drivers/pinctrl/pinctrl-single.c

PIN CONTROLLER - ST SPEAR
M: Viresh Kumar <vireshk@kernel.org>
L: spear-devel@list.st.com
@@ -8945,6 +8963,13 @@ F: drivers/rpmsg/
F: Documentation/rpmsg.txt
F: include/linux/rpmsg.h

RENESAS ETHERNET DRIVERS
R: Sergei Shtylyov <sergei.shtylyov@cogentembedded.com>
L: netdev@vger.kernel.org
L: linux-sh@vger.kernel.org
F: drivers/net/ethernet/renesas/
F: include/linux/sh_eth.h

RESET CONTROLLER FRAMEWORK
M: Philipp Zabel <p.zabel@pengutronix.de>
S: Maintained
Makefile
@@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 4
SUBLEVEL = 0
EXTRAVERSION = -rc4
EXTRAVERSION = -rc6
NAME = Blurry Fish Butt

# *DOCUMENTATION*
@@ -445,6 +445,7 @@ config LINUX_LINK_BASE
However some customers have peripherals mapped at this addr, so
Linux needs to be scooted a bit.
If you don't know what the above means, leave this setting alone.
This needs to match memory start address specified in Device Tree

config HIGHMEM
bool "High Memory Support"
@@ -46,6 +46,7 @@
    snps,pbl = < 32 >;
    clocks = <&apbclk>;
    clock-names = "stmmaceth";
    max-speed = <100>;
};

ehci@0x40000 {
@@ -17,7 +17,8 @@

memory {
    device_type = "memory";
    reg = <0x0 0x80000000 0x0 0x40000000 /* 1 GB low mem */
    /* CONFIG_LINUX_LINK_BASE needs to match low mem start */
    reg = <0x0 0x80000000 0x0 0x20000000 /* 512 MB low mem */
           0x1 0x00000000 0x0 0x40000000>; /* 1 GB highmem */
};
@ -23,7 +23,7 @@
|
||||
* @dt_compat: Array of device tree 'compatible' strings
|
||||
* (XXX: although only 1st entry is looked at)
|
||||
* @init_early: Very early callback [called from setup_arch()]
|
||||
* @init_cpu_smp: for each CPU as it is coming up (SMP as well as UP)
|
||||
* @init_per_cpu: for each CPU as it is coming up (SMP as well as UP)
|
||||
* [(M):init_IRQ(), (o):start_kernel_secondary()]
|
||||
* @init_machine: arch initcall level callback (e.g. populate static
|
||||
* platform devices or parse Devicetree)
|
||||
@ -35,7 +35,7 @@ struct machine_desc {
|
||||
const char **dt_compat;
|
||||
void (*init_early)(void);
|
||||
#ifdef CONFIG_SMP
|
||||
void (*init_cpu_smp)(unsigned int);
|
||||
void (*init_per_cpu)(unsigned int);
|
||||
#endif
|
||||
void (*init_machine)(void);
|
||||
void (*init_late)(void);
|
||||
|
@ -48,7 +48,7 @@ extern int smp_ipi_irq_setup(int cpu, int irq);
|
||||
* @init_early_smp: A SMP specific h/w block can init itself
|
||||
* Could be common across platforms so not covered by
|
||||
* mach_desc->init_early()
|
||||
* @init_irq_cpu: Called for each core so SMP h/w block driver can do
|
||||
* @init_per_cpu: Called for each core so SMP h/w block driver can do
|
||||
* any needed setup per cpu (e.g. IPI request)
|
||||
* @cpu_kick: For Master to kickstart a cpu (optionally at a PC)
|
||||
* @ipi_send: To send IPI to a @cpu
|
||||
@ -57,7 +57,7 @@ extern int smp_ipi_irq_setup(int cpu, int irq);
|
||||
struct plat_smp_ops {
|
||||
const char *info;
|
||||
void (*init_early_smp)(void);
|
||||
void (*init_irq_cpu)(int cpu);
|
||||
void (*init_per_cpu)(int cpu);
|
||||
void (*cpu_kick)(int cpu, unsigned long pc);
|
||||
void (*ipi_send)(int cpu);
|
||||
void (*ipi_clear)(int irq);
|
||||
|
@ -112,7 +112,6 @@ struct unwind_frame_info {
|
||||
|
||||
extern int arc_unwind(struct unwind_frame_info *frame);
|
||||
extern void arc_unwind_init(void);
|
||||
extern void arc_unwind_setup(void);
|
||||
extern void *unwind_add_table(struct module *module, const void *table_start,
|
||||
unsigned long table_size);
|
||||
extern void unwind_remove_table(void *handle, int init_only);
|
||||
@ -152,9 +151,6 @@ static inline void arc_unwind_init(void)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void arc_unwind_setup(void)
|
||||
{
|
||||
}
|
||||
#define unwind_add_table(a, b, c)
|
||||
#define unwind_remove_table(a, b)
|
||||
|
||||
|
@ -106,10 +106,21 @@ static struct irq_chip arcv2_irq_chip = {
|
||||
static int arcv2_irq_map(struct irq_domain *d, unsigned int irq,
|
||||
irq_hw_number_t hw)
|
||||
{
|
||||
if (irq == TIMER0_IRQ || irq == IPI_IRQ)
|
||||
/*
|
||||
* core intc IRQs [16, 23]:
|
||||
* Statically assigned always private-per-core (Timers, WDT, IPI, PCT)
|
||||
*/
|
||||
if (hw < 24) {
|
||||
/*
|
||||
* A subsequent request_percpu_irq() fails if percpu_devid is
|
||||
* not set. That in turns sets NOAUTOEN, meaning each core needs
|
||||
* to call enable_percpu_irq()
|
||||
*/
|
||||
irq_set_percpu_devid(irq);
|
||||
irq_set_chip_and_handler(irq, &arcv2_irq_chip, handle_percpu_irq);
|
||||
else
|
||||
} else {
|
||||
irq_set_chip_and_handler(irq, &arcv2_irq_chip, handle_level_irq);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -29,11 +29,11 @@ void __init init_IRQ(void)
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
/* a SMP H/w block could do IPI IRQ request here */
|
||||
if (plat_smp_ops.init_irq_cpu)
|
||||
plat_smp_ops.init_irq_cpu(smp_processor_id());
|
||||
if (plat_smp_ops.init_per_cpu)
|
||||
plat_smp_ops.init_per_cpu(smp_processor_id());
|
||||
|
||||
if (machine_desc->init_cpu_smp)
|
||||
machine_desc->init_cpu_smp(smp_processor_id());
|
||||
if (machine_desc->init_per_cpu)
|
||||
machine_desc->init_per_cpu(smp_processor_id());
|
||||
#endif
|
||||
}
|
||||
|
||||
@ -51,6 +51,18 @@ void arch_do_IRQ(unsigned int irq, struct pt_regs *regs)
|
||||
set_irq_regs(old_regs);
|
||||
}
|
||||
|
||||
/*
|
||||
* API called for requesting percpu interrupts - called by each CPU
|
||||
* - For boot CPU, actually request the IRQ with genirq core + enables
|
||||
* - For subsequent callers only enable called locally
|
||||
*
|
||||
* Relies on being called by boot cpu first (i.e. request called ahead) of
|
||||
* any enable as expected by genirq. Hence Suitable only for TIMER, IPI
|
||||
* which are guaranteed to be setup on boot core first.
|
||||
* Late probed peripherals such as perf can't use this as there no guarantee
|
||||
* of being called on boot CPU first.
|
||||
*/
|
||||
|
||||
void arc_request_percpu_irq(int irq, int cpu,
|
||||
irqreturn_t (*isr)(int irq, void *dev),
|
||||
const char *irq_nm,
|
||||
@ -60,14 +72,17 @@ void arc_request_percpu_irq(int irq, int cpu,
|
||||
if (!cpu) {
|
||||
int rc;
|
||||
|
||||
#ifdef CONFIG_ISA_ARCOMPACT
|
||||
/*
|
||||
* These 2 calls are essential to making percpu IRQ APIs work
|
||||
* Ideally these details could be hidden in irq chip map function
|
||||
* but the issue is IPIs IRQs being static (non-DT) and platform
|
||||
* specific, so we can't identify them there.
|
||||
* A subsequent request_percpu_irq() fails if percpu_devid is
|
||||
* not set. That in turns sets NOAUTOEN, meaning each core needs
|
||||
* to call enable_percpu_irq()
|
||||
*
|
||||
* For ARCv2, this is done in irq map function since we know
|
||||
* which irqs are strictly per cpu
|
||||
*/
|
||||
irq_set_percpu_devid(irq);
|
||||
irq_modify_status(irq, IRQ_NOAUTOEN, 0); /* @irq, @clr, @set */
|
||||
#endif
|
||||
|
||||
rc = request_percpu_irq(irq, isr, irq_nm, percpu_dev);
|
||||
if (rc)
|
||||
|
@ -132,7 +132,7 @@ static void mcip_probe_n_setup(void)
|
||||
struct plat_smp_ops plat_smp_ops = {
|
||||
.info = smp_cpuinfo_buf,
|
||||
.init_early_smp = mcip_probe_n_setup,
|
||||
.init_irq_cpu = mcip_setup_per_cpu,
|
||||
.init_per_cpu = mcip_setup_per_cpu,
|
||||
.ipi_send = mcip_ipi_send,
|
||||
.ipi_clear = mcip_ipi_clear,
|
||||
};
|
||||
|
@ -428,12 +428,11 @@ static irqreturn_t arc_pmu_intr(int irq, void *dev)
|
||||
|
||||
#endif /* CONFIG_ISA_ARCV2 */
|
||||
|
||||
void arc_cpu_pmu_irq_init(void)
|
||||
static void arc_cpu_pmu_irq_init(void *data)
|
||||
{
|
||||
struct arc_pmu_cpu *pmu_cpu = this_cpu_ptr(&arc_pmu_cpu);
|
||||
int irq = *(int *)data;
|
||||
|
||||
arc_request_percpu_irq(arc_pmu->irq, smp_processor_id(), arc_pmu_intr,
|
||||
"ARC perf counters", pmu_cpu);
|
||||
enable_percpu_irq(irq, IRQ_TYPE_NONE);
|
||||
|
||||
/* Clear all pending interrupt flags */
|
||||
write_aux_reg(ARC_REG_PCT_INT_ACT, 0xffffffff);
|
||||
@ -515,7 +514,6 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
|
||||
|
||||
if (has_interrupts) {
|
||||
int irq = platform_get_irq(pdev, 0);
|
||||
unsigned long flags;
|
||||
|
||||
if (irq < 0) {
|
||||
pr_err("Cannot get IRQ number for the platform\n");
|
||||
@ -524,24 +522,12 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
|
||||
|
||||
arc_pmu->irq = irq;
|
||||
|
||||
/*
|
||||
* arc_cpu_pmu_irq_init() needs to be called on all cores for
|
||||
* their respective local PMU.
|
||||
* However we use opencoded on_each_cpu() to ensure it is called
|
||||
* on core0 first, so that arc_request_percpu_irq() sets up
|
||||
* AUTOEN etc. Otherwise enable_percpu_irq() fails to enable
|
||||
* perf IRQ on non master cores.
|
||||
* see arc_request_percpu_irq()
|
||||
*/
|
||||
preempt_disable();
|
||||
local_irq_save(flags);
|
||||
arc_cpu_pmu_irq_init();
|
||||
local_irq_restore(flags);
|
||||
smp_call_function((smp_call_func_t)arc_cpu_pmu_irq_init, 0, 1);
|
||||
preempt_enable();
|
||||
/* intc map function ensures irq_set_percpu_devid() called */
|
||||
request_percpu_irq(irq, arc_pmu_intr, "ARC perf counters",
|
||||
this_cpu_ptr(&arc_pmu_cpu));
|
||||
|
||||
on_each_cpu(arc_cpu_pmu_irq_init, &irq, 1);
|
||||
|
||||
/* Clean all pending interrupt flags */
|
||||
write_aux_reg(ARC_REG_PCT_INT_ACT, 0xffffffff);
|
||||
} else
|
||||
arc_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
|
||||
|
||||
|
@ -429,7 +429,6 @@ void __init setup_arch(char **cmdline_p)
|
||||
#endif
|
||||
|
||||
arc_unwind_init();
|
||||
arc_unwind_setup();
|
||||
}
|
||||
|
||||
static int __init customize_machine(void)
|
||||
|
@ -132,11 +132,11 @@ void start_kernel_secondary(void)
|
||||
pr_info("## CPU%u LIVE ##: Executing Code...\n", cpu);
|
||||
|
||||
/* Some SMP H/w setup - for each cpu */
|
||||
if (plat_smp_ops.init_irq_cpu)
|
||||
plat_smp_ops.init_irq_cpu(cpu);
|
||||
if (plat_smp_ops.init_per_cpu)
|
||||
plat_smp_ops.init_per_cpu(cpu);
|
||||
|
||||
if (machine_desc->init_cpu_smp)
|
||||
machine_desc->init_cpu_smp(cpu);
|
||||
if (machine_desc->init_per_cpu)
|
||||
machine_desc->init_per_cpu(cpu);
|
||||
|
||||
arc_local_timer_setup();
|
||||
|
||||
|
@ -170,6 +170,23 @@ static struct unwind_table *find_table(unsigned long pc)
|
||||
|
||||
static unsigned long read_pointer(const u8 **pLoc,
|
||||
const void *end, signed ptrType);
|
||||
static void init_unwind_hdr(struct unwind_table *table,
|
||||
void *(*alloc) (unsigned long));
|
||||
|
||||
/*
|
||||
* wrappers for header alloc (vs. calling one vs. other at call site)
|
||||
* to elide section mismatches warnings
|
||||
*/
|
||||
static void *__init unw_hdr_alloc_early(unsigned long sz)
|
||||
{
|
||||
return __alloc_bootmem_nopanic(sz, sizeof(unsigned int),
|
||||
MAX_DMA_ADDRESS);
|
||||
}
|
||||
|
||||
static void *unw_hdr_alloc(unsigned long sz)
|
||||
{
|
||||
return kmalloc(sz, GFP_KERNEL);
|
||||
}
|
||||
|
||||
static void init_unwind_table(struct unwind_table *table, const char *name,
|
||||
const void *core_start, unsigned long core_size,
|
||||
@ -209,6 +226,8 @@ void __init arc_unwind_init(void)
|
||||
__start_unwind, __end_unwind - __start_unwind,
|
||||
NULL, 0);
|
||||
/*__start_unwind_hdr, __end_unwind_hdr - __start_unwind_hdr);*/
|
||||
|
||||
init_unwind_hdr(&root_table, unw_hdr_alloc_early);
|
||||
}
|
||||
|
||||
static const u32 bad_cie, not_fde;
|
||||
@ -241,8 +260,8 @@ static void swap_eh_frame_hdr_table_entries(void *p1, void *p2, int size)
|
||||
e2->fde = v;
|
||||
}
|
||||
|
||||
static void __init setup_unwind_table(struct unwind_table *table,
|
||||
void *(*alloc) (unsigned long))
|
||||
static void init_unwind_hdr(struct unwind_table *table,
|
||||
void *(*alloc) (unsigned long))
|
||||
{
|
||||
const u8 *ptr;
|
||||
unsigned long tableSize = table->size, hdrSize;
|
||||
@ -274,13 +293,13 @@ static void __init setup_unwind_table(struct unwind_table *table,
|
||||
const u32 *cie = cie_for_fde(fde, table);
|
||||
signed ptrType;
|
||||
|
||||
if (cie == ¬_fde)
|
||||
if (cie == ¬_fde) /* only process FDE here */
|
||||
continue;
|
||||
if (cie == NULL || cie == &bad_cie)
|
||||
return;
|
||||
continue; /* say FDE->CIE.version != 1 */
|
||||
ptrType = fde_pointer_type(cie);
|
||||
if (ptrType < 0)
|
||||
return;
|
||||
continue;
|
||||
|
||||
ptr = (const u8 *)(fde + 2);
|
||||
if (!read_pointer(&ptr, (const u8 *)(fde + 1) + *fde,
|
||||
@ -300,9 +319,11 @@ static void __init setup_unwind_table(struct unwind_table *table,
|
||||
|
||||
hdrSize = 4 + sizeof(unsigned long) + sizeof(unsigned int)
|
||||
+ 2 * n * sizeof(unsigned long);
|
||||
|
||||
header = alloc(hdrSize);
|
||||
if (!header)
|
||||
return;
|
||||
|
||||
header->version = 1;
|
||||
header->eh_frame_ptr_enc = DW_EH_PE_abs | DW_EH_PE_native;
|
||||
header->fde_count_enc = DW_EH_PE_abs | DW_EH_PE_data4;
|
||||
@ -322,6 +343,10 @@ static void __init setup_unwind_table(struct unwind_table *table,
|
||||
|
||||
if (fde[1] == 0xffffffff)
|
||||
continue; /* this is a CIE */
|
||||
|
||||
if (*(u8 *)(cie + 2) != 1)
|
||||
continue; /* FDE->CIE.version not supported */
|
||||
|
||||
ptr = (const u8 *)(fde + 2);
|
||||
header->table[n].start = read_pointer(&ptr,
|
||||
(const u8 *)(fde + 1) +
|
||||
@ -342,18 +367,6 @@ static void __init setup_unwind_table(struct unwind_table *table,
|
||||
table->header = (const void *)header;
|
||||
}
|
||||
|
||||
static void *__init balloc(unsigned long sz)
|
||||
{
|
||||
return __alloc_bootmem_nopanic(sz,
|
||||
sizeof(unsigned int),
|
||||
__pa(MAX_DMA_ADDRESS));
|
||||
}
|
||||
|
||||
void __init arc_unwind_setup(void)
|
||||
{
|
||||
setup_unwind_table(&root_table, balloc);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_MODULES
|
||||
|
||||
static struct unwind_table *last_table;
|
||||
@ -377,6 +390,8 @@ void *unwind_add_table(struct module *module, const void *table_start,
|
||||
table_start, table_size,
|
||||
NULL, 0);
|
||||
|
||||
init_unwind_hdr(table, unw_hdr_alloc);
|
||||
|
||||
#ifdef UNWIND_DEBUG
|
||||
unw_debug("Table added for [%s] %lx %lx\n",
|
||||
module->name, table->core.pc, table->core.range);
|
||||
@ -439,6 +454,7 @@ void unwind_remove_table(void *handle, int init_only)
|
||||
info.init_only = init_only;
|
||||
|
||||
unlink_table(&info); /* XXX: SMP */
|
||||
kfree(table->header);
|
||||
kfree(table);
|
||||
}
|
||||
|
||||
@ -507,7 +523,8 @@ static const u32 *cie_for_fde(const u32 *fde, const struct unwind_table *table)
|
||||
|
||||
if (*cie <= sizeof(*cie) + 4 || *cie >= fde[1] - sizeof(*fde)
|
||||
|| (*cie & (sizeof(*cie) - 1))
|
||||
|| (cie[1] != 0xffffffff))
|
||||
|| (cie[1] != 0xffffffff)
|
||||
|| ( *(u8 *)(cie + 2) != 1)) /* version 1 supported */
|
||||
return NULL; /* this is not a (valid) CIE */
|
||||
return cie;
|
||||
}
|
||||
|
@ -51,7 +51,9 @@ void __init early_init_dt_add_memory_arch(u64 base, u64 size)
|
||||
int in_use = 0;
|
||||
|
||||
if (!low_mem_sz) {
|
||||
BUG_ON(base != low_mem_start);
|
||||
if (base != low_mem_start)
|
||||
panic("CONFIG_LINUX_LINK_BASE != DT memory { }");
|
||||
|
||||
low_mem_sz = size;
|
||||
in_use = 1;
|
||||
} else {
|
||||
|
@ -74,7 +74,7 @@
|
||||
reg = <0x48240200 0x100>;
|
||||
interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>;
|
||||
interrupt-parent = <&gic>;
|
||||
clocks = <&dpll_mpu_m2_ck>;
|
||||
clocks = <&mpu_periphclk>;
|
||||
};
|
||||
|
||||
local_timer: timer@48240600 {
|
||||
@ -82,7 +82,7 @@
|
||||
reg = <0x48240600 0x100>;
|
||||
interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH>;
|
||||
interrupt-parent = <&gic>;
|
||||
clocks = <&dpll_mpu_m2_ck>;
|
||||
clocks = <&mpu_periphclk>;
|
||||
};
|
||||
|
||||
l2-cache-controller@48242000 {
|
||||
|
@ -259,6 +259,14 @@
|
||||
ti,invert-autoidle-bit;
|
||||
};
|
||||
|
||||
mpu_periphclk: mpu_periphclk {
|
||||
#clock-cells = <0>;
|
||||
compatible = "fixed-factor-clock";
|
||||
clocks = <&dpll_mpu_m2_ck>;
|
||||
clock-mult = <1>;
|
||||
clock-div = <2>;
|
||||
};
|
||||
|
||||
dpll_ddr_ck: dpll_ddr_ck {
|
||||
#clock-cells = <0>;
|
||||
compatible = "ti,am3-dpll-clock";
|
||||
|
@ -184,6 +184,7 @@
|
||||
regulator-name = "VDD_SDHC_1V8";
|
||||
regulator-min-microvolt = <1800000>;
|
||||
regulator-max-microvolt = <1800000>;
|
||||
regulator-always-on;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
@ -118,7 +118,8 @@
|
||||
sdhci0: sdhci@ab0000 {
|
||||
compatible = "mrvl,pxav3-mmc";
|
||||
reg = <0xab0000 0x200>;
|
||||
clocks = <&chip_clk CLKID_SDIO1XIN>;
|
||||
clocks = <&chip_clk CLKID_SDIO1XIN>, <&chip_clk CLKID_SDIO>;
|
||||
clock-names = "io", "core";
|
||||
interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>;
|
||||
status = "disabled";
|
||||
};
|
||||
@ -126,7 +127,8 @@
|
||||
sdhci1: sdhci@ab0800 {
|
||||
compatible = "mrvl,pxav3-mmc";
|
||||
reg = <0xab0800 0x200>;
|
||||
clocks = <&chip_clk CLKID_SDIO1XIN>;
|
||||
clocks = <&chip_clk CLKID_SDIO1XIN>, <&chip_clk CLKID_SDIO>;
|
||||
clock-names = "io", "core";
|
||||
interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>;
|
||||
status = "disabled";
|
||||
};
|
||||
@ -135,7 +137,7 @@
|
||||
compatible = "mrvl,pxav3-mmc";
|
||||
reg = <0xab1000 0x200>;
|
||||
interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
|
||||
clocks = <&chip_clk CLKID_NFC_ECC>, <&chip_clk CLKID_NFC>;
|
||||
clocks = <&chip_clk CLKID_NFC_ECC>, <&chip_clk CLKID_SDIO>;
|
||||
clock-names = "io", "core";
|
||||
status = "disabled";
|
||||
};
|
||||
|
@ -218,6 +218,7 @@
|
||||
reg = <0x480c8000 0x2000>;
|
||||
interrupts = <77>;
|
||||
ti,hwmods = "mailbox";
|
||||
#mbox-cells = <1>;
|
||||
ti,mbox-num-users = <4>;
|
||||
ti,mbox-num-fifos = <12>;
|
||||
mbox_dsp: mbox_dsp {
|
||||
@ -279,8 +280,11 @@
|
||||
ti,spi-num-cs = <4>;
|
||||
ti,hwmods = "mcspi1";
|
||||
dmas = <&edma 16 &edma 17
|
||||
&edma 18 &edma 19>;
|
||||
dma-names = "tx0", "rx0", "tx1", "rx1";
|
||||
&edma 18 &edma 19
|
||||
&edma 20 &edma 21
|
||||
&edma 22 &edma 23>;
|
||||
dma-names = "tx0", "rx0", "tx1", "rx1",
|
||||
"tx2", "rx2", "tx3", "rx3";
|
||||
};
|
||||
|
||||
mmc1: mmc@48060000 {
|
||||
|
@ -122,6 +122,12 @@
|
||||
compatible = "auo,b133htn01";
|
||||
power-supply = <&tps65090_fet6>;
|
||||
backlight = <&backlight>;
|
||||
|
||||
port {
|
||||
panel_in: endpoint {
|
||||
remote-endpoint = <&dp_out>;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
mmc1_pwrseq: mmc1_pwrseq {
|
||||
@ -148,7 +154,14 @@
|
||||
samsung,link-rate = <0x0a>;
|
||||
samsung,lane-count = <2>;
|
||||
samsung,hpd-gpio = <&gpx2 6 GPIO_ACTIVE_HIGH>;
|
||||
panel = <&panel>;
|
||||
|
||||
ports {
|
||||
port {
|
||||
dp_out: endpoint {
|
||||
remote-endpoint = <&panel_in>;
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
&fimd {
|
||||
|
@ -18,8 +18,3 @@
|
||||
reg = <0x80000000 0x10000000>;
|
||||
};
|
||||
};
|
||||
|
||||
&L2 {
|
||||
arm,data-latency = <2 1 2>;
|
||||
arm,tag-latency = <3 2 3>;
|
||||
};
|
||||
|
@ -19,7 +19,7 @@
|
||||
reg = <0x40006000 0x1000>;
|
||||
cache-unified;
|
||||
cache-level = <2>;
|
||||
arm,data-latency = <1 1 1>;
|
||||
arm,data-latency = <3 3 3>;
|
||||
arm,tag-latency = <2 2 2>;
|
||||
};
|
||||
};
|
||||
|
@ -178,8 +178,10 @@
|
||||
compatible = "fsl,vf610-sai";
|
||||
reg = <0x40031000 0x1000>;
|
||||
interrupts = <86 IRQ_TYPE_LEVEL_HIGH>;
|
||||
clocks = <&clks VF610_CLK_SAI2>;
|
||||
clock-names = "sai";
|
||||
clocks = <&clks VF610_CLK_SAI2>,
|
||||
<&clks VF610_CLK_SAI2_DIV>,
|
||||
<&clks 0>, <&clks 0>;
|
||||
clock-names = "bus", "mclk1", "mclk2", "mclk3";
|
||||
dma-names = "tx", "rx";
|
||||
dmas = <&edma0 0 21>,
|
||||
<&edma0 0 20>;
|
||||
|
@ -21,6 +21,7 @@
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
#include <linux/io.h>
|
||||
#include <asm/barrier.h>
|
||||
|
||||
#define __ACCESS_CP15(CRn, Op1, CRm, Op2) p15, Op1, %0, CRn, CRm, Op2
|
||||
#define __ACCESS_CP15_64(Op1, CRm) p15, Op1, %Q0, %R0, CRm
|
||||
|
@ -510,10 +510,14 @@ __copy_to_user_std(void __user *to, const void *from, unsigned long n);
|
||||
static inline unsigned long __must_check
|
||||
__copy_to_user(void __user *to, const void *from, unsigned long n)
|
||||
{
|
||||
#ifndef CONFIG_UACCESS_WITH_MEMCPY
|
||||
unsigned int __ua_flags = uaccess_save_and_enable();
|
||||
n = arm_copy_to_user(to, from, n);
|
||||
uaccess_restore(__ua_flags);
|
||||
return n;
|
||||
#else
|
||||
return arm_copy_to_user(to, from, n);
|
||||
#endif
|
||||
}
|
||||
|
||||
extern unsigned long __must_check
|
||||
|
@ -95,6 +95,22 @@ void __show_regs(struct pt_regs *regs)
|
||||
{
|
||||
unsigned long flags;
|
||||
char buf[64];
|
||||
#ifndef CONFIG_CPU_V7M
|
||||
unsigned int domain;
|
||||
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
|
||||
/*
|
||||
* Get the domain register for the parent context. In user
|
||||
* mode, we don't save the DACR, so lets use what it should
|
||||
* be. For other modes, we place it after the pt_regs struct.
|
||||
*/
|
||||
if (user_mode(regs))
|
||||
domain = DACR_UACCESS_ENABLE;
|
||||
else
|
||||
domain = *(unsigned int *)(regs + 1);
|
||||
#else
|
||||
domain = get_domain();
|
||||
#endif
|
||||
#endif
|
||||
|
||||
show_regs_print_info(KERN_DEFAULT);
|
||||
|
||||
@ -123,21 +139,8 @@ void __show_regs(struct pt_regs *regs)
|
||||
|
||||
#ifndef CONFIG_CPU_V7M
|
||||
{
|
||||
unsigned int domain = get_domain();
|
||||
const char *segment;
|
||||
|
||||
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
|
||||
/*
|
||||
* Get the domain register for the parent context. In user
|
||||
* mode, we don't save the DACR, so lets use what it should
|
||||
* be. For other modes, we place it after the pt_regs struct.
|
||||
*/
|
||||
if (user_mode(regs))
|
||||
domain = DACR_UACCESS_ENABLE;
|
||||
else
|
||||
domain = *(unsigned int *)(regs + 1);
|
||||
#endif
|
||||
|
||||
if ((domain & domain_mask(DOMAIN_USER)) ==
|
||||
domain_val(DOMAIN_USER, DOMAIN_NOACCESS))
|
||||
segment = "none";
|
||||
@ -163,11 +166,11 @@ void __show_regs(struct pt_regs *regs)
|
||||
buf[0] = '\0';
|
||||
#ifdef CONFIG_CPU_CP15_MMU
|
||||
{
|
||||
unsigned int transbase, dac = get_domain();
|
||||
unsigned int transbase;
|
||||
asm("mrc p15, 0, %0, c2, c0\n\t"
|
||||
: "=r" (transbase));
|
||||
snprintf(buf, sizeof(buf), " Table: %08x DAC: %08x",
|
||||
transbase, dac);
|
||||
transbase, domain);
|
||||
}
|
||||
#endif
|
||||
asm("mrc p15, 0, %0, c1, c0\n" : "=r" (ctrl));
|
||||
|
@ -36,10 +36,10 @@
|
||||
*/
|
||||
#define __user_swpX_asm(data, addr, res, temp, B) \
|
||||
__asm__ __volatile__( \
|
||||
" mov %2, %1\n" \
|
||||
"0: ldrex"B" %1, [%3]\n" \
|
||||
"1: strex"B" %0, %2, [%3]\n" \
|
||||
"0: ldrex"B" %2, [%3]\n" \
|
||||
"1: strex"B" %0, %1, [%3]\n" \
|
||||
" cmp %0, #0\n" \
|
||||
" moveq %1, %2\n" \
|
||||
" movne %0, %4\n" \
|
||||
"2:\n" \
|
||||
" .section .text.fixup,\"ax\"\n" \
|
||||
|
@ -88,6 +88,7 @@ pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
|
||||
static unsigned long noinline
|
||||
__copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
|
||||
{
|
||||
unsigned long ua_flags;
|
||||
int atomic;
|
||||
|
||||
if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
|
||||
@ -118,7 +119,9 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
|
||||
if (tocopy > n)
|
||||
tocopy = n;
|
||||
|
||||
ua_flags = uaccess_save_and_enable();
|
||||
memcpy((void *)to, from, tocopy);
|
||||
uaccess_restore(ua_flags);
|
||||
to += tocopy;
|
||||
from += tocopy;
|
||||
n -= tocopy;
|
||||
@ -145,14 +148,21 @@ arm_copy_to_user(void __user *to, const void *from, unsigned long n)
|
||||
* With frame pointer disabled, tail call optimization kicks in
|
||||
* as well making this test almost invisible.
|
||||
*/
|
||||
if (n < 64)
|
||||
return __copy_to_user_std(to, from, n);
|
||||
return __copy_to_user_memcpy(to, from, n);
|
||||
if (n < 64) {
|
||||
unsigned long ua_flags = uaccess_save_and_enable();
|
||||
n = __copy_to_user_std(to, from, n);
|
||||
uaccess_restore(ua_flags);
|
||||
} else {
|
||||
n = __copy_to_user_memcpy(to, from, n);
|
||||
}
|
||||
return n;
|
||||
}
|
||||
|
||||
static unsigned long noinline
|
||||
__clear_user_memset(void __user *addr, unsigned long n)
|
||||
{
|
||||
unsigned long ua_flags;
|
||||
|
||||
if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
|
||||
memset((void *)addr, 0, n);
|
||||
return 0;
|
||||
@ -175,7 +185,9 @@ __clear_user_memset(void __user *addr, unsigned long n)
|
||||
if (tocopy > n)
|
||||
tocopy = n;
|
||||
|
||||
ua_flags = uaccess_save_and_enable();
|
||||
memset((void *)addr, 0, tocopy);
|
||||
uaccess_restore(ua_flags);
|
||||
addr += tocopy;
|
||||
n -= tocopy;
|
||||
|
||||
@ -193,9 +205,14 @@ out:
|
||||
unsigned long arm_clear_user(void __user *addr, unsigned long n)
|
||||
{
|
||||
/* See rational for this in __copy_to_user() above. */
|
||||
if (n < 64)
|
||||
return __clear_user_std(addr, n);
|
||||
return __clear_user_memset(addr, n);
|
||||
if (n < 64) {
|
||||
unsigned long ua_flags = uaccess_save_and_enable();
|
||||
n = __clear_user_std(addr, n);
|
||||
uaccess_restore(ua_flags);
|
||||
} else {
|
||||
n = __clear_user_memset(addr, n);
|
||||
}
|
||||
return n;
|
||||
}
|
||||
|
||||
#if 0
|
||||
|
@ -4,7 +4,6 @@ menuconfig ARCH_AT91
|
||||
select ARCH_REQUIRE_GPIOLIB
|
||||
select COMMON_CLK_AT91
|
||||
select PINCTRL
|
||||
select PINCTRL_AT91
|
||||
select SOC_BUS
|
||||
|
||||
if ARCH_AT91
|
||||
@ -17,6 +16,7 @@ config SOC_SAMA5D2
|
||||
select HAVE_AT91_USB_CLK
|
||||
select HAVE_AT91_H32MX
|
||||
select HAVE_AT91_GENERATED_CLK
|
||||
select PINCTRL_AT91PIO4
|
||||
help
|
||||
Select this if ou are using one of Atmel's SAMA5D2 family SoC.
|
||||
|
||||
@ -27,6 +27,7 @@ config SOC_SAMA5D3
|
||||
select HAVE_AT91_UTMI
|
||||
select HAVE_AT91_SMD
|
||||
select HAVE_AT91_USB_CLK
|
||||
select PINCTRL_AT91
|
||||
help
|
||||
Select this if you are using one of Atmel's SAMA5D3 family SoC.
|
||||
This support covers SAMA5D31, SAMA5D33, SAMA5D34, SAMA5D35, SAMA5D36.
|
||||
@ -40,6 +41,7 @@ config SOC_SAMA5D4
|
||||
select HAVE_AT91_SMD
|
||||
select HAVE_AT91_USB_CLK
|
||||
select HAVE_AT91_H32MX
|
||||
select PINCTRL_AT91
|
||||
help
|
||||
Select this if you are using one of Atmel's SAMA5D4 family SoC.
|
||||
|
||||
@ -50,6 +52,7 @@ config SOC_AT91RM9200
|
||||
select CPU_ARM920T
|
||||
select HAVE_AT91_USB_CLK
|
||||
select MIGHT_HAVE_PCI
|
||||
select PINCTRL_AT91
|
||||
select SOC_SAM_V4_V5
|
||||
select SRAM if PM
|
||||
help
|
||||
@ -65,6 +68,7 @@ config SOC_AT91SAM9
|
||||
select HAVE_AT91_UTMI
|
||||
select HAVE_FB_ATMEL
|
||||
select MEMORY
|
||||
select PINCTRL_AT91
|
||||
select SOC_SAM_V4_V5
|
||||
select SRAM if PM
|
||||
help
|
||||
|
@ -41,8 +41,10 @@
|
||||
* implementation should be moved down into the pinctrl driver and get
|
||||
* called as part of the generic suspend/resume path.
|
||||
*/
|
||||
#ifdef CONFIG_PINCTRL_AT91
|
||||
extern void at91_pinctrl_gpio_suspend(void);
|
||||
extern void at91_pinctrl_gpio_resume(void);
|
||||
#endif
|
||||
|
||||
static struct {
|
||||
unsigned long uhp_udp_mask;
|
||||
@ -151,8 +153,9 @@ static void at91_pm_suspend(suspend_state_t state)
|
||||
|
||||
static int at91_pm_enter(suspend_state_t state)
|
||||
{
|
||||
#ifdef CONFIG_PINCTRL_AT91
|
||||
at91_pinctrl_gpio_suspend();
|
||||
|
||||
#endif
|
||||
switch (state) {
|
||||
/*
|
||||
* Suspend-to-RAM is like STANDBY plus slow clock mode, so
|
||||
@ -192,7 +195,9 @@ static int at91_pm_enter(suspend_state_t state)
|
||||
error:
|
||||
target_state = PM_SUSPEND_ON;
|
||||
|
||||
#ifdef CONFIG_PINCTRL_AT91
|
||||
at91_pinctrl_gpio_resume();
|
||||
#endif
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -748,8 +748,12 @@ static void exynos5_powerdown_conf(enum sys_powerdown mode)
|
||||
void exynos_sys_powerdown_conf(enum sys_powerdown mode)
|
||||
{
|
||||
unsigned int i;
|
||||
const struct exynos_pmu_data *pmu_data;
|
||||
|
||||
const struct exynos_pmu_data *pmu_data = pmu_context->pmu_data;
|
||||
if (!pmu_context)
|
||||
return;
|
||||
|
||||
pmu_data = pmu_context->pmu_data;
|
||||
|
||||
if (pmu_data->powerdown_conf)
|
||||
pmu_data->powerdown_conf(mode);
|
||||
|
@ -143,7 +143,7 @@ static inline void __indirect_writesl(volatile void __iomem *bus_addr,
|
||||
writel(*vaddr++, bus_addr);
|
||||
}
|
||||
|
||||
static inline unsigned char __indirect_readb(const volatile void __iomem *p)
|
||||
static inline u8 __indirect_readb(const volatile void __iomem *p)
|
||||
{
|
||||
u32 addr = (u32)p;
|
||||
u32 n, byte_enables, data;
|
||||
@ -166,7 +166,7 @@ static inline void __indirect_readsb(const volatile void __iomem *bus_addr,
|
||||
*vaddr++ = readb(bus_addr);
|
||||
}
|
||||
|
||||
static inline unsigned short __indirect_readw(const volatile void __iomem *p)
|
||||
static inline u16 __indirect_readw(const volatile void __iomem *p)
|
||||
{
|
||||
u32 addr = (u32)p;
|
||||
u32 n, byte_enables, data;
|
||||
@ -189,7 +189,7 @@ static inline void __indirect_readsw(const volatile void __iomem *bus_addr,
|
||||
*vaddr++ = readw(bus_addr);
|
||||
}
|
||||
|
||||
static inline unsigned long __indirect_readl(const volatile void __iomem *p)
|
||||
static inline u32 __indirect_readl(const volatile void __iomem *p)
|
||||
{
|
||||
u32 addr = (__force u32)p;
|
||||
u32 data;
|
||||
@ -350,7 +350,7 @@ static inline void insl(u32 io_addr, void *p, u32 count)
|
||||
((unsigned long)p <= (PIO_MASK + PIO_OFFSET)))
|
||||
|
||||
#define ioread8(p) ioread8(p)
|
||||
static inline unsigned int ioread8(const void __iomem *addr)
|
||||
static inline u8 ioread8(const void __iomem *addr)
|
||||
{
|
||||
unsigned long port = (unsigned long __force)addr;
|
||||
if (__is_io_address(port))
|
||||
@ -378,7 +378,7 @@ static inline void ioread8_rep(const void __iomem *addr, void *vaddr, u32 count)
|
||||
}
|
||||
|
||||
#define ioread16(p) ioread16(p)
|
||||
static inline unsigned int ioread16(const void __iomem *addr)
|
||||
static inline u16 ioread16(const void __iomem *addr)
|
||||
{
|
||||
unsigned long port = (unsigned long __force)addr;
|
||||
if (__is_io_address(port))
|
||||
@ -407,7 +407,7 @@ static inline void ioread16_rep(const void __iomem *addr, void *vaddr,
|
||||
}
|
||||
|
||||
#define ioread32(p) ioread32(p)
|
||||
static inline unsigned int ioread32(const void __iomem *addr)
|
||||
static inline u32 ioread32(const void __iomem *addr)
|
||||
{
|
||||
unsigned long port = (unsigned long __force)addr;
|
||||
if (__is_io_address(port))
|
||||
|
@ -121,6 +121,7 @@ config ARCH_OMAP2PLUS_TYPICAL
|
||||
select NEON if CPU_V7
|
||||
select PM
|
||||
select REGULATOR
|
||||
select REGULATOR_FIXED_VOLTAGE
|
||||
select TWL4030_CORE if ARCH_OMAP3 || ARCH_OMAP4
|
||||
select TWL4030_POWER if ARCH_OMAP3 || ARCH_OMAP4
|
||||
select VFP
|
||||
@ -201,7 +202,6 @@ config MACH_OMAP3_PANDORA
|
||||
depends on ARCH_OMAP3
|
||||
default y
|
||||
select OMAP_PACKAGE_CBB
|
||||
select REGULATOR_FIXED_VOLTAGE if REGULATOR
|
||||
|
||||
config MACH_NOKIA_N810
|
||||
bool
|
||||
|
@ -889,6 +889,7 @@ static void __init e680_init(void)
|
||||
|
||||
pxa_set_keypad_info(&e680_keypad_platform_data);
|
||||
|
||||
pwm_add_table(ezx_pwm_lookup, ARRAY_SIZE(ezx_pwm_lookup));
|
||||
platform_add_devices(ARRAY_AND_SIZE(ezx_devices));
|
||||
platform_add_devices(ARRAY_AND_SIZE(e680_devices));
|
||||
}
|
||||
@ -956,6 +957,7 @@ static void __init a1200_init(void)
|
||||
|
||||
pxa_set_keypad_info(&a1200_keypad_platform_data);
|
||||
|
||||
pwm_add_table(ezx_pwm_lookup, ARRAY_SIZE(ezx_pwm_lookup));
|
||||
platform_add_devices(ARRAY_AND_SIZE(ezx_devices));
|
||||
platform_add_devices(ARRAY_AND_SIZE(a1200_devices));
|
||||
}
|
||||
@ -1148,6 +1150,7 @@ static void __init a910_init(void)
|
||||
platform_device_register(&a910_camera);
|
||||
}
|
||||
|
||||
pwm_add_table(ezx_pwm_lookup, ARRAY_SIZE(ezx_pwm_lookup));
|
||||
platform_add_devices(ARRAY_AND_SIZE(ezx_devices));
|
||||
platform_add_devices(ARRAY_AND_SIZE(a910_devices));
|
||||
}
|
||||
@ -1215,6 +1218,7 @@ static void __init e6_init(void)
|
||||
|
||||
pxa_set_keypad_info(&e6_keypad_platform_data);
|
||||
|
||||
pwm_add_table(ezx_pwm_lookup, ARRAY_SIZE(ezx_pwm_lookup));
|
||||
platform_add_devices(ARRAY_AND_SIZE(ezx_devices));
|
||||
platform_add_devices(ARRAY_AND_SIZE(e6_devices));
|
||||
}
|
||||
@ -1256,6 +1260,7 @@ static void __init e2_init(void)
|
||||
|
||||
pxa_set_keypad_info(&e2_keypad_platform_data);
|
||||
|
||||
pwm_add_table(ezx_pwm_lookup, ARRAY_SIZE(ezx_pwm_lookup));
|
||||
platform_add_devices(ARRAY_AND_SIZE(ezx_devices));
|
||||
platform_add_devices(ARRAY_AND_SIZE(e2_devices));
|
||||
}
|
||||
|
@ -20,7 +20,7 @@
|
||||
#include <plat/cpu.h>
|
||||
#include <plat/cpu-freq-core.h>
|
||||
|
||||
static struct cpufreq_frequency_table s3c2440_plls_12[] __initdata = {
|
||||
static struct cpufreq_frequency_table s3c2440_plls_12[] = {
|
||||
{ .frequency = 75000000, .driver_data = PLLVAL(0x75, 3, 3), }, /* FVco 600.000000 */
|
||||
{ .frequency = 80000000, .driver_data = PLLVAL(0x98, 4, 3), }, /* FVco 640.000000 */
|
||||
{ .frequency = 90000000, .driver_data = PLLVAL(0x70, 2, 3), }, /* FVco 720.000000 */
|
||||
|
@ -20,7 +20,7 @@
|
||||
#include <plat/cpu.h>
|
||||
#include <plat/cpu-freq-core.h>
|
||||
|
||||
static struct cpufreq_frequency_table s3c2440_plls_169344[] __initdata = {
|
||||
static struct cpufreq_frequency_table s3c2440_plls_169344[] = {
|
||||
{ .frequency = 78019200, .driver_data = PLLVAL(121, 5, 3), }, /* FVco 624.153600 */
|
||||
{ .frequency = 84067200, .driver_data = PLLVAL(131, 5, 3), }, /* FVco 672.537600 */
|
||||
{ .frequency = 90115200, .driver_data = PLLVAL(141, 5, 3), }, /* FVco 720.921600 */
|
||||
|
@ -165,13 +165,28 @@ static void flush_context(unsigned int cpu)
|
||||
__flush_icache_all();
|
||||
}
|
||||
|
||||
static int is_reserved_asid(u64 asid)
|
||||
static bool check_update_reserved_asid(u64 asid, u64 newasid)
|
||||
{
|
||||
int cpu;
|
||||
for_each_possible_cpu(cpu)
|
||||
if (per_cpu(reserved_asids, cpu) == asid)
|
||||
return 1;
|
||||
return 0;
|
||||
bool hit = false;
|
||||
|
||||
/*
|
||||
* Iterate over the set of reserved ASIDs looking for a match.
|
||||
* If we find one, then we can update our mm to use newasid
|
||||
* (i.e. the same ASID in the current generation) but we can't
|
||||
* exit the loop early, since we need to ensure that all copies
|
||||
* of the old ASID are updated to reflect the mm. Failure to do
|
||||
* so could result in us missing the reserved ASID in a future
|
||||
* generation.
|
||||
*/
|
||||
for_each_possible_cpu(cpu) {
|
||||
if (per_cpu(reserved_asids, cpu) == asid) {
|
||||
hit = true;
|
||||
per_cpu(reserved_asids, cpu) = newasid;
|
||||
}
|
||||
}
|
||||
|
||||
return hit;
|
||||
}
|
||||
|
||||
static u64 new_context(struct mm_struct *mm, unsigned int cpu)
|
||||
@ -181,12 +196,14 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
|
||||
u64 generation = atomic64_read(&asid_generation);
|
||||
|
||||
if (asid != 0) {
|
||||
u64 newasid = generation | (asid & ~ASID_MASK);
|
||||
|
||||
/*
|
||||
* If our current ASID was active during a rollover, we
|
||||
* can continue to use it and this was just a false alarm.
|
||||
*/
|
||||
if (is_reserved_asid(asid))
|
||||
return generation | (asid & ~ASID_MASK);
|
||||
if (check_update_reserved_asid(asid, newasid))
|
||||
return newasid;
|
||||
|
||||
/*
|
||||
* We had a valid ASID in a previous life, so try to re-use
|
||||
@ -194,7 +211,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
|
||||
*/
|
||||
asid &= ~ASID_MASK;
|
||||
if (!__test_and_set_bit(asid, asid_map))
|
||||
goto bump_gen;
|
||||
return newasid;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -216,11 +233,8 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
|
||||
|
||||
__set_bit(asid, asid_map);
|
||||
cur_idx = asid;
|
||||
|
||||
bump_gen:
|
||||
asid |= generation;
|
||||
cpumask_clear(mm_cpumask(mm));
|
||||
return asid;
|
||||
return asid | generation;
|
||||
}
|
||||
|
||||
void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
|
||||
|
@ -1521,7 +1521,7 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
|
||||
return -ENOMEM;
|
||||
|
||||
for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
|
||||
phys_addr_t phys = sg_phys(s) & PAGE_MASK;
|
||||
phys_addr_t phys = page_to_phys(sg_page(s));
|
||||
unsigned int len = PAGE_ALIGN(s->offset + s->length);
|
||||
|
||||
if (!is_coherent &&
|
||||
|
@ -22,6 +22,7 @@
#include <linux/memblock.h>
#include <linux/dma-contiguous.h>
#include <linux/sizes.h>
#include <linux/stop_machine.h>

#include <asm/cp15.h>
#include <asm/mach-types.h>

@ -627,12 +628,10 @@ static struct section_perm ro_perms[] = {
* safe to be called with preemption disabled, as under stop_machine().
*/
static inline void section_update(unsigned long addr, pmdval_t mask,
pmdval_t prot)
pmdval_t prot, struct mm_struct *mm)
{
struct mm_struct *mm;
pmd_t *pmd;

mm = current->active_mm;
pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr);

#ifdef CONFIG_ARM_LPAE
@ -656,49 +655,82 @@ static inline bool arch_has_strict_perms(void)
return !!(get_cr() & CR_XP);
}

#define set_section_perms(perms, field) { \
size_t i; \
unsigned long addr; \
\
if (!arch_has_strict_perms()) \
return; \
\
for (i = 0; i < ARRAY_SIZE(perms); i++) { \
if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) || \
!IS_ALIGNED(perms[i].end, SECTION_SIZE)) { \
pr_err("BUG: section %lx-%lx not aligned to %lx\n", \
perms[i].start, perms[i].end, \
SECTION_SIZE); \
continue; \
} \
\
for (addr = perms[i].start; \
addr < perms[i].end; \
addr += SECTION_SIZE) \
section_update(addr, perms[i].mask, \
perms[i].field); \
} \
void set_section_perms(struct section_perm *perms, int n, bool set,
struct mm_struct *mm)
{
size_t i;
unsigned long addr;

if (!arch_has_strict_perms())
return;

for (i = 0; i < n; i++) {
if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) ||
!IS_ALIGNED(perms[i].end, SECTION_SIZE)) {
pr_err("BUG: section %lx-%lx not aligned to %lx\n",
perms[i].start, perms[i].end,
SECTION_SIZE);
continue;
}

for (addr = perms[i].start;
addr < perms[i].end;
addr += SECTION_SIZE)
section_update(addr, perms[i].mask,
set ? perms[i].prot : perms[i].clear, mm);
}

}

static inline void fix_kernmem_perms(void)
static void update_sections_early(struct section_perm perms[], int n)
{
set_section_perms(nx_perms, prot);
struct task_struct *t, *s;

read_lock(&tasklist_lock);
for_each_process(t) {
if (t->flags & PF_KTHREAD)
continue;
for_each_thread(t, s)
set_section_perms(perms, n, true, s->mm);
}
read_unlock(&tasklist_lock);
set_section_perms(perms, n, true, current->active_mm);
set_section_perms(perms, n, true, &init_mm);
}

int __fix_kernmem_perms(void *unused)
{
update_sections_early(nx_perms, ARRAY_SIZE(nx_perms));
return 0;
}

void fix_kernmem_perms(void)
{
stop_machine(__fix_kernmem_perms, NULL, NULL);
}

#ifdef CONFIG_DEBUG_RODATA
int __mark_rodata_ro(void *unused)
{
update_sections_early(ro_perms, ARRAY_SIZE(ro_perms));
return 0;
}

void mark_rodata_ro(void)
{
set_section_perms(ro_perms, prot);
stop_machine(__mark_rodata_ro, NULL, NULL);
}

void set_kernel_text_rw(void)
{
set_section_perms(ro_perms, clear);
set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), false,
current->active_mm);
}

void set_kernel_text_ro(void)
{
set_section_perms(ro_perms, prot);
set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), true,
current->active_mm);
}
#endif /* CONFIG_DEBUG_RODATA */
@ -95,7 +95,7 @@ ENDPROC(cpu_v7_dcache_clean_area)
.equ cpu_v7_suspend_size, 4 * 9
#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_v7_do_suspend)
stmfd sp!, {r4 - r10, lr}
stmfd sp!, {r4 - r11, lr}
mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID
mrc p15, 0, r5, c13, c0, 3 @ User r/o thread ID
stmia r0!, {r4 - r5}

@ -112,7 +112,7 @@ ENTRY(cpu_v7_do_suspend)
mrc p15, 0, r9, c1, c0, 1 @ Auxiliary control register
mrc p15, 0, r10, c1, c0, 2 @ Co-processor access control
stmia r0, {r5 - r11}
ldmfd sp!, {r4 - r10, pc}
ldmfd sp!, {r4 - r11, pc}
ENDPROC(cpu_v7_do_suspend)

ENTRY(cpu_v7_do_resume)
@ -269,6 +269,7 @@
clock-frequency = <0>; /* Updated by bootloader */
voltage-ranges = <1800 1800 3300 3300>;
sdhci,auto-cmd12;
little-endian;
bus-width = <4>;
};

@ -277,6 +278,7 @@
reg = <0x0 0x2300000 0x0 0x10000>;
interrupts = <0 36 0x4>; /* Level high type */
gpio-controller;
little-endian;
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;

@ -287,6 +289,7 @@
reg = <0x0 0x2310000 0x0 0x10000>;
interrupts = <0 36 0x4>; /* Level high type */
gpio-controller;
little-endian;
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;

@ -297,6 +300,7 @@
reg = <0x0 0x2320000 0x0 0x10000>;
interrupts = <0 37 0x4>; /* Level high type */
gpio-controller;
little-endian;
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;

@ -307,6 +311,7 @@
reg = <0x0 0x2330000 0x0 0x10000>;
interrupts = <0 37 0x4>; /* Level high type */
gpio-controller;
little-endian;
#gpio-cells = <2>;
interrupt-controller;
#interrupt-cells = <2>;
@ -77,6 +77,7 @@
#ifndef __ASSEMBLY__

#include <linux/stringify.h>
#include <asm/barrier.h>

/*
* Low-level accessors
@ -276,10 +276,14 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
* hardware updates of the pte (ptep_set_access_flags safely changes
* valid ptes without going through an invalid entry).
*/
if (IS_ENABLED(CONFIG_DEBUG_VM) && IS_ENABLED(CONFIG_ARM64_HW_AFDBM) &&
pte_valid(*ptep)) {
BUG_ON(!pte_young(pte));
BUG_ON(pte_write(*ptep) && !pte_dirty(pte));
if (IS_ENABLED(CONFIG_ARM64_HW_AFDBM) &&
pte_valid(*ptep) && pte_valid(pte)) {
VM_WARN_ONCE(!pte_young(pte),
"%s: racy access flag clearing: 0x%016llx -> 0x%016llx",
__func__, pte_val(*ptep), pte_val(pte));
VM_WARN_ONCE(pte_write(*ptep) && !pte_dirty(pte),
"%s: racy dirty state clearing: 0x%016llx -> 0x%016llx",
__func__, pte_val(*ptep), pte_val(pte));
}

set_pte(ptep, pte);
@ -5,6 +5,7 @@
*/

#include <asm-generic/vmlinux.lds.h>
#include <asm/cache.h>
#include <asm/kernel-pgtable.h>
#include <asm/thread_info.h>
#include <asm/memory.h>

@ -140,7 +141,7 @@ SECTIONS
ARM_EXIT_KEEP(EXIT_DATA)
}

PERCPU_SECTION(64)
PERCPU_SECTION(L1_CACHE_BYTES)

. = ALIGN(PAGE_SIZE);
__init_end = .;

@ -158,7 +159,7 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
_data = .;
_sdata = .;
RW_DATA_SECTION(64, PAGE_SIZE, THREAD_SIZE)
RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
PECOFF_EDATA_PADDING
_edata = .;
@ -14,7 +14,7 @@
* Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
* Copyright (C) 2009 Jaswinder Singh Rajput
* Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
* Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
* Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
* Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
*
* ppc:
@ -11,7 +11,7 @@

#define NR_syscalls 322 /* length of syscall table */
#define NR_syscalls 323 /* length of syscall table */

/*
* The following defines stop scripts/checksyscalls.sh from complaining about
@ -335,5 +335,6 @@
#define __NR_userfaultfd 1343
#define __NR_membarrier 1344
#define __NR_kcmp 1345
#define __NR_mlock2 1346

#endif /* _UAPI_ASM_IA64_UNISTD_H */
@ -1771,5 +1771,6 @@ sys_call_table:
data8 sys_userfaultfd
data8 sys_membarrier
data8 sys_kcmp // 1345
data8 sys_mlock2

.org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls
@ -61,7 +61,8 @@ static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
/* FIXME this part of code is untested */
for_each_sg(sgl, sg, nents, i) {
sg->dma_address = sg_phys(sg);
__dma_sync(sg_phys(sg), sg->length, direction);
__dma_sync(page_to_phys(sg_page(sg)) + sg->offset,
sg->length, direction);
}

return nents;
@ -145,7 +145,7 @@ static void *mips_dma_alloc_coherent(struct device *dev, size_t size,

gfp = massage_gfp_flags(dev, gfp);

if (IS_ENABLED(CONFIG_DMA_CMA) && !(gfp & GFP_ATOMIC))
if (IS_ENABLED(CONFIG_DMA_CMA) && gfpflags_allow_blocking(gfp))
page = dma_alloc_from_contiguous(dev,
count, get_order(size));
if (!page)
@ -372,7 +372,8 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
*/
#ifdef CONFIG_HUGETLB_PAGE
#define pte_huge(pte) (pte_val(pte) & _PAGE_HUGE)
#define pte_mkhuge(pte) (__pte(pte_val(pte) | _PAGE_HUGE))
#define pte_mkhuge(pte) (__pte(pte_val(pte) | \
(parisc_requires_coherency() ? 0 : _PAGE_HUGE)))
#else
#define pte_huge(pte) (0)
#define pte_mkhuge(pte) (pte)
@ -360,8 +360,9 @@
#define __NR_execveat (__NR_Linux + 342)
#define __NR_membarrier (__NR_Linux + 343)
#define __NR_userfaultfd (__NR_Linux + 344)
#define __NR_mlock2 (__NR_Linux + 345)

#define __NR_Linux_syscalls (__NR_userfaultfd + 1)
#define __NR_Linux_syscalls (__NR_mlock2 + 1)

#define __IGNORE_select /* newselect */
@ -171,24 +171,6 @@ void pcibios_set_master(struct pci_dev *dev)
}

void __init pcibios_init_bus(struct pci_bus *bus)
{
struct pci_dev *dev = bus->self;
unsigned short bridge_ctl;

/* We deal only with pci controllers and pci-pci bridges. */
if (!dev || (dev->class >> 8) != PCI_CLASS_BRIDGE_PCI)
return;

/* PCI-PCI bridge - set the cache line and default latency
(32) for primary and secondary buses. */
pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER, 32);

pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bridge_ctl);
bridge_ctl |= PCI_BRIDGE_CTL_PARITY | PCI_BRIDGE_CTL_SERR;
pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bridge_ctl);
}

/*
* pcibios align resources() is called every time generic PCI code
* wants to generate a new address. The process of looking for
@ -440,6 +440,7 @@
ENTRY_COMP(execveat)
ENTRY_SAME(membarrier)
ENTRY_SAME(userfaultfd)
ENTRY_SAME(mlock2) /* 345 */

.ifne (. - 90b) - (__NR_Linux_syscalls * (91b - 90b))
@ -227,23 +227,15 @@
reg = <0x520 0x20>;

phy0: ethernet-phy@1f {
interrupt-parent = <&mpic>;
interrupts = <10 1>;
reg = <0x1f>;
};
phy1: ethernet-phy@0 {
interrupt-parent = <&mpic>;
interrupts = <10 1>;
reg = <0>;
};
phy2: ethernet-phy@1 {
interrupt-parent = <&mpic>;
interrupts = <10 1>;
reg = <1>;
};
phy3: ethernet-phy@2 {
interrupt-parent = <&mpic>;
interrupts = <10 1>;
reg = <2>;
};
tbi0: tbi-phy@11 {
@ -370,16 +370,16 @@ COMPAT_SYS(execveat)
PPC64ONLY(switch_endian)
SYSCALL_SPU(userfaultfd)
SYSCALL_SPU(membarrier)
SYSCALL(semop)
SYSCALL(semget)
COMPAT_SYS(semctl)
COMPAT_SYS(semtimedop)
COMPAT_SYS(msgsnd)
COMPAT_SYS(msgrcv)
SYSCALL(msgget)
COMPAT_SYS(msgctl)
COMPAT_SYS(shmat)
SYSCALL(shmdt)
SYSCALL(shmget)
COMPAT_SYS(shmctl)
SYSCALL(ni_syscall)
SYSCALL(ni_syscall)
SYSCALL(ni_syscall)
SYSCALL(ni_syscall)
SYSCALL(ni_syscall)
SYSCALL(ni_syscall)
SYSCALL(ni_syscall)
SYSCALL(ni_syscall)
SYSCALL(ni_syscall)
SYSCALL(ni_syscall)
SYSCALL(ni_syscall)
SYSCALL(ni_syscall)
SYSCALL(mlock2)
@ -388,18 +388,6 @@
#define __NR_switch_endian 363
#define __NR_userfaultfd 364
#define __NR_membarrier 365
#define __NR_semop 366
#define __NR_semget 367
#define __NR_semctl 368
#define __NR_semtimedop 369
#define __NR_msgsnd 370
#define __NR_msgrcv 371
#define __NR_msgget 372
#define __NR_msgctl 373
#define __NR_shmat 374
#define __NR_shmdt 375
#define __NR_shmget 376
#define __NR_shmctl 377
#define __NR_mlock2 378

#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
@ -590,16 +590,10 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus)
eeh_ops->configure_bridge(pe);
eeh_pe_restore_bars(pe);

/*
* If it's PHB PE, the frozen state on all available PEs should have
* been cleared by the PHB reset. Otherwise, we unfreeze the PE and its
* child PEs because they might be in frozen state.
*/
if (!(pe->type & EEH_PE_PHB)) {
rc = eeh_clear_pe_frozen_state(pe, false);
if (rc)
return rc;
}
/* Clear frozen state */
rc = eeh_clear_pe_frozen_state(pe, false);
if (rc)
return rc;

/* Give the system 5 seconds to finish running the user-space
* hotplug shutdown scripts, e.g. ifdown for ethernet. Yes,
@ -224,6 +224,12 @@ static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)

static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
{
/*
* Check for illegal transactional state bit combination
* and if we find it, force the TS field to a safe state.
*/
if ((msr & MSR_TS_MASK) == MSR_TS_MASK)
msr &= ~MSR_TS_MASK;
vcpu->arch.shregs.msr = msr;
kvmppc_end_cede(vcpu);
}
@ -43,11 +43,34 @@ static unsigned int opal_irq_count;
static unsigned int *opal_irqs;

static void opal_handle_irq_work(struct irq_work *work);
static __be64 last_outstanding_events;
static u64 last_outstanding_events;
static struct irq_work opal_event_irq_work = {
.func = opal_handle_irq_work,
};

void opal_handle_events(uint64_t events)
{
int virq, hwirq = 0;
u64 mask = opal_event_irqchip.mask;

if (!in_irq() && (events & mask)) {
last_outstanding_events = events;
irq_work_queue(&opal_event_irq_work);
return;
}

while (events & mask) {
hwirq = fls64(events) - 1;
if (BIT_ULL(hwirq) & mask) {
virq = irq_find_mapping(opal_event_irqchip.domain,
hwirq);
if (virq)
generic_handle_irq(virq);
}
events &= ~BIT_ULL(hwirq);
}
}

static void opal_event_mask(struct irq_data *d)
{
clear_bit(d->hwirq, &opal_event_irqchip.mask);

@ -55,9 +78,21 @@ static void opal_event_mask(struct irq_data *d)

static void opal_event_unmask(struct irq_data *d)
{
__be64 events;

set_bit(d->hwirq, &opal_event_irqchip.mask);

opal_poll_events(&last_outstanding_events);
opal_poll_events(&events);
last_outstanding_events = be64_to_cpu(events);

/*
* We can't just handle the events now with opal_handle_events().
* If we did we would deadlock when opal_event_unmask() is called from
* handle_level_irq() with the irq descriptor lock held, because
* calling opal_handle_events() would call generic_handle_irq() and
* then handle_level_irq() which would try to take the descriptor lock
* again. Instead queue the events for later.
*/
if (last_outstanding_events & opal_event_irqchip.mask)
/* Need to retrigger the interrupt */
irq_work_queue(&opal_event_irq_work);
@ -96,29 +131,6 @@ static int opal_event_map(struct irq_domain *d, unsigned int irq,
return 0;
}

void opal_handle_events(uint64_t events)
{
int virq, hwirq = 0;
u64 mask = opal_event_irqchip.mask;

if (!in_irq() && (events & mask)) {
last_outstanding_events = events;
irq_work_queue(&opal_event_irq_work);
return;
}

while (events & mask) {
hwirq = fls64(events) - 1;
if (BIT_ULL(hwirq) & mask) {
virq = irq_find_mapping(opal_event_irqchip.domain,
hwirq);
if (virq)
generic_handle_irq(virq);
}
events &= ~BIT_ULL(hwirq);
}
}

static irqreturn_t opal_interrupt(int irq, void *data)
{
__be64 events;

@ -131,7 +143,7 @@ static irqreturn_t opal_interrupt(int irq, void *data)

static void opal_handle_irq_work(struct irq_work *work)
{
opal_handle_events(be64_to_cpu(last_outstanding_events));
opal_handle_events(last_outstanding_events);
}

static int opal_event_match(struct irq_domain *h, struct device_node *node,
@ -278,7 +278,7 @@ static void opal_handle_message(void)

/* Sanity check */
if (type >= OPAL_MSG_TYPE_MAX) {
pr_warning("%s: Unknown message type: %u\n", __func__, type);
pr_warn_once("%s: Unknown message type: %u\n", __func__, type);
return;
}
opal_message_do_notify(type, (void *)&msg);
@ -1920,16 +1920,23 @@ static int print_insn(char *buffer, unsigned char *code, unsigned long addr)
}
if (separator)
ptr += sprintf(ptr, "%c", separator);
/*
* Use four '%' characters below because of the
* following two conversions:
*
* 1) sprintf: %%%%r -> %%r
* 2) printk : %%r -> %r
*/
if (operand->flags & OPERAND_GPR)
ptr += sprintf(ptr, "%%r%i", value);
ptr += sprintf(ptr, "%%%%r%i", value);
else if (operand->flags & OPERAND_FPR)
ptr += sprintf(ptr, "%%f%i", value);
ptr += sprintf(ptr, "%%%%f%i", value);
else if (operand->flags & OPERAND_AR)
ptr += sprintf(ptr, "%%a%i", value);
ptr += sprintf(ptr, "%%%%a%i", value);
else if (operand->flags & OPERAND_CR)
ptr += sprintf(ptr, "%%c%i", value);
ptr += sprintf(ptr, "%%%%c%i", value);
else if (operand->flags & OPERAND_VR)
ptr += sprintf(ptr, "%%v%i", value);
ptr += sprintf(ptr, "%%%%v%i", value);
else if (operand->flags & OPERAND_PCREL)
ptr += sprintf(ptr, "%lx", (signed int) value
+ addr);
@ -278,7 +278,7 @@
#define __NR_fsetxattr 256
#define __NR_getxattr 257
#define __NR_lgetxattr 258
#define __NR_fgetxattr 269
#define __NR_fgetxattr 259
#define __NR_listxattr 260
#define __NR_llistxattr 261
#define __NR_flistxattr 262
@ -10,7 +10,7 @@
* Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
* Copyright (C) 2009 Jaswinder Singh Rajput
* Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
* Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
* Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
* Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
*
* ppc:
@ -9,7 +9,7 @@
* Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
* Copyright (C) 2009 Jaswinder Singh Rajput
* Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
* Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
* Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
*/

#include <linux/perf_event.h>
@ -21,7 +21,7 @@
* Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
* Copyright (C) 2009 Jaswinder Singh Rajput
* Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
* Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
* Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
* Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
* Copyright (C) 2009 Google, Inc., Stephane Eranian
*/
@ -131,7 +131,7 @@ export LDS_ELF_FORMAT := $(ELF_FORMAT)
# The wrappers will select whether using "malloc" or the kernel allocator.
LINK_WRAPS = -Wl,--wrap,malloc -Wl,--wrap,free -Wl,--wrap,calloc

LD_FLAGS_CMDLINE = $(foreach opt,$(LDFLAGS),-Wl,$(opt)) -lrt
LD_FLAGS_CMDLINE = $(foreach opt,$(LDFLAGS),-Wl,$(opt))

# Used by link-vmlinux.sh which has special support for um link
export CFLAGS_vmlinux := $(LINK-y) $(LINK_WRAPS) $(LD_FLAGS_CMDLINE)
@ -249,21 +249,23 @@ void close_addr(unsigned char *addr, unsigned char *netmask, void *arg)

char *split_if_spec(char *str, ...)
{
char **arg, *end;
char **arg, *end, *ret = NULL;
va_list ap;

va_start(ap, str);
while ((arg = va_arg(ap, char **)) != NULL) {
if (*str == '\0')
return NULL;
goto out;
end = strchr(str, ',');
if (end != str)
*arg = str;
if (end == NULL)
return NULL;
goto out;
*end++ = '\0';
str = end;
}
ret = str;
out:
va_end(ap);
return str;
return ret;
}
@ -69,7 +69,7 @@ void do_signal(struct pt_regs *regs)
struct ksignal ksig;
int handled_sig = 0;

while (get_signal(&ksig)) {
if (get_signal(&ksig)) {
handled_sig = 1;
/* Whee! Actually deliver the signal. */
handle_signal(&ksig, regs);
@ -5,7 +5,7 @@
* Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
* Copyright (C) 2009 Jaswinder Singh Rajput
* Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
* Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
* Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
* Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
* Copyright (C) 2009 Google, Inc., Stephane Eranian
*
@ -5,7 +5,7 @@
* Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
* Copyright (C) 2009 Jaswinder Singh Rajput
* Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
* Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
* Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
* Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
* Copyright (C) 2009 Google, Inc., Stephane Eranian
*

@ -387,7 +387,7 @@ struct cpu_hw_events {
/* Check flags and event code/umask, and set the HSW N/A flag */
#define INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_NA(code, n) \
__EVENT_CONSTRAINT(code, n, \
INTEL_ARCH_EVENT_MASK|INTEL_ARCH_EVENT_MASK, \
INTEL_ARCH_EVENT_MASK|X86_ALL_EVENT_FLAGS, \
HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_NA_HSW)

@ -627,6 +627,7 @@ struct x86_perf_task_context {
u64 lbr_from[MAX_LBR_ENTRIES];
u64 lbr_to[MAX_LBR_ENTRIES];
u64 lbr_info[MAX_LBR_ENTRIES];
int tos;
int lbr_callstack_users;
int lbr_stack_state;
};
@ -232,7 +232,7 @@ static struct event_constraint intel_hsw_event_constraints[] = {
FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
INTEL_EVENT_CONSTRAINT(0x48, 0x4), /* L1D_PEND_MISS.* */
INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */
INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
/* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
@ -298,7 +298,7 @@ static bool __match_event(struct perf_event *a, struct perf_event *b)
static inline struct perf_cgroup *event_to_cgroup(struct perf_event *event)
{
if (event->attach_state & PERF_ATTACH_TASK)
return perf_cgroup_from_task(event->hw.target);
return perf_cgroup_from_task(event->hw.target, event->ctx);

return event->cgrp;
}
@ -239,7 +239,7 @@ static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
}

mask = x86_pmu.lbr_nr - 1;
tos = intel_pmu_lbr_tos();
tos = task_ctx->tos;
for (i = 0; i < tos; i++) {
lbr_idx = (tos - i) & mask;
wrmsrl(x86_pmu.lbr_from + lbr_idx, task_ctx->lbr_from[i]);

@ -247,6 +247,7 @@ static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
wrmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
}
wrmsrl(x86_pmu.lbr_tos, tos);
task_ctx->lbr_stack_state = LBR_NONE;
}

@ -270,6 +271,7 @@ static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
rdmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
}
task_ctx->tos = tos;
task_ctx->lbr_stack_state = LBR_VALID;
}
@ -1,7 +1,7 @@
/*
* x86 specific code for irq_work
*
* Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
* Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra
*/

#include <linux/kernel.h>
@ -38,6 +38,14 @@ static inline bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
return best && (best->ecx & bit(X86_FEATURE_XSAVE));
}

static inline bool guest_cpuid_has_mtrr(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;

best = kvm_find_cpuid_entry(vcpu, 1, 0);
return best && (best->edx & bit(X86_FEATURE_MTRR));
}

static inline bool guest_cpuid_has_tsc_adjust(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;
@ -120,14 +120,22 @@ static u8 mtrr_default_type(struct kvm_mtrr *mtrr_state)
return mtrr_state->deftype & IA32_MTRR_DEF_TYPE_TYPE_MASK;
}

static u8 mtrr_disabled_type(void)
static u8 mtrr_disabled_type(struct kvm_vcpu *vcpu)
{
/*
* Intel SDM 11.11.2.2: all MTRRs are disabled when
* IA32_MTRR_DEF_TYPE.E bit is cleared, and the UC
* memory type is applied to all of physical memory.
*
* However, virtual machines can be run with CPUID such that
* there are no MTRRs. In that case, the firmware will never
* enable MTRRs and it is obviously undesirable to run the
* guest entirely with UC memory and we use WB.
*/
return MTRR_TYPE_UNCACHABLE;
if (guest_cpuid_has_mtrr(vcpu))
return MTRR_TYPE_UNCACHABLE;
else
return MTRR_TYPE_WRBACK;
}

/*

@ -267,7 +275,7 @@ static int fixed_mtrr_addr_to_seg(u64 addr)

for (seg = 0; seg < seg_num; seg++) {
mtrr_seg = &fixed_seg_table[seg];
if (mtrr_seg->start >= addr && addr < mtrr_seg->end)
if (mtrr_seg->start <= addr && addr < mtrr_seg->end)
return seg;
}

@ -300,7 +308,6 @@ static void var_mtrr_range(struct kvm_mtrr_range *range, u64 *start, u64 *end)
*start = range->base & PAGE_MASK;

mask = range->mask & PAGE_MASK;
mask |= ~0ULL << boot_cpu_data.x86_phys_bits;

/* This cannot overflow because writing to the reserved bits of
* variable MTRRs causes a #GP.
@ -356,10 +363,14 @@ static void set_var_mtrr_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
if (var_mtrr_range_is_valid(cur))
list_del(&mtrr_state->var_ranges[index].node);

/* Extend the mask with all 1 bits to the left, since those
* bits must implicitly be 0. The bits are then cleared
* when reading them.
*/
if (!is_mtrr_mask)
cur->base = data;
else
cur->mask = data;
cur->mask = data | (-1LL << cpuid_maxphyaddr(vcpu));

/* add it to the list if it's enabled. */
if (var_mtrr_range_is_valid(cur)) {

@ -426,6 +437,8 @@ int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
*pdata = vcpu->arch.mtrr_state.var_ranges[index].base;
else
*pdata = vcpu->arch.mtrr_state.var_ranges[index].mask;

*pdata &= (1ULL << cpuid_maxphyaddr(vcpu)) - 1;
}

return 0;

@ -670,7 +683,7 @@ u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
}

if (iter.mtrr_disabled)
return mtrr_disabled_type();
return mtrr_disabled_type(vcpu);

/* not contained in any MTRRs. */
if (type == -1)
@ -3422,6 +3422,8 @@ static int handle_exit(struct kvm_vcpu *vcpu)
struct kvm_run *kvm_run = vcpu->run;
u32 exit_code = svm->vmcb->control.exit_code;

trace_kvm_exit(exit_code, vcpu, KVM_ISA_SVM);

if (!is_cr_intercept(svm, INTERCEPT_CR0_WRITE))
vcpu->arch.cr0 = svm->vmcb->save.cr0;
if (npt_enabled)

@ -3892,8 +3894,6 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;

trace_kvm_exit(svm->vmcb->control.exit_code, vcpu, KVM_ISA_SVM);

if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
kvm_before_handle_nmi(&svm->vcpu);
Some files were not shown because too many files have changed in this diff.