Merge tag 'drm-next-2021-08-31-1' of git://anongit.freedesktop.org/drm/drm

Pull drm updates from Dave Airlie:
 "Highlights:

   - i915 has seen a lot of refactoring and uAPI cleanups due to a
     change in the upstream direction going forward

     This has all been audited with known userspace, but there may be
     some pitfalls that were missed.

   - i915 now uses common TTM to enable discrete memory on DG1/2 GPUs

   - i915 enables Jasper and Elkhart Lake by default and has preliminary
     XeHP/DG2 support

   - amdgpu adds support for Cyan Skillfish

   - lots of implicit fencing rules documented and fixed up in drivers

   - msm now uses the core scheduler

   - the irq midlayer has been removed for non-legacy drivers

   - the sysfb code now works on more than x86.

  Otherwise the usual smattering of stuff everywhere, panels, bridges,
  refactorings.

  Detailed summary:

  core:
   - extract i915 eDP backlight into core
   - DP aux bus support
   - drm_device.irq_enabled removed
   - port drivers to native irq interfaces
   - export gem shadow plane handling for vgem
   - print proper driver name in framebuffer registration
   - driver fixes for implicit fencing rules
   - ARM fixed rate compression modifier added
   - updated fb damage handling
   - rmfb ioctl logging/docs
   - drop drm_gem_object_put_locked
   - define DRM_FORMAT_MAX_PLANES
   - add gem fb vmap/vunmap helpers
   - add lockdep_assert(once) helpers
   - mark drm irq midlayer as legacy
   - use offset adjusted bo mapping conversion

  vgaarb:
   - cleanups

  fbdev:
   - extend efifb handling to all arches
   - div by 0 fixes for multiple drivers

  udmabuf:
   - add hugepage mapping support

  dma-buf:
   - non-dynamic exporter fixups
   - document implicit fencing rules

  amdgpu:
   - Initial Cyan Skillfish support
   - switch virtual DCE over to vkms based atomic
   - VCN/JPEG power down fixes
   - NAVI PCIE link handling fixes
   - AMD HDMI freesync fixes
   - Yellow Carp + Beige Goby fixes
   - Clockgating/S0ix/SMU/EEPROM fixes
   - embed hw fence in job
   - rework dma-resv handling
   - ensure eviction to system ram

  amdkfd:
   - uapi: SVM address range query added
   - sysfs leak fix
   - GPUVM TLB optimizations
   - vmfault/migration counters

  i915:
   - Enable JSL and EHL by default
   - preliminary XeHP/DG2 support
   - remove all CNL support (never shipped)
   - move to TTM for discrete memory support
   - allow mixed object mmap handling
   - GEM uAPI spring cleaning
       - add I915_MMAP_OBJECT_FIXED
       - reinstate ADL-P mmap ioctls
       - drop a bunch of features unused by userspace
       - disable and remove GPU relocations
   - revert some i915 misfeatures
   - major refactoring of GuC for Gen11+
   - execbuffer object locking separate step
   - reject caching/set-domain on discrete
   - Enable pipe DMC loading on XE-LPD and ADL-P
   - add PSF GV point support
   - Refactor and fix DDI buffer translations
   - Clean up FBC CFB allocation code
   - Finish INTEL_GEN() and friends macro conversions

  nouveau:
   - add eDP backlight support
   - implicit fence fix

  msm:
   - a680/7c3 support
   - drm/scheduler conversion

  panfrost:
   - rework GPU reset

  virtio:
   - fix fencing for planes

  ast:
   - add detect support

  bochs:
   - move to tiny GPU driver

  vc4:
   - use hotplug irqs
   - HDMI codec support

  vmwgfx:
   - use internal vmware device headers

  ingenic:
   - demidlayering irq

  rcar-du:
   - shutdown fixes
   - convert to bridge connector helpers

  zynqmp-dpsub:
   - misc fixes

  mgag200:
   - convert PLL handling to atomic

  mediatek:
   - MT8133 AAL support
   - gem mmap object support
   - MT8167 support

  etnaviv:
   - NXP Layerscape LS1028A SoC support
   - GEM mmap cleanups

  tegra:
   - new user API

  exynos:
   - missing unlock fix
   - build warning fix
   - use refcount_t"

* tag 'drm-next-2021-08-31-1' of git://anongit.freedesktop.org/drm/drm: (1318 commits)
  drm/amd/display: Move AllowDRAMSelfRefreshOrDRAMClockChangeInVblank to bounding box
  drm/amd/display: Remove duplicate dml init
  drm/amd/display: Update bounding box states (v2)
  drm/amd/display: Update number of DCN3 clock states
  drm/amdgpu: disable GFX CGCG in aldebaran
  drm/amdgpu: Clear RAS interrupt status on aldebaran
  drm/amdgpu: Add support for RAS XGMI err query
  drm/amdkfd: Account for SH/SE count when setting up cu masks.
  drm/amdgpu: rename amdgpu_bo_get_preferred_pin_domain
  drm/amdgpu: drop redundant cancel_delayed_work_sync call
  drm/amdgpu: add missing cleanups for more ASICs on UVD/VCE suspend
  drm/amdgpu: add missing cleanups for Polaris12 UVD/VCE on suspend
  drm/amdkfd: map SVM range with correct access permission
  drm/amdkfd: check access permisson to restore retry fault
  drm/amdgpu: Update RAS XGMI Error Query
  drm/amdgpu: Add driver infrastructure for MCA RAS
  drm/amd/display: Add Logging for HDMI color depth information
  drm/amd/amdgpu: consolidate PSP TA init shared buf functions
  drm/amd/amdgpu: add name field back to ras_common_if
  drm/amdgpu: Fix build with missing pm_suspend_target_state module export
  ...
Linus Torvalds 2021-09-01 11:26:46 -07:00
commit 477f70cd2a
1124 changed files with 62336 additions and 35102 deletions


@ -0,0 +1,24 @@
What: /sys/kernel/dmabuf/buffers
Date: May 2021
KernelVersion: v5.13
Contact: Hridya Valsaraju <hridya@google.com>
Description: The /sys/kernel/dmabuf/buffers directory contains a
snapshot of the internal state of every DMA-BUF.
/sys/kernel/dmabuf/buffers/<inode_number> will contain the
statistics for the DMA-BUF with the unique inode number
<inode_number>
Users: kernel memory tuning/debugging tools
What: /sys/kernel/dmabuf/buffers/<inode_number>/exporter_name
Date: May 2021
KernelVersion: v5.13
Contact: Hridya Valsaraju <hridya@google.com>
Description: This file is read-only and contains the name of the exporter of
the DMA-BUF.
What: /sys/kernel/dmabuf/buffers/<inode_number>/size
Date: May 2021
KernelVersion: v5.13
Contact: Hridya Valsaraju <hridya@google.com>
Description: This file is read-only and specifies the size of the DMA-BUF in
bytes.
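
As a usage sketch (not part of the ABI file; it assumes CONFIG_DMABUF_SYSFS_STATS is enabled and keeps error handling minimal), a small C program can walk this hierarchy:

/* Walk /sys/kernel/dmabuf/buffers and print each buffer's stats. */
#include <dirent.h>
#include <stdio.h>

static void print_attr(const char *inode, const char *attr)
{
	char path[256], val[128];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/kernel/dmabuf/buffers/%s/%s", inode, attr);
	f = fopen(path, "r");
	if (!f)
		return;
	if (fgets(val, sizeof(val), f))
		printf("  %s: %s", attr, val);	/* sysfs values end in \n */
	fclose(f);
}

int main(void)
{
	DIR *d = opendir("/sys/kernel/dmabuf/buffers");
	struct dirent *ent;

	if (!d)
		return 1;	/* CONFIG_DMABUF_SYSFS_STATS likely disabled */
	while ((ent = readdir(d))) {
		if (ent->d_name[0] == '.')
			continue;	/* skip . and .. */
		printf("dmabuf inode %s\n", ent->d_name);
		print_attr(ent->d_name, "exporter_name");
		print_attr(ent->d_name, "size");
	}
	closedir(d);
	return 0;
}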


@ -22,6 +22,9 @@ properties:
- ti,ths8134a
- ti,ths8134b
- const: ti,ths8134
- items:
- const: corpro,gm7123
- const: adi,adv7123
- enum:
- adi,adv7123
- dumb-vga-dac


@ -70,6 +70,9 @@ properties:
const: 1
description: See ../../pwm/pwm.yaml for description of the cell formats.
aux-bus:
$ref: /schemas/display/dp-aux-bus.yaml#
ports:
$ref: /schemas/graph.yaml#/properties/ports
@ -150,7 +153,6 @@ properties:
required:
- compatible
- reg
- enable-gpios
- vccio-supply
- vpll-supply
- vcca-supply
@ -201,11 +203,26 @@ examples:
port@1 {
reg = <1>;
endpoint {
sn65dsi86_out: endpoint {
remote-endpoint = <&panel_in_edp>;
};
};
};
aux-bus {
panel {
compatible = "boe,nv133fhm-n62";
power-supply = <&pp3300_dx_edp>;
backlight = <&backlight>;
hpd-gpios = <&sn65dsi86_bridge 2 GPIO_ACTIVE_HIGH>;
port {
panel_in_edp: endpoint {
remote-endpoint = <&sn65dsi86_out>;
};
};
};
};
};
};
- |


@ -0,0 +1,37 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/display/dp-aux-bus.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: DisplayPort AUX bus
maintainers:
- Douglas Anderson <dianders@chromium.org>
description:
DisplayPort controllers provide a control channel to the sinks that
are hooked up to them. This is the DP AUX bus. Over the DP AUX bus
we can query properties about a sink and also configure it. In
particular, DP sinks support DDC over DP AUX which allows tunneling
a standard I2C DDC connection over the AUX channel.
To model this relationship, DP sinks should be placed as children
of the DP controller under the "aux-bus" node.
At the moment, this binding only handles the eDP case. It is
possible it will be extended in the future to handle the DP case.
For DP, presumably a connector would be listed under the DP AUX
bus instead of a panel.
properties:
$nodename:
const: "aux-bus"
panel:
$ref: panel/panel-common.yaml#
additionalProperties: false
required:
- panel


@ -7,7 +7,7 @@ channel output.
Required properties:
- compatible: "mediatek,<chip>-dsi"
- the supported chips are mt2701, mt7623, mt8173 and mt8183.
- the supported chips are mt2701, mt7623, mt8167, mt8173 and mt8183.
- reg: Physical base address and length of the controller's registers
- interrupts: The interrupt signal from the function block.
- clocks: device clocks


@ -64,6 +64,18 @@ properties:
Indicates if the DSI controller is driving a panel which needs
2 DSI links.
assigned-clocks:
minItems: 2
maxItems: 2
description: |
Parents of "byte" and "pixel" for the given platform.
assigned-clock-parents:
minItems: 2
maxItems: 2
description: |
The Byte clock and Pixel clock PLL outputs provided by a DSI PHY block.
power-domains:
maxItems: 1
@ -119,6 +131,8 @@ required:
- clock-names
- phys
- phy-names
- assigned-clocks
- assigned-clock-parents
- power-domains
- operating-points-v2
- ports
@ -159,6 +173,9 @@ examples:
phys = <&dsi0_phy>;
phy-names = "dsi";
assigned-clocks = <&dispcc DISP_CC_MDSS_BYTE0_CLK_SRC>, <&dispcc DISP_CC_MDSS_PCLK0_CLK_SRC>;
assigned-clock-parents = <&dsi_phy 0>, <&dsi_phy 1>;
power-domains = <&rpmhpd SC7180_CX>;
operating-points-v2 = <&dsi_opp_table>;


@ -0,0 +1,72 @@
# SPDX-License-Identifier: GPL-2.0-only or BSD-2-Clause
%YAML 1.2
---
$id: http://devicetree.org/schemas/display/msm/dsi-phy-7nm.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Qualcomm Display DSI 7nm PHY
maintainers:
- Jonathan Marek <jonathan@marek.ca>
allOf:
- $ref: dsi-phy-common.yaml#
properties:
compatible:
oneOf:
- const: qcom,dsi-phy-7nm
- const: qcom,dsi-phy-7nm-8150
- const: qcom,sc7280-dsi-phy-7nm
reg:
items:
- description: dsi phy register set
- description: dsi phy lane register set
- description: dsi pll register set
reg-names:
items:
- const: dsi_phy
- const: dsi_phy_lane
- const: dsi_pll
vdds-supply:
description: |
Connected to VDD_A_DSI_PLL_0P9 pin (or VDDA_DSI{0,1}_PLL_0P9 for sm8150)
phy-type:
description: D-PHY (default) or C-PHY mode
enum: [ 10, 11 ]
default: 10
required:
- compatible
- reg
- reg-names
- vdds-supply
unevaluatedProperties: false
examples:
- |
#include <dt-bindings/clock/qcom,dispcc-sm8250.h>
#include <dt-bindings/clock/qcom,rpmh.h>
dsi-phy@ae94400 {
compatible = "qcom,dsi-phy-7nm";
reg = <0x0ae94400 0x200>,
<0x0ae94600 0x280>,
<0x0ae94900 0x260>;
reg-names = "dsi_phy",
"dsi_phy_lane",
"dsi_pll";
#clock-cells = <1>;
#phy-cells = <0>;
vdds-supply = <&vreg_l5a_0p88>;
clocks = <&dispcc DISP_CC_MDSS_AHB_CLK>,
<&rpmhcc RPMH_CXO_CLK>;
clock-names = "iface", "ref";
};


@ -0,0 +1,78 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/display/panel/ilitek,ili9341.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Ilitek-9341 Display Panel
maintainers:
- Dillon Min <dillon.minfei@gmail.com>
description: |
Ilitek ILI9341 TFT panel driver with SPI control bus
This is a driver for 320x240 TFT panels, accepting an RGB input
stream with 16 or 18 bits per pixel.
allOf:
- $ref: panel-common.yaml#
properties:
compatible:
items:
- enum:
# ili9341 240*320 Color on stm32f429-disco board
- st,sf-tc240t-9370-t
- const: ilitek,ili9341
reg: true
dc-gpios:
maxItems: 1
description: Display data/command selection (D/CX) of this DBI panel
spi-3wire: true
spi-max-frequency:
const: 10000000
port: true
vci-supply:
description: Analog voltage supply (2.5 .. 3.3V)
vddi-supply:
description: Voltage supply for interface logic (1.65 .. 3.3 V)
vddi-led-supply:
description: Voltage supply for the LED driver (1.65 .. 3.3 V)
additionalProperties: false
required:
- compatible
- reg
- dc-gpios
- port
examples:
- |+
spi {
#address-cells = <1>;
#size-cells = <0>;
panel: display@0 {
compatible = "st,sf-tc240t-9370-t",
"ilitek,ili9341";
reg = <0>;
spi-3wire;
spi-max-frequency = <10000000>;
dc-gpios = <&gpiod 13 0>;
port {
panel_in: endpoint {
remote-endpoint = <&display_out>;
};
};
};
};
...


@ -0,0 +1,62 @@
# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/display/panel/innolux,ej030na.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Innolux EJ030NA 3.0" (320x480 pixels) 24-bit TFT LCD panel
description: |
The panel must obey the rules for a SPI slave device as specified in
spi/spi-controller.yaml
maintainers:
- Paul Cercueil <paul@crapouillou.net>
allOf:
- $ref: panel-common.yaml#
properties:
compatible:
const: innolux,ej030na
backlight: true
port: true
power-supply: true
reg: true
reset-gpios: true
required:
- compatible
- reg
- power-supply
- reset-gpios
unevaluatedProperties: false
examples:
- |
#include <dt-bindings/gpio/gpio.h>
spi {
#address-cells = <1>;
#size-cells = <0>;
panel@0 {
compatible = "innolux,ej030na";
reg = <0>;
spi-max-frequency = <10000000>;
reset-gpios = <&gpe 4 GPIO_ACTIVE_LOW>;
power-supply = <&lcd_power>;
backlight = <&backlight>;
port {
panel_input: endpoint {
remote-endpoint = <&panel_output>;
};
};
};
};


@ -46,9 +46,13 @@ properties:
# AU Optronics Corporation 11.6" HD (1366x768) color TFT-LCD panel
- auo,b116xw03
# AU Optronics Corporation 13.3" FHD (1920x1080) color TFT-LCD panel
- auo,b133han05
# AU Optronics Corporation 13.3" FHD (1920x1080) color TFT-LCD panel
- auo,b133htn01
# AU Optronics Corporation 13.3" WXGA (1366x768) TFT LCD panel
- auo,b133xtn01
# AU Optronics Corporation 14.0" FHD (1920x1080) color TFT-LCD panel
- auo,b140han06
# AU Optronics Corporation 7.0" FHD (800 x 480) TFT LCD panel
- auo,g070vvn01
# AU Optronics Corporation 10.1" (1280x800) color TFT LCD panel
@ -110,6 +114,9 @@ properties:
# Emerging Display Technology Corp. 5.7" VGA TFT LCD panel
- edt,et057090dhu
- edt,et070080dh6
# Emerging Display Technology Corp. 3.5" WVGA TFT LCD panel with
# capacitive multitouch
- edt,etm0350g0dh6
# Emerging Display Technology Corp. 480x272 TFT Display with capacitive touch
- edt,etm043080dh6gp
# Emerging Display Technology Corp. 480x272 TFT Display
@ -128,6 +135,11 @@ properties:
# Emerging Display Technology Corp. WVGA TFT Display with capacitive touch
- edt,etm0700g0dh6
- edt,etm0700g0edh6
# Emerging Display Technology Corp. 5.7" VGA TFT LCD panel with
# capacitive touch
- edt,etmv570g2dhu
# E Ink VB3300-KCA
- eink,vb3300-kca
# Evervision Electronics Co. Ltd. VGG804821 5.0" WVGA TFT LCD Panel
- evervision,vgg804821
# Foxlink Group 5" WVGA TFT LCD panel
@ -202,8 +214,14 @@ properties:
- logictechno,lt161010-2nhr
# Logic Technologies LT170410-2WHC 10.1" 1280x800 IPS TFT Cap Touch Mod.
- logictechno,lt170410-2whc
# Logic Technologies LTTD800x480 L2RT 7" 800x480 TFT Resistive Touch Module
- logictechno,lttd800480070-l2rt
# Logic Technologies LTTD800480070-L6WH-RT 7” 800x480 TFT Resistive Touch Module
- logictechno,lttd800480070-l6wh-rt
# Mitsubishi "AA070MC01 7.0" WVGA TFT LCD panel
- mitsubishi,aa070mc01-ca1
# Multi-Inno Technology Co.,Ltd MI1010AIT-1CP 10.1" 1280x800 LVDS IPS Cap Touch Mod.
- multi-inno,mi1010ait-1cp
# NEC LCD Technologies, Ltd. 12.1" WXGA (1280x800) LVDS TFT LCD panel
- nec,nl12880bc20-05
# NEC LCD Technologies,Ltd. WQVGA TFT LCD panel
@ -238,10 +256,14 @@ properties:
- powertip,ph800480t013-idf02
# QiaoDian XianShi Corporation 4"3 TFT LCD panel
- qiaodian,qd43003c0-40
# Shenzhen QiShenglong Industrialist Co., Ltd. Gopher 2b 4.3" 480(RGB)x272 TFT LCD panel
- qishenglong,gopher2b-lcd
# Rocktech Displays Ltd. RK101II01D-CT 10.1" TFT 1280x800
- rocktech,rk101ii01d-ct
# Rocktech Display Ltd. RK070ER9427 800(RGB)x480 TFT LCD panel
- rocktech,rk070er9427
# Samsung 13.3" FHD (1920x1080 pixels) eDP AMOLED panel
- samsung,atna33xc20
# Samsung 12.2" (2560x1600 pixels) TFT LCD panel
- samsung,lsn122dl01-c01
# Samsung Electronics 10.1" WSVGA TFT LCD panel
@ -298,6 +320,8 @@ properties:
enable-gpios: true
port: true
power-supply: true
no-hpd: true
hpd-gpios: true
additionalProperties: false


@ -0,0 +1,99 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/display/panel/samsung,lms380kf01.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Samsung LMS380KF01 display panel
description: The LMS380KF01 is a 480x800 DPI display panel from Samsung Mobile
Displays (SMD) utilizing the WideChips WS2401 display controller. It can be
used with internal or external backlight control.
The panel must obey the rules for a SPI slave device as specified in
spi/spi-controller.yaml
maintainers:
- Linus Walleij <linus.walleij@linaro.org>
allOf:
- $ref: panel-common.yaml#
properties:
compatible:
const: samsung,lms380kf01
reg: true
interrupts:
description: provides an optional ESD (electrostatic discharge)
interrupt that signals abnormalities in the display hardware.
This can also be raised for other reasons like erroneous
configuration.
maxItems: 1
reset-gpios: true
vci-supply:
description: regulator that supplies the VCI analog voltage
usually around 3.0 V
vccio-supply:
description: regulator that supplies the VCCIO voltage usually
around 1.8 V
backlight: true
spi-cpha: true
spi-cpol: true
spi-max-frequency:
maximum: 1200000
port: true
required:
- compatible
- reg
- spi-cpha
- spi-cpol
- port
additionalProperties: false
examples:
- |
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/interrupt-controller/irq.h>
spi {
compatible = "spi-gpio";
sck-gpios = <&gpio 0 GPIO_ACTIVE_HIGH>;
miso-gpios = <&gpio 1 GPIO_ACTIVE_HIGH>;
mosi-gpios = <&gpio 2 GPIO_ACTIVE_HIGH>;
cs-gpios = <&gpio 3 GPIO_ACTIVE_HIGH>;
num-chipselects = <1>;
#address-cells = <1>;
#size-cells = <0>;
panel@0 {
compatible = "samsung,lms380kf01";
spi-max-frequency = <1200000>;
spi-cpha;
spi-cpol;
reg = <0>;
vci-supply = <&lcd_3v0_reg>;
vccio-supply = <&lcd_1v8_reg>;
reset-gpios = <&gpio 4 GPIO_ACTIVE_LOW>;
interrupt-parent = <&gpio>;
interrupts = <5 IRQ_TYPE_EDGE_RISING>;
port {
panel_in: endpoint {
remote-endpoint = <&display_out>;
};
};
};
};
...


@ -33,8 +33,11 @@ properties:
backlight: true
spi-cpha: true
spi-cpol: true
spi-max-frequency:
$ref: /schemas/types.yaml#/definitions/uint32
description: inherited as a SPI client node, the datasheet specifies
maximum 300 ns minimum cycle which gives around 3 MHz max frequency
maximum: 3000000
@ -44,6 +47,9 @@ properties:
required:
- compatible
- reg
- spi-cpha
- spi-cpol
- port
additionalProperties: false
@ -52,15 +58,23 @@ examples:
#include <dt-bindings/gpio/gpio.h>
spi {
compatible = "spi-gpio";
sck-gpios = <&gpio 0 GPIO_ACTIVE_HIGH>;
miso-gpios = <&gpio 1 GPIO_ACTIVE_HIGH>;
mosi-gpios = <&gpio 2 GPIO_ACTIVE_HIGH>;
cs-gpios = <&gpio 3 GPIO_ACTIVE_HIGH>;
num-chipselects = <1>;
#address-cells = <1>;
#size-cells = <0>;
panel@0 {
compatible = "samsung,lms397kf04";
spi-max-frequency = <3000000>;
spi-cpha;
spi-cpol;
reg = <0>;
vci-supply = <&lcd_3v0_reg>;
vccio-supply = <&lcd_1v8_reg>;
reset-gpios = <&gpio 1 GPIO_ACTIVE_LOW>;
reset-gpios = <&gpio 4 GPIO_ACTIVE_LOW>;
backlight = <&ktd259>;
port {


@ -23,6 +23,7 @@ Required properties:
Optional properties:
- phys: from general PHY binding: the phandle for the PHY device.
- phy-names: Should be "dphy" if phys references an external phy.
- #phy-cells: Defined when used as ISP phy, should be 0.
- power-domains: a phandle to mipi dsi power domain node.
- resets: list of phandle + reset specifier pairs, as described in [3].
- reset-names: string reset name, must be "apb".


@ -0,0 +1,208 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/display/solomon,ssd1307fb.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Solomon SSD1307 OLED Controller Framebuffer
maintainers:
- Maxime Ripard <mripard@kernel.org>
properties:
compatible:
enum:
- solomon,ssd1305fb-i2c
- solomon,ssd1306fb-i2c
- solomon,ssd1307fb-i2c
- solomon,ssd1309fb-i2c
reg:
maxItems: 1
pwms:
maxItems: 1
reset-gpios:
maxItems: 1
vbat-supply:
description: The supply for VBAT
solomon,height:
$ref: /schemas/types.yaml#/definitions/uint32
default: 16
description:
Height in pixel of the screen driven by the controller
solomon,width:
$ref: /schemas/types.yaml#/definitions/uint32
default: 96
description:
Width in pixel of the screen driven by the controller
solomon,page-offset:
$ref: /schemas/types.yaml#/definitions/uint32
default: 1
description:
Offset of pages (band of 8 pixels) that the screen is mapped to
solomon,segment-no-remap:
type: boolean
description:
Display needs normal (non-inverted) data column to segment mapping
solomon,col-offset:
$ref: /schemas/types.yaml#/definitions/uint32
default: 0
description:
Offset of columns (COL/SEG) that the screen is mapped to
solomon,com-seq:
type: boolean
description:
Display uses sequential COM pin configuration
solomon,com-lrremap:
type: boolean
description:
Display uses left-right COM pin remap
solomon,com-invdir:
type: boolean
description:
Display uses inverted COM pin scan direction
solomon,com-offset:
$ref: /schemas/types.yaml#/definitions/uint32
default: 0
description:
Number of the COM pin wired to the first display line
solomon,prechargep1:
$ref: /schemas/types.yaml#/definitions/uint32
default: 2
description:
Length of deselect period (phase 1) in clock cycles
solomon,prechargep2:
$ref: /schemas/types.yaml#/definitions/uint32
default: 2
description:
Length of precharge period (phase 2) in clock cycles. The higher the
capacitance of the OLED's pixels, the higher this needs to be.
solomon,dclk-div:
$ref: /schemas/types.yaml#/definitions/uint32
minimum: 1
maximum: 16
description:
Clock divisor. The default value is controller-dependent.
solomon,dclk-frq:
$ref: /schemas/types.yaml#/definitions/uint32
minimum: 0
maximum: 15
description:
Clock frequency, higher value means higher frequency.
The default value is controller-dependent.
solomon,lookup-table:
$ref: /schemas/types.yaml#/definitions/uint8-array
maxItems: 4
description:
8 bit value array of current drive pulse widths for BANK0, and colors A,
B, and C. Each value in range of 31 to 63 for pulse widths of 32 to 64.
Color D is always width 64.
solomon,area-color-enable:
type: boolean
description:
Display uses color mode
solomon,low-power:
type: boolean
description:
Display runs in low power mode
required:
- compatible
- reg
allOf:
- if:
properties:
compatible:
contains:
const: solomon,ssd1305fb-i2c
then:
properties:
solomon,dclk-div:
default: 1
solomon,dclk-frq:
default: 7
- if:
properties:
compatible:
contains:
const: solomon,ssd1306fb-i2c
then:
properties:
solomon,dclk-div:
default: 1
solomon,dclk-frq:
default: 8
- if:
properties:
compatible:
contains:
const: solomon,ssd1307fb-i2c
then:
properties:
solomon,dclk-div:
default: 2
solomon,dclk-frq:
default: 12
required:
- pwms
- if:
properties:
compatible:
contains:
const: solomon,ssd1309fb-i2c
then:
properties:
solomon,dclk-div:
default: 1
solomon,dclk-frq:
default: 10
additionalProperties: false
examples:
- |
i2c1 {
#address-cells = <1>;
#size-cells = <0>;
ssd1307: oled@3c {
compatible = "solomon,ssd1307fb-i2c";
reg = <0x3c>;
pwms = <&pwm 4 3000>;
reset-gpios = <&gpio2 7>;
};
ssd1306: oled@3d {
compatible = "solomon,ssd1306fb-i2c";
reg = <0x3d>;
pwms = <&pwm 4 3000>;
reset-gpios = <&gpio2 7>;
solomon,com-lrremap;
solomon,com-invdir;
solomon,com-offset = <32>;
solomon,lookup-table = /bits/ 8 <0x3f 0x3f 0x3f 0x3f>;
};
};


@ -1,60 +0,0 @@
* Solomon SSD1307 Framebuffer Driver
Required properties:
- compatible: Should be "solomon,<chip>fb-<bus>". The only supported bus for
now is i2c, and the supported chips are ssd1305, ssd1306, ssd1307 and
ssd1309.
- reg: Should contain address of the controller on the I2C bus. Most likely
0x3c or 0x3d
- pwm: Should contain the pwm to use according to the OF device tree PWM
specification [0]. Only required for the ssd1307.
- solomon,height: Height in pixel of the screen driven by the controller
- solomon,width: Width in pixel of the screen driven by the controller
- solomon,page-offset: Offset of pages (band of 8 pixels) that the screen is
mapped to.
Optional properties:
- reset-gpios: The GPIO used to reset the OLED display, if available. See
Documentation/devicetree/bindings/gpio/gpio.txt for details.
- vbat-supply: The supply for VBAT
- solomon,segment-no-remap: Display needs normal (non-inverted) data column
to segment mapping
- solomon,col-offset: Offset of columns (COL/SEG) that the screen is mapped to.
- solomon,com-seq: Display uses sequential COM pin configuration
- solomon,com-lrremap: Display uses left-right COM pin remap
- solomon,com-invdir: Display uses inverted COM pin scan direction
- solomon,com-offset: Number of the COM pin wired to the first display line
- solomon,prechargep1: Length of deselect period (phase 1) in clock cycles.
- solomon,prechargep2: Length of precharge period (phase 2) in clock cycles.
This needs to be the higher, the higher the capacitance
of the OLED's pixels is
- solomon,dclk-div: Clock divisor 1 to 16
- solomon,dclk-frq: Clock frequency 0 to 15, higher value means higher
frequency
- solomon,lookup-table: 8 bit value array of current drive pulse widths for
BANK0, and colors A, B, and C. Each value in range
of 31 to 63 for pulse widths of 32 to 64. Color D
is always width 64.
- solomon,area-color-enable: Display uses color mode
- solomon,low-power. Display runs in low power mode
[0]: Documentation/devicetree/bindings/pwm/pwm.txt
Examples:
ssd1307: oled@3c {
compatible = "solomon,ssd1307fb-i2c";
reg = <0x3c>;
pwms = <&pwm 4 3000>;
reset-gpios = <&gpio2 7>;
};
ssd1306: oled@3c {
compatible = "solomon,ssd1306fb-i2c";
reg = <0x3c>;
pwms = <&pwm 4 3000>;
reset-gpios = <&gpio2 7>;
solomon,com-lrremap;
solomon,com-invdir;
solomon,com-offset = <32>;
solomon,lookup-table = /bits/ 8 <0x3f 0x3f 0x3f 0x3f>;
};


@ -341,6 +341,8 @@ patternProperties:
description: eGalax_eMPIA Technology Inc
"^einfochips,.*":
description: Einfochips
"^eink,.*":
description: E Ink Corporation
"^elan,.*":
description: Elan Microelectronic Corp.
"^element14,.*":
@ -938,6 +940,8 @@ patternProperties:
description: Chengdu Kaixuan Information Technology Co., Ltd.
"^qiaodian,.*":
description: QiaoDian XianShi Corporation
"^qishenglong,.*":
description: Shenzhen QiShenglong Industrialist Co., Ltd.
"^qnap,.*":
description: QNAP Systems, Inc.
"^radxa,.*":


@ -88,6 +88,9 @@ consider though:
- The DMA buffer FD is also pollable, see `Implicit Fence Poll Support`_ below for
details.
- The DMA buffer FD also supports a few dma-buf-specific ioctls, see
`DMA Buffer ioctls`_ below for details.
Basic Operation and Device DMA Access
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@ -106,6 +109,16 @@ Implicit Fence Poll Support
.. kernel-doc:: drivers/dma-buf/dma-buf.c
:doc: implicit fence polling
DMA-BUF statistics
~~~~~~~~~~~~~~~~~~
.. kernel-doc:: drivers/dma-buf/dma-buf-sysfs-stats.c
:doc: overview
DMA Buffer ioctls
~~~~~~~~~~~~~~~~~
.. kernel-doc:: include/uapi/linux/dma-buf.h
Kernel Functions and Structures Reference
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~


@ -90,7 +90,7 @@ Bug reports
-----------
Thermal management on Nouveau is new and may not work on all cards. If you have
inquiries, please ping mupuf on IRC (#nouveau, freenode).
inquiries, please ping mupuf on IRC (#nouveau, OFTC).
Bug reports should be filed on Freedesktop's bug tracker. Please follow
https://nouveau.freedesktop.org/wiki/Bugs


@ -159,6 +159,8 @@ KMS Core Structures and Functions
.. kernel-doc:: drivers/gpu/drm/drm_mode_config.c
:export:
.. _kms_base_object_abstraction:
Modeset Base Object Abstraction
===============================
@ -463,6 +465,35 @@ KMS Properties
This section of the documentation is primarily aimed at user-space developers.
For the driver APIs, see the other sections.
Requirements
------------
KMS drivers might need to add extra properties to support new features. Each
new property introduced in a driver needs to meet a few requirements, in
addition to the one mentioned above:
* It must be standardized, documenting:
* The full, exact, name string;
* If the property is an enum, all the valid value name strings;
* What values are accepted, and what these values mean;
* What the property does and how it can be used;
* How the property might interact with other, existing properties.
* It must provide a generic helper in the core code to register that
property on the object it attaches to.
* Its content must be decoded by the core and provided in the object's
associated state structure. That includes anything drivers might want
to precompute, like struct drm_clip_rect for planes.
* Its initial state must match the behavior prior to the property
introduction. This might be a fixed value matching what the hardware
does, or it may be inherited from the state the firmware left the
system in during boot.
* An IGT test must be submitted where reasonable.
Property Types and Blob Property Support
----------------------------------------
@ -508,8 +539,8 @@ Plane Composition Properties
Damage Tracking Properties
--------------------------
.. kernel-doc:: drivers/gpu/drm/drm_damage_helper.c
:doc: overview
.. kernel-doc:: drivers/gpu/drm/drm_plane.c
:doc: damage tracking
Color Management Properties
---------------------------


@ -37,6 +37,15 @@ Primary Nodes, DRM Master and Authentication
.. kernel-doc:: include/drm/drm_auth.h
:internal:
.. _drm_leasing:
DRM Display Resource Leasing
============================
.. kernel-doc:: drivers/gpu/drm/drm_lease.c
:doc: drm leasing
Open-Source Userspace Requirements
==================================
@ -457,6 +466,19 @@ Userspace API Structures
.. kernel-doc:: include/uapi/drm/drm_mode.h
:doc: overview
.. _crtc_index:
CRTC index
----------
CRTCs have both an object ID and an index, and they are not the same thing.
The index is used in cases where a densely packed identifier for a CRTC is
needed, for instance a bitmask of CRTCs. The member possible_crtcs of struct
drm_mode_get_plane is an example.
DRM_IOCTL_MODE_GETRESOURCES populates a structure with an array of CRTC IDs,
and the CRTC index is its position in this array.
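For instance, mapping object IDs to indexes with libdrm (an illustrative sketch, not text from this document set; it assumes a DRM node at /dev/dri/card0 and links against -ldrm):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);
	drmModeRes *res;
	int i;

	if (fd < 0)
		return 1;
	res = drmModeGetResources(fd);	/* DRM_IOCTL_MODE_GETRESOURCES */
	if (!res)
		return 1;
	/* The CRTC index is the position in this array, not the object ID;
	 * a plane whose possible_crtcs has bit (1 << i) set can be used
	 * with the CRTC at index i. */
	for (i = 0; i < res->count_crtcs; i++)
		printf("CRTC index %d -> object ID %u\n", i, res->crtcs[i]);
	drmModeFreeResources(res);
	close(fd);
	return 0;
}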
.. kernel-doc:: include/uapi/drm/drm.h
:internal:


@ -422,9 +422,16 @@ Batchbuffer Parsing
User Batchbuffer Execution
--------------------------
.. kernel-doc:: drivers/gpu/drm/i915/gem/i915_gem_context_types.h
.. kernel-doc:: drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
:doc: User command execution
Scheduling
----------
.. kernel-doc:: drivers/gpu/drm/i915/i915_scheduler_types.h
:functions: i915_sched_engine
Logical Rings, Logical Ring Contexts and Execlists
--------------------------------------------------
@ -518,6 +525,14 @@ GuC-based command submission
.. kernel-doc:: drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
:doc: GuC-based command submission
GuC ABI
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. kernel-doc:: drivers/gpu/drm/i915/gt/uc/abi/guc_messages_abi.h
.. kernel-doc:: drivers/gpu/drm/i915/gt/uc/abi/guc_communication_mmio_abi.h
.. kernel-doc:: drivers/gpu/drm/i915/gt/uc/abi/guc_communication_ctb_abi.h
.. kernel-doc:: drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
HuC
---
.. kernel-doc:: drivers/gpu/drm/i915/gt/uc/intel_huc.c


@ -0,0 +1,122 @@
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2021 Intel Corporation
*/
#define I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT 2 /* see i915_context_engines_parallel_submit */
/**
* struct drm_i915_context_engines_parallel_submit - Configure engine for
* parallel submission.
*
* Setup a slot in the context engine map to allow multiple BBs to be submitted
* in a single execbuf IOCTL. Those BBs will then be scheduled to run on the GPU
* in parallel. Multiple hardware contexts are created internally in i915 to
* run these BBs. Once a slot is configured for N BBs only N BBs can be
* submitted in each execbuf IOCTL and this is implicit behavior, e.g. the user
* doesn't tell the execbuf IOCTL there are N BBs; the execbuf IOCTL knows how
* many BBs there are based on the slot's configuration. The N BBs are the last
* N buffer objects, or the first N if I915_EXEC_BATCH_FIRST is set.
*
* The default placement behavior is to create implicit bonds between each
* context if each context maps to more than 1 physical engine (e.g. the context
* is a virtual engine). Also we only allow contexts of the same engine class,
* and these contexts must be in logically contiguous order. Examples of the
* placement behavior are described below. Lastly, the default is to not allow
* BBs to be preempted mid-BB; rather, coordinated preemption is inserted on all
* hardware contexts between each set of BBs. Flags may be added in the future
* to change both of these default behaviors.
*
* Returns -EINVAL if hardware context placement configuration is invalid or if
* the placement configuration isn't supported on the platform / submission
* interface.
* Returns -ENODEV if extension isn't supported on the platform / submission
* interface.
*
* .. code-block:: none
*
* Example 1 pseudo code:
* CS[X] = generic engine of same class, logical instance X
* INVALID = I915_ENGINE_CLASS_INVALID, I915_ENGINE_CLASS_INVALID_NONE
* set_engines(INVALID)
* set_parallel(engine_index=0, width=2, num_siblings=1,
* engines=CS[0],CS[1])
*
* Results in the following valid placement:
* CS[0], CS[1]
*
* Example 2 pseudo code:
* CS[X] = generic engine of same class, logical instance X
* INVALID = I915_ENGINE_CLASS_INVALID, I915_ENGINE_CLASS_INVALID_NONE
* set_engines(INVALID)
* set_parallel(engine_index=0, width=2, num_siblings=2,
* engines=CS[0],CS[2],CS[1],CS[3])
*
* Results in the following valid placements:
* CS[0], CS[1]
* CS[2], CS[3]
*
* This can also be thought of as 2 virtual engines described by a 2-D array
* in the engines field, with bonds placed between each index of the
* virtual engines. e.g. CS[0] is bonded to CS[1], CS[2] is bonded to
* CS[3].
* VE[0] = CS[0], CS[2]
* VE[1] = CS[1], CS[3]
*
* Example 3 pseudo code:
* CS[X] = generic engine of same class, logical instance X
* INVALID = I915_ENGINE_CLASS_INVALID, I915_ENGINE_CLASS_INVALID_NONE
* set_engines(INVALID)
* set_parallel(engine_index=0, width=2, num_siblings=2,
* engines=CS[0],CS[1],CS[1],CS[3])
*
* Results in the following valid and invalid placements:
* CS[0], CS[1]
* CS[1], CS[3] - Not logically contiguous, return -EINVAL
*/
struct drm_i915_context_engines_parallel_submit {
/**
* @base: base user extension.
*/
struct i915_user_extension base;
/**
* @engine_index: slot for parallel engine
*/
__u16 engine_index;
/**
* @width: number of contexts per parallel engine
*/
__u16 width;
/**
* @num_siblings: number of siblings per context
*/
__u16 num_siblings;
/**
* @mbz16: reserved for future use; must be zero
*/
__u16 mbz16;
/**
* @flags: all undefined flags must be zero; currently no flags are defined
*/
__u64 flags;
/**
* @mbz64: reserved for future use; must be zero
*/
__u64 mbz64[3];
/**
* @engines: 2-d array of engine instances to configure parallel engine
*
* length = width (i) * num_siblings (j)
* index = j + i * num_siblings
*/
struct i915_engine_class_instance engines[0];
} __packed;
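
/*
 * A hedged userspace sketch, not part of this RFC header: it assumes the
 * struct and I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT definition above are
 * in scope, and that <drm/i915_drm.h> provides i915_user_extension,
 * i915_engine_class_instance and I915_ENGINE_CLASS_RENDER. It builds the
 * Example 2 configuration (width=2, num_siblings=2), i.e. the virtual
 * engines VE[0]=CS[0],CS[2] and VE[1]=CS[1],CS[3].
 */
#include <stdlib.h>
#include <drm/i915_drm.h>

static struct drm_i915_context_engines_parallel_submit *
build_parallel_ext(__u16 width, __u16 num_siblings, const __u16 *instances)
{
	struct drm_i915_context_engines_parallel_submit *ext;
	size_t sz = sizeof(*ext) +
		    (size_t)width * num_siblings * sizeof(ext->engines[0]);
	int i, j;

	ext = calloc(1, sz);
	if (!ext)
		return NULL;
	ext->base.name = I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT;
	ext->engine_index = 0;		/* slot 0 in the context engine map */
	ext->width = width;
	ext->num_siblings = num_siblings;
	/* Layout from the struct above: index = j + i * num_siblings */
	for (i = 0; i < width; i++)
		for (j = 0; j < num_siblings; j++) {
			int idx = j + i * num_siblings;

			ext->engines[idx].engine_class = I915_ENGINE_CLASS_RENDER;
			ext->engines[idx].engine_instance = instances[idx];
		}
	return ext;
}

/* Example 2 above: engines=CS[0],CS[2],CS[1],CS[3] -> VE[0]={0,2}, VE[1]={1,3} */
static const __u16 example2_instances[] = { 0, 2, 1, 3 };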


@ -0,0 +1,148 @@
=========================================
I915 GuC Submission/DRM Scheduler Section
=========================================
Upstream plan
=============
For upstream the overall plan for landing GuC submission and integrating the
i915 with the DRM scheduler is:
* Merge basic GuC submission
* Basic submission support for all gen11+ platforms
* Not enabled by default on any current platforms but can be enabled via
modparam enable_guc
* Lots of rework will need to be done to integrate with DRM scheduler so
no need to nit pick everything in the code, it just should be
functional, no major coding style / layering errors, and not regress
execlists
* Update IGTs / selftests as needed to work with GuC submission
* Enable CI on supported platforms for a baseline
* Rework / get CI healthy for GuC submission in place as needed
* Merge new parallel submission uAPI
* Bonding uAPI completely incompatible with GuC submission, plus it has
severe design issues in general, which is why we want to retire it no
matter what
* New uAPI adds I915_CONTEXT_ENGINES_EXT_PARALLEL context setup step
which configures a slot with N contexts
* After I915_CONTEXT_ENGINES_EXT_PARALLEL a user can submit N batches to
a slot in a single execbuf IOCTL and the batches run on the GPU in
parallel
* Initially only for GuC submission but execlists can be supported if
needed
* Convert the i915 to use the DRM scheduler
* GuC submission backend fully integrated with DRM scheduler
* All request queues removed from backend (e.g. all backpressure
handled in DRM scheduler)
* Resets / cancels hook in DRM scheduler
* Watchdog hooks into DRM scheduler
* Lots of complexity of the GuC backend can be pulled out once
integrated with DRM scheduler (e.g. state machine gets
simpler, locking gets simpler, etc...)
* Execlists backend will be the minimum required to hook into the DRM scheduler
* Legacy interface
* Features like timeslicing / preemption / virtual engines would
be difficult to integrate with the DRM scheduler and these
features are not required for GuC submission as the GuC does
these things for us
* ROI low on fully integrating into DRM scheduler
* Fully integrating would add lots of complexity to DRM
scheduler
* Port i915 priority inheritance / boosting feature in DRM scheduler
* Used for i915 page flip, may be useful to other DRM drivers as
well
* Will be an optional feature in the DRM scheduler
* Remove in-order completion assumptions from DRM scheduler
* Even when using the DRM scheduler the backends will handle
preemption, timeslicing, etc... so it is possible for jobs to
finish out of order
* Pull out i915 priority levels and use DRM priority levels
* Optimize DRM scheduler as needed
TODOs for GuC submission upstream
=================================
* Need an update to GuC firmware / i915 to enable error state capture
* Open source tool to decode GuC logs
* Public GuC spec
New uAPI for basic GuC submission
=================================
No major changes are required to the uAPI for basic GuC submission. The only
change is a new scheduler attribute: I915_SCHEDULER_CAP_STATIC_PRIORITY_MAP.
This attribute indicates the 2k i915 user priority levels are statically mapped
into 3 levels as follows:
* -1k to -1 Low priority
* 0 Medium priority
* 1 to 1k High priority
This is needed because the GuC only has 4 priority bands. The highest priority
band is reserved for the kernel. This aligns with the DRM scheduler priority
levels too.
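
Expressed as code (an illustrative sketch only; the band names are placeholders, not i915 or GuC identifiers):

enum band { BAND_LOW, BAND_MEDIUM, BAND_HIGH };

/* Static mapping of the 2k i915 user priority levels into 3 bands. */
static enum band map_user_priority(int prio)	/* prio in [-1k, 1k] */
{
	if (prio < 0)
		return BAND_LOW;	/* -1k to -1 */
	if (prio == 0)
		return BAND_MEDIUM;	/* 0 */
	return BAND_HIGH;		/* 1 to 1k */
}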
Spec references:
----------------
* https://www.khronos.org/registry/EGL/extensions/IMG/EGL_IMG_context_priority.txt
* https://www.khronos.org/registry/vulkan/specs/1.2-extensions/html/chap5.html#devsandqueues-priority
* https://spec.oneapi.com/level-zero/latest/core/api.html#ze-command-queue-priority-t
New parallel submission uAPI
============================
The existing bonding uAPI is completely broken with GuC submission because
whether a submission is a single context submit or parallel submit isn't known
until execbuf time, when it is activated via the I915_SUBMIT_FENCE. To submit multiple
contexts in parallel with the GuC the context must be explicitly registered with
N contexts and all N contexts must be submitted in a single command to the GuC.
The GuC interfaces do not support dynamically changing between N contexts as the
bonding uAPI does. Hence the need for a new parallel submission interface. Also
the legacy bonding uAPI is quite confusing and not intuitive at all. Furthermore
I915_SUBMIT_FENCE is by design a future fence, so not really something we should
continue to support.
The new parallel submission uAPI consists of 3 parts:
* Export engines logical mapping
* A 'set_parallel' extension to configure contexts for parallel
submission
* Extend execbuf2 IOCTL to support submitting N BBs in a single IOCTL
Export engines logical mapping
------------------------------
Certain use cases require BBs to be placed on engine instances in logical order
(e.g. split-frame on gen11+). The logical mapping of engine instances can change
based on fusing. Rather than making UMDs be aware of fusing, simply expose the
logical mapping with the existing query engine info IOCTL. Also the GuC
submission interface currently only supports submitting multiple contexts to
engines in logical order which is a new requirement compared to execlists.
Lastly, all current platforms have at most 2 engine instances and the logical
order is the same as uAPI order. This will change on platforms with more than 2
engine instances.
A single bit will be added to drm_i915_engine_info.flags indicating that the
logical instance has been returned and a new field,
drm_i915_engine_info.logical_instance, returns the logical instance.
A 'set_parallel' extension to configure contexts for parallel submission
------------------------------------------------------------------------
The 'set_parallel' extension configures a slot for parallel submission of N BBs.
It is a setup step that must be called before using any of the contexts. See
I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE or I915_CONTEXT_ENGINES_EXT_BOND for
similar existing examples. Once a slot is configured for parallel submission the
execbuf2 IOCTL can be called submitting N BBs in a single IOCTL. Initially only
GuC submission is supported. Execlists support can be added later if needed.
Add I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT and
drm_i915_context_engines_parallel_submit to the uAPI to implement this
extension.
.. kernel-doc:: Documentation/gpu/rfc/i915_parallel_execbuf.h
:functions: drm_i915_context_engines_parallel_submit
Extend execbuf2 IOCTL to support submitting N BBs in a single IOCTL
-------------------------------------------------------------------
Contexts that have been configured with the 'set_parallel' extension can only
submit N BBs in a single execbuf2 IOCTL. The BBs are either the last N objects
in the drm_i915_gem_exec_object2 list or the first N if I915_EXEC_BATCH_FIRST is
set. The number of BBs is implicit based on the slot submitted and how it has
been configured by 'set_parallel' or other extensions. No uAPI changes are
required to the execbuf2 IOCTL.


@ -19,3 +19,7 @@ host such documentation:
.. toctree::
i915_gem_lmem.rst
.. toctree::
i915_scheduler.rst


@ -98,9 +98,17 @@ with VKMS maintainers.
IGT better support
------------------
- Investigate: (1) test cases on kms_plane that are failing due to timeout on
capturing CRC; (2) when running kms_flip test cases in sequence, some
successful individual test cases are failing randomly.
Debugging:
- kms_plane: some test cases are failing due to timeout on capturing CRC;
- kms_flip: when running test cases in sequence, some successful individual
test cases are failing randomly; when run individually, some successful test
cases display in the log the following error::
[drm:vkms_prepare_fb [vkms]] ERROR vmap failed: -4
Virtual hardware (vblank-less) mode:
- VKMS already has support for vblanks simulated via hrtimers, which can be
tested with kms_flip test; in some way, we can say that VKMS already mimics
@ -116,7 +124,17 @@ Add Plane Features
There's lots of plane features we could add support for:
- Real overlay planes, not just cursor.
- Multiple overlay planes. [Good to get started]
- Clearing primary plane: clear primary plane before plane composition (at the
start) for correctness of pixel blend ops. It also guarantees alpha channel
is cleared in the target buffer for stable crc. [Good to get started]
- ARGB format on primary plane: blend the primary plane into background with
translucent alpha.
- Support when the primary plane isn't exactly matching the output size: blend
the primary plane into the black background.
- Full alpha blending on all planes.
@ -129,13 +147,8 @@ There's lots of plane features we could add support for:
cursor api).
For all of these, we also want to review the igt test coverage and make sure
all relevant igt testcases work on vkms.
Prime Buffer Sharing
--------------------
- Syzbot report - WARNING in vkms_gem_free_object:
https://syzkaller.appspot.com/bug?extid=e7ad70d406e74d8fc9d0
all relevant igt testcases work on vkms. They are good options for an internship
project.
Runtime Configuration
---------------------
@ -153,7 +166,7 @@ module. Use/Test-cases:
the refresh rate.
The currently proposed solution is to expose vkms configuration through
configfs. All existing module options should be supported through configfs
too.
Writeback support
@ -162,6 +175,7 @@ Writeback support
- The writeback and CRC capture operations share the use of composer_enabled
boolean to ensure vblanks. Probably, when these operations work together,
composer_enabled needs to refcount the composer state to work properly.
[Good to get started]
- Add support for cloned writeback outputs and related test cases using a
cloned output in the IGT kms_writeback.


@ -1703,7 +1703,7 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
W: https://asahilinux.org
B: https://github.com/AsahiLinux/linux/issues
C: irc://chat.freenode.net/asahi-dev
C: irc://irc.oftc.net/asahi-dev
T: git https://github.com/AsahiLinux/linux.git
F: Documentation/devicetree/bindings/arm/apple.yaml
F: Documentation/devicetree/bindings/interrupt-controller/apple,aic.yaml
@ -5799,7 +5799,7 @@ M: Gerd Hoffmann <kraxel@redhat.com>
L: virtualization@lists.linux-foundation.org
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
F: drivers/gpu/drm/bochs/
F: drivers/gpu/drm/tiny/bochs.c
DRM DRIVER FOR BOE HIMAX8279D PANELS
M: Jerry Han <hanxu5@huaqin.corp-partner.google.com>
@ -5984,6 +5984,13 @@ S: Maintained
F: Documentation/devicetree/bindings/display/panel/raydium,rm67191.yaml
F: drivers/gpu/drm/panel/panel-raydium-rm67191.c
DRM DRIVER FOR SAMSUNG DB7430 PANELS
M: Linus Walleij <linus.walleij@linaro.org>
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
F: Documentation/devicetree/bindings/display/panel/samsung,lms397kf04.yaml
F: drivers/gpu/drm/panel/panel-samsung-db7430.c
DRM DRIVER FOR SITRONIX ST7703 PANELS
M: Guido Günther <agx@sigxcpu.org>
R: Purism Kernel Team <kernel@puri.sm>
@ -6082,21 +6089,27 @@ F: drivers/gpu/drm/vboxvideo/
DRM DRIVER FOR VMWARE VIRTUAL GPU
M: "VMware Graphics" <linux-graphics-maintainer@vmware.com>
M: Roland Scheidegger <sroland@vmware.com>
M: Zack Rusin <zackr@vmware.com>
L: dri-devel@lists.freedesktop.org
S: Supported
T: git git://people.freedesktop.org/~sroland/linux
T: git git://anongit.freedesktop.org/drm/drm-misc
F: drivers/gpu/drm/vmwgfx/
F: include/uapi/drm/vmwgfx_drm.h
DRM DRIVER FOR WIDECHIPS WS2401 PANELS
M: Linus Walleij <linus.walleij@linaro.org>
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
F: Documentation/devicetree/bindings/display/panel/samsung,lms380kf01.yaml
F: drivers/gpu/drm/panel/panel-widechips-ws2401.c
DRM DRIVERS
M: David Airlie <airlied@linux.ie>
M: Daniel Vetter <daniel@ffwll.ch>
L: dri-devel@lists.freedesktop.org
S: Maintained
B: https://gitlab.freedesktop.org/drm
C: irc://chat.freenode.net/dri-devel
C: irc://irc.oftc.net/dri-devel
T: git git://anongit.freedesktop.org/drm/drm
F: Documentation/devicetree/bindings/display/
F: Documentation/devicetree/bindings/gpu/
@ -9288,7 +9301,7 @@ S: Supported
W: https://01.org/linuxgraphics/
Q: http://patchwork.freedesktop.org/project/intel-gfx/
B: https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs
C: irc://chat.freenode.net/intel-gfx
C: irc://irc.oftc.net/intel-gfx
T: git git://anongit.freedesktop.org/drm-intel
F: Documentation/gpu/i915.rst
F: drivers/gpu/drm/i915/


@ -17,6 +17,7 @@
#ifdef CONFIG_EFI
void efi_init(void);
extern void efifb_setup_from_dmi(struct screen_info *si, const char *opt);
int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md);
int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);
@ -52,10 +53,6 @@ void efi_virtmap_unload(void);
struct screen_info *alloc_screen_info(void);
void free_screen_info(struct screen_info *si);
static inline void efifb_setup_from_dmi(struct screen_info *si, const char *opt)
{
}
/*
* A reasonable upper bound for the uncompressed kernel size is 32 MBytes,
* so we will reserve that amount of memory. We have no easy way to tell what


@ -255,21 +255,6 @@
#sound-dai-cells = <0>;
};
panel: panel {
/* Compatible will be filled in per-board */
power-supply = <&pp3300_dx_edp>;
backlight = <&backlight>;
hpd-gpios = <&sn65dsi86_bridge 2 GPIO_ACTIVE_HIGH>;
ports {
port {
panel_in_edp: endpoint {
remote-endpoint = <&sn65dsi86_out>;
};
};
};
};
pwmleds {
compatible = "pwm-leds";
keyboard_backlight: keyboard-backlight {
@ -666,6 +651,21 @@ edp_brij_i2c: &i2c2 {
};
};
};
aux-bus {
panel: panel {
/* Compatible will be filled in per-board */
power-supply = <&pp3300_dx_edp>;
backlight = <&backlight>;
hpd-gpios = <&sn65dsi86_bridge 2 GPIO_ACTIVE_HIGH>;
port {
panel_in_edp: endpoint {
remote-endpoint = <&sn65dsi86_out>;
};
};
};
};
};
};


@ -14,6 +14,7 @@
#ifdef CONFIG_EFI
extern void efi_init(void);
extern void efifb_setup_from_dmi(struct screen_info *si, const char *opt);
#else
#define efi_init()
#endif
@ -85,10 +86,6 @@ static inline void free_screen_info(struct screen_info *si)
{
}
static inline void efifb_setup_from_dmi(struct screen_info *si, const char *opt)
{
}
#define EFI_ALLOC_ALIGN SZ_64K
/*


@ -13,6 +13,7 @@
#ifdef CONFIG_EFI
extern void efi_init(void);
extern void efifb_setup_from_dmi(struct screen_info *si, const char *opt);
#else
#define efi_init()
#endif
@ -39,10 +40,6 @@ static inline void free_screen_info(struct screen_info *si)
{
}
static inline void efifb_setup_from_dmi(struct screen_info *si, const char *opt)
{
}
void efi_virtmap_load(void);
void efi_virtmap_unload(void);


@ -2767,32 +2767,6 @@ config AMD_NB
def_bool y
depends on CPU_SUP_AMD && PCI
config X86_SYSFB
bool "Mark VGA/VBE/EFI FB as generic system framebuffer"
help
Firmwares often provide initial graphics framebuffers so the BIOS,
bootloader or kernel can show basic video-output during boot for
user-guidance and debugging. Historically, x86 used the VESA BIOS
Extensions and EFI-framebuffers for this, which are mostly limited
to x86.
This option, if enabled, marks VGA/VBE/EFI framebuffers as generic
framebuffers so the new generic system-framebuffer drivers can be
used on x86. If the framebuffer is not compatible with the generic
modes, it is advertised as fallback platform framebuffer so legacy
drivers like efifb, vesafb and uvesafb can pick it up.
If this option is not selected, all system framebuffers are always
marked as fallback platform framebuffers as usual.
Note: Legacy fbdev drivers, including vesafb, efifb, uvesafb, will
not be able to pick up generic system framebuffers if this option
is selected. You are highly encouraged to enable simplefb as
replacement if you select this option. simplefb can correctly deal
with generic system framebuffers. But you should still keep vesafb
and others enabled as fallback if a system framebuffer is
incompatible with simplefb.
If unsure, say Y.
endmenu


@ -136,9 +136,6 @@ obj-$(CONFIG_X86_CHECK_BIOS_CORRUPTION) += check.o
obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o
obj-$(CONFIG_OF) += devicetree.o
obj-$(CONFIG_UPROBES) += uprobes.o
obj-y += sysfb.o
obj-$(CONFIG_X86_SYSFB) += sysfb_simplefb.o
obj-$(CONFIG_EFI) += sysfb_efi.o
obj-$(CONFIG_PERF_EVENTS) += perf_regs.o
obj-$(CONFIG_TRACING) += tracepoint.o


@ -72,6 +72,17 @@ menuconfig DMABUF_HEAPS
allows userspace to allocate dma-bufs that can be shared
between drivers.
menuconfig DMABUF_SYSFS_STATS
bool "DMA-BUF sysfs statistics"
select DMA_SHARED_BUFFER
help
Choose this option to enable DMA-BUF sysfs statistics
in location /sys/kernel/dmabuf/buffers.
/sys/kernel/dmabuf/buffers/<inode_number> will contain
statistics for the DMA-BUF with the unique inode number
<inode_number>.
source "drivers/dma-buf/heaps/Kconfig"
endmenu


@ -6,6 +6,7 @@ obj-$(CONFIG_DMABUF_HEAPS) += heaps/
obj-$(CONFIG_SYNC_FILE) += sync_file.o
obj-$(CONFIG_SW_SYNC) += sw_sync.o sync_debug.o
obj-$(CONFIG_UDMABUF) += udmabuf.o
obj-$(CONFIG_DMABUF_SYSFS_STATS) += dma-buf-sysfs-stats.o
dmabuf_selftests-y := \
selftest.o \


@ -0,0 +1,205 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* DMA-BUF sysfs statistics.
*
* Copyright (C) 2021 Google LLC.
*/
#include <linux/dma-buf.h>
#include <linux/dma-resv.h>
#include <linux/kobject.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include "dma-buf-sysfs-stats.h"
#define to_dma_buf_entry_from_kobj(x) container_of(x, struct dma_buf_sysfs_entry, kobj)
/**
* DOC: overview
*
* ``/sys/kernel/debug/dma_buf/bufinfo`` provides an overview of every DMA-BUF
* in the system. However, since debugfs is not safe to be mounted in
* production, procfs and sysfs can be used to gather DMA-BUF statistics on
* production systems.
*
* The ``/proc/<pid>/fdinfo/<fd>`` files in procfs can be used to gather
* information about DMA-BUF fds. Detailed documentation about the interface
* is present in Documentation/filesystems/proc.rst.
*
* Unfortunately, the existing procfs interfaces can only provide information
* about the DMA-BUFs for which processes hold fds or have the buffers mmapped
* into their address space. This necessitated the creation of the DMA-BUF sysfs
* statistics interface to provide per-buffer information on production systems.
*
* The interface at ``/sys/kernel/dmabuf/buffers`` exposes information about
* every DMA-BUF when ``CONFIG_DMABUF_SYSFS_STATS`` is enabled.
*
* The following stats are exposed by the interface:
*
* * ``/sys/kernel/dmabuf/buffers/<inode_number>/exporter_name``
* * ``/sys/kernel/dmabuf/buffers/<inode_number>/size``
*
* The information in the interface can also be used to derive per-exporter
* statistics. The data from the interface can be gathered on error conditions
* or other important events to provide a snapshot of DMA-BUF usage.
* It can also be collected periodically by telemetry to monitor various metrics.
*
* Detailed documentation about the interface is present in
* Documentation/ABI/testing/sysfs-kernel-dmabuf-buffers.
*/
struct dma_buf_stats_attribute {
struct attribute attr;
ssize_t (*show)(struct dma_buf *dmabuf,
struct dma_buf_stats_attribute *attr, char *buf);
};
#define to_dma_buf_stats_attr(x) container_of(x, struct dma_buf_stats_attribute, attr)
static ssize_t dma_buf_stats_attribute_show(struct kobject *kobj,
struct attribute *attr,
char *buf)
{
struct dma_buf_stats_attribute *attribute;
struct dma_buf_sysfs_entry *sysfs_entry;
struct dma_buf *dmabuf;
attribute = to_dma_buf_stats_attr(attr);
sysfs_entry = to_dma_buf_entry_from_kobj(kobj);
dmabuf = sysfs_entry->dmabuf;
if (!dmabuf || !attribute->show)
return -EIO;
return attribute->show(dmabuf, attribute, buf);
}
static const struct sysfs_ops dma_buf_stats_sysfs_ops = {
.show = dma_buf_stats_attribute_show,
};
static ssize_t exporter_name_show(struct dma_buf *dmabuf,
struct dma_buf_stats_attribute *attr,
char *buf)
{
return sysfs_emit(buf, "%s\n", dmabuf->exp_name);
}
static ssize_t size_show(struct dma_buf *dmabuf,
struct dma_buf_stats_attribute *attr,
char *buf)
{
return sysfs_emit(buf, "%zu\n", dmabuf->size);
}
static struct dma_buf_stats_attribute exporter_name_attribute =
__ATTR_RO(exporter_name);
static struct dma_buf_stats_attribute size_attribute = __ATTR_RO(size);
static struct attribute *dma_buf_stats_default_attrs[] = {
&exporter_name_attribute.attr,
&size_attribute.attr,
NULL,
};
ATTRIBUTE_GROUPS(dma_buf_stats_default);
static void dma_buf_sysfs_release(struct kobject *kobj)
{
struct dma_buf_sysfs_entry *sysfs_entry;
sysfs_entry = to_dma_buf_entry_from_kobj(kobj);
kfree(sysfs_entry);
}
static struct kobj_type dma_buf_ktype = {
.sysfs_ops = &dma_buf_stats_sysfs_ops,
.release = dma_buf_sysfs_release,
.default_groups = dma_buf_stats_default_groups,
};
void dma_buf_stats_teardown(struct dma_buf *dmabuf)
{
struct dma_buf_sysfs_entry *sysfs_entry;
sysfs_entry = dmabuf->sysfs_entry;
if (!sysfs_entry)
return;
kobject_del(&sysfs_entry->kobj);
kobject_put(&sysfs_entry->kobj);
}
/* Statistics files do not need to send uevents. */
static int dmabuf_sysfs_uevent_filter(struct kset *kset, struct kobject *kobj)
{
return 0;
}
static const struct kset_uevent_ops dmabuf_sysfs_no_uevent_ops = {
.filter = dmabuf_sysfs_uevent_filter,
};
static struct kset *dma_buf_stats_kset;
static struct kset *dma_buf_per_buffer_stats_kset;
int dma_buf_init_sysfs_statistics(void)
{
dma_buf_stats_kset = kset_create_and_add("dmabuf",
&dmabuf_sysfs_no_uevent_ops,
kernel_kobj);
if (!dma_buf_stats_kset)
return -ENOMEM;
dma_buf_per_buffer_stats_kset = kset_create_and_add("buffers",
&dmabuf_sysfs_no_uevent_ops,
&dma_buf_stats_kset->kobj);
if (!dma_buf_per_buffer_stats_kset) {
kset_unregister(dma_buf_stats_kset);
return -ENOMEM;
}
return 0;
}
void dma_buf_uninit_sysfs_statistics(void)
{
kset_unregister(dma_buf_per_buffer_stats_kset);
kset_unregister(dma_buf_stats_kset);
}
int dma_buf_stats_setup(struct dma_buf *dmabuf)
{
struct dma_buf_sysfs_entry *sysfs_entry;
int ret;
if (!dmabuf || !dmabuf->file)
return -EINVAL;
if (!dmabuf->exp_name) {
pr_err("exporter name must not be empty if stats needed\n");
return -EINVAL;
}
sysfs_entry = kzalloc(sizeof(struct dma_buf_sysfs_entry), GFP_KERNEL);
if (!sysfs_entry)
return -ENOMEM;
sysfs_entry->kobj.kset = dma_buf_per_buffer_stats_kset;
sysfs_entry->dmabuf = dmabuf;
dmabuf->sysfs_entry = sysfs_entry;
/* create the directory for buffer stats */
ret = kobject_init_and_add(&sysfs_entry->kobj, &dma_buf_ktype, NULL,
"%lu", file_inode(dmabuf->file)->i_ino);
if (ret)
goto err_sysfs_dmabuf;
return 0;
err_sysfs_dmabuf:
kobject_put(&sysfs_entry->kobj);
dmabuf->sysfs_entry = NULL;
return ret;
}
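
The per-buffer layout is easy to consume from userspace. A minimal
sketch, assuming the /sys/kernel/dmabuf/buffers/<inode>/{exporter_name,size}
layout created above (hypothetical example program, not part of the
series; error handling trimmed):

#include <dirent.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *base = "/sys/kernel/dmabuf/buffers";
	char path[512], exporter[64];
	unsigned long long size;
	struct dirent *de;
	DIR *dir = opendir(base);

	if (!dir)
		return 1;

	while ((de = readdir(dir)) != NULL) {
		FILE *f;

		if (de->d_name[0] == '.')
			continue;

		/* each buffer directory is named after the dma-buf inode */
		snprintf(path, sizeof(path), "%s/%s/exporter_name",
			 base, de->d_name);
		f = fopen(path, "r");
		if (!f)
			continue;
		if (!fgets(exporter, sizeof(exporter), f))
			exporter[0] = '\0';
		fclose(f);
		exporter[strcspn(exporter, "\n")] = '\0';

		snprintf(path, sizeof(path), "%s/%s/size", base, de->d_name);
		f = fopen(path, "r");
		if (!f)
			continue;
		if (fscanf(f, "%llu", &size) != 1)
			size = 0;
		fclose(f);

		printf("inode %s: exporter=%s size=%llu\n",
		       de->d_name, exporter, size);
	}
	closedir(dir);
	return 0;
}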

@@ -0,0 +1,35 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* DMA-BUF sysfs statistics.
*
* Copyright (C) 2021 Google LLC.
*/
#ifndef _DMA_BUF_SYSFS_STATS_H
#define _DMA_BUF_SYSFS_STATS_H
#ifdef CONFIG_DMABUF_SYSFS_STATS
int dma_buf_init_sysfs_statistics(void);
void dma_buf_uninit_sysfs_statistics(void);
int dma_buf_stats_setup(struct dma_buf *dmabuf);
void dma_buf_stats_teardown(struct dma_buf *dmabuf);
#else
static inline int dma_buf_init_sysfs_statistics(void)
{
return 0;
}
static inline void dma_buf_uninit_sysfs_statistics(void) {}
static inline int dma_buf_stats_setup(struct dma_buf *dmabuf)
{
return 0;
}
static inline void dma_buf_stats_teardown(struct dma_buf *dmabuf) {}
#endif
#endif // _DMA_BUF_SYSFS_STATS_H

@@ -29,6 +29,8 @@
#include <uapi/linux/dma-buf.h>
#include <uapi/linux/magic.h>
#include "dma-buf-sysfs-stats.h"
static inline int is_dma_buf_file(struct file *);
struct dma_buf_list {
@@ -74,6 +76,7 @@ static void dma_buf_release(struct dentry *dentry)
*/
BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active);
dma_buf_stats_teardown(dmabuf);
dmabuf->ops->release(dmabuf);
if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
@@ -580,6 +583,10 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
file->f_mode |= FMODE_LSEEK;
dmabuf->file = file;
ret = dma_buf_stats_setup(dmabuf);
if (ret)
goto err_sysfs;
mutex_init(&dmabuf->lock);
INIT_LIST_HEAD(&dmabuf->attachments);
@@ -589,6 +596,14 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
return dmabuf;
err_sysfs:
/*
* Set file->f_path.dentry->d_fsdata to NULL so that when
* dma_buf_release() gets invoked by dentry_ops, it exits
* early before calling the release() dma_buf op.
*/
file->f_path.dentry->d_fsdata = NULL;
fput(file);
err_dmabuf:
kfree(dmabuf);
err_module:
@@ -926,6 +941,9 @@ EXPORT_SYMBOL_GPL(dma_buf_unpin);
* the underlying backing storage is pinned for as long as a mapping exists,
* therefore users/importers should not hold onto a mapping for undue amounts of
* time.
*
* Important: Dynamic importers must wait for the exclusive fence of the struct
* dma_resv attached to the DMA-BUF first.
*/
struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
enum dma_data_direction direction)
@@ -992,7 +1010,6 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
}
}
#endif /* CONFIG_DMA_API_DEBUG */
return sg_table;
}
EXPORT_SYMBOL_GPL(dma_buf_map_attachment);
@@ -1469,6 +1486,12 @@ static inline void dma_buf_uninit_debugfs(void)
static int __init dma_buf_init(void)
{
int ret;
ret = dma_buf_init_sysfs_statistics();
if (ret)
return ret;
dma_buf_mnt = kern_mount(&dma_buf_fs_type);
if (IS_ERR(dma_buf_mnt))
return PTR_ERR(dma_buf_mnt);
@@ -1484,5 +1507,6 @@ static void __exit dma_buf_deinit(void)
{
dma_buf_uninit_debugfs();
kern_unmount(dma_buf_mnt);
dma_buf_uninit_sysfs_statistics();
}
__exitcall(dma_buf_deinit);
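
The documentation hunk above adds a new implicit-fencing rule: dynamic
importers must wait for the exclusive fence before touching the
mapping. A hedged kernel-side sketch of what that means for an
importer, using the dma_resv accessors in this tree (illustrative, not
code from the series):

	struct dma_fence *excl;
	long ret;

	dma_resv_assert_held(attach->dmabuf->resv);

	/* the exclusive slot carries the fence the importer must honor */
	excl = dma_resv_excl_fence(attach->dmabuf->resv);
	if (excl) {
		ret = dma_fence_wait(excl, true);
		if (ret)
			return ERR_PTR(ret);
	}
	/* only now is it safe to use the returned sg_table */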

@@ -137,6 +137,7 @@ static void dma_fence_chain_cb(struct dma_fence *f, struct dma_fence_cb *cb)
struct dma_fence_chain *chain;
chain = container_of(cb, typeof(*chain), cb);
init_irq_work(&chain->work, dma_fence_chain_irq_work);
irq_work_queue(&chain->work);
dma_fence_put(f);
}
@@ -239,7 +240,6 @@ void dma_fence_chain_init(struct dma_fence_chain *chain,
rcu_assign_pointer(chain->prev, prev);
chain->fence = fence;
chain->prev_seqno = 0;
init_irq_work(&chain->work, dma_fence_chain_irq_work);
/* Try to reuse the context of the previous chain node. */
if (prev_chain && __dma_fence_is_later(seqno, prev->seqno, prev->ops)) {

@@ -615,25 +615,21 @@ static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence)
*/
bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all)
{
unsigned int seq, shared_count;
struct dma_fence *fence;
unsigned int seq;
int ret;
rcu_read_lock();
retry:
ret = true;
shared_count = 0;
seq = read_seqcount_begin(&obj->seq);
if (test_all) {
struct dma_resv_list *fobj = dma_resv_shared_list(obj);
unsigned int i;
if (fobj)
shared_count = fobj->shared_count;
unsigned int i, shared_count;
shared_count = fobj ? fobj->shared_count : 0;
for (i = 0; i < shared_count; ++i) {
struct dma_fence *fence;
fence = rcu_dereference(fobj->shared[i]);
ret = dma_resv_test_signaled_single(fence);
if (ret < 0)
@@ -641,23 +637,18 @@ retry:
else if (!ret)
break;
}
}
if (read_seqcount_retry(&obj->seq, seq))
fence = dma_resv_excl_fence(obj);
if (ret && fence) {
ret = dma_resv_test_signaled_single(fence);
if (ret < 0)
goto retry;
}
if (!shared_count) {
struct dma_fence *fence_excl = dma_resv_excl_fence(obj);
if (fence_excl) {
ret = dma_resv_test_signaled_single(fence_excl);
if (ret < 0)
goto retry;
if (read_seqcount_retry(&obj->seq, seq))
goto retry;
}
}
if (read_seqcount_retry(&obj->seq, seq))
goto retry;
rcu_read_unlock();
return ret;
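
The rework above is a standard lockless read: sample the seqcount,
compute the answer, and redo the whole computation if a writer raced in
between. Reduced to its skeleton (compute_result() is a hypothetical
stand-in for the fence checks):

	unsigned int seq;
	bool ret;

	rcu_read_lock();
	do {
		seq = read_seqcount_begin(&obj->seq);
		ret = compute_result(obj);	/* hypothetical */
	} while (read_seqcount_retry(&obj->seq, seq));
	rcu_read_unlock();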

@@ -58,28 +58,20 @@ static struct dma_fence *mock_fence(void)
return &f->base;
}
static inline struct mock_chain {
struct dma_fence_chain base;
} *to_mock_chain(struct dma_fence *f) {
return container_of(f, struct mock_chain, base.base);
}
static struct dma_fence *mock_chain(struct dma_fence *prev,
struct dma_fence *fence,
u64 seqno)
{
struct mock_chain *f;
struct dma_fence_chain *f;
f = kmalloc(sizeof(*f), GFP_KERNEL);
f = dma_fence_chain_alloc();
if (!f)
return NULL;
dma_fence_chain_init(&f->base,
dma_fence_get(prev),
dma_fence_get(fence),
dma_fence_chain_init(f, dma_fence_get(prev), dma_fence_get(fence),
seqno);
return &f->base.base;
return &f->base;
}
static int sanitycheck(void *arg)

@@ -11,9 +11,15 @@
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/udmabuf.h>
#include <linux/hugetlb.h>
static const u32 list_limit = 1024; /* udmabuf_create_list->count limit */
static const size_t size_limit_mb = 64; /* total dmabuf size, in megabytes */
static int list_limit = 1024;
module_param(list_limit, int, 0644);
MODULE_PARM_DESC(list_limit, "udmabuf_create_list->count limit. Default is 1024.");
static int size_limit_mb = 64;
module_param(size_limit_mb, int, 0644);
MODULE_PARM_DESC(size_limit_mb, "Max size of a dmabuf, in megabytes. Default is 64.");
struct udmabuf {
pgoff_t pagecount;
@@ -160,10 +166,13 @@ static long udmabuf_create(struct miscdevice *device,
{
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
struct file *memfd = NULL;
struct address_space *mapping = NULL;
struct udmabuf *ubuf;
struct dma_buf *buf;
pgoff_t pgoff, pgcnt, pgidx, pgbuf = 0, pglimit;
struct page *page;
struct page *page, *hpage = NULL;
pgoff_t subpgoff, maxsubpgs;
struct hstate *hpstate;
int seals, ret = -EINVAL;
u32 i, flags;
@@ -194,7 +203,8 @@
memfd = fget(list[i].memfd);
if (!memfd)
goto err;
if (!shmem_mapping(file_inode(memfd)->i_mapping))
mapping = file_inode(memfd)->i_mapping;
if (!shmem_mapping(mapping) && !is_file_hugepages(memfd))
goto err;
seals = memfd_fcntl(memfd, F_GET_SEALS, 0);
if (seals == -EINVAL)
@@ -205,17 +215,48 @@
goto err;
pgoff = list[i].offset >> PAGE_SHIFT;
pgcnt = list[i].size >> PAGE_SHIFT;
if (is_file_hugepages(memfd)) {
hpstate = hstate_file(memfd);
pgoff = list[i].offset >> huge_page_shift(hpstate);
subpgoff = (list[i].offset &
~huge_page_mask(hpstate)) >> PAGE_SHIFT;
maxsubpgs = huge_page_size(hpstate) >> PAGE_SHIFT;
}
for (pgidx = 0; pgidx < pgcnt; pgidx++) {
page = shmem_read_mapping_page(
file_inode(memfd)->i_mapping, pgoff + pgidx);
if (IS_ERR(page)) {
ret = PTR_ERR(page);
goto err;
if (is_file_hugepages(memfd)) {
if (!hpage) {
hpage = find_get_page_flags(mapping, pgoff,
FGP_ACCESSED);
if (!hpage) {
ret = -EINVAL;
goto err;
}
}
page = hpage + subpgoff;
get_page(page);
subpgoff++;
if (subpgoff == maxsubpgs) {
put_page(hpage);
hpage = NULL;
subpgoff = 0;
pgoff++;
}
} else {
page = shmem_read_mapping_page(mapping,
pgoff + pgidx);
if (IS_ERR(page)) {
ret = PTR_ERR(page);
goto err;
}
}
ubuf->pages[pgbuf++] = page;
}
fput(memfd);
memfd = NULL;
if (hpage) {
put_page(hpage);
hpage = NULL;
}
}
exp_info.ops = &udmabuf_ops;
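
With hugetlb-backed memfds now accepted, userspace can hand huge-page
memory to udmabuf directly. A hypothetical sketch of the create path
(the 2 MiB size is an assumption and error handling is trimmed):

#define _GNU_SOURCE
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/udmabuf.h>

int main(void)
{
	size_t size = 2 * 1024 * 1024;	/* one 2 MiB huge page */
	struct udmabuf_create create = { 0 };
	int memfd, devfd, buffd;

	memfd = memfd_create("hugebuf", MFD_HUGETLB | MFD_ALLOW_SEALING);
	ftruncate(memfd, size);
	/* udmabuf refuses memfds that may still shrink */
	fcntl(memfd, F_ADD_SEALS, F_SEAL_SHRINK);

	devfd = open("/dev/udmabuf", O_RDWR);
	create.memfd  = memfd;
	create.offset = 0;
	create.size   = size;
	buffd = ioctl(devfd, UDMABUF_CREATE, &create);

	return buffd < 0 ? 1 : 0;
}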

@@ -251,6 +251,38 @@ config QCOM_SCM_DOWNLOAD_MODE_DEFAULT
Say Y here to enable "download mode" by default.
config SYSFB
bool
default y
depends on X86 || EFI
config SYSFB_SIMPLEFB
bool "Mark VGA/VBE/EFI FB as generic system framebuffer"
depends on SYSFB
help
Firmwares often provide initial graphics framebuffers so the BIOS,
bootloader or kernel can show basic video-output during boot for
user-guidance and debugging. Historically, x86 used the VESA BIOS
Extensions and EFI-framebuffers for this, which are mostly limited
to x86 BIOS or EFI systems.
This option, if enabled, marks VGA/VBE/EFI framebuffers as generic
framebuffers so the new generic system-framebuffer drivers can be
used instead. If the framebuffer is not compatible with the generic
modes, it is advertised as fallback platform framebuffer so legacy
drivers like efifb, vesafb and uvesafb can pick it up.
If this option is not selected, all system framebuffers are always
marked as fallback platform framebuffers as usual.
Note: Legacy fbdev drivers, including vesafb, efifb, uvesafb, will
not be able to pick up generic system framebuffers if this option
is selected. You are highly encouraged to enable simplefb as
replacement if you select this option. simplefb can correctly deal
with generic system framebuffers. But you should still keep vesafb
and others enabled as fallback if a system framebuffer is
incompatible with simplefb.
If unsure, say Y.
config TI_SCI_PROTOCOL
tristate "TI System Control Interface (TISCI) Message Protocol"
depends on TI_MESSAGE_MANAGER

@@ -18,6 +18,8 @@ obj-$(CONFIG_FIRMWARE_MEMMAP) += memmap.o
obj-$(CONFIG_RASPBERRYPI_FIRMWARE) += raspberrypi.o
obj-$(CONFIG_FW_CFG_SYSFS) += qemu_fw_cfg.o
obj-$(CONFIG_QCOM_SCM) += qcom_scm.o qcom_scm-smc.o qcom_scm-legacy.o
obj-$(CONFIG_SYSFB) += sysfb.o
obj-$(CONFIG_SYSFB_SIMPLEFB) += sysfb_simplefb.o
obj-$(CONFIG_TI_SCI_PROTOCOL) += ti_sci.o
obj-$(CONFIG_TRUSTED_FOUNDATIONS) += trusted_foundations.o
obj-$(CONFIG_TURRIS_MOX_RWTM) += turris-mox-rwtm.o

@@ -36,6 +36,8 @@ obj-$(CONFIG_LOAD_UEFI_KEYS) += mokvar-table.o
fake_map-y += fake_mem.o
fake_map-$(CONFIG_X86) += x86_fake_mem.o
obj-$(CONFIG_SYSFB) += sysfb_efi.o
arm-obj-$(CONFIG_EFI) := efi-init.o arm-runtime.o
obj-$(CONFIG_ARM) += $(arm-obj-y)
obj-$(CONFIG_ARM64) += $(arm-obj-y)

@@ -275,93 +275,3 @@ void __init efi_init(void)
}
#endif
}
static bool efifb_overlaps_pci_range(const struct of_pci_range *range)
{
u64 fb_base = screen_info.lfb_base;
if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE)
fb_base |= (u64)(unsigned long)screen_info.ext_lfb_base << 32;
return fb_base >= range->cpu_addr &&
fb_base < (range->cpu_addr + range->size);
}
static struct device_node *find_pci_overlap_node(void)
{
struct device_node *np;
for_each_node_by_type(np, "pci") {
struct of_pci_range_parser parser;
struct of_pci_range range;
int err;
err = of_pci_range_parser_init(&parser, np);
if (err) {
pr_warn("of_pci_range_parser_init() failed: %d\n", err);
continue;
}
for_each_of_pci_range(&parser, &range)
if (efifb_overlaps_pci_range(&range))
return np;
}
return NULL;
}
/*
* If the efifb framebuffer is backed by a PCI graphics controller, we have
* to ensure that this relation is expressed using a device link when
* running in DT mode, or the probe order may be reversed, resulting in a
* resource reservation conflict on the memory window that the efifb
* framebuffer steals from the PCIe host bridge.
*/
static int efifb_add_links(struct fwnode_handle *fwnode)
{
struct device_node *sup_np;
sup_np = find_pci_overlap_node();
/*
* If there's no PCI graphics controller backing the efifb, we are
* done here.
*/
if (!sup_np)
return 0;
fwnode_link_add(fwnode, of_fwnode_handle(sup_np));
of_node_put(sup_np);
return 0;
}
static const struct fwnode_operations efifb_fwnode_ops = {
.add_links = efifb_add_links,
};
static struct fwnode_handle efifb_fwnode;
static int __init register_gop_device(void)
{
struct platform_device *pd;
int err;
if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI)
return 0;
pd = platform_device_alloc("efi-framebuffer", 0);
if (!pd)
return -ENOMEM;
if (IS_ENABLED(CONFIG_PCI)) {
fwnode_init(&efifb_fwnode, &efifb_fwnode_ops);
pd->dev.fwnode = &efifb_fwnode;
}
err = platform_device_add_data(pd, &screen_info, sizeof(screen_info));
if (err)
return err;
return platform_device_add(pd);
}
subsys_initcall(register_gop_device);

@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Generic System Framebuffers on x86
* Generic System Framebuffers
* Copyright (c) 2012-2013 David Herrmann <dh.herrmann@gmail.com>
*
* EFI Quirks Copyright (c) 2006 Edgar Hucek <gimli@dark-green.com>
@@ -19,12 +19,14 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/screen_info.h>
#include <linux/sysfb.h>
#include <video/vga.h>
#include <asm/efi.h>
#include <asm/sysfb.h>
enum {
OVERRIDE_NONE = 0x0,
@@ -267,7 +269,73 @@ static const struct dmi_system_id efifb_dmi_swap_width_height[] __initconst = {
{},
};
__init void sysfb_apply_efi_quirks(void)
static bool efifb_overlaps_pci_range(const struct of_pci_range *range)
{
u64 fb_base = screen_info.lfb_base;
if (screen_info.capabilities & VIDEO_CAPABILITY_64BIT_BASE)
fb_base |= (u64)(unsigned long)screen_info.ext_lfb_base << 32;
return fb_base >= range->cpu_addr &&
fb_base < (range->cpu_addr + range->size);
}
static struct device_node *find_pci_overlap_node(void)
{
struct device_node *np;
for_each_node_by_type(np, "pci") {
struct of_pci_range_parser parser;
struct of_pci_range range;
int err;
err = of_pci_range_parser_init(&parser, np);
if (err) {
pr_warn("of_pci_range_parser_init() failed: %d\n", err);
continue;
}
for_each_of_pci_range(&parser, &range)
if (efifb_overlaps_pci_range(&range))
return np;
}
return NULL;
}
/*
* If the efifb framebuffer is backed by a PCI graphics controller, we have
* to ensure that this relation is expressed using a device link when
* running in DT mode, or the probe order may be reversed, resulting in a
* resource reservation conflict on the memory window that the efifb
* framebuffer steals from the PCIe host bridge.
*/
static int efifb_add_links(struct fwnode_handle *fwnode)
{
struct device_node *sup_np;
sup_np = find_pci_overlap_node();
/*
* If there's no PCI graphics controller backing the efifb, we are
* done here.
*/
if (!sup_np)
return 0;
fwnode_link_add(fwnode, of_fwnode_handle(sup_np));
of_node_put(sup_np);
return 0;
}
static const struct fwnode_operations efifb_fwnode_ops = {
.add_links = efifb_add_links,
};
#ifdef CONFIG_EFI
static struct fwnode_handle efifb_fwnode;
__init void sysfb_apply_efi_quirks(struct platform_device *pd)
{
if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI ||
!(screen_info.capabilities & VIDEO_CAPABILITY_SKIP_QUIRKS))
@@ -281,4 +349,10 @@ __init void sysfb_apply_efi_quirks(void)
screen_info.lfb_height = temp;
screen_info.lfb_linelength = 4 * screen_info.lfb_width;
}
if (screen_info.orig_video_isVGA == VIDEO_TYPE_EFI && IS_ENABLED(CONFIG_PCI)) {
fwnode_init(&efifb_fwnode, &efifb_fwnode_ops);
pd->dev.fwnode = &efifb_fwnode;
}
}
#endif

@@ -1,11 +1,11 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Generic System Framebuffers on x86
* Generic System Framebuffers
* Copyright (c) 2012-2013 David Herrmann <dh.herrmann@gmail.com>
*/
/*
* Simple-Framebuffer support for x86 systems
* Simple-Framebuffer support
* Create a platform-device for any available boot framebuffer. The
* simple-framebuffer platform device is already available on DT systems, so
* this module parses the global "screen_info" object and creates a suitable
@@ -16,12 +16,12 @@
* to pick these devices up without messing with simple-framebuffer drivers.
* The global "screen_info" is still valid at all times.
*
* If CONFIG_X86_SYSFB is not selected, we never register "simple-framebuffer"
* If CONFIG_SYSFB_SIMPLEFB is not selected, never register "simple-framebuffer"
* platform devices, but only use legacy framebuffer devices for
* backwards compatibility.
*
* TODO: We set the dev_id field of all platform-devices to 0. This allows
* other x86 OF/DT parsers to create such devices, too. However, they must
* other OF/DT parsers to create such devices, too. However, they must
* start at offset 1 for this to work.
*/
@@ -32,7 +32,7 @@
#include <linux/platform_data/simplefb.h>
#include <linux/platform_device.h>
#include <linux/screen_info.h>
#include <asm/sysfb.h>
#include <linux/sysfb.h>
static __init int sysfb_init(void)
{
@@ -43,12 +43,10 @@ static __init int sysfb_init(void)
bool compatible;
int ret;
sysfb_apply_efi_quirks();
/* try to create a simple-framebuffer device */
compatible = parse_mode(si, &mode);
compatible = sysfb_parse_mode(si, &mode);
if (compatible) {
ret = create_simplefb(si, &mode);
ret = sysfb_create_simplefb(si, &mode);
if (!ret)
return 0;
}
@@ -61,9 +59,24 @@ static __init int sysfb_init(void)
else
name = "platform-framebuffer";
pd = platform_device_register_resndata(NULL, name, 0,
NULL, 0, si, sizeof(*si));
return PTR_ERR_OR_ZERO(pd);
pd = platform_device_alloc(name, 0);
if (!pd)
return -ENOMEM;
sysfb_apply_efi_quirks(pd);
ret = platform_device_add_data(pd, si, sizeof(*si));
if (ret)
goto err;
ret = platform_device_add(pd);
if (ret)
goto err;
return 0;
err:
platform_device_put(pd);
return ret;
}
/* must execute after PCI subsystem for EFI quirks */

@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Generic System Framebuffers on x86
* Generic System Framebuffers
* Copyright (c) 2012-2013 David Herrmann <dh.herrmann@gmail.com>
*/
@@ -18,14 +18,14 @@
#include <linux/platform_data/simplefb.h>
#include <linux/platform_device.h>
#include <linux/screen_info.h>
#include <asm/sysfb.h>
#include <linux/sysfb.h>
static const char simplefb_resname[] = "BOOTFB";
static const struct simplefb_format formats[] = SIMPLEFB_FORMATS;
/* try parsing x86 screen_info into a simple-framebuffer mode struct */
__init bool parse_mode(const struct screen_info *si,
struct simplefb_platform_data *mode)
/* try parsing screen_info into a simple-framebuffer mode struct */
__init bool sysfb_parse_mode(const struct screen_info *si,
struct simplefb_platform_data *mode)
{
const struct simplefb_format *f;
__u8 type;
@@ -57,13 +57,14 @@ __init bool parse_mode(const struct screen_info *si,
return false;
}
__init int create_simplefb(const struct screen_info *si,
const struct simplefb_platform_data *mode)
__init int sysfb_create_simplefb(const struct screen_info *si,
const struct simplefb_platform_data *mode)
{
struct platform_device *pd;
struct resource res;
u64 base, size;
u32 length;
int ret;
/*
* If the 64BIT_BASE capability is set, ext_lfb_base will contain the
@@ -105,7 +106,19 @@ __init int create_simplefb(const struct screen_info *si,
if (res.end <= res.start)
return -EINVAL;
pd = platform_device_register_resndata(NULL, "simple-framebuffer", 0,
&res, 1, mode, sizeof(*mode));
return PTR_ERR_OR_ZERO(pd);
pd = platform_device_alloc("simple-framebuffer", 0);
if (!pd)
return -ENOMEM;
sysfb_apply_efi_quirks(pd);
ret = platform_device_add_resources(pd, &res, 1);
if (ret)
return ret;
ret = platform_device_add_data(pd, mode, sizeof(*mode));
if (ret)
return ret;
return platform_device_add(pd);
}

@@ -35,6 +35,11 @@ config DRM_MIPI_DSI
bool
depends on DRM
config DRM_DP_AUX_BUS
tristate
depends on DRM
depends on OF
config DRM_DP_AUX_CHARDEV
bool "DRM DP AUX Interface"
depends on DRM
@@ -251,7 +256,6 @@ config DRM_AMDGPU
select HWMON
select BACKLIGHT_CLASS_DEVICE
select INTERVAL_TREE
select CHASH
help
Choose this option if you have a recent AMD Radeon graphics card.
@@ -317,8 +321,6 @@ source "drivers/gpu/drm/tilcdc/Kconfig"
source "drivers/gpu/drm/qxl/Kconfig"
source "drivers/gpu/drm/bochs/Kconfig"
source "drivers/gpu/drm/virtio/Kconfig"
source "drivers/gpu/drm/msm/Kconfig"

@@ -33,6 +33,8 @@ drm-$(CONFIG_PCI) += drm_pci.o
drm-$(CONFIG_DEBUG_FS) += drm_debugfs.o drm_debugfs_crc.o
drm-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
obj-$(CONFIG_DRM_DP_AUX_BUS) += drm_dp_aux_bus.o
drm_vram_helper-y := drm_gem_vram_helper.o
obj-$(CONFIG_DRM_VRAM_HELPER) += drm_vram_helper.o
@@ -96,7 +98,6 @@ obj-y += omapdrm/
obj-$(CONFIG_DRM_SUN4I) += sun4i/
obj-y += tilcdc/
obj-$(CONFIG_DRM_QXL) += qxl/
obj-$(CONFIG_DRM_BOCHS) += bochs/
obj-$(CONFIG_DRM_VIRTIO_GPU) += virtio/
obj-$(CONFIG_DRM_MSM) += msm/
obj-$(CONFIG_DRM_TEGRA) += tegra/

@@ -57,7 +57,8 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o amdgpu_vm_cpu.o \
amdgpu_vm_sdma.o amdgpu_discovery.o amdgpu_ras_eeprom.o amdgpu_nbio.o \
amdgpu_umc.o smu_v11_0_i2c.o amdgpu_fru_eeprom.o amdgpu_rap.o \
amdgpu_fw_attestation.o amdgpu_securedisplay.o amdgpu_hdp.o
amdgpu_fw_attestation.o amdgpu_securedisplay.o amdgpu_hdp.o \
amdgpu_eeprom.o amdgpu_mca.o
amdgpu-$(CONFIG_PROC_FS) += amdgpu_fdinfo.o
@@ -75,7 +76,7 @@ amdgpu-y += \
vega20_reg_init.o nbio_v7_4.o nbio_v2_3.o nv.o navi10_reg_init.o navi14_reg_init.o \
arct_reg_init.o navi12_reg_init.o mxgpu_nv.o sienna_cichlid_reg_init.o vangogh_reg_init.o \
nbio_v7_2.o dimgrey_cavefish_reg_init.o hdp_v4_0.o hdp_v5_0.o aldebaran_reg_init.o aldebaran.o \
beige_goby_reg_init.o yellow_carp_reg_init.o
beige_goby_reg_init.o yellow_carp_reg_init.o cyan_skillfish_reg_init.o
# add DF block
amdgpu-y += \
@@ -111,6 +112,7 @@ amdgpu-y += \
psp_v3_1.o \
psp_v10_0.o \
psp_v11_0.o \
psp_v11_0_8.o \
psp_v12_0.o \
psp_v13_0.o
@@ -118,7 +120,7 @@ amdgpu-y += \
amdgpu-y += \
dce_v10_0.o \
dce_v11_0.o \
dce_virtual.o
amdgpu_vkms.o
# add GFX block
amdgpu-y += \
@@ -187,6 +189,10 @@ amdgpu-y += \
amdgpu-y += \
amdgpu_reset.o
# add MCA block
amdgpu-y += \
mca_v3_0.o
# add amdkfd interfaces
amdgpu-y += amdgpu_amdkfd.o

@@ -108,6 +108,7 @@
#include "amdgpu_df.h"
#include "amdgpu_smuio.h"
#include "amdgpu_fdinfo.h"
#include "amdgpu_mca.h"
#define MAX_GPU_INSTANCE 16
@@ -916,6 +917,7 @@ struct amdgpu_device {
/* display */
bool enable_virtual_display;
struct amdgpu_vkms_output *amdgpu_vkms_output;
struct amdgpu_mode_info mode_info;
/* For pre-DCE11. DCE11 and later are in "struct amdgpu_device->dm" */
struct work_struct hotplug_work;
@@ -1008,6 +1010,9 @@ struct amdgpu_device {
/* df */
struct amdgpu_df df;
/* MCA */
struct amdgpu_mca mca;
struct amdgpu_ip_block ip_blocks[AMDGPU_MAX_IP_NUM];
uint32_t harvest_ip_mask;
int num_ip_blocks;
@@ -1108,8 +1113,13 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev);
int amdgpu_gpu_wait_for_idle(struct amdgpu_device *adev);
void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
void *buf, size_t size, bool write);
size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
void *buf, size_t size, bool write);
void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
uint32_t *buf, size_t size, bool write);
void *buf, size_t size, bool write);
uint32_t amdgpu_device_rreg(struct amdgpu_device *adev,
uint32_t reg, uint32_t acc_flags);
void amdgpu_device_wreg(struct amdgpu_device *adev,
@@ -1265,6 +1275,8 @@ int emu_soc_asic_init(struct amdgpu_device *adev);
#define amdgpu_inc_vram_lost(adev) atomic_inc(&((adev)->vram_lost_counter));
#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
/* Common functions */
bool amdgpu_device_has_job_running(struct amdgpu_device *adev);
bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev);
@@ -1385,12 +1397,12 @@ int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_sta
int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev);
void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps);
bool amdgpu_acpi_is_s0ix_supported(struct amdgpu_device *adev);
bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev);
void amdgpu_acpi_detect(void);
#else
static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
static inline bool amdgpu_acpi_is_s0ix_supported(struct amdgpu_device *adev) { return false; }
static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; }
static inline void amdgpu_acpi_detect(void) { }
static inline bool amdgpu_acpi_is_power_shift_control_supported(void) { return false; }
static inline int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev,

@@ -160,17 +160,28 @@ static int acp_poweron(struct generic_pm_domain *genpd)
return 0;
}
static struct device *get_mfd_cell_dev(const char *device_name, int r)
static int acp_genpd_add_device(struct device *dev, void *data)
{
char auto_dev_name[25];
struct device *dev;
struct generic_pm_domain *gpd = data;
int ret;
snprintf(auto_dev_name, sizeof(auto_dev_name),
"%s.%d.auto", device_name, r);
dev = bus_find_device_by_name(&platform_bus_type, NULL, auto_dev_name);
dev_info(dev, "device %s added to pm domain\n", auto_dev_name);
ret = pm_genpd_add_device(gpd, dev);
if (ret)
dev_err(dev, "Failed to add dev to genpd %d\n", ret);
return dev;
return ret;
}
static int acp_genpd_remove_device(struct device *dev, void *data)
{
int ret;
ret = pm_genpd_remove_device(dev);
if (ret)
dev_err(dev, "Failed to remove dev from genpd %d\n", ret);
/* Continue to remove */
return 0;
}
/**
@@ -181,11 +192,10 @@ static struct device *get_mfd_cell_dev(const char *device_name, int r)
*/
static int acp_hw_init(void *handle)
{
int r, i;
int r;
uint64_t acp_base;
u32 val = 0;
u32 count = 0;
struct device *dev;
struct i2s_platform_data *i2s_pdata = NULL;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -341,15 +351,10 @@ static int acp_hw_init(void *handle)
if (r)
goto failure;
for (i = 0; i < ACP_DEVS ; i++) {
dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
r = pm_genpd_add_device(&adev->acp.acp_genpd->gpd, dev);
if (r) {
dev_err(dev, "Failed to add dev to genpd\n");
goto failure;
}
}
r = device_for_each_child(adev->acp.parent, &adev->acp.acp_genpd->gpd,
acp_genpd_add_device);
if (r)
goto failure;
/* Assert Soft reset of ACP */
val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
@@ -410,10 +415,8 @@ failure:
*/
static int acp_hw_fini(void *handle)
{
int i, ret;
u32 val = 0;
u32 count = 0;
struct device *dev;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
/* return early if no ACP */
@@ -458,13 +461,8 @@ static int acp_hw_fini(void *handle)
udelay(100);
}
for (i = 0; i < ACP_DEVS ; i++) {
dev = get_mfd_cell_dev(adev->acp.acp_cell[i].name, i);
ret = pm_genpd_remove_device(dev);
/* If removal fails, dont giveup and try rest */
if (ret)
dev_err(dev, "remove dev from genpd failed\n");
}
device_for_each_child(adev->acp.parent, NULL,
acp_genpd_remove_device);
mfd_remove_devices(adev->acp.parent);
kfree(adev->acp.acp_res);
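
The conversion above swaps name-based lookups of the MFD cells for the
driver core's own child iterator: the callback runs once per child of
adev->acp.parent, and a nonzero return aborts the walk. The generic
pattern, sketched with a hypothetical callback:

	static int show_child(struct device *dev, void *data)
	{
		dev_info(dev, "visiting child device\n");
		return 0;	/* nonzero would stop the iteration */
	}

	/* ... */
	device_for_each_child(parent, NULL, show_child);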

@@ -854,8 +854,8 @@ int amdgpu_acpi_init(struct amdgpu_device *adev)
if (amdgpu_device_has_dc_support(adev)) {
#if defined(CONFIG_DRM_AMD_DC)
struct amdgpu_display_manager *dm = &adev->dm;
if (dm->backlight_dev)
atif->bd = dm->backlight_dev;
if (dm->backlight_dev[0])
atif->bd = dm->backlight_dev[0];
#endif
} else {
struct drm_encoder *tmp;
@@ -1032,13 +1032,13 @@ void amdgpu_acpi_detect(void)
}
/**
* amdgpu_acpi_is_s0ix_supported
* amdgpu_acpi_is_s0ix_active
*
* @adev: amdgpu_device pointer
*
* returns true if active, false if not.
*/
bool amdgpu_acpi_is_s0ix_supported(struct amdgpu_device *adev)
bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev)
{
#if IS_ENABLED(CONFIG_AMD_PMC) && IS_ENABLED(CONFIG_SUSPEND)
if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0) {

@@ -21,6 +21,7 @@
*/
#include "amdgpu_amdkfd.h"
#include "amd_pcie.h"
#include "amd_shared.h"
#include "amdgpu.h"
@@ -553,6 +554,88 @@ uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *s
return (uint8_t)ret;
}
int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct kgd_dev *dst, struct kgd_dev *src, bool is_min)
{
struct amdgpu_device *adev = (struct amdgpu_device *)dst, *peer_adev;
int num_links;
if (adev->asic_type != CHIP_ALDEBARAN)
return 0;
if (src)
peer_adev = (struct amdgpu_device *)src;
/* num links returns 0 for indirect peers since indirect route is unknown. */
num_links = is_min ? 1 : amdgpu_xgmi_get_num_links(adev, peer_adev);
if (num_links < 0) {
DRM_ERROR("amdgpu: failed to get xgmi num links between node %d and %d. ret = %d\n",
adev->gmc.xgmi.physical_node_id,
peer_adev->gmc.xgmi.physical_node_id, num_links);
num_links = 0;
}
/* Aldebaran xGMI DPM is defeatured so assume x16 x 25Gbps for bandwidth. */
return (num_links * 16 * 25000)/BITS_PER_BYTE;
}
int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct kgd_dev *dev, bool is_min)
{
struct amdgpu_device *adev = (struct amdgpu_device *)dev;
int num_lanes_shift = (is_min ? ffs(adev->pm.pcie_mlw_mask) :
fls(adev->pm.pcie_mlw_mask)) - 1;
int gen_speed_shift = (is_min ? ffs(adev->pm.pcie_gen_mask &
CAIL_PCIE_LINK_SPEED_SUPPORT_MASK) :
fls(adev->pm.pcie_gen_mask &
CAIL_PCIE_LINK_SPEED_SUPPORT_MASK)) - 1;
uint32_t num_lanes_mask = 1 << num_lanes_shift;
uint32_t gen_speed_mask = 1 << gen_speed_shift;
int num_lanes_factor = 0, gen_speed_mbits_factor = 0;
switch (num_lanes_mask) {
case CAIL_PCIE_LINK_WIDTH_SUPPORT_X1:
num_lanes_factor = 1;
break;
case CAIL_PCIE_LINK_WIDTH_SUPPORT_X2:
num_lanes_factor = 2;
break;
case CAIL_PCIE_LINK_WIDTH_SUPPORT_X4:
num_lanes_factor = 4;
break;
case CAIL_PCIE_LINK_WIDTH_SUPPORT_X8:
num_lanes_factor = 8;
break;
case CAIL_PCIE_LINK_WIDTH_SUPPORT_X12:
num_lanes_factor = 12;
break;
case CAIL_PCIE_LINK_WIDTH_SUPPORT_X16:
num_lanes_factor = 16;
break;
case CAIL_PCIE_LINK_WIDTH_SUPPORT_X32:
num_lanes_factor = 32;
break;
}
switch (gen_speed_mask) {
case CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1:
gen_speed_mbits_factor = 2500;
break;
case CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2:
gen_speed_mbits_factor = 5000;
break;
case CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3:
gen_speed_mbits_factor = 8000;
break;
case CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4:
gen_speed_mbits_factor = 16000;
break;
case CAIL_PCIE_LINK_SPEED_SUPPORT_GEN5:
gen_speed_mbits_factor = 32000;
break;
}
return (num_lanes_factor * gen_speed_mbits_factor)/BITS_PER_BYTE;
}
uint64_t amdgpu_amdkfd_get_mmio_remap_phys_addr(struct kgd_dev *kgd)
{
struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
@@ -631,7 +714,6 @@ int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
ret = dma_fence_wait(f, false);
err_ib_sched:
dma_fence_put(f);
amdgpu_job_free(job);
err:
return ret;
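
As a worked example of the arithmetic above: a x16 PCIe Gen4 link comes
out as (16 lanes * 16000 Mbit/s) / 8 = 32000 MB/s, and a single
Aldebaran xGMI link as (1 * 16 * 25000) / 8 = 50000 MB/s.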

@@ -226,6 +226,8 @@ uint32_t amdgpu_amdkfd_get_num_gws(struct kgd_dev *kgd);
uint32_t amdgpu_amdkfd_get_asic_rev_id(struct kgd_dev *kgd);
int amdgpu_amdkfd_get_noretry(struct kgd_dev *kgd);
uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *src);
int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct kgd_dev *dst, struct kgd_dev *src, bool is_min);
int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct kgd_dev *dev, bool is_min);
/* Read user wptr from a specified user address space with page fault
* disabled. The memory must be pinned and mapped to the hardware when
@@ -269,7 +271,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv,
uint64_t *size);
int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv);
struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv, bool *table_freed);
int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv);
int amdgpu_amdkfd_gpuvm_sync_memory(
@@ -330,7 +332,7 @@ int kgd2kfd_pre_reset(struct kfd_dev *kfd);
int kgd2kfd_post_reset(struct kfd_dev *kfd);
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry);
void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd);
void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint32_t throttle_bitmask);
void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask);
#else
static inline int kgd2kfd_init(void)
{
@@ -389,7 +391,7 @@ void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd)
}
static inline
void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint32_t throttle_bitmask)
void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask)
{
}
#endif

@@ -44,4 +44,5 @@ const struct kfd2kgd_calls aldebaran_kfd2kgd = {
.get_atc_vmid_pasid_mapping_info =
kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
.set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base,
.program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings
};

@@ -305,5 +305,6 @@ const struct kfd2kgd_calls arcturus_kfd2kgd = {
kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
.set_vm_context_page_table_base =
kgd_gfx_v9_set_vm_context_page_table_base,
.get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy
.get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy,
.program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings
};

@@ -560,6 +560,9 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
type = RESET_WAVES;
break;
case KFD_PREEMPT_TYPE_WAVEFRONT_SAVE:
type = SAVE_WAVES;
break;
default:
type = DRAIN_PIPE;
break;
@@ -754,6 +757,33 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
adev->gfxhub.funcs->setup_vm_pt_regs(adev, vmid, page_table_base);
}
static void program_trap_handler_settings(struct kgd_dev *kgd,
uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
lock_srbm(kgd, 0, 0, 0, vmid);
/*
* Program TBA registers
*/
WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TBA_LO),
lower_32_bits(tba_addr >> 8));
WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TBA_HI),
upper_32_bits(tba_addr >> 8) |
(1 << SQ_SHADER_TBA_HI__TRAP_EN__SHIFT));
/*
* Program TMA registers
*/
WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TMA_LO),
lower_32_bits(tma_addr >> 8));
WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TMA_HI),
upper_32_bits(tma_addr >> 8));
unlock_srbm(kgd);
}
const struct kfd2kgd_calls gfx_v10_kfd2kgd = {
.program_sh_mem_settings = kgd_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
@@ -774,4 +804,5 @@ const struct kfd2kgd_calls gfx_v10_kfd2kgd = {
.get_atc_vmid_pasid_mapping_info =
get_atc_vmid_pasid_mapping_info,
.set_vm_context_page_table_base = set_vm_context_page_table_base,
.program_trap_handler_settings = program_trap_handler_settings,
};

@@ -537,6 +537,9 @@ static int hqd_destroy_v10_3(struct kgd_dev *kgd, void *mqd,
case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
type = RESET_WAVES;
break;
case KFD_PREEMPT_TYPE_WAVEFRONT_SAVE:
type = SAVE_WAVES;
break;
default:
type = DRAIN_PIPE;
break;
@@ -658,6 +661,33 @@ static void set_vm_context_page_table_base_v10_3(struct kgd_dev *kgd, uint32_t v
adev->gfxhub.funcs->setup_vm_pt_regs(adev, vmid, page_table_base);
}
static void program_trap_handler_settings_v10_3(struct kgd_dev *kgd,
uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
lock_srbm(kgd, 0, 0, 0, vmid);
/*
* Program TBA registers
*/
WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TBA_LO),
lower_32_bits(tba_addr >> 8));
WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TBA_HI),
upper_32_bits(tba_addr >> 8) |
(1 << SQ_SHADER_TBA_HI__TRAP_EN__SHIFT));
/*
* Program TMA registers
*/
WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TMA_LO),
lower_32_bits(tma_addr >> 8));
WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TMA_HI),
upper_32_bits(tma_addr >> 8));
unlock_srbm(kgd);
}
#if 0
uint32_t enable_debug_trap_v10_3(struct kgd_dev *kgd,
uint32_t trap_debug_wave_launch_mode,
@@ -820,6 +850,7 @@ const struct kfd2kgd_calls gfx_v10_3_kfd2kgd = {
.address_watch_get_offset = address_watch_get_offset_v10_3,
.get_atc_vmid_pasid_mapping_info = NULL,
.set_vm_context_page_table_base = set_vm_context_page_table_base_v10_3,
.program_trap_handler_settings = program_trap_handler_settings_v10_3,
#if 0
.enable_debug_trap = enable_debug_trap_v10_3,
.disable_debug_trap = disable_debug_trap_v10_3,

@@ -42,7 +42,8 @@
enum hqd_dequeue_request_type {
NO_ACTION = 0,
DRAIN_PIPE,
RESET_WAVES
RESET_WAVES,
SAVE_WAVES
};
static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
@@ -566,6 +567,9 @@ int kgd_gfx_v9_hqd_destroy(struct kgd_dev *kgd, void *mqd,
case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
type = RESET_WAVES;
break;
case KFD_PREEMPT_TYPE_WAVEFRONT_SAVE:
type = SAVE_WAVES;
break;
default:
type = DRAIN_PIPE;
break;
@@ -878,6 +882,32 @@ void kgd_gfx_v9_get_cu_occupancy(struct kgd_dev *kgd, int pasid,
adev->gfx.cu_info.max_waves_per_simd;
}
void kgd_gfx_v9_program_trap_handler_settings(struct kgd_dev *kgd,
uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
lock_srbm(kgd, 0, 0, 0, vmid);
/*
* Program TBA registers
*/
WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TBA_LO),
lower_32_bits(tba_addr >> 8));
WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TBA_HI),
upper_32_bits(tba_addr >> 8));
/*
* Program TMA registers
*/
WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TMA_LO),
lower_32_bits(tma_addr >> 8));
WREG32(SOC15_REG_OFFSET(GC, 0, mmSQ_SHADER_TMA_HI),
upper_32_bits(tma_addr >> 8));
unlock_srbm(kgd);
}
const struct kfd2kgd_calls gfx_v9_kfd2kgd = {
.program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings,
.set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping,
@@ -899,4 +929,5 @@ const struct kfd2kgd_calls gfx_v9_kfd2kgd = {
kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
.set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base,
.get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy,
.program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings,
};

@@ -65,3 +65,5 @@ void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd,
uint32_t vmid, uint64_t page_table_base);
void kgd_gfx_v9_get_cu_occupancy(struct kgd_dev *kgd, int pasid,
int *pasid_wave_cnt, int *max_waves_per_cu);
void kgd_gfx_v9_program_trap_handler_settings(struct kgd_dev *kgd,
uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr);

@@ -1057,7 +1057,8 @@ static void unmap_bo_from_gpuvm(struct kgd_mem *mem,
static int update_gpuvm_pte(struct kgd_mem *mem,
struct kfd_mem_attachment *entry,
struct amdgpu_sync *sync)
struct amdgpu_sync *sync,
bool *table_freed)
{
struct amdgpu_bo_va *bo_va = entry->bo_va;
struct amdgpu_device *adev = entry->adev;
@@ -1068,7 +1069,7 @@ static int update_gpuvm_pte(struct kgd_mem *mem,
return ret;
/* Update the page tables */
ret = amdgpu_vm_bo_update(adev, bo_va, false);
ret = amdgpu_vm_bo_update(adev, bo_va, false, table_freed);
if (ret) {
pr_err("amdgpu_vm_bo_update failed\n");
return ret;
@@ -1080,7 +1081,8 @@ static int map_bo_to_gpuvm(struct kgd_mem *mem,
static int map_bo_to_gpuvm(struct kgd_mem *mem,
struct kfd_mem_attachment *entry,
struct amdgpu_sync *sync,
bool no_update_pte)
bool no_update_pte,
bool *table_freed)
{
int ret;
@@ -1097,7 +1099,7 @@ static int map_bo_to_gpuvm(struct kgd_mem *mem,
if (no_update_pte)
return 0;
ret = update_gpuvm_pte(mem, entry, sync);
ret = update_gpuvm_pte(mem, entry, sync, table_freed);
if (ret) {
pr_err("update_gpuvm_pte() failed\n");
goto update_gpuvm_pte_failed;
@@ -1285,11 +1287,22 @@ int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct kgd_dev *kgd,
if (avm->process_info)
return -EINVAL;
/* Free the original amdgpu allocated pasid,
* will be replaced with kfd allocated pasid.
*/
if (avm->pasid) {
amdgpu_pasid_free(avm->pasid);
amdgpu_vm_set_pasid(adev, avm, 0);
}
/* Convert VM into a compute VM */
ret = amdgpu_vm_make_compute(adev, avm, pasid);
ret = amdgpu_vm_make_compute(adev, avm);
if (ret)
return ret;
ret = amdgpu_vm_set_pasid(adev, avm, pasid);
if (ret)
return ret;
/* Initialize KFD part of the VM and process info */
ret = init_kfd_vm(avm, process_info, ef);
if (ret)
@@ -1594,7 +1607,8 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
}
int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv)
struct kgd_dev *kgd, struct kgd_mem *mem,
void *drm_priv, bool *table_freed)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
@@ -1682,7 +1696,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
entry->va, entry->va + bo_size, entry);
ret = map_bo_to_gpuvm(mem, entry, ctx.sync,
is_invalid_userptr);
is_invalid_userptr, table_freed);
if (ret) {
pr_err("Failed to map bo to gpuvm\n");
goto out_unreserve;
@@ -1706,6 +1720,12 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
true);
ret = unreserve_bo_and_vms(&ctx, false, false);
/* Only skip the TLB flush on Aldebaran, to work around
* regressions on other ASICs.
*/
if (table_freed && (adev->asic_type != CHIP_ALDEBARAN))
*table_freed = true;
goto out;
out_unreserve:
@@ -2132,7 +2152,7 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
continue;
kfd_mem_dmaunmap_attachment(mem, attachment);
ret = update_gpuvm_pte(mem, attachment, &sync);
ret = update_gpuvm_pte(mem, attachment, &sync, NULL);
if (ret) {
pr_err("%s: update PTE failed\n", __func__);
/* make sure this gets validated again */
@@ -2338,7 +2358,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
continue;
kfd_mem_dmaunmap_attachment(mem, attachment);
ret = update_gpuvm_pte(mem, attachment, &sync_obj);
ret = update_gpuvm_pte(mem, attachment, &sync_obj, NULL);
if (ret) {
pr_debug("Memory eviction: update PTE failed. Try again\n");
goto validate_map_fail;
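
The new table_freed out-parameter reports whether page-table BOs were
freed by the update, which is what lets the KFD ioctl path decide
whether a TLB flush is required. A hedged sketch of a caller (the flush
helper is a hypothetical stand-in for the process-side code):

	bool table_freed = false;
	int r;

	r = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kgd, mem, drm_priv,
						  &table_freed);
	if (!r && table_freed)
		flush_process_tlb(pdd);	/* hypothetical flush helper */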

@@ -34,6 +34,7 @@ struct amdgpu_fpriv;
struct amdgpu_bo_list_entry {
struct ttm_validate_buffer tv;
struct amdgpu_bo_va *bo_va;
struct dma_fence_chain *chain;
uint32_t priority;
struct page **user_pages;
bool user_invalidated;

@@ -572,6 +572,20 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
goto out;
}
amdgpu_bo_list_for_each_entry(e, p->bo_list) {
struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
e->bo_va = amdgpu_vm_bo_find(vm, bo);
if (bo->tbo.base.dma_buf && !amdgpu_bo_explicit_sync(bo)) {
e->chain = dma_fence_chain_alloc();
if (!e->chain) {
r = -ENOMEM;
goto error_validate;
}
}
}
amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold,
&p->bytes_moved_vis_threshold);
p->bytes_moved = 0;
@@ -599,15 +613,6 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
gws = p->bo_list->gws_obj;
oa = p->bo_list->oa_obj;
amdgpu_bo_list_for_each_entry(e, p->bo_list) {
struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
/* Make sure we use the exclusive slot for shared BOs */
if (bo->prime_shared_count)
e->tv.num_shared = 0;
e->bo_va = amdgpu_vm_bo_find(vm, bo);
}
if (gds) {
p->job->gds_base = amdgpu_bo_gpu_offset(gds) >> PAGE_SHIFT;
p->job->gds_size = amdgpu_bo_size(gds) >> PAGE_SHIFT;
@@ -629,8 +634,13 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
}
error_validate:
if (r)
if (r) {
amdgpu_bo_list_for_each_entry(e, p->bo_list) {
dma_fence_chain_free(e->chain);
e->chain = NULL;
}
ttm_eu_backoff_reservation(&p->ticket, &p->validated);
}
out:
return r;
}
@@ -670,9 +680,17 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
{
unsigned i;
if (error && backoff)
if (error && backoff) {
struct amdgpu_bo_list_entry *e;
amdgpu_bo_list_for_each_entry(e, parser->bo_list) {
dma_fence_chain_free(e->chain);
e->chain = NULL;
}
ttm_eu_backoff_reservation(&parser->ticket,
&parser->validated);
}
for (i = 0; i < parser->num_post_deps; i++) {
drm_syncobj_put(parser->post_deps[i].syncobj);
@@ -781,7 +799,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
if (r)
return r;
r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false);
r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false, NULL);
if (r)
return r;
@@ -792,7 +810,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
bo_va = fpriv->csa_va;
BUG_ON(!bo_va);
r = amdgpu_vm_bo_update(adev, bo_va, false);
r = amdgpu_vm_bo_update(adev, bo_va, false, NULL);
if (r)
return r;
@@ -811,7 +829,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
if (bo_va == NULL)
continue;
r = amdgpu_vm_bo_update(adev, bo_va, false);
r = amdgpu_vm_bo_update(adev, bo_va, false, NULL);
if (r)
return r;
@@ -1109,7 +1127,7 @@ static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p
dep->chain = NULL;
if (syncobj_deps[i].point) {
dep->chain = kmalloc(sizeof(*dep->chain), GFP_KERNEL);
dep->chain = dma_fence_chain_alloc();
if (!dep->chain)
return -ENOMEM;
}
@@ -1117,7 +1135,7 @@ static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p
dep->syncobj = drm_syncobj_find(p->filp,
syncobj_deps[i].handle);
if (!dep->syncobj) {
kfree(dep->chain);
dma_fence_chain_free(dep->chain);
return -EINVAL;
}
dep->point = syncobj_deps[i].point;
@@ -1245,6 +1263,28 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);
amdgpu_bo_list_for_each_entry(e, p->bo_list) {
struct dma_resv *resv = e->tv.bo->base.resv;
struct dma_fence_chain *chain = e->chain;
if (!chain)
continue;
/*
* Work around dma_resv shortcomings by wrapping up the
* submission in a dma_fence_chain and adding it as the exclusive
* fence, but first add the submission as a shared fence to make
* sure that shared fences never signal before the exclusive
* one.
*/
dma_fence_chain_init(chain, dma_resv_excl_fence(resv),
dma_fence_get(p->fence), 1);
dma_resv_add_shared_fence(resv, p->fence);
rcu_assign_pointer(resv->fence_excl, &chain->base);
e->chain = NULL;
}
ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
mutex_unlock(&p->adev->notifier_lock);
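
The submission path above depends on the chain helpers being split into
separate allocate and init steps: the chain can be allocated early,
where -ENOMEM is still trivial to unwind, and either handed over to
dma_fence_chain_init() (which consumes the prev/fence references) or
dropped again with dma_fence_chain_free(). The intended pairing,
sketched:

	struct dma_fence_chain *chain = dma_fence_chain_alloc();

	if (!chain)
		return -ENOMEM;
	/* ... on success, init turns the allocation into a live fence: */
	dma_fence_chain_init(chain, prev, fence, seqno);
	/* ... or, on an error path before init: */
	dma_fence_chain_free(chain);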

@@ -1414,7 +1414,7 @@ no_preempt:
continue;
}
job = to_amdgpu_job(s_job);
if (preempted && job->fence == fence)
if (preempted && (&job->hw_fence) == fence)
/* mark the job as preempted */
job->preemption_status |= AMDGPU_IB_PREEMPTED;
}

@@ -116,6 +116,7 @@ const char *amdgpu_asic_name[] = {
"RENOIR",
"ALDEBARAN",
"NAVI10",
"CYAN_SKILLFISH",
"NAVI14",
"NAVI12",
"SIENNA_CICHLID",
@@ -287,7 +288,7 @@ bool amdgpu_device_supports_smart_shift(struct drm_device *dev)
*/
/**
* amdgpu_device_vram_access - read/write a buffer in vram
* amdgpu_device_mm_access - access vram by MM_INDEX/MM_DATA
*
* @adev: amdgpu_device pointer
* @pos: offset of the buffer in vram
@@ -295,22 +296,65 @@ bool amdgpu_device_supports_smart_shift(struct drm_device *dev)
* @size: read/write size, sizeof(@buf) must be > @size
* @write: true - write to vram, otherwise - read from vram
*/
void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
uint32_t *buf, size_t size, bool write)
void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
void *buf, size_t size, bool write)
{
unsigned long flags;
uint32_t hi = ~0;
uint32_t hi = ~0, tmp = 0;
uint32_t *data = buf;
uint64_t last;
int idx;
if (!drm_dev_enter(&adev->ddev, &idx))
return;
BUG_ON(!IS_ALIGNED(pos, 4) || !IS_ALIGNED(size, 4));
spin_lock_irqsave(&adev->mmio_idx_lock, flags);
for (last = pos + size; pos < last; pos += 4) {
tmp = pos >> 31;
WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
if (tmp != hi) {
WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
hi = tmp;
}
if (write)
WREG32_NO_KIQ(mmMM_DATA, *data++);
else
*data++ = RREG32_NO_KIQ(mmMM_DATA);
}
spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
drm_dev_exit(idx);
}
/**
* amdgpu_device_aper_access - access vram via the vram aperture
*
* @adev: amdgpu_device pointer
* @pos: offset of the buffer in vram
* @buf: virtual address of the buffer in system memory
* @size: read/write size, sizeof(@buf) must be > @size
* @write: true - write to vram, otherwise - read from vram
*
* Returns the number of bytes transferred.
*/
size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos,
void *buf, size_t size, bool write)
{
#ifdef CONFIG_64BIT
void __iomem *addr;
size_t count = 0;
uint64_t last;
if (!adev->mman.aper_base_kaddr)
return 0;
last = min(pos + size, adev->gmc.visible_vram_size);
if (last > pos) {
void __iomem *addr = adev->mman.aper_base_kaddr + pos;
size_t count = last - pos;
addr = adev->mman.aper_base_kaddr + pos;
count = last - pos;
if (write) {
memcpy_toio(addr, buf, count);
@@ -322,35 +366,37 @@ void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
memcpy_fromio(buf, addr, count);
}
if (count == size)
goto exit;
}
return count;
#else
return 0;
#endif
}
/**
* amdgpu_device_vram_access - read/write a buffer in vram
*
* @adev: amdgpu_device pointer
* @pos: offset of the buffer in vram
* @buf: virtual address of the buffer in system memory
* @size: read/write size, sizeof(@buf) must be > @size
* @write: true - write to vram, otherwise - read from vram
*/
void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
void *buf, size_t size, bool write)
{
size_t count;
/* try using the vram aperture to access vram first */
count = amdgpu_device_aper_access(adev, pos, buf, size, write);
size -= count;
if (size) {
/* use MM_INDEX/MM_DATA to access the rest of vram */
pos += count;
buf += count / 4;
size -= count;
buf += count;
amdgpu_device_mm_access(adev, pos, buf, size, write);
}
#endif
spin_lock_irqsave(&adev->mmio_idx_lock, flags);
for (last = pos + size; pos < last; pos += 4) {
uint32_t tmp = pos >> 31;
WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
if (tmp != hi) {
WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
hi = tmp;
}
if (write)
WREG32_NO_KIQ(mmMM_DATA, *buf++);
else
*buf++ = RREG32_NO_KIQ(mmMM_DATA);
}
spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
#ifdef CONFIG_64BIT
exit:
#endif
drm_dev_exit(idx);
}
/*
@@ -518,7 +564,7 @@ void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev,
adev->gfx.rlc.funcs &&
adev->gfx.rlc.funcs->is_rlcg_access_range) {
if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
return adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, v, 0, 0);
return adev->gfx.rlc.funcs->sriov_wreg(adev, reg, v, 0, 0);
} else {
writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
}
@@ -1266,15 +1312,16 @@ bool amdgpu_device_need_post(struct amdgpu_device *adev)
/**
* amdgpu_device_vga_set_decode - enable/disable vga decode
*
* @cookie: amdgpu_device pointer
* @pdev: PCI device pointer
* @state: enable/disable vga decode
*
* Enable/disable vga decode (all asics).
* Returns VGA resource flags.
*/
static unsigned int amdgpu_device_vga_set_decode(void *cookie, bool state)
static unsigned int amdgpu_device_vga_set_decode(struct pci_dev *pdev,
bool state)
{
struct amdgpu_device *adev = cookie;
struct amdgpu_device *adev = drm_to_adev(pci_get_drvdata(pdev));
amdgpu_asic_set_vga_state(adev, state);
if (state)
return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
@@ -1394,6 +1441,10 @@ static int amdgpu_device_init_apu_flags(struct amdgpu_device *adev)
break;
case CHIP_YELLOW_CARP:
break;
case CHIP_CYAN_SKILLFISH:
if (adev->pdev->device == 0x13FE)
adev->apu_flags |= AMD_APU_IS_CYAN_SKILLFISH2;
break;
default:
return -EINVAL;
}
@@ -2100,6 +2151,7 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
case CHIP_BEIGE_GOBY:
case CHIP_VANGOGH:
case CHIP_YELLOW_CARP:
case CHIP_CYAN_SKILLFISH:
if (adev->asic_type == CHIP_VANGOGH)
adev->family = AMDGPU_FAMILY_VGH;
else if (adev->asic_type == CHIP_YELLOW_CARP)
@@ -3594,9 +3646,9 @@ int amdgpu_device_init(struct amdgpu_device *adev,
fence_driver_init:
/* Fence driver */
r = amdgpu_fence_driver_init(adev);
r = amdgpu_fence_driver_sw_init(adev);
if (r) {
dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
dev_err(adev->dev, "amdgpu_fence_driver_sw_init failed\n");
amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_FENCE_INIT_FAIL, 0, 0);
goto failed;
}
@@ -3623,6 +3675,8 @@ fence_driver_init:
goto release_ras_con;
}
amdgpu_fence_driver_hw_init(adev);
dev_info(adev->dev,
"SE %d, SH per SE %d, CU per SH %d, active_cu_number %d\n",
adev->gfx.config.max_shader_engines,
@@ -3714,7 +3768,7 @@ fence_driver_init:
/* this will fail for cards that aren't VGA class devices, just
* ignore it */
if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
vga_client_register(adev->pdev, adev, NULL, amdgpu_device_vga_set_decode);
vga_client_register(adev->pdev, amdgpu_device_vga_set_decode);
if (amdgpu_device_supports_px(ddev)) {
px = true;
@@ -3771,7 +3825,10 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
{
dev_info(adev->dev, "amdgpu: finishing device.\n");
flush_delayed_work(&adev->delayed_init_work);
ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
if (adev->mman.initialized) {
flush_delayed_work(&adev->mman.bdev.wq);
ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
}
adev->shutdown = true;
/* make sure IB test finished before entering exclusive mode
@ -3790,7 +3847,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
else
drm_atomic_helper_shutdown(adev_to_drm(adev));
}
amdgpu_fence_driver_fini_hw(adev);
amdgpu_fence_driver_hw_fini(adev);
if (adev->pm_sysfs_en)
amdgpu_pm_sysfs_fini(adev);
@ -3812,7 +3869,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
void amdgpu_device_fini_sw(struct amdgpu_device *adev)
{
amdgpu_device_ip_fini(adev);
amdgpu_fence_driver_fini_sw(adev);
amdgpu_fence_driver_sw_fini(adev);
release_firmware(adev->firmware.gpu_info_fw);
adev->firmware.gpu_info_fw = NULL;
adev->accel_working = false;
@ -3833,7 +3890,7 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
vga_switcheroo_fini_domain_pm_ops(adev->dev);
}
if ((adev->pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
vga_client_register(adev->pdev, NULL, NULL, NULL);
vga_client_unregister(adev->pdev);
if (IS_ENABLED(CONFIG_PERF_EVENTS))
amdgpu_pmu_fini(adev);
@ -3887,7 +3944,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
/* evict vram memory */
amdgpu_bo_evict_vram(adev);
amdgpu_fence_driver_suspend(adev);
amdgpu_fence_driver_hw_fini(adev);
amdgpu_device_ip_suspend_phase2(adev);
/* evict remaining vram memory
@ -3932,8 +3989,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
return r;
}
amdgpu_fence_driver_resume(adev);
amdgpu_fence_driver_hw_init(adev);
r = amdgpu_device_ip_late_init(adev);
if (r)
@ -4394,7 +4450,7 @@ int amdgpu_device_mode1_reset(struct amdgpu_device *adev)
int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
struct amdgpu_reset_context *reset_context)
{
int i, r = 0;
int i, j, r = 0;
struct amdgpu_job *job = NULL;
bool need_full_reset =
test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
@ -4418,11 +4474,22 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
if (!ring || !ring->sched.thread)
continue;
/* Clear the job fence from fence drv to avoid force_completion;
 * leave NULL and the VM flush fence in fence drv. */
for (j = 0; j <= ring->fence_drv.num_fences_mask; j++) {
struct dma_fence *old, **ptr;
ptr = &ring->fence_drv.fences[j];
old = rcu_dereference_protected(*ptr, 1);
if (old && test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &old->flags)) {
RCU_INIT_POINTER(*ptr, NULL);
}
}
/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
amdgpu_fence_driver_force_completion(ring);
}
if(job)
if (job && job->vm)
drm_sched_increase_karma(&job->base);
r = amdgpu_reset_prepare_hwcontext(adev, reset_context);
@ -4886,7 +4953,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
DRM_INFO("Bailing on TDR for s_job:%llx, hive: %llx as another already in progress",
job ? job->base.id : -1, hive->hive_id);
amdgpu_put_xgmi_hive(hive);
if (job)
if (job && job->vm)
drm_sched_increase_karma(&job->base);
return 0;
}
@ -4910,7 +4977,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
job ? job->base.id : -1);
/* even we skipped this reset, still need to set the job to guilty */
if (job)
if (job && job->vm)
drm_sched_increase_karma(&job->base);
goto skip_recovery;
}
@ -5277,6 +5344,10 @@ int amdgpu_device_baco_exit(struct drm_device *dev)
adev->nbio.funcs->enable_doorbell_interrupt)
adev->nbio.funcs->enable_doorbell_interrupt(adev, true);
if (amdgpu_passthrough(adev) &&
adev->nbio.funcs->clear_doorbell_interrupt)
adev->nbio.funcs->clear_doorbell_interrupt(adev);
return 0;
}


@ -42,48 +42,6 @@
#include <linux/pci-p2pdma.h>
#include <linux/pm_runtime.h>
static int
__dma_resv_make_exclusive(struct dma_resv *obj)
{
struct dma_fence **fences;
unsigned int count;
int r;
if (!dma_resv_shared_list(obj)) /* no shared fences to convert */
return 0;
r = dma_resv_get_fences(obj, NULL, &count, &fences);
if (r)
return r;
if (count == 0) {
/* Now that was unexpected. */
} else if (count == 1) {
dma_resv_add_excl_fence(obj, fences[0]);
dma_fence_put(fences[0]);
kfree(fences);
} else {
struct dma_fence_array *array;
array = dma_fence_array_create(count, fences,
dma_fence_context_alloc(1), 0,
false);
if (!array)
goto err_fences_put;
dma_resv_add_excl_fence(obj, &array->base);
dma_fence_put(&array->base);
}
return 0;
err_fences_put:
while (count--)
dma_fence_put(fences[count]);
kfree(fences);
return -ENOMEM;
}
/**
* amdgpu_dma_buf_attach - &dma_buf_ops.attach implementation
*
@ -110,24 +68,6 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
if (r < 0)
goto out;
r = amdgpu_bo_reserve(bo, false);
if (unlikely(r != 0))
goto out;
/*
* We only create shared fences for internal use, but importers
* of the dmabuf rely on exclusive fences for implicitly
* tracking write hazards. As any of the current fences may
* correspond to a write, we need to convert all existing
* fences on the reservation object into a single exclusive
* fence.
*/
r = __dma_resv_make_exclusive(bo->tbo.base.resv);
if (r)
goto out;
bo->prime_shared_count++;
amdgpu_bo_unreserve(bo);
return 0;
out:
@ -150,9 +90,6 @@ static void amdgpu_dma_buf_detach(struct dma_buf *dmabuf,
struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
if (attach->dev->driver != adev->dev->driver && bo->prime_shared_count)
bo->prime_shared_count--;
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
}
@ -418,8 +355,6 @@ amdgpu_dma_buf_create_obj(struct drm_device *dev, struct dma_buf *dma_buf)
bo = gem_to_amdgpu_bo(gobj);
bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
if (dma_buf->ops != &amdgpu_dmabuf_ops)
bo->prime_shared_count = 1;
dma_resv_unlock(resv);
return gobj;


@ -870,11 +870,10 @@ MODULE_PARM_DESC(reset_method, "GPU reset method (-1 = auto (default), 0 = legac
module_param_named(reset_method, amdgpu_reset_method, int, 0444);
/**
* DOC: bad_page_threshold (int)
* Bad page threshold is to specify the threshold value of faulty pages
* detected by RAS ECC, that may result in GPU entering bad status if total
* faulty pages by ECC exceed threshold value and leave it for user's further
* check.
* DOC: bad_page_threshold (int) Bad page threshold specifies the
* threshold value of faulty pages detected by RAS ECC, which may
* result in the GPU entering bad status when the number of total
* faulty pages by ECC exceeds the threshold value.
*/
MODULE_PARM_DESC(bad_page_threshold, "Bad page threshold(-1 = auto(default value), 0 = disable bad page retirement)");
module_param_named(bad_page_threshold, amdgpu_bad_page_threshold, int, 0444);
@ -1213,6 +1212,9 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x740F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN|AMD_EXP_HW_SUPPORT},
{0x1002, 0x7410, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN|AMD_EXP_HW_SUPPORT},
/* CYAN_SKILLFISH */
{0x1002, 0x13FE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYAN_SKILLFISH|AMD_IS_APU},
/* BEIGE_GOBY */
{0x1002, 0x7420, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY},
{0x1002, 0x7421, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY},
@ -1236,7 +1238,7 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
int ret, retry = 0;
bool supports_atomic = false;
if (!amdgpu_virtual_display &&
if (amdgpu_virtual_display ||
amdgpu_device_asic_has_dc_support(flags & AMD_ASIC_MASK))
supports_atomic = true;
@ -1292,7 +1294,7 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
#endif
/* Get rid of things like offb */
ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, "amdgpudrmfb");
ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &amdgpu_kms_driver);
if (ret)
return ret;
@ -1474,7 +1476,7 @@ static int amdgpu_pmops_suspend(struct device *dev)
struct amdgpu_device *adev = drm_to_adev(drm_dev);
int r;
if (amdgpu_acpi_is_s0ix_supported(adev))
if (amdgpu_acpi_is_s0ix_active(adev))
adev->in_s0ix = true;
adev->in_s3 = true;
r = amdgpu_device_suspend(drm_dev, true);
@ -1490,7 +1492,7 @@ static int amdgpu_pmops_resume(struct device *dev)
int r;
r = amdgpu_device_resume(drm_dev, true);
if (amdgpu_acpi_is_s0ix_supported(adev))
if (amdgpu_acpi_is_s0ix_active(adev))
adev->in_s0ix = false;
return r;
}
@ -1784,7 +1786,6 @@ static const struct drm_driver amdgpu_kms_driver = {
.open = amdgpu_driver_open_kms,
.postclose = amdgpu_driver_postclose_kms,
.lastclose = amdgpu_driver_lastclose_kms,
.irq_handler = amdgpu_irq_handler,
.ioctls = amdgpu_ioctls_kms,
.num_ioctls = ARRAY_SIZE(amdgpu_ioctls_kms),
.dumb_create = amdgpu_mode_dumb_create,


@ -0,0 +1,239 @@
/*
* Copyright 2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "amdgpu_eeprom.h"
#include "amdgpu.h"
/* AT24CM02 and M24M02-R have a 256-byte write page size.
*/
#define EEPROM_PAGE_BITS 8
#define EEPROM_PAGE_SIZE (1U << EEPROM_PAGE_BITS)
#define EEPROM_PAGE_MASK (EEPROM_PAGE_SIZE - 1)
#define EEPROM_OFFSET_SIZE 2
/* EEPROM memory addresses are 19-bits long, which can
* be partitioned into 3, 8, 8 bits, for a total of 19.
* The upper 3 bits are sent as part of the 7-bit
* "Device Type Identifier"--an I2C concept, which for EEPROM devices
* is hard-coded as 1010b, indicating that it is an EEPROM
* device--this is the wire format, followed by the upper
* 3 bits of the 19-bit address, followed by the direction,
* followed by two bytes holding the rest of the 16-bits of
* the EEPROM memory address. The format on the wire for EEPROM
* devices is: 1010XYZD, A15:A8, A7:A0,
* where D is the direction bit, sequenced out by the hardware.
* Bits XYZ are memory address bits 18, 17 and 16.
* These bits are compared to how pins 1-3 of the part are connected,
* depending on the size of the part, more on that later.
*
* Note that of this wire format, a client is in control of,
* and needs to specify, only the XYZ, A15:A8, A7:A0 bits,
* which are exactly the EEPROM memory address, or offset,
* used to address up to 8 EEPROM devices on the I2C bus.
*
* For instance, a 2-Mbit I2C EEPROM part addresses all of its bytes
* using an 18-bit address, bits 17 to 0, and thus uses all but one of
* the 19 bits previously mentioned. The designer would then not connect
* pins 1 and 2, and pin 3 usually named "A_2" or "E2", would be connected to
* either Vcc or GND. This would allow for up to two 2-Mbit parts on
* the same bus, where one would be addressable with bit 18 as 1, and
* the other with bit 18 of the address as 0.
*
* For a 2-Mbit part, bit 18 is usually known as the "Chip Enable" or
* "Hardware Address Bit". This bit is compared to the load on pin 3
* of the device, described above, and if there is a match, then this
* device responds to the command. This way, you can connect two
* 2-Mbit EEPROM devices on the same bus, but see one contiguous
* memory from 0 to 7FFFFh, where address 0 to 3FFFF is in the device
* whose pin 3 is connected to GND, and address 40000 to 7FFFFh is in
* the 2nd device, whose pin 3 is connected to Vcc.
*
* This addressing is encoded in the 32-bit "eeprom_addr" below,
* namely the 19 bits "XYZ,A15:A0", as a single 19-bit address. For
* instance, eeprom_addr = 0x6DA01, is 110_1101_1010_0000_0001, where
* XYZ=110b, and A15:A0=DA01h. The XYZ bits become part of the device
* address, and the rest of the address bits are sent as the memory
* address bytes.
*
* That is, for an I2C EEPROM driver everything is controlled by
* the "eeprom_addr".
*
* P.S. If you need to write, lock and read the Identification Page,
* (M24M02-DR device only, which we do not use), change the "7" to
* "0xF" in the macro below, and let the client set bit 20 to 1 in
* "eeprom_addr", and set A10 to 0 to write into it, and A10 and A1 to
* 1 to lock it permanently.
*/
#define MAKE_I2C_ADDR(_aa) ((0xA << 3) | (((_aa) >> 16) & 7))
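
A worked decomposition of the macro, reusing the eeprom_addr = 0x6DA01 example from the comment above:

/* MAKE_I2C_ADDR(0x6DA01)
 *   = (0xA << 3) | ((0x6DA01 >> 16) & 7)
 *   = 0x50 | 0x6 = 0x56, i.e. the 7-bit device address 1010_110b;
 * the two memory-address bytes sent afterwards are 0xDA and 0x01.
 */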
static int __amdgpu_eeprom_xfer(struct i2c_adapter *i2c_adap, u32 eeprom_addr,
u8 *eeprom_buf, u16 buf_size, bool read)
{
u8 eeprom_offset_buf[EEPROM_OFFSET_SIZE];
struct i2c_msg msgs[] = {
{
.flags = 0,
.len = EEPROM_OFFSET_SIZE,
.buf = eeprom_offset_buf,
},
{
.flags = read ? I2C_M_RD : 0,
},
};
const u8 *p = eeprom_buf;
int r;
u16 len;
for (r = 0; buf_size > 0;
buf_size -= len, eeprom_addr += len, eeprom_buf += len) {
/* Set the EEPROM address we want to write to/read from.
*/
msgs[0].addr = MAKE_I2C_ADDR(eeprom_addr);
msgs[1].addr = msgs[0].addr;
msgs[0].buf[0] = (eeprom_addr >> 8) & 0xff;
msgs[0].buf[1] = eeprom_addr & 0xff;
if (!read) {
/* Write the maximum amount of data, without
* crossing the device's page boundary, as per
* its spec. Partial page writes are allowed,
* starting at any location within the page,
* so long as the page boundary isn't crossed
* over (actually the page pointer rolls
* over).
*
* As per the AT24CM02 EEPROM spec, after
* writing into a page, the I2C driver should
* terminate the transfer, i.e. in
* "i2c_transfer()" below, with a STOP
* condition, so that the self-timed write
* cycle begins. This is implied for the
* "i2c_transfer()" abstraction.
*/
len = min(EEPROM_PAGE_SIZE - (eeprom_addr &
EEPROM_PAGE_MASK),
(u32)buf_size);
} else {
/* Reading from the EEPROM has no limitation
* on the number of bytes read from the EEPROM
* device--they are simply sequenced out.
*/
len = buf_size;
}
msgs[1].len = len;
msgs[1].buf = eeprom_buf;
/* This constitutes a START-STOP transaction.
*/
r = i2c_transfer(i2c_adap, msgs, ARRAY_SIZE(msgs));
if (r != ARRAY_SIZE(msgs))
break;
if (!read) {
/* According to EEPROM specs the length of the
* self-writing cycle, tWR (tW), is 10 ms.
*
* TODO: Use polling on ACK, aka Acknowledge
* Polling, to minimize waiting for the
* internal write cycle to complete, as it is
* usually smaller than tWR (tW).
*/
msleep(10);
}
}
return r < 0 ? r : eeprom_buf - p;
}
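
To make the page-boundary clamp in __amdgpu_eeprom_xfer() concrete (the numbers are illustrative):

/* Example: write 40 bytes at eeprom_addr = 0x100F0, page size 256.
 *   1st pass: len = min(256 - (0x100F0 & 0xFF), 40) = min(16, 40) = 16
 *             covers 0x100F0..0x100FF, then msleep(10) for tWR
 *   2nd pass: len = min(256 - (0x10100 & 0xFF), 24) = min(256, 24) = 24
 *             covers 0x10100..0x10117
 */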
/**
* amdgpu_eeprom_xfer -- Read/write from/to an I2C EEPROM device
* @i2c_adap: pointer to the I2C adapter to use
* @eeprom_addr: EEPROM address from which to read/write
* @eeprom_buf: pointer to data buffer to read into/write from
* @buf_size: the size of @eeprom_buf
* @read: True if reading from the EEPROM, false if writing
*
* Returns the number of bytes read/written; -errno on error.
*/
static int amdgpu_eeprom_xfer(struct i2c_adapter *i2c_adap, u32 eeprom_addr,
u8 *eeprom_buf, u16 buf_size, bool read)
{
const struct i2c_adapter_quirks *quirks = i2c_adap->quirks;
u16 limit;
if (!quirks)
limit = 0;
else if (read)
limit = quirks->max_read_len;
else
limit = quirks->max_write_len;
if (limit == 0) {
return __amdgpu_eeprom_xfer(i2c_adap, eeprom_addr,
eeprom_buf, buf_size, read);
} else if (limit <= EEPROM_OFFSET_SIZE) {
dev_err_ratelimited(&i2c_adap->dev,
"maddr:0x%04X size:0x%02X:quirk max_%s_len must be > %d",
eeprom_addr, buf_size,
read ? "read" : "write", EEPROM_OFFSET_SIZE);
return -EINVAL;
} else {
u16 ps; /* Partial size */
int res = 0, r;
/* The "limit" includes all data bytes sent/received,
* which would include the EEPROM_OFFSET_SIZE bytes.
* Account for them here.
*/
limit -= EEPROM_OFFSET_SIZE;
for ( ; buf_size > 0;
buf_size -= ps, eeprom_addr += ps, eeprom_buf += ps) {
ps = min(limit, buf_size);
r = __amdgpu_eeprom_xfer(i2c_adap, eeprom_addr,
eeprom_buf, ps, read);
if (r < 0)
return r;
res += r;
}
return res;
}
}
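
For illustration, assuming a hypothetical adapter quirk of max_read_len = 32, the splitting above works out as:

/* limit = 32 - EEPROM_OFFSET_SIZE = 30 data bytes per transfer,
 * so a 100-byte read issues partial sizes ps = 30, 30, 30, 10
 * and returns res = 100 on success.
 */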
int amdgpu_eeprom_read(struct i2c_adapter *i2c_adap,
u32 eeprom_addr, u8 *eeprom_buf,
u16 bytes)
{
return amdgpu_eeprom_xfer(i2c_adap, eeprom_addr, eeprom_buf, bytes,
true);
}
int amdgpu_eeprom_write(struct i2c_adapter *i2c_adap,
u32 eeprom_addr, u8 *eeprom_buf,
u16 bytes)
{
return amdgpu_eeprom_xfer(i2c_adap, eeprom_addr, eeprom_buf, bytes,
false);
}
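
A hedged usage sketch of the wrappers; the bus pointer and address mirror how the FRU code further down uses them, with error handling abbreviated:

u8 buf[16];
int n;

n = amdgpu_eeprom_read(&adev->pm.smu_i2c, FRU_EEPROM_MADDR, buf, sizeof(buf));
if (n < 0)
	DRM_WARN("EEPROM read failed: %d", n);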


@ -0,0 +1,37 @@
/*
* Copyright 2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#ifndef _AMDGPU_EEPROM_H
#define _AMDGPU_EEPROM_H
#include <linux/i2c.h>
int amdgpu_eeprom_read(struct i2c_adapter *i2c_adap,
u32 eeprom_addr, u8 *eeprom_buf,
u16 bytes);
int amdgpu_eeprom_write(struct i2c_adapter *i2c_adap,
u32 eeprom_addr, u8 *eeprom_buf,
u16 bytes);
#endif


@ -273,9 +273,6 @@ static int amdgpufb_create(struct drm_fb_helper *helper,
return 0;
out:
if (abo) {
}
if (fb && ret) {
drm_gem_object_put(gobj);
drm_framebuffer_unregister_private(fb);
@ -344,7 +341,7 @@ int amdgpu_fbdev_init(struct amdgpu_device *adev)
}
/* disable all the possible outputs/crtcs before entering KMS mode */
if (!amdgpu_device_has_dc_support(adev))
if (!amdgpu_device_has_dc_support(adev) && !amdgpu_virtual_display)
drm_helper_disable_unused_functions(adev_to_drm(adev));
drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel);


@ -129,30 +129,50 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
*
* @ring: ring the fence is associated with
* @f: resulting fence object
* @job: job the fence is embedded in
* @flags: flags to pass into the subordinate .emit_fence() call
*
* Emits a fence command on the requested ring (all asics).
* Returns 0 on success, -ENOMEM on failure.
*/
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f, struct amdgpu_job *job,
unsigned flags)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_fence *fence;
struct dma_fence *fence;
struct amdgpu_fence *am_fence;
struct dma_fence __rcu **ptr;
uint32_t seq;
int r;
fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
if (fence == NULL)
return -ENOMEM;
if (job == NULL) {
/* create a separate hw fence */
am_fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_ATOMIC);
if (am_fence == NULL)
return -ENOMEM;
fence = &am_fence->base;
am_fence->ring = ring;
} else {
/* use the job-embedded fence */
fence = &job->hw_fence;
}
seq = ++ring->fence_drv.sync_seq;
fence->ring = ring;
dma_fence_init(&fence->base, &amdgpu_fence_ops,
&ring->fence_drv.lock,
adev->fence_context + ring->idx,
seq);
if (job != NULL && job->job_run_counter) {
/* reinit seq for resubmitted jobs */
fence->seqno = seq;
} else {
dma_fence_init(fence, &amdgpu_fence_ops,
&ring->fence_drv.lock,
adev->fence_context + ring->idx,
seq);
}
if (job != NULL) {
/* mark that this fence has a parent job */
set_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &fence->flags);
}
amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
seq, flags | AMDGPU_FENCE_FLAG_INT);
pm_runtime_get_noresume(adev_to_drm(adev)->dev);
@ -175,9 +195,9 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
/* This function can't be called concurrently anyway, otherwise
* emitting the fence would mess up the hardware ring buffer.
*/
rcu_assign_pointer(*ptr, dma_fence_get(&fence->base));
rcu_assign_pointer(*ptr, dma_fence_get(fence));
*f = &fence->base;
*f = fence;
return 0;
}
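
The two call patterns the reworked emit path supports, sketched side by side (fence_flags as in amdgpu_ib_schedule() further down):

struct dma_fence *f;
int r;

/* Job submission: the hw fence is embedded in the job itself. */
r = amdgpu_fence_emit(ring, &f, job, fence_flags);

/* Standalone emission (no job): still allocated from the fence slab. */
r = amdgpu_fence_emit(ring, &f, NULL, 0);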
@ -417,9 +437,6 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
}
amdgpu_fence_write(ring, atomic_read(&ring->fence_drv.last_seq));
if (irq_src)
amdgpu_irq_get(adev, irq_src, irq_type);
ring->fence_drv.irq_src = irq_src;
ring->fence_drv.irq_type = irq_type;
ring->fence_drv.initialized = true;
@ -490,7 +507,7 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
r = drm_sched_init(&ring->sched, &amdgpu_sched_ops,
num_hw_submission, amdgpu_job_hang_limit,
timeout, sched_score, ring->name);
timeout, NULL, sched_score, ring->name);
if (r) {
DRM_ERROR("Failed to create scheduler on ring %s.\n",
ring->name);
@ -501,7 +518,7 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
}
/**
* amdgpu_fence_driver_init - init the fence driver
* amdgpu_fence_driver_sw_init - init the fence driver
* for all possible rings.
*
* @adev: amdgpu device pointer
@ -512,20 +529,20 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
* amdgpu_fence_driver_start_ring().
* Returns 0 for success.
*/
int amdgpu_fence_driver_init(struct amdgpu_device *adev)
int amdgpu_fence_driver_sw_init(struct amdgpu_device *adev)
{
return 0;
}
/**
* amdgpu_fence_driver_fini - tear down the fence driver
* amdgpu_fence_driver_hw_fini - tear down the fence driver
* for all possible rings.
*
* @adev: amdgpu device pointer
*
* Tear down the fence driver for all possible rings (all asics).
*/
void amdgpu_fence_driver_fini_hw(struct amdgpu_device *adev)
void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev)
{
int i, r;
@ -534,8 +551,7 @@ void amdgpu_fence_driver_fini_hw(struct amdgpu_device *adev)
if (!ring || !ring->fence_drv.initialized)
continue;
if (!ring->no_scheduler)
drm_sched_fini(&ring->sched);
/* You can't wait for HW to signal if it's gone */
if (!drm_dev_is_unplugged(&adev->ddev))
r = amdgpu_fence_wait_empty(ring);
@ -553,7 +569,7 @@ void amdgpu_fence_driver_fini_hw(struct amdgpu_device *adev)
}
}
void amdgpu_fence_driver_fini_sw(struct amdgpu_device *adev)
void amdgpu_fence_driver_sw_fini(struct amdgpu_device *adev)
{
unsigned int i, j;
@ -563,6 +579,9 @@ void amdgpu_fence_driver_fini_sw(struct amdgpu_device *adev)
if (!ring || !ring->fence_drv.initialized)
continue;
if (!ring->no_scheduler)
drm_sched_fini(&ring->sched);
for (j = 0; j <= ring->fence_drv.num_fences_mask; ++j)
dma_fence_put(ring->fence_drv.fences[j]);
kfree(ring->fence_drv.fences);
@ -572,49 +591,18 @@ void amdgpu_fence_driver_fini_sw(struct amdgpu_device *adev)
}
/**
* amdgpu_fence_driver_suspend - suspend the fence driver
* amdgpu_fence_driver_hw_init - enable the fence driver
* for all possible rings.
*
* @adev: amdgpu device pointer
*
* Suspend the fence driver for all possible rings (all asics).
*/
void amdgpu_fence_driver_suspend(struct amdgpu_device *adev)
{
int i, r;
for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
struct amdgpu_ring *ring = adev->rings[i];
if (!ring || !ring->fence_drv.initialized)
continue;
/* wait for gpu to finish processing current batch */
r = amdgpu_fence_wait_empty(ring);
if (r) {
/* delay GPU reset to resume */
amdgpu_fence_driver_force_completion(ring);
}
/* disable the interrupt */
if (ring->fence_drv.irq_src)
amdgpu_irq_put(adev, ring->fence_drv.irq_src,
ring->fence_drv.irq_type);
}
}
/**
* amdgpu_fence_driver_resume - resume the fence driver
* for all possible rings.
*
* @adev: amdgpu device pointer
*
* Resume the fence driver for all possible rings (all asics).
* Enable the fence driver for all possible rings (all asics).
* Not all asics have all rings, so each asic will only
* start the fence driver on the rings it has using
* amdgpu_fence_driver_start_ring().
* Returns 0 for success.
*/
void amdgpu_fence_driver_resume(struct amdgpu_device *adev)
void amdgpu_fence_driver_hw_init(struct amdgpu_device *adev)
{
int i;
@ -653,8 +641,16 @@ static const char *amdgpu_fence_get_driver_name(struct dma_fence *fence)
static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
{
struct amdgpu_fence *fence = to_amdgpu_fence(f);
return (const char *)fence->ring->name;
struct amdgpu_ring *ring;
if (test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &f->flags)) {
struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
ring = to_amdgpu_ring(job->base.sched);
} else {
ring = to_amdgpu_fence(f)->ring;
}
return (const char *)ring->name;
}
/**
@ -667,13 +663,20 @@ static const char *amdgpu_fence_get_timeline_name(struct dma_fence *f)
*/
static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
{
struct amdgpu_fence *fence = to_amdgpu_fence(f);
struct amdgpu_ring *ring = fence->ring;
struct amdgpu_ring *ring;
if (test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &f->flags)) {
struct amdgpu_job *job = container_of(f, struct amdgpu_job, hw_fence);
ring = to_amdgpu_ring(job->base.sched);
} else {
ring = to_amdgpu_fence(f)->ring;
}
if (!timer_pending(&ring->fence_drv.fallback_timer))
amdgpu_fence_schedule_fallback(ring);
DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
DMA_FENCE_TRACE(f, "armed on ring %i!\n", ring->idx);
return true;
}
@ -688,8 +691,20 @@ static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
static void amdgpu_fence_free(struct rcu_head *rcu)
{
struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
struct amdgpu_fence *fence = to_amdgpu_fence(f);
kmem_cache_free(amdgpu_fence_slab, fence);
if (test_bit(AMDGPU_FENCE_FLAG_EMBED_IN_JOB_BIT, &f->flags)) {
/* free the job if the fence has a parent job */
struct amdgpu_job *job;
job = container_of(f, struct amdgpu_job, hw_fence);
kfree(job);
} else {
/* free back to the fence slab if it's a separate fence */
struct amdgpu_fence *fence;
fence = to_amdgpu_fence(f);
kmem_cache_free(amdgpu_fence_slab, fence);
}
}
/**
@ -712,6 +727,7 @@ static const struct dma_fence_ops amdgpu_fence_ops = {
.release = amdgpu_fence_release,
};
/*
* Fence debugfs
*/


@ -27,10 +27,10 @@
#include "smu_v11_0_i2c.h"
#include "atom.h"
#include "amdgpu_fru_eeprom.h"
#include "amdgpu_eeprom.h"
#define I2C_PRODUCT_INFO_ADDR 0xAC
#define I2C_PRODUCT_INFO_ADDR_SIZE 0x2
#define I2C_PRODUCT_INFO_OFFSET 0xC0
#define FRU_EEPROM_MADDR 0x60000
#define I2C_PRODUCT_INFO_OFFSET 0xC0
static bool is_fru_eeprom_supported(struct amdgpu_device *adev)
{
@ -62,19 +62,11 @@ static bool is_fru_eeprom_supported(struct amdgpu_device *adev)
}
static int amdgpu_fru_read_eeprom(struct amdgpu_device *adev, uint32_t addrptr,
unsigned char *buff)
unsigned char *buff)
{
int ret, size;
struct i2c_msg msg = {
.addr = I2C_PRODUCT_INFO_ADDR,
.flags = I2C_M_RD,
.buf = buff,
};
buff[0] = 0;
buff[1] = addrptr;
msg.len = I2C_PRODUCT_INFO_ADDR_SIZE + 1;
ret = i2c_transfer(&adev->pm.smu_i2c, &msg, 1);
ret = amdgpu_eeprom_read(&adev->pm.smu_i2c, addrptr, buff, 1);
if (ret < 1) {
DRM_WARN("FRU: Failed to get size field");
return ret;
@ -83,13 +75,9 @@ static int amdgpu_fru_read_eeprom(struct amdgpu_device *adev, uint32_t addrptr,
/* The size returned by the i2c requires subtraction of 0xC0 since the
* size apparently always reports as 0xC0+actual size.
*/
size = buff[2] - I2C_PRODUCT_INFO_OFFSET;
/* Add 1 since address field was 1 byte */
buff[1] = addrptr + 1;
msg.len = I2C_PRODUCT_INFO_ADDR_SIZE + size;
ret = i2c_transfer(&adev->pm.smu_i2c, &msg, 1);
size = buff[0] - I2C_PRODUCT_INFO_OFFSET;
ret = amdgpu_eeprom_read(&adev->pm.smu_i2c, addrptr + 1, buff, size);
if (ret < 1) {
DRM_WARN("FRU: Failed to get data field");
return ret;
@ -101,8 +89,8 @@ static int amdgpu_fru_read_eeprom(struct amdgpu_device *adev, uint32_t addrptr,
int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
{
unsigned char buff[34];
int addrptr, size;
int len;
u32 addrptr;
int size, len;
if (!is_fru_eeprom_supported(adev))
return 0;
@ -125,7 +113,7 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev)
* Bytes 8-a are all 1-byte and refer to the size of the entire struct,
* and the language field, so just start from 0xb, manufacturer size
*/
addrptr = 0xb;
addrptr = FRU_EEPROM_MADDR + 0xb;
size = amdgpu_fru_read_eeprom(adev, addrptr, buff);
if (size < 1) {
DRM_ERROR("Failed to read FRU Manufacturer, ret:%d", size);


@ -76,7 +76,7 @@ static int amdgpu_gart_dummy_page_init(struct amdgpu_device *adev)
if (adev->dummy_page_addr)
return 0;
adev->dummy_page_addr = dma_map_page(&adev->pdev->dev, dummy_page, 0,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
PAGE_SIZE, DMA_BIDIRECTIONAL);
if (dma_mapping_error(&adev->pdev->dev, adev->dummy_page_addr)) {
dev_err(&adev->pdev->dev, "Failed to DMA MAP the dummy page\n");
adev->dummy_page_addr = 0;
@ -96,8 +96,8 @@ void amdgpu_gart_dummy_page_fini(struct amdgpu_device *adev)
{
if (!adev->dummy_page_addr)
return;
pci_unmap_page(adev->pdev, adev->dummy_page_addr,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
dma_unmap_page(&adev->pdev->dev, adev->dummy_page_addr, PAGE_SIZE,
DMA_BIDIRECTIONAL);
adev->dummy_page_addr = 0;
}


@ -621,7 +621,7 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
if (operation == AMDGPU_VA_OP_MAP ||
operation == AMDGPU_VA_OP_REPLACE) {
r = amdgpu_vm_bo_update(adev, bo_va, false);
r = amdgpu_vm_bo_update(adev, bo_va, false, NULL);
if (r)
goto error;
}
@ -838,7 +838,8 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
break;
}
case AMDGPU_GEM_OP_SET_PLACEMENT:
if (robj->prime_shared_count && (args->value & AMDGPU_GEM_DOMAIN_VRAM)) {
if (robj->tbo.base.import_attach &&
args->value & AMDGPU_GEM_DOMAIN_VRAM) {
r = -EINVAL;
amdgpu_bo_unreserve(robj);
break;
@ -903,7 +904,7 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
DIV_ROUND_UP(args->bpp, 8), 0);
args->size = (u64)args->pitch * args->height;
args->size = ALIGN(args->size, PAGE_SIZE);
domain = amdgpu_bo_get_preferred_pin_domain(adev,
domain = amdgpu_bo_get_preferred_domain(adev,
amdgpu_display_supported_domains(adev, flags));
r = amdgpu_gem_object_create(adev, args->size, 0, domain, flags,
ttm_bo_type_device, NULL, &gobj);


@ -629,7 +629,6 @@ int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev)
adev->gfx.ras_if->block = AMDGPU_RAS_BLOCK__GFX;
adev->gfx.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
adev->gfx.ras_if->sub_block_index = 0;
strcpy(adev->gfx.ras_if->name, "gfx");
}
fs_info.head = ih_info.head = *adev->gfx.ras_if;
r = amdgpu_ras_late_init(adev, adev->gfx.ras_if,


@ -471,6 +471,27 @@ int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev)
return r;
}
if (adev->mca.mp0.ras_funcs &&
adev->mca.mp0.ras_funcs->ras_late_init) {
r = adev->mca.mp0.ras_funcs->ras_late_init(adev);
if (r)
return r;
}
if (adev->mca.mp1.ras_funcs &&
adev->mca.mp1.ras_funcs->ras_late_init) {
r = adev->mca.mp1.ras_funcs->ras_late_init(adev);
if (r)
return r;
}
if (adev->mca.mpio.ras_funcs &&
adev->mca.mpio.ras_funcs->ras_late_init) {
r = adev->mca.mpio.ras_funcs->ras_late_init(adev);
if (r)
return r;
}
return 0;
}


@ -132,14 +132,11 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
struct amdgpu_gtt_node *node;
int r;
spin_lock(&mgr->lock);
if (tbo->resource && tbo->resource->mem_type != TTM_PL_TT &&
atomic64_read(&mgr->available) < num_pages) {
spin_unlock(&mgr->lock);
if (!(place->flags & TTM_PL_FLAG_TEMPORARY) &&
atomic64_add_return(num_pages, &mgr->used) > man->size) {
atomic64_sub(num_pages, &mgr->used);
return -ENOSPC;
}
atomic64_sub(num_pages, &mgr->available);
spin_unlock(&mgr->lock);
node = kzalloc(struct_size(node, base.mm_nodes, 1), GFP_KERNEL);
if (!node) {
@ -175,7 +172,8 @@ err_free:
kfree(node);
err_out:
atomic64_add(num_pages, &mgr->available);
if (!(place->flags & TTM_PL_FLAG_TEMPORARY))
atomic64_sub(num_pages, &mgr->used);
return r;
}
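
Numerically, the lock-free accounting above behaves like this (illustrative values):

/* man->size = 1048576 pages, mgr->used = 1048000:
 *   a 1000-page request sees atomic64_add_return() == 1049000 > size,
 *   subtracts itself back out and returns -ENOSPC;
 * TTM_PL_FLAG_TEMPORARY allocations bypass the accounting entirely.
 */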
@ -198,7 +196,9 @@ static void amdgpu_gtt_mgr_del(struct ttm_resource_manager *man,
if (drm_mm_node_allocated(&node->base.mm_nodes[0]))
drm_mm_remove_node(&node->base.mm_nodes[0]);
spin_unlock(&mgr->lock);
atomic64_add(res->num_pages, &mgr->available);
if (!(res->placement & TTM_PL_FLAG_TEMPORARY))
atomic64_sub(res->num_pages, &mgr->used);
kfree(node);
}
@ -213,9 +213,8 @@ static void amdgpu_gtt_mgr_del(struct ttm_resource_manager *man,
uint64_t amdgpu_gtt_mgr_usage(struct ttm_resource_manager *man)
{
struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man);
s64 result = man->size - atomic64_read(&mgr->available);
return (result > 0 ? result : 0) * PAGE_SIZE;
return atomic64_read(&mgr->used) * PAGE_SIZE;
}
/**
@ -265,9 +264,8 @@ static void amdgpu_gtt_mgr_debug(struct ttm_resource_manager *man,
drm_mm_print(&mgr->mm, printer);
spin_unlock(&mgr->lock);
drm_printf(printer, "man size:%llu pages, gtt available:%lld pages, usage:%lluMB\n",
man->size, (u64)atomic64_read(&mgr->available),
amdgpu_gtt_mgr_usage(man) >> 20);
drm_printf(printer, "man size:%llu pages, gtt used:%llu pages\n",
man->size, atomic64_read(&mgr->used));
}
static const struct ttm_resource_manager_func amdgpu_gtt_mgr_func = {
@ -299,7 +297,7 @@ int amdgpu_gtt_mgr_init(struct amdgpu_device *adev, uint64_t gtt_size)
size = (adev->gmc.gart_size >> PAGE_SHIFT) - start;
drm_mm_init(&mgr->mm, start, size);
spin_lock_init(&mgr->lock);
atomic64_set(&mgr->available, gtt_size >> PAGE_SHIFT);
atomic64_set(&mgr->used, 0);
ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_TT, &mgr->manager);
ttm_resource_manager_set_used(man, true);


@ -41,7 +41,6 @@ int amdgpu_hdp_ras_late_init(struct amdgpu_device *adev)
adev->hdp.ras_if->block = AMDGPU_RAS_BLOCK__HDP;
adev->hdp.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
adev->hdp.ras_if->sub_block_index = 0;
strcpy(adev->hdp.ras_if->name, "hdp");
}
ih_info.head = fs_info.head = *adev->hdp.ras_if;
r = amdgpu_ras_late_init(adev, adev->hdp.ras_if,


@ -339,7 +339,7 @@ static void amdgpu_i2c_put_byte(struct amdgpu_i2c_chan *i2c_bus,
void
amdgpu_i2c_router_select_ddc_port(const struct amdgpu_connector *amdgpu_connector)
{
u8 val;
u8 val = 0;
if (!amdgpu_connector->router.ddc_valid)
return;


@ -262,7 +262,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
fence_flags | AMDGPU_FENCE_FLAG_64BIT);
}
r = amdgpu_fence_emit(ring, f, fence_flags);
r = amdgpu_fence_emit(ring, f, job, fence_flags);
if (r) {
dev_err(adev->dev, "failed to emit fence (%d)\n", r);
if (job && job->vmid)


@ -46,7 +46,6 @@
#include <linux/pci.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_irq.h>
#include <drm/drm_vblank.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_drv.h>
@ -184,7 +183,7 @@ void amdgpu_irq_disable_all(struct amdgpu_device *adev)
* Returns:
* result of handling the IRQ, as defined by &irqreturn_t
*/
irqreturn_t amdgpu_irq_handler(int irq, void *arg)
static irqreturn_t amdgpu_irq_handler(int irq, void *arg)
{
struct drm_device *dev = (struct drm_device *) arg;
struct amdgpu_device *adev = drm_to_adev(dev);
@ -307,6 +306,7 @@ static void amdgpu_restore_msix(struct amdgpu_device *adev)
int amdgpu_irq_init(struct amdgpu_device *adev)
{
int r = 0;
unsigned int irq;
spin_lock_init(&adev->irq.lock);
@ -349,15 +349,22 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
INIT_WORK(&adev->irq.ih2_work, amdgpu_irq_handle_ih2);
INIT_WORK(&adev->irq.ih_soft_work, amdgpu_irq_handle_ih_soft);
adev->irq.installed = true;
/* Use vector 0 for MSI-X */
r = drm_irq_install(adev_to_drm(adev), pci_irq_vector(adev->pdev, 0));
/* Use vector 0 for MSI-X. */
r = pci_irq_vector(adev->pdev, 0);
if (r < 0)
return r;
irq = r;
/* PCI devices require shared interrupts. */
r = request_irq(irq, amdgpu_irq_handler, IRQF_SHARED, adev_to_drm(adev)->driver->name,
adev_to_drm(adev));
if (r) {
adev->irq.installed = false;
if (!amdgpu_device_has_dc_support(adev))
flush_work(&adev->hotplug_work);
return r;
}
adev->irq.installed = true;
adev->irq.irq = irq;
adev_to_drm(adev)->max_vblank_count = 0x00ffffff;
DRM_DEBUG("amdgpu: irq initialized.\n");
@ -368,7 +375,7 @@ int amdgpu_irq_init(struct amdgpu_device *adev)
void amdgpu_irq_fini_hw(struct amdgpu_device *adev)
{
if (adev->irq.installed) {
drm_irq_uninstall(&adev->ddev);
free_irq(adev->irq.irq, adev_to_drm(adev));
adev->irq.installed = false;
if (adev->irq.msi_enabled)
pci_free_irq_vectors(adev->pdev);
@ -584,7 +591,7 @@ void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
{
int i, j, k;
if (amdgpu_sriov_vf(adev))
if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev))
amdgpu_restore_msix(adev);
for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) {
@ -617,7 +624,7 @@ void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
unsigned type)
{
if (!adev_to_drm(adev)->irq_enabled)
if (!adev->irq.installed)
return -ENOENT;
if (type >= src->num_types)
@ -647,7 +654,7 @@ int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
unsigned type)
{
if (!adev_to_drm(adev)->irq_enabled)
if (!adev->irq.installed)
return -ENOENT;
if (type >= src->num_types)
@ -678,7 +685,7 @@ int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
unsigned type)
{
if (!adev_to_drm(adev)->irq_enabled)
if (!adev->irq.installed)
return false;
if (type >= src->num_types)


@ -80,6 +80,7 @@ struct amdgpu_irq_src_funcs {
struct amdgpu_irq {
bool installed;
unsigned int irq;
spinlock_t lock;
/* interrupt sources */
struct amdgpu_irq_client client[AMDGPU_IRQ_CLIENTID_MAX];
@ -100,7 +101,6 @@ struct amdgpu_irq {
};
void amdgpu_irq_disable_all(struct amdgpu_device *adev);
irqreturn_t amdgpu_irq_handler(int irq, void *arg);
int amdgpu_irq_init(struct amdgpu_device *adev);
void amdgpu_irq_fini_sw(struct amdgpu_device *adev);


@ -127,11 +127,16 @@ void amdgpu_job_free_resources(struct amdgpu_job *job)
{
struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched);
struct dma_fence *f;
struct dma_fence *hw_fence;
unsigned i;
/* use sched fence if available */
f = job->base.s_fence ? &job->base.s_fence->finished : job->fence;
if (job->hw_fence.ops == NULL)
hw_fence = job->external_hw_fence;
else
hw_fence = &job->hw_fence;
/* use sched fence if available */
f = job->base.s_fence ? &job->base.s_fence->finished : hw_fence;
for (i = 0; i < job->num_ibs; ++i)
amdgpu_ib_free(ring->adev, &job->ibs[i], f);
}
@ -142,20 +147,27 @@ static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
drm_sched_job_cleanup(s_job);
dma_fence_put(job->fence);
amdgpu_sync_free(&job->sync);
amdgpu_sync_free(&job->sched_sync);
kfree(job);
/* only put the hw fence if the job has an embedded fence */
if (job->hw_fence.ops != NULL)
dma_fence_put(&job->hw_fence);
else
kfree(job);
}
void amdgpu_job_free(struct amdgpu_job *job)
{
amdgpu_job_free_resources(job);
dma_fence_put(job->fence);
amdgpu_sync_free(&job->sync);
amdgpu_sync_free(&job->sched_sync);
kfree(job);
/* only put the hw fence if the job has an embedded fence */
if (job->hw_fence.ops != NULL)
dma_fence_put(&job->hw_fence);
else
kfree(job);
}
int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
@ -184,11 +196,14 @@ int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
job->base.sched = &ring->sched;
r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, NULL, fence);
job->fence = dma_fence_get(*fence);
/* record external_hw_fence for direct submit */
job->external_hw_fence = dma_fence_get(*fence);
if (r)
return r;
amdgpu_job_free(job);
dma_fence_put(*fence);
return 0;
}
@ -246,10 +261,12 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
if (r)
DRM_ERROR("Error scheduling IBs (%d)\n", r);
}
/* if a gpu reset happened, the hw fence is replaced here */
dma_fence_put(job->fence);
job->fence = dma_fence_get(fence);
if (!job->job_run_counter)
dma_fence_get(fence);
else if (finished->error < 0)
dma_fence_put(&job->hw_fence);
job->job_run_counter++;
amdgpu_job_free_resources(job);
fence = r ? ERR_PTR(r) : fence;


@ -46,7 +46,8 @@ struct amdgpu_job {
struct amdgpu_sync sync;
struct amdgpu_sync sched_sync;
struct amdgpu_ib *ibs;
struct dma_fence *fence; /* the hw fence */
struct dma_fence hw_fence;
struct dma_fence *external_hw_fence;
uint32_t preamble_status;
uint32_t preemption_status;
uint32_t num_ibs;
@ -62,6 +63,9 @@ struct amdgpu_job {
/* user fence handling */
uint64_t uf_addr;
uint64_t uf_sequence;
/* job_run_counter >= 1 means a resubmit job */
uint32_t job_run_counter;
};
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,


@ -47,8 +47,6 @@ int amdgpu_jpeg_sw_fini(struct amdgpu_device *adev)
{
int i;
cancel_delayed_work_sync(&adev->jpeg.idle_work);
for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
if (adev->jpeg.harvest_config & (1 << i))
continue;


@ -341,27 +341,27 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
switch (query_fw->index) {
case TA_FW_TYPE_PSP_XGMI:
fw_info->ver = adev->psp.ta_fw_version;
fw_info->feature = adev->psp.ta_xgmi_ucode_version;
fw_info->feature = adev->psp.xgmi.feature_version;
break;
case TA_FW_TYPE_PSP_RAS:
fw_info->ver = adev->psp.ta_fw_version;
fw_info->feature = adev->psp.ta_ras_ucode_version;
fw_info->feature = adev->psp.ras.feature_version;
break;
case TA_FW_TYPE_PSP_HDCP:
fw_info->ver = adev->psp.ta_fw_version;
fw_info->feature = adev->psp.ta_hdcp_ucode_version;
fw_info->feature = adev->psp.hdcp.feature_version;
break;
case TA_FW_TYPE_PSP_DTM:
fw_info->ver = adev->psp.ta_fw_version;
fw_info->feature = adev->psp.ta_dtm_ucode_version;
fw_info->feature = adev->psp.dtm.feature_version;
break;
case TA_FW_TYPE_PSP_RAP:
fw_info->ver = adev->psp.ta_fw_version;
fw_info->feature = adev->psp.ta_rap_ucode_version;
fw_info->feature = adev->psp.rap.feature_version;
break;
case TA_FW_TYPE_PSP_SECUREDISPLAY:
fw_info->ver = adev->psp.ta_fw_version;
fw_info->feature = adev->psp.ta_securedisplay_ucode_version;
fw_info->feature = adev->psp.securedisplay.feature_version;
break;
default:
return -EINVAL;
@ -374,12 +374,12 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
fw_info->feature = adev->sdma.instance[query_fw->index].feature_version;
break;
case AMDGPU_INFO_FW_SOS:
fw_info->ver = adev->psp.sos_fw_version;
fw_info->feature = adev->psp.sos_feature_version;
fw_info->ver = adev->psp.sos.fw_version;
fw_info->feature = adev->psp.sos.feature_version;
break;
case AMDGPU_INFO_FW_ASD:
fw_info->ver = adev->psp.asd_fw_version;
fw_info->feature = adev->psp.asd_feature_version;
fw_info->ver = adev->psp.asd.fw_version;
fw_info->feature = adev->psp.asd.feature_version;
break;
case AMDGPU_INFO_FW_DMCU:
fw_info->ver = adev->dm.dmcu_fw_version;
@ -390,8 +390,8 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
fw_info->feature = 0;
break;
case AMDGPU_INFO_FW_TOC:
fw_info->ver = adev->psp.toc_fw_version;
fw_info->feature = adev->psp.toc_feature_version;
fw_info->ver = adev->psp.toc.fw_version;
fw_info->feature = adev->psp.toc.feature_version;
break;
default:
return -EINVAL;
@ -1179,10 +1179,14 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
pasid = 0;
}
r = amdgpu_vm_init(adev, &fpriv->vm, pasid);
r = amdgpu_vm_init(adev, &fpriv->vm);
if (r)
goto error_pasid;
r = amdgpu_vm_set_pasid(adev, &fpriv->vm, pasid);
if (r)
goto error_vm;
fpriv->prt_va = amdgpu_vm_bo_add(adev, &fpriv->vm, NULL);
if (!fpriv->prt_va) {
r = -ENOMEM;
@ -1210,8 +1214,10 @@ error_vm:
amdgpu_vm_fini(adev, &fpriv->vm);
error_pasid:
if (pasid)
if (pasid) {
amdgpu_pasid_free(pasid);
amdgpu_vm_set_pasid(adev, &fpriv->vm, 0);
}
kfree(fpriv);


@ -0,0 +1,117 @@
/*
* Copyright 2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "amdgpu_ras.h"
#include "amdgpu.h"
#include "amdgpu_mca.h"
#include "umc/umc_6_7_0_offset.h"
#include "umc/umc_6_7_0_sh_mask.h"
void amdgpu_mca_query_correctable_error_count(struct amdgpu_device *adev,
uint64_t mc_status_addr,
unsigned long *error_count)
{
uint64_t mc_status = RREG64_PCIE(mc_status_addr * 4);
if (REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)
*error_count += 1;
}
void amdgpu_mca_query_uncorrectable_error_count(struct amdgpu_device *adev,
uint64_t mc_status_addr,
unsigned long *error_count)
{
uint64_t mc_status = RREG64_PCIE(mc_status_addr * 4);
if ((REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
(REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 ||
REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 ||
REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 ||
REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 ||
REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1))
*error_count += 1;
}
void amdgpu_mca_reset_error_count(struct amdgpu_device *adev,
uint64_t mc_status_addr)
{
WREG64_PCIE(mc_status_addr * 4, 0x0ULL);
}
void amdgpu_mca_query_ras_error_count(struct amdgpu_device *adev,
uint64_t mc_status_addr,
void *ras_error_status)
{
struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
amdgpu_mca_query_correctable_error_count(adev, mc_status_addr, &(err_data->ce_count));
amdgpu_mca_query_uncorrectable_error_count(adev, mc_status_addr, &(err_data->ue_count));
amdgpu_mca_reset_error_count(adev, mc_status_addr);
}
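
A hedged sketch of driving the helper; mc_status_addr is whatever MCA status register the calling IP block supplies:

struct ras_err_data err_data = {0};

/* Counts CEs (Val && CECC) and UEs (Val && Deferred/UECC/PCC/UC/TCC)
 * from the status register, then clears it via WREG64_PCIE().
 */
amdgpu_mca_query_ras_error_count(adev, mc_status_addr, &err_data);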
int amdgpu_mca_ras_late_init(struct amdgpu_device *adev,
struct amdgpu_mca_ras *mca_dev)
{
int r;
struct ras_ih_if ih_info = {
.cb = NULL,
};
struct ras_fs_if fs_info = {
.sysfs_name = mca_dev->ras_funcs->sysfs_name,
};
if (!mca_dev->ras_if) {
mca_dev->ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
if (!mca_dev->ras_if)
return -ENOMEM;
mca_dev->ras_if->block = mca_dev->ras_funcs->ras_block;
mca_dev->ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
mca_dev->ras_if->sub_block_index = 0;
}
ih_info.head = fs_info.head = *mca_dev->ras_if;
r = amdgpu_ras_late_init(adev, mca_dev->ras_if,
&fs_info, &ih_info);
if (r || !amdgpu_ras_is_supported(adev, mca_dev->ras_if->block)) {
kfree(mca_dev->ras_if);
mca_dev->ras_if = NULL;
}
return r;
}
void amdgpu_mca_ras_fini(struct amdgpu_device *adev,
struct amdgpu_mca_ras *mca_dev)
{
struct ras_ih_if ih_info = {
.cb = NULL,
};
if (!mca_dev->ras_if)
return;
amdgpu_ras_late_fini(adev, mca_dev->ras_if, &ih_info);
kfree(mca_dev->ras_if);
mca_dev->ras_if = NULL;
}


@ -0,0 +1,72 @@
/*
* Copyright (C) 2021 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
* AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#ifndef __AMDGPU_MCA_H__
#define __AMDGPU_MCA_H__
struct amdgpu_mca_ras_funcs {
int (*ras_late_init)(struct amdgpu_device *adev);
void (*ras_fini)(struct amdgpu_device *adev);
void (*query_ras_error_count)(struct amdgpu_device *adev,
void *ras_error_status);
void (*query_ras_error_address)(struct amdgpu_device *adev,
void *ras_error_status);
uint32_t ras_block;
const char* sysfs_name;
};
struct amdgpu_mca_ras {
struct ras_common_if *ras_if;
const struct amdgpu_mca_ras_funcs *ras_funcs;
};
struct amdgpu_mca_funcs {
void (*init)(struct amdgpu_device *adev);
};
struct amdgpu_mca {
const struct amdgpu_mca_funcs *funcs;
struct amdgpu_mca_ras mp0;
struct amdgpu_mca_ras mp1;
struct amdgpu_mca_ras mpio;
};
void amdgpu_mca_query_correctable_error_count(struct amdgpu_device *adev,
uint64_t mc_status_addr,
unsigned long *error_count);
void amdgpu_mca_query_uncorrectable_error_count(struct amdgpu_device *adev,
uint64_t mc_status_addr,
unsigned long *error_count);
void amdgpu_mca_reset_error_count(struct amdgpu_device *adev,
uint64_t mc_status_addr);
void amdgpu_mca_query_ras_error_count(struct amdgpu_device *adev,
uint64_t mc_status_addr,
void *ras_error_status);
int amdgpu_mca_ras_late_init(struct amdgpu_device *adev,
struct amdgpu_mca_ras *mca_dev);
void amdgpu_mca_ras_fini(struct amdgpu_device *adev,
struct amdgpu_mca_ras *mca_dev);
#endif


@ -41,7 +41,6 @@ int amdgpu_mmhub_ras_late_init(struct amdgpu_device *adev)
adev->mmhub.ras_if->block = AMDGPU_RAS_BLOCK__MMHUB;
adev->mmhub.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
adev->mmhub.ras_if->sub_block_index = 0;
strcpy(adev->mmhub.ras_if->name, "mmhub");
}
ih_info.head = fs_info.head = *adev->mmhub.ras_if;
r = amdgpu_ras_late_init(adev, adev->mmhub.ras_if,


@ -39,7 +39,6 @@ int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev)
adev->nbio.ras_if->block = AMDGPU_RAS_BLOCK__PCIE_BIF;
adev->nbio.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
adev->nbio.ras_if->sub_block_index = 0;
strcpy(adev->nbio.ras_if->name, "pcie_bif");
}
ih_info.head = fs_info.head = *adev->nbio.ras_if;
r = amdgpu_ras_late_init(adev, adev->nbio.ras_if,


@ -95,6 +95,7 @@ struct amdgpu_nbio_funcs {
void (*program_aspm)(struct amdgpu_device *adev);
void (*apply_lc_spc_mode_wa)(struct amdgpu_device *adev);
void (*apply_l1_link_width_reconfig_wa)(struct amdgpu_device *adev);
void (*clear_doorbell_interrupt)(struct amdgpu_device *adev);
};
struct amdgpu_nbio {


@ -196,7 +196,7 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
c++;
}
BUG_ON(c >= AMDGPU_BO_MAX_PLACEMENTS);
BUG_ON(c > AMDGPU_BO_MAX_PLACEMENTS);
placement->num_placement = c;
placement->placement = places;
@ -731,7 +731,7 @@ retry:
/**
* amdgpu_bo_add_to_shadow_list - add a BO to the shadow list
*
* @bo: BO that will be inserted into the shadow list
* @vmbo: BO that will be inserted into the shadow list
*
* Insert a BO to the shadow list.
*/
@ -913,7 +913,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
return -EINVAL;
/* A shared bo cannot be migrated to VRAM */
if (bo->prime_shared_count || bo->tbo.base.import_attach) {
if (bo->tbo.base.import_attach) {
if (domain & AMDGPU_GEM_DOMAIN_GTT)
domain = AMDGPU_GEM_DOMAIN_GTT;
else
@ -947,7 +947,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
/* This assumes only APU display buffers are pinned with (VRAM|GTT).
* See function amdgpu_display_supported_domains()
*/
domain = amdgpu_bo_get_preferred_pin_domain(adev, domain);
domain = amdgpu_bo_get_preferred_domain(adev, domain);
if (bo->tbo.base.import_attach)
dma_buf_pin(bo->tbo.base.import_attach);
@ -1518,14 +1518,14 @@ u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo)
}
/**
* amdgpu_bo_get_preferred_pin_domain - get preferred domain for scanout
* amdgpu_bo_get_preferred_domain - get preferred domain
* @adev: amdgpu device object
* @domain: allowed :ref:`memory domains <amdgpu_memory_domains>`
*
* Returns:
* Which of the allowed domains is preferred for pinning the BO for scanout.
* Which of the allowed domains is preferred for allocating the BO.
*/
uint32_t amdgpu_bo_get_preferred_pin_domain(struct amdgpu_device *adev,
uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev,
uint32_t domain)
{
if (domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) {


@ -100,7 +100,6 @@ struct amdgpu_bo {
struct ttm_buffer_object tbo;
struct ttm_bo_kmap_obj kmap;
u64 flags;
unsigned prime_shared_count;
/* per VM structure for page tables and with virtual addresses */
struct amdgpu_vm_bo_base *vm_bo;
/* Constant after initialization */
@ -334,7 +333,7 @@ void amdgpu_bo_get_memory(struct amdgpu_bo *bo, uint64_t *vram_mem,
void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo_vm *vmbo);
int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow,
struct dma_fence **fence);
uint32_t amdgpu_bo_get_preferred_pin_domain(struct amdgpu_device *adev,
uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev,
uint32_t domain);
/*


@ -80,12 +80,17 @@ static void amdgpu_pll_reduce_ratio(unsigned *nom, unsigned *den,
* Calculate feedback and reference divider for a given post divider. Makes
* sure we stay within the limits.
*/
static void amdgpu_pll_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div,
unsigned fb_div_max, unsigned ref_div_max,
unsigned *fb_div, unsigned *ref_div)
static void amdgpu_pll_get_fb_ref_div(struct amdgpu_device *adev, unsigned int nom,
unsigned int den, unsigned int post_div,
unsigned int fb_div_max, unsigned int ref_div_max,
unsigned int *fb_div, unsigned int *ref_div)
{
/* limit reference * post divider to a maximum */
ref_div_max = min(128 / post_div, ref_div_max);
if (adev->family == AMDGPU_FAMILY_SI)
ref_div_max = min(100 / post_div, ref_div_max);
else
ref_div_max = min(128 / post_div, ref_div_max);
/* get matching reference and feedback divider */
*ref_div = min(max(DIV_ROUND_CLOSEST(den, post_div), 1u), ref_div_max);
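Worked numbers for the new SI clamp (illustrative post divider and hardware limit):

/* post_div = 4, hardware ref_div_max = 32:
 *   SI:     ref_div_max = min(100 / 4, 32) = 25
 *   others: ref_div_max = min(128 / 4, 32) = 32
 */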
@ -112,7 +117,8 @@ static void amdgpu_pll_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_
* Try to calculate the PLL parameters to generate the given frequency:
* dot_clock = (ref_freq * feedback_div) / (ref_div * post_div)
*/
void amdgpu_pll_compute(struct amdgpu_pll *pll,
void amdgpu_pll_compute(struct amdgpu_device *adev,
struct amdgpu_pll *pll,
u32 freq,
u32 *dot_clock_p,
u32 *fb_div_p,
@ -199,7 +205,7 @@ void amdgpu_pll_compute(struct amdgpu_pll *pll,
for (post_div = post_div_min; post_div <= post_div_max; ++post_div) {
unsigned diff;
amdgpu_pll_get_fb_ref_div(nom, den, post_div, fb_div_max,
amdgpu_pll_get_fb_ref_div(adev, nom, den, post_div, fb_div_max,
ref_div_max, &fb_div, &ref_div);
diff = abs(target_clock - (pll->reference_freq * fb_div) /
(ref_div * post_div));
@ -214,7 +220,7 @@ void amdgpu_pll_compute(struct amdgpu_pll *pll,
post_div = post_div_best;
/* get the feedback and reference divider for the optimal value */
amdgpu_pll_get_fb_ref_div(nom, den, post_div, fb_div_max, ref_div_max,
amdgpu_pll_get_fb_ref_div(adev, nom, den, post_div, fb_div_max, ref_div_max,
&fb_div, &ref_div);
/* reduce the numbers to a simpler ratio once more */


@ -24,7 +24,8 @@
#ifndef __AMDGPU_PLL_H__
#define __AMDGPU_PLL_H__
void amdgpu_pll_compute(struct amdgpu_pll *pll,
void amdgpu_pll_compute(struct amdgpu_device *adev,
struct amdgpu_pll *pll,
u32 freq,
u32 *dot_clock_p,
u32 *fb_div_p,

[File diff suppressed because it is too large.]


@@ -48,11 +48,15 @@
 struct psp_context;
 struct psp_xgmi_node_info;
 struct psp_xgmi_topology_info;
+struct psp_bin_desc;
 
 enum psp_bootloader_cmd {
 	PSP_BL__LOAD_SYSDRV		= 0x10000,
 	PSP_BL__LOAD_SOSDRV		= 0x20000,
 	PSP_BL__LOAD_KEY_DATABASE	= 0x80000,
+	PSP_BL__LOAD_SOCDRV		= 0xB0000,
+	PSP_BL__LOAD_INTFDRV		= 0xC0000,
+	PSP_BL__LOAD_DBGDRV		= 0xD0000,
 	PSP_BL__DRAM_LONG_TRAIN		= 0x100000,
 	PSP_BL__DRAM_SHORT_TRAIN	= 0x200000,
 	PSP_BL__LOAD_TOS_SPL_TABLE	= 0x10000000,
@@ -93,6 +97,9 @@ struct psp_funcs
 	int (*bootloader_load_kdb)(struct psp_context *psp);
 	int (*bootloader_load_spl)(struct psp_context *psp);
 	int (*bootloader_load_sysdrv)(struct psp_context *psp);
+	int (*bootloader_load_soc_drv)(struct psp_context *psp);
+	int (*bootloader_load_intf_drv)(struct psp_context *psp);
+	int (*bootloader_load_dbg_drv)(struct psp_context *psp);
 	int (*bootloader_load_sos)(struct psp_context *psp);
 	int (*ring_init)(struct psp_context *psp, enum psp_ring_type ring_type);
 	int (*ring_create)(struct psp_context *psp,
@@ -106,7 +113,7 @@ struct psp_funcs
 	int (*mem_training)(struct psp_context *psp, uint32_t ops);
 	uint32_t (*ring_get_wptr)(struct psp_context *psp);
 	void (*ring_set_wptr)(struct psp_context *psp, uint32_t value);
-	int (*load_usbc_pd_fw)(struct psp_context *psp, dma_addr_t dma_addr);
+	int (*load_usbc_pd_fw)(struct psp_context *psp, uint64_t fw_pri_mc_addr);
 	int (*read_usbc_pd_fw)(struct psp_context *psp, uint32_t *fw_ver);
 };
@@ -116,6 +123,7 @@ struct psp_xgmi_node_info {
 	uint8_t					num_hops;
 	uint8_t					is_sharing_enabled;
 	enum ta_xgmi_assigned_sdma_engine	sdma_engine;
+	uint8_t					num_links;
 };
 
 struct psp_xgmi_topology_info {
@@ -128,59 +136,32 @@ struct psp_asd_context {
 	uint32_t		session_id;
 };
 
+struct ta_mem_context {
+	struct amdgpu_bo		*shared_bo;
+	uint64_t		shared_mc_addr;
+	void			*shared_buf;
+};
+
+struct ta_context {
+	bool			initialized;
+	uint32_t		session_id;
+	struct ta_mem_context	mem_context;
+};
+
+struct ta_cp_context {
+	struct ta_context		context;
+	struct mutex			mutex;
+};
+
 struct psp_xgmi_context {
-	uint8_t				initialized;
-	uint32_t			session_id;
-	struct amdgpu_bo		*xgmi_shared_bo;
-	uint64_t			xgmi_shared_mc_addr;
-	void				*xgmi_shared_buf;
+	struct ta_context		context;
 	struct psp_xgmi_topology_info	top_info;
+	bool				supports_extended_data;
 };
 
 struct psp_ras_context {
-	/*ras fw*/
-	bool			ras_initialized;
-	uint32_t		session_id;
-	struct amdgpu_bo	*ras_shared_bo;
-	uint64_t		ras_shared_mc_addr;
-	void			*ras_shared_buf;
-	struct amdgpu_ras	*ras;
-};
-
-struct psp_hdcp_context {
-	bool			hdcp_initialized;
-	uint32_t		session_id;
-	struct amdgpu_bo	*hdcp_shared_bo;
-	uint64_t		hdcp_shared_mc_addr;
-	void			*hdcp_shared_buf;
-	struct mutex		mutex;
-};
-
-struct psp_dtm_context {
-	bool			dtm_initialized;
-	uint32_t		session_id;
-	struct amdgpu_bo	*dtm_shared_bo;
-	uint64_t		dtm_shared_mc_addr;
-	void			*dtm_shared_buf;
-	struct mutex		mutex;
-};
-
-struct psp_rap_context {
-	bool			rap_initialized;
-	uint32_t		session_id;
-	struct amdgpu_bo	*rap_shared_bo;
-	uint64_t		rap_shared_mc_addr;
-	void			*rap_shared_buf;
-	struct mutex		mutex;
-};
-
-struct psp_securedisplay_context {
-	bool			securedisplay_initialized;
-	uint32_t		session_id;
-	struct amdgpu_bo	*securedisplay_shared_bo;
-	uint64_t		securedisplay_shared_mc_addr;
-	void			*securedisplay_shared_buf;
-	struct mutex		mutex;
+	struct ta_context		context;
+	struct amdgpu_ras		*ras;
 };
 
 #define MEM_TRAIN_SYSTEM_SIGNATURE		0x54534942
@@ -282,6 +263,13 @@ struct psp_runtime_boot_cfg_entry {
 	uint32_t reserved;
 };
 
+struct psp_bin_desc {
+	uint32_t fw_version;
+	uint32_t feature_version;
+	uint32_t size_bytes;
+	uint8_t *start_addr;
+};
+
 struct psp_context
 {
 	struct amdgpu_device		*adev;
@@ -297,36 +285,26 @@ struct psp_context
 	/* sos firmware */
 	const struct firmware		*sos_fw;
-	uint32_t			sos_fw_version;
-	uint32_t			sos_feature_version;
-	uint32_t			sys_bin_size;
-	uint32_t			sos_bin_size;
-	uint32_t			toc_bin_size;
-	uint32_t			kdb_bin_size;
-	uint32_t			spl_bin_size;
-	uint32_t			rl_bin_size;
-	uint8_t				*sys_start_addr;
-	uint8_t				*sos_start_addr;
-	uint8_t				*toc_start_addr;
-	uint8_t				*kdb_start_addr;
-	uint8_t				*spl_start_addr;
-	uint8_t				*rl_start_addr;
+	struct psp_bin_desc		sys;
+	struct psp_bin_desc		sos;
+	struct psp_bin_desc		toc;
+	struct psp_bin_desc		kdb;
+	struct psp_bin_desc		spl;
+	struct psp_bin_desc		rl;
+	struct psp_bin_desc		soc_drv;
+	struct psp_bin_desc		intf_drv;
+	struct psp_bin_desc		dbg_drv;
 
 	/* tmr buffer */
 	struct amdgpu_bo		*tmr_bo;
 	uint64_t			tmr_mc_addr;
 
 	/* asd firmware */
-	const struct firmware		*asd_fw;
-	uint32_t			asd_fw_version;
-	uint32_t			asd_feature_version;
-	uint32_t			asd_ucode_size;
-	uint8_t				*asd_start_addr;
+	const struct firmware		*asd_fw;
+	struct psp_bin_desc		asd;
 
 	/* toc firmware */
 	const struct firmware		*toc_fw;
-	uint32_t			toc_fw_version;
-	uint32_t			toc_feature_version;
 
 	/* fence buffer */
 	struct amdgpu_bo		*fence_buf_bo;
@@ -348,36 +326,20 @@ struct psp_context
 	/* xgmi ta firmware and buffer */
 	const struct firmware		*ta_fw;
 	uint32_t			ta_fw_version;
-	uint32_t			ta_xgmi_ucode_version;
-	uint32_t			ta_xgmi_ucode_size;
-	uint8_t				*ta_xgmi_start_addr;
-	uint32_t			ta_ras_ucode_version;
-	uint32_t			ta_ras_ucode_size;
-	uint8_t				*ta_ras_start_addr;
-	uint32_t			ta_hdcp_ucode_version;
-	uint32_t			ta_hdcp_ucode_size;
-	uint8_t				*ta_hdcp_start_addr;
-	uint32_t			ta_dtm_ucode_version;
-	uint32_t			ta_dtm_ucode_size;
-	uint8_t				*ta_dtm_start_addr;
-	uint32_t			ta_rap_ucode_version;
-	uint32_t			ta_rap_ucode_size;
-	uint8_t				*ta_rap_start_addr;
-	uint32_t			ta_securedisplay_ucode_version;
-	uint32_t			ta_securedisplay_ucode_size;
-	uint8_t				*ta_securedisplay_start_addr;
+	struct psp_bin_desc		xgmi;
+	struct psp_bin_desc		ras;
+	struct psp_bin_desc		hdcp;
+	struct psp_bin_desc		dtm;
+	struct psp_bin_desc		rap;
+	struct psp_bin_desc		securedisplay;
 
 	struct psp_asd_context		asd_context;
 	struct psp_xgmi_context		xgmi_context;
-	struct psp_ras_context		ras;
-	struct psp_hdcp_context		hdcp_context;
-	struct psp_dtm_context		dtm_context;
-	struct psp_rap_context		rap_context;
-	struct psp_securedisplay_context	securedisplay_context;
+	struct psp_ras_context		ras_context;
+	struct ta_cp_context		hdcp_context;
+	struct ta_cp_context		dtm_context;
+	struct ta_cp_context		rap_context;
+	struct ta_cp_context		securedisplay_context;
 	struct mutex			mutex;
 	struct psp_memory_training_context mem_train_ctx;
@@ -402,6 +364,12 @@ struct amdgpu_psp_funcs {
 		((psp)->funcs->bootloader_load_spl ? (psp)->funcs->bootloader_load_spl((psp)) : 0)
 #define psp_bootloader_load_sysdrv(psp) \
 		((psp)->funcs->bootloader_load_sysdrv ? (psp)->funcs->bootloader_load_sysdrv((psp)) : 0)
+#define psp_bootloader_load_soc_drv(psp) \
+		((psp)->funcs->bootloader_load_soc_drv ? (psp)->funcs->bootloader_load_soc_drv((psp)) : 0)
+#define psp_bootloader_load_intf_drv(psp) \
+		((psp)->funcs->bootloader_load_intf_drv ? (psp)->funcs->bootloader_load_intf_drv((psp)) : 0)
+#define psp_bootloader_load_dbg_drv(psp) \
+		((psp)->funcs->bootloader_load_dbg_drv ? (psp)->funcs->bootloader_load_dbg_drv((psp)) : 0)
 #define psp_bootloader_load_sos(psp) \
 		((psp)->funcs->bootloader_load_sos ? (psp)->funcs->bootloader_load_sos((psp)) : 0)
 #define psp_smu_reload_quirk(psp) \
@@ -414,9 +382,9 @@ struct amdgpu_psp_funcs {
 #define psp_ring_get_wptr(psp) (psp)->funcs->ring_get_wptr((psp))
 #define psp_ring_set_wptr(psp, value) (psp)->funcs->ring_set_wptr((psp), (value))
 
-#define psp_load_usbc_pd_fw(psp, dma_addr) \
+#define psp_load_usbc_pd_fw(psp, fw_pri_mc_addr) \
 	((psp)->funcs->load_usbc_pd_fw ? \
-	(psp)->funcs->load_usbc_pd_fw((psp), (dma_addr)) : -EINVAL)
+	(psp)->funcs->load_usbc_pd_fw((psp), (fw_pri_mc_addr)) : -EINVAL)
 
 #define psp_read_usbc_pd_fw(psp, fw_ver) \
 	((psp)->funcs->read_usbc_pd_fw ? \
@@ -427,6 +395,7 @@ extern const struct amd_ip_funcs psp_ip_funcs;
 extern const struct amdgpu_ip_block_version psp_v3_1_ip_block;
 extern const struct amdgpu_ip_block_version psp_v10_0_ip_block;
 extern const struct amdgpu_ip_block_version psp_v11_0_ip_block;
+extern const struct amdgpu_ip_block_version psp_v11_0_8_ip_block;
 extern const struct amdgpu_ip_block_version psp_v12_0_ip_block;
 extern const struct amdgpu_ip_block_version psp_v13_0_ip_block;
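
Stepping back to the bootloader wrappers added above: each macro keeps the
existing "call if the hook exists, else succeed" guard, so ASICs without the
new stages are unaffected. A hypothetical bring-up sequence for an ASIC that
has them (the actual per-ASIC ordering lives in the psp version-specific code,
not shown in this excerpt):

	int ret;

	ret = psp_bootloader_load_kdb(psp);
	if (!ret)
		ret = psp_bootloader_load_sysdrv(psp);
	if (!ret)
		ret = psp_bootloader_load_soc_drv(psp);
	if (!ret)
		ret = psp_bootloader_load_intf_drv(psp);
	if (!ret)
		ret = psp_bootloader_load_dbg_drv(psp);
	if (!ret)
		ret = psp_bootloader_load_sos(psp);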
@@ -437,14 +406,15 @@ int psp_gpu_reset(struct amdgpu_device *adev);
 int psp_update_vcn_sram(struct amdgpu_device *adev, int inst_idx,
 			uint64_t cmd_gpu_addr, int cmd_size);
-int psp_xgmi_initialize(struct psp_context *psp);
+int psp_xgmi_initialize(struct psp_context *psp, bool set_extended_data, bool load_ta);
 int psp_xgmi_terminate(struct psp_context *psp);
 int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
 int psp_xgmi_get_hive_id(struct psp_context *psp, uint64_t *hive_id);
 int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id);
 int psp_xgmi_get_topology_info(struct psp_context *psp,
 			       int number_devices,
-			       struct psp_xgmi_topology_info *topology);
+			       struct psp_xgmi_topology_info *topology,
+			       bool get_extended_data);
 int psp_xgmi_set_topology_info(struct psp_context *psp,
 			       int number_devices,
 			       struct psp_xgmi_topology_info *topology);
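
To make the new flags concrete, here is a hypothetical caller of the reworked
XGMI entry points (the flag values and the two-pass query are illustrative
assumptions; the real call sites are in amdgpu_xgmi.c, not part of this
excerpt):

	static int query_xgmi_topology(struct psp_context *psp,
				       struct psp_xgmi_topology_info *top,
				       int number_devices)
	{
		int ret;

		/* load the TA and open a session; extended-data mode off */
		ret = psp_xgmi_initialize(psp, false, true);
		if (ret)
			return ret;

		/* base topology first ... */
		ret = psp_xgmi_get_topology_info(psp, number_devices, top, false);
		if (ret)
			return ret;

		/* ... optional second pass for extended data such as num_links */
		if (psp->xgmi_context.supports_extended_data)
			ret = psp_xgmi_get_topology_info(psp, number_devices,
							 top, true);
		return ret;
	}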
@@ -483,4 +453,5 @@ int psp_load_fw_list(struct psp_context *psp,
 		     struct amdgpu_firmware_info **ucode_list, int ucode_count);
 void psp_copy_fw(struct psp_context *psp, uint8_t *start_addr, uint32_t bin_size);
 
+int is_psp_fw_valid(struct psp_bin_desc bin);
 #endif
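
With every firmware binary behind the same psp_bin_desc, validity checks and
copies no longer need per-binary field names. A minimal sketch of what the
newly declared helper plausibly looks like (its actual body lives in
amdgpu_psp.c, which is not shown in this excerpt):

	int is_psp_fw_valid(struct psp_bin_desc bin)
	{
		/* assumed check: a binary is usable once parsed from the fw image */
		return bin.size_bytes && bin.start_addr;
	}

	/* e.g. staging the SOS binary via the declared psp_copy_fw(): */
	if (is_psp_fw_valid(psp->sos))
		psp_copy_fw(psp, psp->sos.start_addr, psp->sos.size_bytes);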

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rap.c

@@ -76,7 +76,7 @@ static ssize_t amdgpu_rap_debugfs_write(struct file *f, const char __user *buf,
 			dev_info(adev->dev, "RAP L0 validate test success.\n");
 		} else {
 			rap_shared_mem = (struct ta_rap_shared_memory *)
-					adev->psp.rap_context.rap_shared_buf;
+					adev->psp.rap_context.context.mem_context.shared_buf;
 			rap_cmd_output = &(rap_shared_mem->rap_out_message.output);
 
 			dev_info(adev->dev, "RAP test failed, the output is:\n");
@@ -119,7 +119,7 @@ void amdgpu_rap_debugfs_init(struct amdgpu_device *adev)
 #if defined(CONFIG_DEBUG_FS)
 	struct drm_minor *minor = adev_to_drm(adev)->primary;
 
-	if (!adev->psp.rap_context.rap_initialized)
+	if (!adev->psp.rap_context.context.initialized)
 		return;
 
 	debugfs_create_file("rap_test", S_IWUSR, minor->debugfs_root,
[some files not shown because too many files changed in this diff]