diff --git a/Documentation/ABI/testing/sysfs-kernel-dmabuf-buffers b/Documentation/ABI/testing/sysfs-kernel-dmabuf-buffers
new file mode 100644
index 000000000000..a243984ed420
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-kernel-dmabuf-buffers
@@ -0,0 +1,52 @@
+What:		/sys/kernel/dmabuf/buffers
+Date:		May 2021
+KernelVersion:	v5.13
+Contact:	Hridya Valsaraju
+Description:	The /sys/kernel/dmabuf/buffers directory contains a
+		snapshot of the internal state of every DMA-BUF.
+		/sys/kernel/dmabuf/buffers/<inode_number> will contain the
+		statistics for the DMA-BUF with the unique inode number
+		<inode_number>
+Users:		kernel memory tuning/debugging tools
+
+What:		/sys/kernel/dmabuf/buffers/<inode_number>/exporter_name
+Date:		May 2021
+KernelVersion:	v5.13
+Contact:	Hridya Valsaraju
+Description:	This file is read-only and contains the name of the exporter of
+		the DMA-BUF.
+
+What:		/sys/kernel/dmabuf/buffers/<inode_number>/size
+Date:		May 2021
+KernelVersion:	v5.13
+Contact:	Hridya Valsaraju
+Description:	This file is read-only and specifies the size of the DMA-BUF in
+		bytes.
+
+What:		/sys/kernel/dmabuf/buffers/<inode_number>/attachments
+Date:		May 2021
+KernelVersion:	v5.13
+Contact:	Hridya Valsaraju
+Description:	This directory will contain subdirectories representing every
+		attachment of the DMA-BUF.
+
+What:		/sys/kernel/dmabuf/buffers/<inode_number>/attachments/<attach_uid>
+Date:		May 2021
+KernelVersion:	v5.13
+Contact:	Hridya Valsaraju
+Description:	This directory will contain information on the attached device
+		and the number of current distinct device mappings.
+
+What:		/sys/kernel/dmabuf/buffers/<inode_number>/attachments/<attach_uid>/device
+Date:		May 2021
+KernelVersion:	v5.13
+Contact:	Hridya Valsaraju
+Description:	This file is read-only and is a symlink to the attached device's
+		sysfs entry.
+
+What:		/sys/kernel/dmabuf/buffers/<inode_number>/attachments/<attach_uid>/map_counter
+Date:		May 2021
+KernelVersion:	v5.13
+Contact:	Hridya Valsaraju
+Description:	This file is read-only and contains a map_counter indicating the
+		number of distinct device mappings of the attachment.
diff --git a/Documentation/devicetree/bindings/display/bridge/ti,sn65dsi86.yaml b/Documentation/devicetree/bindings/display/bridge/ti,sn65dsi86.yaml
index 26932d2e86ab..1c2daf7c24cc 100644
--- a/Documentation/devicetree/bindings/display/bridge/ti,sn65dsi86.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/ti,sn65dsi86.yaml
@@ -70,6 +70,9 @@ properties:
     const: 1
     description: See ../../pwm/pwm.yaml for description of the cell formats.
+ aux-bus: + $ref: /schemas/display/dp-aux-bus.yaml# + ports: $ref: /schemas/graph.yaml#/properties/ports @@ -150,7 +153,6 @@ properties: required: - compatible - reg - - enable-gpios - vccio-supply - vpll-supply - vcca-supply @@ -201,11 +203,26 @@ examples: port@1 { reg = <1>; - endpoint { + sn65dsi86_out: endpoint { remote-endpoint = <&panel_in_edp>; }; }; }; + + aux-bus { + panel { + compatible = "boe,nv133fhm-n62"; + power-supply = <&pp3300_dx_edp>; + backlight = <&backlight>; + hpd-gpios = <&sn65dsi86_bridge 2 GPIO_ACTIVE_HIGH>; + + port { + panel_in_edp: endpoint { + remote-endpoint = <&sn65dsi86_out>; + }; + }; + }; + }; }; }; - | diff --git a/Documentation/devicetree/bindings/display/dp-aux-bus.yaml b/Documentation/devicetree/bindings/display/dp-aux-bus.yaml new file mode 100644 index 000000000000..5e4afe9f98fb --- /dev/null +++ b/Documentation/devicetree/bindings/display/dp-aux-bus.yaml @@ -0,0 +1,37 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/dp-aux-bus.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: DisplayPort AUX bus + +maintainers: + - Douglas Anderson + +description: + DisplayPort controllers provide a control channel to the sinks that + are hooked up to them. This is the DP AUX bus. Over the DP AUX bus + we can query properties about a sink and also configure it. In + particular, DP sinks support DDC over DP AUX which allows tunneling + a standard I2C DDC connection over the AUX channel. + + To model this relationship, DP sinks should be placed as children + of the DP controller under the "aux-bus" node. + + At the moment, this binding only handles the eDP case. It is + possible it will be extended in the future to handle the DP case. + For DP, presumably a connector would be listed under the DP AUX + bus instead of a panel. 
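On the driver side (not covered by this binding document), the DP controller or bridge driver that owns the AUX channel is expected to create devices for the nodes placed under "aux-bus" once the channel can service transfers. A minimal sketch, assuming the drm_dp_aux_bus helpers introduced alongside this binding; the header location, struct example_bridge, example_aux_transfer() and example_populate_aux_bus() are illustrative names, not taken from this patch::

	#include <drm/drm_dp_helper.h>
	#include <drm/drm_dp_aux_bus.h>	/* assumed header name for the aux-bus helpers */

	struct example_bridge {			/* hypothetical driver state */
		struct device *dev;
		struct drm_dp_aux aux;
	};

	static ssize_t example_aux_transfer(struct drm_dp_aux *aux,
					    struct drm_dp_aux_msg *msg)
	{
		return -EBUSY;			/* real register access would go here */
	}

	static int example_populate_aux_bus(struct example_bridge *bridge)
	{
		bridge->aux.name = "example-edp-aux";
		bridge->aux.dev = bridge->dev;
		bridge->aux.transfer = example_aux_transfer;
		drm_dp_aux_init(&bridge->aux);

		/*
		 * Create devices for the panel node under "aux-bus"; the panel
		 * then probes as a DP AUX endpoint driver and can issue DPCD
		 * and DDC transfers through bridge->aux.
		 */
		return devm_of_dp_aux_populate_ep_devices(&bridge->aux);
	}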
+ +properties: + $nodename: + const: "aux-bus" + + panel: + $ref: panel/panel-common.yaml# + +additionalProperties: false + +required: + - panel diff --git a/Documentation/devicetree/bindings/display/panel/innolux,ej030na.yaml b/Documentation/devicetree/bindings/display/panel/innolux,ej030na.yaml new file mode 100644 index 000000000000..cda36c04e85c --- /dev/null +++ b/Documentation/devicetree/bindings/display/panel/innolux,ej030na.yaml @@ -0,0 +1,62 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/panel/innolux,ej030na.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Innolux EJ030NA 3.0" (320x480 pixels) 24-bit TFT LCD panel + +description: | + The panel must obey the rules for a SPI slave device as specified in + spi/spi-controller.yaml + +maintainers: + - Paul Cercueil + +allOf: + - $ref: panel-common.yaml# + +properties: + compatible: + const: innolux,ej030na + + backlight: true + port: true + power-supply: true + reg: true + reset-gpios: true + +required: + - compatible + - reg + - power-supply + - reset-gpios + +unevaluatedProperties: false + +examples: + - | + #include + + spi { + #address-cells = <1>; + #size-cells = <0>; + + panel@0 { + compatible = "innolux,ej030na"; + reg = <0>; + + spi-max-frequency = <10000000>; + + reset-gpios = <&gpe 4 GPIO_ACTIVE_LOW>; + power-supply = <&lcd_power>; + + backlight = <&backlight>; + + port { + panel_input: endpoint { + remote-endpoint = <&panel_output>; + }; + }; + }; + }; diff --git a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml index b3797ba2698b..3624363938dd 100644 --- a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml +++ b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml @@ -110,6 +110,9 @@ properties: # Emerging Display Technology Corp. 5.7" VGA TFT LCD panel - edt,et057090dhu - edt,et070080dh6 + # Emerging Display Technology Corp. 3.5" WVGA TFT LCD panel with + # capacitive multitouch + - edt,etm0350g0dh6 # Emerging Display Technology Corp. 480x272 TFT Display with capacitive touch - edt,etm043080dh6gp # Emerging Display Technology Corp. 480x272 TFT Display @@ -128,6 +131,9 @@ properties: # Emerging Display Technology Corp. WVGA TFT Display with capacitive touch - edt,etm0700g0dh6 - edt,etm0700g0edh6 + # Emerging Display Technology Corp. 5.7" VGA TFT LCD panel with + # capacitive touch + - edt,etmv570g2dhu # Evervision Electronics Co. Ltd. VGG804821 5.0" WVGA TFT LCD Panel - evervision,vgg804821 # Foxlink Group 5" WVGA TFT LCD panel @@ -242,6 +248,8 @@ properties: - rocktech,rk101ii01d-ct # Rocktech Display Ltd. 
RK070ER9427 800(RGB)x480 TFT LCD panel - rocktech,rk070er9427 + # Samsung 13.3" FHD (1920x1080 pixels) eDP AMOLED panel + - samsung,atna33xc20 # Samsung 12.2" (2560x1600 pixels) TFT LCD panel - samsung,lsn122dl01-c01 # Samsung Electronics 10.1" WSVGA TFT LCD panel @@ -298,6 +306,8 @@ properties: enable-gpios: true port: true power-supply: true + no-hpd: true + hpd-gpios: true additionalProperties: false diff --git a/Documentation/devicetree/bindings/display/panel/samsung,lms397kf04.yaml b/Documentation/devicetree/bindings/display/panel/samsung,lms397kf04.yaml index 4cb75a5f2e3a..cd62968426fb 100644 --- a/Documentation/devicetree/bindings/display/panel/samsung,lms397kf04.yaml +++ b/Documentation/devicetree/bindings/display/panel/samsung,lms397kf04.yaml @@ -33,8 +33,11 @@ properties: backlight: true + spi-cpha: true + + spi-cpol: true + spi-max-frequency: - $ref: /schemas/types.yaml#/definitions/uint32 description: inherited as a SPI client node, the datasheet specifies maximum 300 ns minimum cycle which gives around 3 MHz max frequency maximum: 3000000 @@ -44,6 +47,9 @@ properties: required: - compatible - reg + - spi-cpha + - spi-cpol + - port additionalProperties: false @@ -52,15 +58,23 @@ examples: #include spi { + compatible = "spi-gpio"; + sck-gpios = <&gpio 0 GPIO_ACTIVE_HIGH>; + miso-gpios = <&gpio 1 GPIO_ACTIVE_HIGH>; + mosi-gpios = <&gpio 2 GPIO_ACTIVE_HIGH>; + cs-gpios = <&gpio 3 GPIO_ACTIVE_HIGH>; + num-chipselects = <1>; #address-cells = <1>; #size-cells = <0>; panel@0 { compatible = "samsung,lms397kf04"; spi-max-frequency = <3000000>; + spi-cpha; + spi-cpol; reg = <0>; vci-supply = <&lcd_3v0_reg>; vccio-supply = <&lcd_1v8_reg>; - reset-gpios = <&gpio 1 GPIO_ACTIVE_LOW>; + reset-gpios = <&gpio 4 GPIO_ACTIVE_LOW>; backlight = <&ktd259>; port { diff --git a/Documentation/driver-api/dma-buf.rst b/Documentation/driver-api/dma-buf.rst index 7f21425d9435..f5ac4c90b237 100644 --- a/Documentation/driver-api/dma-buf.rst +++ b/Documentation/driver-api/dma-buf.rst @@ -88,6 +88,9 @@ consider though: - The DMA buffer FD is also pollable, see `Implicit Fence Poll Support`_ below for details. +- The DMA buffer FD also supports a few dma-buf-specific ioctls, see + `DMA Buffer ioctls`_ below for details. + Basic Operation and Device DMA Access ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -106,6 +109,16 @@ Implicit Fence Poll Support .. kernel-doc:: drivers/dma-buf/dma-buf.c :doc: implicit fence polling +DMA-BUF statistics +~~~~~~~~~~~~~~~~~~ +.. kernel-doc:: drivers/dma-buf/dma-buf-sysfs-stats.c + :doc: overview + +DMA Buffer ioctls +~~~~~~~~~~~~~~~~~ + +.. kernel-doc:: include/uapi/linux/dma-buf.h + Kernel Functions and Structures Reference ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/Documentation/gpu/drm-uapi.rst b/Documentation/gpu/drm-uapi.rst index 04bdc7a91d53..7e51dd40bf6e 100644 --- a/Documentation/gpu/drm-uapi.rst +++ b/Documentation/gpu/drm-uapi.rst @@ -457,6 +457,19 @@ Userspace API Structures .. kernel-doc:: include/uapi/drm/drm_mode.h :doc: overview +.. _crtc_index: + +CRTC index +---------- + +CRTC's have both an object ID and an index, and they are not the same thing. +The index is used in cases where a densely packed identifier for a CRTC is +needed, for instance a bitmask of CRTC's. The member possible_crtcs of struct +drm_mode_get_plane is an example. + +DRM_IOCTL_MODE_GETRESOURCES populates a structure with an array of CRTC ID's, +and the CRTC index is its position in this array. + .. 
kernel-doc:: include/uapi/drm/drm.h :internal: diff --git a/Documentation/gpu/vkms.rst b/Documentation/gpu/vkms.rst index 2c9b376da5ca..941f0e7e5eef 100644 --- a/Documentation/gpu/vkms.rst +++ b/Documentation/gpu/vkms.rst @@ -98,9 +98,17 @@ with VKMS maintainers. IGT better support ------------------ -- Investigate: (1) test cases on kms_plane that are failing due to timeout on - capturing CRC; (2) when running kms_flip test cases in sequence, some - successful individual test cases are failing randomly. +Debugging: + +- kms_plane: some test cases are failing due to timeout on capturing CRC; + +- kms_flip: when running test cases in sequence, some successful individual + test cases are failing randomly; when individually, some successful test + cases display in the log the following error:: + + [drm:vkms_prepare_fb [vkms]] ERROR vmap failed: -4 + +Virtual hardware (vblank-less) mode: - VKMS already has support for vblanks simulated via hrtimers, which can be tested with kms_flip test; in some way, we can say that VKMS already mimics @@ -116,7 +124,17 @@ Add Plane Features There's lots of plane features we could add support for: -- Real overlay planes, not just cursor. +- Multiple overlay planes. [Good to get started] + +- Clearing primary plane: clear primary plane before plane composition (at the + start) for correctness of pixel blend ops. It also guarantees alpha channel + is cleared in the target buffer for stable crc. [Good to get started] + +- ARGB format on primary plane: blend the primary plane into background with + translucent alpha. + +- Support when the primary plane isn't exactly matching the output size: blend + the primary plane into the black background. - Full alpha blending on all planes. @@ -129,13 +147,8 @@ There's lots of plane features we could add support for: cursor api). For all of these, we also want to review the igt test coverage and make sure -all relevant igt testcases work on vkms. - -Prime Buffer Sharing --------------------- - -- Syzbot report - WARNING in vkms_gem_free_object: - https://syzkaller.appspot.com/bug?extid=e7ad70d406e74d8fc9d0 +all relevant igt testcases work on vkms. They are good options for internship +project. Runtime Configuration --------------------- @@ -153,7 +166,7 @@ module. Use/Test-cases: the refresh rate. The currently proposed solution is to expose vkms configuration through -configfs. All existing module options should be supported through configfs +configfs. All existing module options should be supported through configfs too. Writeback support @@ -162,6 +175,7 @@ Writeback support - The writeback and CRC capture operations share the use of composer_enabled boolean to ensure vblanks. Probably, when these operations work together, composer_enabled needs to refcounting the composer state to proper work. + [Good to get started] - Add support for cloned writeback outputs and related test cases using a cloned output in the IGT kms_writeback. 
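Tying back to the CRTC index section added to Documentation/gpu/drm-uapi.rst above, a userspace sketch (not part of this patch, using the standard libdrm API) that decodes a plane's possible_crtcs bitmask into CRTC indices and the matching object IDs::

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <xf86drm.h>
	#include <xf86drmMode.h>

	int main(void)
	{
		int fd = open("/dev/dri/card0", O_RDWR);	/* device path is illustrative */
		drmModeRes *res;
		drmModePlaneRes *planes;
		uint32_t p;
		int i;

		if (fd < 0)
			return 1;
		/* Without this client cap only overlay planes are listed. */
		drmSetClientCap(fd, DRM_CLIENT_CAP_UNIVERSAL_PLANES, 1);

		res = drmModeGetResources(fd);
		planes = drmModeGetPlaneResources(fd);
		if (!res || !planes)
			return 1;

		for (p = 0; p < planes->count_planes; p++) {
			drmModePlane *plane = drmModeGetPlane(fd, planes->planes[p]);

			if (!plane)
				continue;
			for (i = 0; i < res->count_crtcs; i++) {
				/* Bit i of possible_crtcs refers to res->crtcs[i]. */
				if (plane->possible_crtcs & (1u << i))
					printf("plane %u can use CRTC index %d (object ID %u)\n",
					       plane->plane_id, i, res->crtcs[i]);
			}
			drmModeFreePlane(plane);
		}
		drmModeFreePlaneResources(planes);
		drmModeFreeResources(res);
		return 0;
	}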
diff --git a/MAINTAINERS b/MAINTAINERS index 6c8be735cc91..830816c5fea5 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -5770,7 +5770,7 @@ M: Gerd Hoffmann L: virtualization@lists.linux-foundation.org S: Maintained T: git git://anongit.freedesktop.org/drm/drm-misc -F: drivers/gpu/drm/bochs/ +F: drivers/gpu/drm/tiny/bochs.c DRM DRIVER FOR BOE HIMAX8279D PANELS M: Jerry Han @@ -5955,6 +5955,13 @@ S: Maintained F: Documentation/devicetree/bindings/display/panel/raydium,rm67191.yaml F: drivers/gpu/drm/panel/panel-raydium-rm67191.c +DRM DRIVER FOR SAMSUNG DB7430 PANELS +M: Linus Walleij +S: Maintained +T: git git://anongit.freedesktop.org/drm/drm-misc +F: Documentation/devicetree/bindings/display/panel/samsung,lms397kf04.yaml +F: drivers/gpu/drm/panel/panel-samsung-db7430.c + DRM DRIVER FOR SITRONIX ST7703 PANELS M: Guido Günther R: Purism Kernel Team @@ -6053,11 +6060,10 @@ F: drivers/gpu/drm/vboxvideo/ DRM DRIVER FOR VMWARE VIRTUAL GPU M: "VMware Graphics" -M: Roland Scheidegger M: Zack Rusin L: dri-devel@lists.freedesktop.org S: Supported -T: git git://people.freedesktop.org/~sroland/linux +T: git git://anongit.freedesktop.org/drm/drm-misc F: drivers/gpu/drm/vmwgfx/ F: include/uapi/drm/vmwgfx_drm.h diff --git a/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi b/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi index 77ae7561d436..781625d9344c 100644 --- a/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi +++ b/arch/arm64/boot/dts/qcom/sc7180-trogdor.dtsi @@ -255,21 +255,6 @@ #sound-dai-cells = <0>; }; - panel: panel { - /* Compatible will be filled in per-board */ - power-supply = <&pp3300_dx_edp>; - backlight = <&backlight>; - hpd-gpios = <&sn65dsi86_bridge 2 GPIO_ACTIVE_HIGH>; - - ports { - port { - panel_in_edp: endpoint { - remote-endpoint = <&sn65dsi86_out>; - }; - }; - }; - }; - pwmleds { compatible = "pwm-leds"; keyboard_backlight: keyboard-backlight { @@ -666,6 +651,21 @@ edp_brij_i2c: &i2c2 { }; }; }; + + aux-bus { + panel: panel { + /* Compatible will be filled in per-board */ + power-supply = <&pp3300_dx_edp>; + backlight = <&backlight>; + hpd-gpios = <&sn65dsi86_bridge 2 GPIO_ACTIVE_HIGH>; + + port { + panel_in_edp: endpoint { + remote-endpoint = <&sn65dsi86_out>; + }; + }; + }; + }; }; }; diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig index 4e16c71c24b7..9561e3d2d428 100644 --- a/drivers/dma-buf/Kconfig +++ b/drivers/dma-buf/Kconfig @@ -72,6 +72,17 @@ menuconfig DMABUF_HEAPS allows userspace to allocate dma-bufs that can be shared between drivers. +menuconfig DMABUF_SYSFS_STATS + bool "DMA-BUF sysfs statistics" + select DMA_SHARED_BUFFER + help + Choose this option to enable DMA-BUF sysfs statistics + in location /sys/kernel/dmabuf/buffers. + + /sys/kernel/dmabuf/buffers/ will contain + statistics for the DMA-BUF with the unique inode number + . 
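As a quick illustration of how the per-buffer statistics enabled by this option can be consumed, a small userspace sketch (not part of this patch; standard C library only) that walks /sys/kernel/dmabuf/buffers, prints each buffer's exporter and size, and totals the sizes::

	#include <dirent.h>
	#include <stdio.h>

	int main(void)
	{
		const char *base = "/sys/kernel/dmabuf/buffers";
		unsigned long long size, total = 0;
		struct dirent *de;
		DIR *dir = opendir(base);

		if (!dir)	/* likely CONFIG_DMABUF_SYSFS_STATS is disabled */
			return 1;

		while ((de = readdir(dir)) != NULL) {
			char path[512], exporter[64] = "?";
			FILE *f;

			if (de->d_name[0] == '.')
				continue;

			snprintf(path, sizeof(path), "%s/%s/size", base, de->d_name);
			f = fopen(path, "r");
			if (!f)
				continue;
			size = 0;
			if (fscanf(f, "%llu", &size) == 1)
				total += size;
			fclose(f);

			snprintf(path, sizeof(path), "%s/%s/exporter_name", base, de->d_name);
			f = fopen(path, "r");
			if (f) {
				if (fscanf(f, "%63s", exporter) != 1)
					exporter[0] = '\0';
				fclose(f);
			}
			printf("inode %s: %llu bytes, exporter %s\n",
			       de->d_name, size, exporter);
		}
		closedir(dir);
		printf("total exported: %llu bytes\n", total);
		return 0;
	}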
+ source "drivers/dma-buf/heaps/Kconfig" endmenu diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile index 995e05f609ff..40d81f23cacf 100644 --- a/drivers/dma-buf/Makefile +++ b/drivers/dma-buf/Makefile @@ -6,6 +6,7 @@ obj-$(CONFIG_DMABUF_HEAPS) += heaps/ obj-$(CONFIG_SYNC_FILE) += sync_file.o obj-$(CONFIG_SW_SYNC) += sw_sync.o sync_debug.o obj-$(CONFIG_UDMABUF) += udmabuf.o +obj-$(CONFIG_DMABUF_SYSFS_STATS) += dma-buf-sysfs-stats.o dmabuf_selftests-y := \ selftest.o \ diff --git a/drivers/dma-buf/dma-buf-sysfs-stats.c b/drivers/dma-buf/dma-buf-sysfs-stats.c new file mode 100644 index 000000000000..a2638e84199c --- /dev/null +++ b/drivers/dma-buf/dma-buf-sysfs-stats.c @@ -0,0 +1,337 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * DMA-BUF sysfs statistics. + * + * Copyright (C) 2021 Google LLC. + */ + +#include +#include +#include +#include +#include +#include + +#include "dma-buf-sysfs-stats.h" + +#define to_dma_buf_entry_from_kobj(x) container_of(x, struct dma_buf_sysfs_entry, kobj) + +/** + * DOC: overview + * + * ``/sys/kernel/debug/dma_buf/bufinfo`` provides an overview of every DMA-BUF + * in the system. However, since debugfs is not safe to be mounted in + * production, procfs and sysfs can be used to gather DMA-BUF statistics on + * production systems. + * + * The ``/proc//fdinfo/`` files in procfs can be used to gather + * information about DMA-BUF fds. Detailed documentation about the interface + * is present in Documentation/filesystems/proc.rst. + * + * Unfortunately, the existing procfs interfaces can only provide information + * about the DMA-BUFs for which processes hold fds or have the buffers mmapped + * into their address space. This necessitated the creation of the DMA-BUF sysfs + * statistics interface to provide per-buffer information on production systems. + * + * The interface at ``/sys/kernel/dma-buf/buffers`` exposes information about + * every DMA-BUF when ``CONFIG_DMABUF_SYSFS_STATS`` is enabled. + * + * The following stats are exposed by the interface: + * + * * ``/sys/kernel/dmabuf/buffers//exporter_name`` + * * ``/sys/kernel/dmabuf/buffers//size`` + * * ``/sys/kernel/dmabuf/buffers//attachments//device`` + * * ``/sys/kernel/dmabuf/buffers//attachments//map_counter`` + * + * The information in the interface can also be used to derive per-exporter and + * per-device usage statistics. The data from the interface can be gathered + * on error conditions or other important events to provide a snapshot of + * DMA-BUF usage. It can also be collected periodically by telemetry to monitor + * various metrics. + * + * Detailed documentation about the interface is present in + * Documentation/ABI/testing/sysfs-kernel-dmabuf-buffers. 
+ */ + +struct dma_buf_stats_attribute { + struct attribute attr; + ssize_t (*show)(struct dma_buf *dmabuf, + struct dma_buf_stats_attribute *attr, char *buf); +}; +#define to_dma_buf_stats_attr(x) container_of(x, struct dma_buf_stats_attribute, attr) + +static ssize_t dma_buf_stats_attribute_show(struct kobject *kobj, + struct attribute *attr, + char *buf) +{ + struct dma_buf_stats_attribute *attribute; + struct dma_buf_sysfs_entry *sysfs_entry; + struct dma_buf *dmabuf; + + attribute = to_dma_buf_stats_attr(attr); + sysfs_entry = to_dma_buf_entry_from_kobj(kobj); + dmabuf = sysfs_entry->dmabuf; + + if (!dmabuf || !attribute->show) + return -EIO; + + return attribute->show(dmabuf, attribute, buf); +} + +static const struct sysfs_ops dma_buf_stats_sysfs_ops = { + .show = dma_buf_stats_attribute_show, +}; + +static ssize_t exporter_name_show(struct dma_buf *dmabuf, + struct dma_buf_stats_attribute *attr, + char *buf) +{ + return sysfs_emit(buf, "%s\n", dmabuf->exp_name); +} + +static ssize_t size_show(struct dma_buf *dmabuf, + struct dma_buf_stats_attribute *attr, + char *buf) +{ + return sysfs_emit(buf, "%zu\n", dmabuf->size); +} + +static struct dma_buf_stats_attribute exporter_name_attribute = + __ATTR_RO(exporter_name); +static struct dma_buf_stats_attribute size_attribute = __ATTR_RO(size); + +static struct attribute *dma_buf_stats_default_attrs[] = { + &exporter_name_attribute.attr, + &size_attribute.attr, + NULL, +}; +ATTRIBUTE_GROUPS(dma_buf_stats_default); + +static void dma_buf_sysfs_release(struct kobject *kobj) +{ + struct dma_buf_sysfs_entry *sysfs_entry; + + sysfs_entry = to_dma_buf_entry_from_kobj(kobj); + kfree(sysfs_entry); +} + +static struct kobj_type dma_buf_ktype = { + .sysfs_ops = &dma_buf_stats_sysfs_ops, + .release = dma_buf_sysfs_release, + .default_groups = dma_buf_stats_default_groups, +}; + +#define to_dma_buf_attach_entry_from_kobj(x) container_of(x, struct dma_buf_attach_sysfs_entry, kobj) + +struct dma_buf_attach_stats_attribute { + struct attribute attr; + ssize_t (*show)(struct dma_buf_attach_sysfs_entry *sysfs_entry, + struct dma_buf_attach_stats_attribute *attr, char *buf); +}; +#define to_dma_buf_attach_stats_attr(x) container_of(x, struct dma_buf_attach_stats_attribute, attr) + +static ssize_t dma_buf_attach_stats_attribute_show(struct kobject *kobj, + struct attribute *attr, + char *buf) +{ + struct dma_buf_attach_stats_attribute *attribute; + struct dma_buf_attach_sysfs_entry *sysfs_entry; + + attribute = to_dma_buf_attach_stats_attr(attr); + sysfs_entry = to_dma_buf_attach_entry_from_kobj(kobj); + + if (!attribute->show) + return -EIO; + + return attribute->show(sysfs_entry, attribute, buf); +} + +static const struct sysfs_ops dma_buf_attach_stats_sysfs_ops = { + .show = dma_buf_attach_stats_attribute_show, +}; + +static ssize_t map_counter_show(struct dma_buf_attach_sysfs_entry *sysfs_entry, + struct dma_buf_attach_stats_attribute *attr, + char *buf) +{ + return sysfs_emit(buf, "%u\n", sysfs_entry->map_counter); +} + +static struct dma_buf_attach_stats_attribute map_counter_attribute = + __ATTR_RO(map_counter); + +static struct attribute *dma_buf_attach_stats_default_attrs[] = { + &map_counter_attribute.attr, + NULL, +}; +ATTRIBUTE_GROUPS(dma_buf_attach_stats_default); + +static void dma_buf_attach_sysfs_release(struct kobject *kobj) +{ + struct dma_buf_attach_sysfs_entry *sysfs_entry; + + sysfs_entry = to_dma_buf_attach_entry_from_kobj(kobj); + kfree(sysfs_entry); +} + +static struct kobj_type dma_buf_attach_ktype = { + .sysfs_ops = 
&dma_buf_attach_stats_sysfs_ops, + .release = dma_buf_attach_sysfs_release, + .default_groups = dma_buf_attach_stats_default_groups, +}; + +void dma_buf_attach_stats_teardown(struct dma_buf_attachment *attach) +{ + struct dma_buf_attach_sysfs_entry *sysfs_entry; + + sysfs_entry = attach->sysfs_entry; + if (!sysfs_entry) + return; + + sysfs_delete_link(&sysfs_entry->kobj, &attach->dev->kobj, "device"); + + kobject_del(&sysfs_entry->kobj); + kobject_put(&sysfs_entry->kobj); +} + +int dma_buf_attach_stats_setup(struct dma_buf_attachment *attach, + unsigned int uid) +{ + struct dma_buf_attach_sysfs_entry *sysfs_entry; + int ret; + struct dma_buf *dmabuf; + + if (!attach) + return -EINVAL; + + dmabuf = attach->dmabuf; + + sysfs_entry = kzalloc(sizeof(struct dma_buf_attach_sysfs_entry), + GFP_KERNEL); + if (!sysfs_entry) + return -ENOMEM; + + sysfs_entry->kobj.kset = dmabuf->sysfs_entry->attach_stats_kset; + + attach->sysfs_entry = sysfs_entry; + + ret = kobject_init_and_add(&sysfs_entry->kobj, &dma_buf_attach_ktype, + NULL, "%u", uid); + if (ret) + goto kobj_err; + + ret = sysfs_create_link(&sysfs_entry->kobj, &attach->dev->kobj, + "device"); + if (ret) + goto link_err; + + return 0; + +link_err: + kobject_del(&sysfs_entry->kobj); +kobj_err: + kobject_put(&sysfs_entry->kobj); + attach->sysfs_entry = NULL; + + return ret; +} +void dma_buf_stats_teardown(struct dma_buf *dmabuf) +{ + struct dma_buf_sysfs_entry *sysfs_entry; + + sysfs_entry = dmabuf->sysfs_entry; + if (!sysfs_entry) + return; + + kset_unregister(sysfs_entry->attach_stats_kset); + kobject_del(&sysfs_entry->kobj); + kobject_put(&sysfs_entry->kobj); +} + + +/* Statistics files do not need to send uevents. */ +static int dmabuf_sysfs_uevent_filter(struct kset *kset, struct kobject *kobj) +{ + return 0; +} + +static const struct kset_uevent_ops dmabuf_sysfs_no_uevent_ops = { + .filter = dmabuf_sysfs_uevent_filter, +}; + +static struct kset *dma_buf_stats_kset; +static struct kset *dma_buf_per_buffer_stats_kset; +int dma_buf_init_sysfs_statistics(void) +{ + dma_buf_stats_kset = kset_create_and_add("dmabuf", + &dmabuf_sysfs_no_uevent_ops, + kernel_kobj); + if (!dma_buf_stats_kset) + return -ENOMEM; + + dma_buf_per_buffer_stats_kset = kset_create_and_add("buffers", + &dmabuf_sysfs_no_uevent_ops, + &dma_buf_stats_kset->kobj); + if (!dma_buf_per_buffer_stats_kset) { + kset_unregister(dma_buf_stats_kset); + return -ENOMEM; + } + + return 0; +} + +void dma_buf_uninit_sysfs_statistics(void) +{ + kset_unregister(dma_buf_per_buffer_stats_kset); + kset_unregister(dma_buf_stats_kset); +} + +int dma_buf_stats_setup(struct dma_buf *dmabuf) +{ + struct dma_buf_sysfs_entry *sysfs_entry; + int ret; + struct kset *attach_stats_kset; + + if (!dmabuf || !dmabuf->file) + return -EINVAL; + + if (!dmabuf->exp_name) { + pr_err("exporter name must not be empty if stats needed\n"); + return -EINVAL; + } + + sysfs_entry = kzalloc(sizeof(struct dma_buf_sysfs_entry), GFP_KERNEL); + if (!sysfs_entry) + return -ENOMEM; + + sysfs_entry->kobj.kset = dma_buf_per_buffer_stats_kset; + sysfs_entry->dmabuf = dmabuf; + + dmabuf->sysfs_entry = sysfs_entry; + + /* create the directory for buffer stats */ + ret = kobject_init_and_add(&sysfs_entry->kobj, &dma_buf_ktype, NULL, + "%lu", file_inode(dmabuf->file)->i_ino); + if (ret) + goto err_sysfs_dmabuf; + + /* create the directory for attachment stats */ + attach_stats_kset = kset_create_and_add("attachments", + &dmabuf_sysfs_no_uevent_ops, + &sysfs_entry->kobj); + if (!attach_stats_kset) { + ret = -ENOMEM; + goto 
err_sysfs_attach; + } + + sysfs_entry->attach_stats_kset = attach_stats_kset; + + return 0; + +err_sysfs_attach: + kobject_del(&sysfs_entry->kobj); +err_sysfs_dmabuf: + kobject_put(&sysfs_entry->kobj); + dmabuf->sysfs_entry = NULL; + return ret; +} diff --git a/drivers/dma-buf/dma-buf-sysfs-stats.h b/drivers/dma-buf/dma-buf-sysfs-stats.h new file mode 100644 index 000000000000..5f4703249117 --- /dev/null +++ b/drivers/dma-buf/dma-buf-sysfs-stats.h @@ -0,0 +1,62 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * DMA-BUF sysfs statistics. + * + * Copyright (C) 2021 Google LLC. + */ + +#ifndef _DMA_BUF_SYSFS_STATS_H +#define _DMA_BUF_SYSFS_STATS_H + +#ifdef CONFIG_DMABUF_SYSFS_STATS + +int dma_buf_init_sysfs_statistics(void); +void dma_buf_uninit_sysfs_statistics(void); + +int dma_buf_stats_setup(struct dma_buf *dmabuf); +int dma_buf_attach_stats_setup(struct dma_buf_attachment *attach, + unsigned int uid); +static inline void dma_buf_update_attachment_map_count(struct dma_buf_attachment *attach, + int delta) +{ + struct dma_buf_attach_sysfs_entry *entry = attach->sysfs_entry; + + entry->map_counter += delta; +} +void dma_buf_stats_teardown(struct dma_buf *dmabuf); +void dma_buf_attach_stats_teardown(struct dma_buf_attachment *attach); +static inline unsigned int dma_buf_update_attach_uid(struct dma_buf *dmabuf) +{ + struct dma_buf_sysfs_entry *entry = dmabuf->sysfs_entry; + + return entry->attachment_uid++; +} +#else + +static inline int dma_buf_init_sysfs_statistics(void) +{ + return 0; +} + +static inline void dma_buf_uninit_sysfs_statistics(void) {} + +static inline int dma_buf_stats_setup(struct dma_buf *dmabuf) +{ + return 0; +} +static inline int dma_buf_attach_stats_setup(struct dma_buf_attachment *attach, + unsigned int uid) +{ + return 0; +} + +static inline void dma_buf_stats_teardown(struct dma_buf *dmabuf) {} +static inline void dma_buf_attach_stats_teardown(struct dma_buf_attachment *attach) {} +static inline void dma_buf_update_attachment_map_count(struct dma_buf_attachment *attach, + int delta) {} +static inline unsigned int dma_buf_update_attach_uid(struct dma_buf *dmabuf) +{ + return 0; +} +#endif +#endif // _DMA_BUF_SYSFS_STATS_H diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index 511fe0d217a0..510b42771974 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -29,6 +29,8 @@ #include #include +#include "dma-buf-sysfs-stats.h" + static inline int is_dma_buf_file(struct file *); struct dma_buf_list { @@ -79,6 +81,7 @@ static void dma_buf_release(struct dentry *dentry) if (dmabuf->resv == (struct dma_resv *)&dmabuf[1]) dma_resv_fini(dmabuf->resv); + dma_buf_stats_teardown(dmabuf); module_put(dmabuf->owner); kfree(dmabuf->name); kfree(dmabuf); @@ -580,6 +583,10 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info) file->f_mode |= FMODE_LSEEK; dmabuf->file = file; + ret = dma_buf_stats_setup(dmabuf); + if (ret) + goto err_sysfs; + mutex_init(&dmabuf->lock); INIT_LIST_HEAD(&dmabuf->attachments); @@ -589,6 +596,14 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info) return dmabuf; +err_sysfs: + /* + * Set file->f_path.dentry->d_fsdata to NULL so that when + * dma_buf_release() gets invoked by dentry_ops, it exits + * early before calling the release() dma_buf op. 
+ */ + file->f_path.dentry->d_fsdata = NULL; + fput(file); err_dmabuf: kfree(dmabuf); err_module: @@ -723,6 +738,7 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev, { struct dma_buf_attachment *attach; int ret; + unsigned int attach_uid; if (WARN_ON(!dmabuf || !dev)) return ERR_PTR(-EINVAL); @@ -748,8 +764,13 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev, } dma_resv_lock(dmabuf->resv, NULL); list_add(&attach->node, &dmabuf->attachments); + attach_uid = dma_buf_update_attach_uid(dmabuf); dma_resv_unlock(dmabuf->resv); + ret = dma_buf_attach_stats_setup(attach, attach_uid); + if (ret) + goto err_sysfs; + /* When either the importer or the exporter can't handle dynamic * mappings we cache the mapping here to avoid issues with the * reservation object lock. @@ -776,6 +797,7 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev, dma_resv_unlock(attach->dmabuf->resv); attach->sgt = sgt; attach->dir = DMA_BIDIRECTIONAL; + dma_buf_update_attachment_map_count(attach, 1 /* delta */); } return attach; @@ -792,6 +814,7 @@ err_unlock: if (dma_buf_is_dynamic(attach->dmabuf)) dma_resv_unlock(attach->dmabuf->resv); +err_sysfs: dma_buf_detach(dmabuf, attach); return ERR_PTR(ret); } @@ -841,6 +864,7 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach) dma_resv_lock(attach->dmabuf->resv, NULL); __unmap_dma_buf(attach, attach->sgt, attach->dir); + dma_buf_update_attachment_map_count(attach, -1 /* delta */); if (dma_buf_is_dynamic(attach->dmabuf)) { dmabuf->ops->unpin(attach); @@ -854,6 +878,7 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach) if (dmabuf->ops->detach) dmabuf->ops->detach(dmabuf, attach); + dma_buf_attach_stats_teardown(attach); kfree(attach); } EXPORT_SYMBOL_GPL(dma_buf_detach); @@ -926,6 +951,9 @@ EXPORT_SYMBOL_GPL(dma_buf_unpin); * the underlying backing storage is pinned for as long as a mapping exists, * therefore users/importers should not hold onto a mapping for undue amounts of * time. + * + * Important: Dynamic importers must wait for the exclusive fence of the struct + * dma_resv attached to the DMA-BUF first. 
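+ *
+ * A minimal sketch of that rule (illustrative only; pin/unpin handling and
+ * error paths are omitted, and a real driver would typically feed the fence
+ * into its own scheduling rather than blocking here)::
+ *
+ *	dma_resv_lock(dmabuf->resv, NULL);
+ *	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+ *	fence = dma_resv_excl_fence(dmabuf->resv);
+ *	if (!IS_ERR(sgt) && fence)
+ *		dma_fence_wait(fence, false);
+ *	dma_resv_unlock(dmabuf->resv);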
*/ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach, enum dma_data_direction direction) @@ -993,6 +1021,9 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach, } #endif /* CONFIG_DMA_API_DEBUG */ + if (!IS_ERR(sg_table)) + dma_buf_update_attachment_map_count(attach, 1 /* delta */); + return sg_table; } EXPORT_SYMBOL_GPL(dma_buf_map_attachment); @@ -1030,6 +1061,8 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach, if (dma_buf_is_dynamic(attach->dmabuf) && !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) dma_buf_unpin(attach); + + dma_buf_update_attachment_map_count(attach, -1 /* delta */); } EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment); @@ -1469,6 +1502,12 @@ static inline void dma_buf_uninit_debugfs(void) static int __init dma_buf_init(void) { + int ret; + + ret = dma_buf_init_sysfs_statistics(); + if (ret) + return ret; + dma_buf_mnt = kern_mount(&dma_buf_fs_type); if (IS_ERR(dma_buf_mnt)) return PTR_ERR(dma_buf_mnt); @@ -1484,5 +1523,6 @@ static void __exit dma_buf_deinit(void) { dma_buf_uninit_debugfs(); kern_unmount(dma_buf_mnt); + dma_buf_uninit_sysfs_statistics(); } __exitcall(dma_buf_deinit); diff --git a/drivers/dma-buf/dma-fence-chain.c b/drivers/dma-buf/dma-fence-chain.c index 7d129e68ac70..1b4cb3e5cec9 100644 --- a/drivers/dma-buf/dma-fence-chain.c +++ b/drivers/dma-buf/dma-fence-chain.c @@ -137,6 +137,7 @@ static void dma_fence_chain_cb(struct dma_fence *f, struct dma_fence_cb *cb) struct dma_fence_chain *chain; chain = container_of(cb, typeof(*chain), cb); + init_irq_work(&chain->work, dma_fence_chain_irq_work); irq_work_queue(&chain->work); dma_fence_put(f); } @@ -239,7 +240,6 @@ void dma_fence_chain_init(struct dma_fence_chain *chain, rcu_assign_pointer(chain->prev, prev); chain->fence = fence; chain->prev_seqno = 0; - init_irq_work(&chain->work, dma_fence_chain_irq_work); /* Try to reuse the context of the previous chain node. */ if (prev_chain && __dma_fence_is_later(seqno, prev->seqno, prev->ops)) { diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c index f26c71747d43..e744fd87c63c 100644 --- a/drivers/dma-buf/dma-resv.c +++ b/drivers/dma-buf/dma-resv.c @@ -615,25 +615,21 @@ static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence) */ bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all) { - unsigned int seq, shared_count; + struct dma_fence *fence; + unsigned int seq; int ret; rcu_read_lock(); retry: ret = true; - shared_count = 0; seq = read_seqcount_begin(&obj->seq); if (test_all) { struct dma_resv_list *fobj = dma_resv_shared_list(obj); - unsigned int i; - - if (fobj) - shared_count = fobj->shared_count; + unsigned int i, shared_count; + shared_count = fobj ? 
fobj->shared_count : 0; for (i = 0; i < shared_count; ++i) { - struct dma_fence *fence; - fence = rcu_dereference(fobj->shared[i]); ret = dma_resv_test_signaled_single(fence); if (ret < 0) @@ -641,23 +637,18 @@ retry: else if (!ret) break; } + } - if (read_seqcount_retry(&obj->seq, seq)) + fence = dma_resv_excl_fence(obj); + if (ret && fence) { + ret = dma_resv_test_signaled_single(fence); + if (ret < 0) goto retry; + } - if (!shared_count) { - struct dma_fence *fence_excl = dma_resv_excl_fence(obj); - - if (fence_excl) { - ret = dma_resv_test_signaled_single(fence_excl); - if (ret < 0) - goto retry; - - if (read_seqcount_retry(&obj->seq, seq)) - goto retry; - } - } + if (read_seqcount_retry(&obj->seq, seq)) + goto retry; rcu_read_unlock(); return ret; diff --git a/drivers/dma-buf/st-dma-fence-chain.c b/drivers/dma-buf/st-dma-fence-chain.c index 9525f7f56119..8ce1ea59d31b 100644 --- a/drivers/dma-buf/st-dma-fence-chain.c +++ b/drivers/dma-buf/st-dma-fence-chain.c @@ -58,28 +58,20 @@ static struct dma_fence *mock_fence(void) return &f->base; } -static inline struct mock_chain { - struct dma_fence_chain base; -} *to_mock_chain(struct dma_fence *f) { - return container_of(f, struct mock_chain, base.base); -} - static struct dma_fence *mock_chain(struct dma_fence *prev, struct dma_fence *fence, u64 seqno) { - struct mock_chain *f; + struct dma_fence_chain *f; - f = kmalloc(sizeof(*f), GFP_KERNEL); + f = dma_fence_chain_alloc(); if (!f) return NULL; - dma_fence_chain_init(&f->base, - dma_fence_get(prev), - dma_fence_get(fence), + dma_fence_chain_init(f, dma_fence_get(prev), dma_fence_get(fence), seqno); - return &f->base.base; + return &f->base; } static int sanitycheck(void *arg) diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c index db732f71e59a..8df761a10251 100644 --- a/drivers/dma-buf/udmabuf.c +++ b/drivers/dma-buf/udmabuf.c @@ -11,9 +11,15 @@ #include #include #include +#include -static const u32 list_limit = 1024; /* udmabuf_create_list->count limit */ -static const size_t size_limit_mb = 64; /* total dmabuf size, in megabytes */ +static int list_limit = 1024; +module_param(list_limit, int, 0644); +MODULE_PARM_DESC(list_limit, "udmabuf_create_list->count limit. Default is 1024."); + +static int size_limit_mb = 64; +module_param(size_limit_mb, int, 0644); +MODULE_PARM_DESC(size_limit_mb, "Max size of a dmabuf, in megabytes. 
Default is 64."); struct udmabuf { pgoff_t pagecount; @@ -160,10 +166,13 @@ static long udmabuf_create(struct miscdevice *device, { DEFINE_DMA_BUF_EXPORT_INFO(exp_info); struct file *memfd = NULL; + struct address_space *mapping = NULL; struct udmabuf *ubuf; struct dma_buf *buf; pgoff_t pgoff, pgcnt, pgidx, pgbuf = 0, pglimit; - struct page *page; + struct page *page, *hpage = NULL; + pgoff_t subpgoff, maxsubpgs; + struct hstate *hpstate; int seals, ret = -EINVAL; u32 i, flags; @@ -194,7 +203,8 @@ static long udmabuf_create(struct miscdevice *device, memfd = fget(list[i].memfd); if (!memfd) goto err; - if (!shmem_mapping(file_inode(memfd)->i_mapping)) + mapping = file_inode(memfd)->i_mapping; + if (!shmem_mapping(mapping) && !is_file_hugepages(memfd)) goto err; seals = memfd_fcntl(memfd, F_GET_SEALS, 0); if (seals == -EINVAL) @@ -205,17 +215,48 @@ static long udmabuf_create(struct miscdevice *device, goto err; pgoff = list[i].offset >> PAGE_SHIFT; pgcnt = list[i].size >> PAGE_SHIFT; + if (is_file_hugepages(memfd)) { + hpstate = hstate_file(memfd); + pgoff = list[i].offset >> huge_page_shift(hpstate); + subpgoff = (list[i].offset & + ~huge_page_mask(hpstate)) >> PAGE_SHIFT; + maxsubpgs = huge_page_size(hpstate) >> PAGE_SHIFT; + } for (pgidx = 0; pgidx < pgcnt; pgidx++) { - page = shmem_read_mapping_page( - file_inode(memfd)->i_mapping, pgoff + pgidx); - if (IS_ERR(page)) { - ret = PTR_ERR(page); - goto err; + if (is_file_hugepages(memfd)) { + if (!hpage) { + hpage = find_get_page_flags(mapping, pgoff, + FGP_ACCESSED); + if (IS_ERR(hpage)) { + ret = PTR_ERR(hpage); + goto err; + } + } + page = hpage + subpgoff; + get_page(page); + subpgoff++; + if (subpgoff == maxsubpgs) { + put_page(hpage); + hpage = NULL; + subpgoff = 0; + pgoff++; + } + } else { + page = shmem_read_mapping_page(mapping, + pgoff + pgidx); + if (IS_ERR(page)) { + ret = PTR_ERR(page); + goto err; + } } ubuf->pages[pgbuf++] = page; } fput(memfd); memfd = NULL; + if (hpage) { + put_page(hpage); + hpage = NULL; + } } exp_info.ops = &udmabuf_ops; diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 7ff89690a976..0d372354c2d0 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -35,6 +35,11 @@ config DRM_MIPI_DSI bool depends on DRM +config DRM_DP_AUX_BUS + tristate + depends on DRM + depends on OF + config DRM_DP_AUX_CHARDEV bool "DRM DP AUX Interface" depends on DRM @@ -317,8 +322,6 @@ source "drivers/gpu/drm/tilcdc/Kconfig" source "drivers/gpu/drm/qxl/Kconfig" -source "drivers/gpu/drm/bochs/Kconfig" - source "drivers/gpu/drm/virtio/Kconfig" source "drivers/gpu/drm/msm/Kconfig" diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index a118692a6df7..ad1112154898 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -33,6 +33,8 @@ drm-$(CONFIG_PCI) += drm_pci.o drm-$(CONFIG_DEBUG_FS) += drm_debugfs.o drm_debugfs_crc.o drm-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o +obj-$(CONFIG_DRM_DP_AUX_BUS) += drm_dp_aux_bus.o + drm_vram_helper-y := drm_gem_vram_helper.o obj-$(CONFIG_DRM_VRAM_HELPER) += drm_vram_helper.o @@ -96,7 +98,6 @@ obj-y += omapdrm/ obj-$(CONFIG_DRM_SUN4I) += sun4i/ obj-y += tilcdc/ obj-$(CONFIG_DRM_QXL) += qxl/ -obj-$(CONFIG_DRM_BOCHS) += bochs/ obj-$(CONFIG_DRM_VIRTIO_GPU) += virtio/ obj-$(CONFIG_DRM_MSM) += msm/ obj-$(CONFIG_DRM_TEGRA) += tegra/ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h index a130e766cbdb..c905a4cfc173 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h +++ 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h @@ -34,6 +34,7 @@ struct amdgpu_fpriv; struct amdgpu_bo_list_entry { struct ttm_validate_buffer tv; struct amdgpu_bo_va *bo_va; + struct dma_fence_chain *chain; uint32_t priority; struct page **user_pages; bool user_invalidated; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 30fa1f61e0e5..a152363b0254 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -572,6 +572,20 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, goto out; } + amdgpu_bo_list_for_each_entry(e, p->bo_list) { + struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo); + + e->bo_va = amdgpu_vm_bo_find(vm, bo); + + if (bo->tbo.base.dma_buf && !amdgpu_bo_explicit_sync(bo)) { + e->chain = dma_fence_chain_alloc(); + if (!e->chain) { + r = -ENOMEM; + goto error_validate; + } + } + } + amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold, &p->bytes_moved_vis_threshold); p->bytes_moved = 0; @@ -599,15 +613,6 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, gws = p->bo_list->gws_obj; oa = p->bo_list->oa_obj; - amdgpu_bo_list_for_each_entry(e, p->bo_list) { - struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo); - - /* Make sure we use the exclusive slot for shared BOs */ - if (bo->prime_shared_count) - e->tv.num_shared = 0; - e->bo_va = amdgpu_vm_bo_find(vm, bo); - } - if (gds) { p->job->gds_base = amdgpu_bo_gpu_offset(gds) >> PAGE_SHIFT; p->job->gds_size = amdgpu_bo_size(gds) >> PAGE_SHIFT; @@ -629,8 +634,13 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, } error_validate: - if (r) + if (r) { + amdgpu_bo_list_for_each_entry(e, p->bo_list) { + dma_fence_chain_free(e->chain); + e->chain = NULL; + } ttm_eu_backoff_reservation(&p->ticket, &p->validated); + } out: return r; } @@ -670,9 +680,17 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, { unsigned i; - if (error && backoff) + if (error && backoff) { + struct amdgpu_bo_list_entry *e; + + amdgpu_bo_list_for_each_entry(e, parser->bo_list) { + dma_fence_chain_free(e->chain); + e->chain = NULL; + } + ttm_eu_backoff_reservation(&parser->ticket, &parser->validated); + } for (i = 0; i < parser->num_post_deps; i++) { drm_syncobj_put(parser->post_deps[i].syncobj); @@ -1109,7 +1127,7 @@ static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p dep->chain = NULL; if (syncobj_deps[i].point) { - dep->chain = kmalloc(sizeof(*dep->chain), GFP_KERNEL); + dep->chain = dma_fence_chain_alloc(); if (!dep->chain) return -ENOMEM; } @@ -1117,7 +1135,7 @@ static int amdgpu_cs_process_syncobj_timeline_out_dep(struct amdgpu_cs_parser *p dep->syncobj = drm_syncobj_find(p->filp, syncobj_deps[i].handle); if (!dep->syncobj) { - kfree(dep->chain); + dma_fence_chain_free(dep->chain); return -EINVAL; } dep->point = syncobj_deps[i].point; @@ -1245,6 +1263,28 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm); + amdgpu_bo_list_for_each_entry(e, p->bo_list) { + struct dma_resv *resv = e->tv.bo->base.resv; + struct dma_fence_chain *chain = e->chain; + + if (!chain) + continue; + + /* + * Work around dma_resv shortcommings by wrapping up the + * submission in a dma_fence_chain and add it as exclusive + * fence, but first add the submission as shared fence to make + * sure that shared fences never signal before the exclusive + * one. 
+ */ + dma_fence_chain_init(chain, dma_resv_excl_fence(resv), + dma_fence_get(p->fence), 1); + + dma_resv_add_shared_fence(resv, p->fence); + rcu_assign_pointer(resv->fence_excl, &chain->base); + e->chain = NULL; + } + ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence); mutex_unlock(&p->adev->notifier_lock); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c index a9475b207510..ae6ab93c868b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c @@ -42,48 +42,6 @@ #include #include -static int -__dma_resv_make_exclusive(struct dma_resv *obj) -{ - struct dma_fence **fences; - unsigned int count; - int r; - - if (!dma_resv_shared_list(obj)) /* no shared fences to convert */ - return 0; - - r = dma_resv_get_fences(obj, NULL, &count, &fences); - if (r) - return r; - - if (count == 0) { - /* Now that was unexpected. */ - } else if (count == 1) { - dma_resv_add_excl_fence(obj, fences[0]); - dma_fence_put(fences[0]); - kfree(fences); - } else { - struct dma_fence_array *array; - - array = dma_fence_array_create(count, fences, - dma_fence_context_alloc(1), 0, - false); - if (!array) - goto err_fences_put; - - dma_resv_add_excl_fence(obj, &array->base); - dma_fence_put(&array->base); - } - - return 0; - -err_fences_put: - while (count--) - dma_fence_put(fences[count]); - kfree(fences); - return -ENOMEM; -} - /** * amdgpu_dma_buf_attach - &dma_buf_ops.attach implementation * @@ -110,24 +68,6 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf, if (r < 0) goto out; - r = amdgpu_bo_reserve(bo, false); - if (unlikely(r != 0)) - goto out; - - /* - * We only create shared fences for internal use, but importers - * of the dmabuf rely on exclusive fences for implicitly - * tracking write hazards. As any of the current fences may - * correspond to a write, we need to convert all existing - * fences on the reservation object into a single exclusive - * fence. 
- */ - r = __dma_resv_make_exclusive(bo->tbo.base.resv); - if (r) - goto out; - - bo->prime_shared_count++; - amdgpu_bo_unreserve(bo); return 0; out: @@ -150,9 +90,6 @@ static void amdgpu_dma_buf_detach(struct dma_buf *dmabuf, struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); - if (attach->dev->driver != adev->dev->driver && bo->prime_shared_count) - bo->prime_shared_count--; - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); } @@ -418,8 +355,6 @@ amdgpu_dma_buf_create_obj(struct drm_device *dev, struct dma_buf *dma_buf) bo = gem_to_amdgpu_bo(gobj); bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT; bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT; - if (dma_buf->ops != &amdgpu_dmabuf_ops) - bo->prime_shared_count = 1; dma_resv_unlock(resv); return gobj; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index abb928894eac..a959624d576b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -1281,7 +1281,7 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, #endif /* Get rid of things like offb */ - ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, "amdgpudrmfb"); + ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &amdgpu_kms_driver); if (ret) return ret; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index 72d9b92b1754..d4547d195173 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -490,7 +490,7 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring, r = drm_sched_init(&ring->sched, &amdgpu_sched_ops, num_hw_submission, amdgpu_job_hang_limit, - timeout, sched_score, ring->name); + timeout, NULL, sched_score, ring->name); if (r) { DRM_ERROR("Failed to create scheduler on ring %s.\n", ring->name); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index d0d9bc445d7b..a25160797b61 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -829,7 +829,8 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data, break; } case AMDGPU_GEM_OP_SET_PLACEMENT: - if (robj->prime_shared_count && (args->value & AMDGPU_GEM_DOMAIN_VRAM)) { + if (robj->tbo.base.import_attach && + args->value & AMDGPU_GEM_DOMAIN_VRAM) { r = -EINVAL; amdgpu_bo_unreserve(robj); break; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c index ec96e0b26b11..543000304a1c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c @@ -132,14 +132,11 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man, struct amdgpu_gtt_node *node; int r; - spin_lock(&mgr->lock); - if (tbo->resource && tbo->resource->mem_type != TTM_PL_TT && - atomic64_read(&mgr->available) < num_pages) { - spin_unlock(&mgr->lock); + if (!(place->flags & TTM_PL_FLAG_TEMPORARY) && + atomic64_add_return(num_pages, &mgr->used) > man->size) { + atomic64_sub(num_pages, &mgr->used); return -ENOSPC; } - atomic64_sub(num_pages, &mgr->available); - spin_unlock(&mgr->lock); node = kzalloc(struct_size(node, base.mm_nodes, 1), GFP_KERNEL); if (!node) { @@ -175,7 +172,8 @@ err_free: kfree(node); err_out: - atomic64_add(num_pages, &mgr->available); + if (!(place->flags & TTM_PL_FLAG_TEMPORARY)) + atomic64_sub(num_pages, &mgr->used); return r; } @@ 
-198,7 +196,9 @@ static void amdgpu_gtt_mgr_del(struct ttm_resource_manager *man, if (drm_mm_node_allocated(&node->base.mm_nodes[0])) drm_mm_remove_node(&node->base.mm_nodes[0]); spin_unlock(&mgr->lock); - atomic64_add(res->num_pages, &mgr->available); + + if (!(res->placement & TTM_PL_FLAG_TEMPORARY)) + atomic64_sub(res->num_pages, &mgr->used); kfree(node); } @@ -213,9 +213,8 @@ static void amdgpu_gtt_mgr_del(struct ttm_resource_manager *man, uint64_t amdgpu_gtt_mgr_usage(struct ttm_resource_manager *man) { struct amdgpu_gtt_mgr *mgr = to_gtt_mgr(man); - s64 result = man->size - atomic64_read(&mgr->available); - return (result > 0 ? result : 0) * PAGE_SIZE; + return atomic64_read(&mgr->used) * PAGE_SIZE; } /** @@ -265,9 +264,8 @@ static void amdgpu_gtt_mgr_debug(struct ttm_resource_manager *man, drm_mm_print(&mgr->mm, printer); spin_unlock(&mgr->lock); - drm_printf(printer, "man size:%llu pages, gtt available:%lld pages, usage:%lluMB\n", - man->size, (u64)atomic64_read(&mgr->available), - amdgpu_gtt_mgr_usage(man) >> 20); + drm_printf(printer, "man size:%llu pages, gtt used:%llu pages\n", + man->size, atomic64_read(&mgr->used)); } static const struct ttm_resource_manager_func amdgpu_gtt_mgr_func = { @@ -299,7 +297,7 @@ int amdgpu_gtt_mgr_init(struct amdgpu_device *adev, uint64_t gtt_size) size = (adev->gmc.gart_size >> PAGE_SHIFT) - start; drm_mm_init(&mgr->mm, start, size); spin_lock_init(&mgr->lock); - atomic64_set(&mgr->available, gtt_size >> PAGE_SHIFT); + atomic64_set(&mgr->used, 0); ttm_set_driver_manager(&adev->mman.bdev, TTM_PL_TT, &mgr->manager); ttm_resource_manager_set_used(man, true); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index 83af307e97cd..0d01cfaca77e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c @@ -617,7 +617,7 @@ void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev) int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src, unsigned type) { - if (!adev_to_drm(adev)->irq_enabled) + if (!adev->irq.installed) return -ENOENT; if (type >= src->num_types) @@ -647,7 +647,7 @@ int amdgpu_irq_get(struct amdgpu_device *adev, struct amdgpu_irq_src *src, int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src, unsigned type) { - if (!adev_to_drm(adev)->irq_enabled) + if (!adev->irq.installed) return -ENOENT; if (type >= src->num_types) @@ -678,7 +678,7 @@ int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src, bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src, unsigned type) { - if (!adev_to_drm(adev)->irq_enabled) + if (!adev->irq.installed) return false; if (type >= src->num_types) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 795fa7445abe..261283f5b424 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -196,7 +196,7 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain) c++; } - BUG_ON(c >= AMDGPU_BO_MAX_PLACEMENTS); + BUG_ON(c > AMDGPU_BO_MAX_PLACEMENTS); placement->num_placement = c; placement->placement = places; @@ -913,7 +913,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, return -EINVAL; /* A shared bo cannot be migrated to VRAM */ - if (bo->prime_shared_count || bo->tbo.base.import_attach) { + if (bo->tbo.base.import_attach) { if (domain & AMDGPU_GEM_DOMAIN_GTT) domain = AMDGPU_GEM_DOMAIN_GTT; else diff --git 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index 38c834d0f930..e72f329e7f18 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -100,7 +100,6 @@ struct amdgpu_bo { struct ttm_buffer_object tbo; struct ttm_bo_kmap_obj kmap; u64 flags; - unsigned prime_shared_count; /* per VM structure for page tables and with virtual addresses */ struct amdgpu_vm_bo_base *vm_bo; /* Constant after initialization */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c index 1b2ceccaf5b0..862eb3c1c4c5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c @@ -28,6 +28,8 @@ * Christian König */ +#include + #include "amdgpu.h" #include "amdgpu_trace.h" #include "amdgpu_amdkfd.h" @@ -186,6 +188,55 @@ int amdgpu_sync_vm_fence(struct amdgpu_sync *sync, struct dma_fence *fence) return amdgpu_sync_fence(sync, fence); } +/* Determine based on the owner and mode if we should sync to a fence or not */ +static bool amdgpu_sync_test_fence(struct amdgpu_device *adev, + enum amdgpu_sync_mode mode, + void *owner, struct dma_fence *f) +{ + void *fence_owner = amdgpu_sync_get_owner(f); + + /* Always sync to moves, no matter what */ + if (fence_owner == AMDGPU_FENCE_OWNER_UNDEFINED) + return true; + + /* We only want to trigger KFD eviction fences on + * evict or move jobs. Skip KFD fences otherwise. + */ + if (fence_owner == AMDGPU_FENCE_OWNER_KFD && + owner != AMDGPU_FENCE_OWNER_UNDEFINED) + return false; + + /* Never sync to VM updates either. */ + if (fence_owner == AMDGPU_FENCE_OWNER_VM && + owner != AMDGPU_FENCE_OWNER_UNDEFINED) + return false; + + /* Ignore fences depending on the sync mode */ + switch (mode) { + case AMDGPU_SYNC_ALWAYS: + return true; + + case AMDGPU_SYNC_NE_OWNER: + if (amdgpu_sync_same_dev(adev, f) && + fence_owner == owner) + return false; + break; + + case AMDGPU_SYNC_EQ_OWNER: + if (amdgpu_sync_same_dev(adev, f) && + fence_owner != owner) + return false; + break; + + case AMDGPU_SYNC_EXPLICIT: + return false; + } + + WARN(debug_evictions && fence_owner == AMDGPU_FENCE_OWNER_KFD, + "Adding eviction fence to sync obj"); + return true; +} + /** * amdgpu_sync_resv - sync to a reservation object * @@ -211,67 +262,34 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync, /* always sync to the exclusive fence */ f = dma_resv_excl_fence(resv); - r = amdgpu_sync_fence(sync, f); + dma_fence_chain_for_each(f, f) { + struct dma_fence_chain *chain = to_dma_fence_chain(f); + + if (amdgpu_sync_test_fence(adev, mode, owner, chain ? + chain->fence : f)) { + r = amdgpu_sync_fence(sync, f); + dma_fence_put(f); + if (r) + return r; + break; + } + } flist = dma_resv_shared_list(resv); - if (!flist || r) - return r; + if (!flist) + return 0; for (i = 0; i < flist->shared_count; ++i) { - void *fence_owner; - f = rcu_dereference_protected(flist->shared[i], dma_resv_held(resv)); - fence_owner = amdgpu_sync_get_owner(f); - - /* Always sync to moves, no matter what */ - if (fence_owner == AMDGPU_FENCE_OWNER_UNDEFINED) { + if (amdgpu_sync_test_fence(adev, mode, owner, f)) { r = amdgpu_sync_fence(sync, f); if (r) - break; + return r; } - - /* We only want to trigger KFD eviction fences on - * evict or move jobs. Skip KFD fences otherwise. - */ - if (fence_owner == AMDGPU_FENCE_OWNER_KFD && - owner != AMDGPU_FENCE_OWNER_UNDEFINED) - continue; - - /* Never sync to VM updates either. 
*/ - if (fence_owner == AMDGPU_FENCE_OWNER_VM && - owner != AMDGPU_FENCE_OWNER_UNDEFINED) - continue; - - /* Ignore fences depending on the sync mode */ - switch (mode) { - case AMDGPU_SYNC_ALWAYS: - break; - - case AMDGPU_SYNC_NE_OWNER: - if (amdgpu_sync_same_dev(adev, f) && - fence_owner == owner) - continue; - break; - - case AMDGPU_SYNC_EQ_OWNER: - if (amdgpu_sync_same_dev(adev, f) && - fence_owner != owner) - continue; - break; - - case AMDGPU_SYNC_EXPLICIT: - continue; - } - - WARN(debug_evictions && fence_owner == AMDGPU_FENCE_OWNER_KFD, - "Adding eviction fence to sync obj"); - r = amdgpu_sync_fence(sync, f); - if (r) - break; } - return r; + return 0; } /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 3a55f08e00e1..acd95d3a4434 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -149,14 +149,16 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo, * BOs to be evicted from VRAM */ amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM | - AMDGPU_GEM_DOMAIN_GTT); + AMDGPU_GEM_DOMAIN_GTT | + AMDGPU_GEM_DOMAIN_CPU); abo->placements[0].fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT; abo->placements[0].lpfn = 0; abo->placement.busy_placement = &abo->placements[1]; abo->placement.num_busy_placement = 1; } else { /* Move to GTT memory */ - amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT); + amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT | + AMDGPU_GEM_DOMAIN_CPU); } break; case TTM_PL_TT: @@ -521,7 +523,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict, hop->fpfn = 0; hop->lpfn = 0; hop->mem_type = TTM_PL_TT; - hop->flags = 0; + hop->flags = TTM_PL_FLAG_TEMPORARY; return -EMULTIHOP; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index e69f3e8e06e5..3205fd520060 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -52,7 +52,7 @@ struct amdgpu_gtt_mgr { struct ttm_resource_manager manager; struct drm_mm mm; spinlock_t lock; - atomic64_t available; + atomic64_t used; }; struct amdgpu_preempt_mgr { diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c index ff45f23f3d56..93b7f09b96ca 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c @@ -13,7 +13,6 @@ #include #include #include -#include #include #include #include @@ -301,8 +300,6 @@ struct komeda_kms_dev *komeda_kms_attach(struct komeda_dev *mdev) if (err) goto free_component_binding; - drm->irq_enabled = true; - drm_kms_helper_poll_init(drm); err = drm_dev_register(drm, 0); @@ -313,7 +310,6 @@ struct komeda_kms_dev *komeda_kms_attach(struct komeda_dev *mdev) free_interrupts: drm_kms_helper_poll_fini(drm); - drm->irq_enabled = false; free_component_binding: component_unbind_all(mdev->dev, drm); cleanup_mode_config: @@ -331,7 +327,6 @@ void komeda_kms_detach(struct komeda_kms_dev *kms) drm_dev_unregister(drm); drm_kms_helper_poll_fini(drm); drm_atomic_helper_shutdown(drm); - drm->irq_enabled = false; component_unbind_all(mdev->dev, drm); drm_mode_config_cleanup(drm); komeda_kms_cleanup_private_objs(kms); diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c index de59f3302516..78d15b04b105 100644 --- a/drivers/gpu/drm/arm/malidp_drv.c +++ b/drivers/gpu/drm/arm/malidp_drv.c @@ -847,8 +847,6 @@ static int malidp_bind(struct device *dev) 
if (ret < 0) goto irq_init_fail; - drm->irq_enabled = true; - ret = drm_vblank_init(drm, drm->mode_config.num_crtc); if (ret < 0) { DRM_ERROR("failed to initialise vblank\n"); @@ -874,7 +872,6 @@ register_fail: vblank_fail: malidp_se_irq_fini(hwdev); malidp_de_irq_fini(hwdev); - drm->irq_enabled = false; irq_init_fail: drm_atomic_helper_shutdown(drm); component_unbind_all(dev, drm); @@ -909,7 +906,6 @@ static void malidp_unbind(struct device *dev) drm_atomic_helper_shutdown(drm); malidp_se_irq_fini(hwdev); malidp_de_irq_fini(hwdev); - drm->irq_enabled = false; component_unbind_all(dev, drm); of_node_put(malidp->crtc.port); malidp->crtc.port = NULL; diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c index dab0a1f0983b..8e3e98f13db4 100644 --- a/drivers/gpu/drm/armada/armada_drv.c +++ b/drivers/gpu/drm/armada/armada_drv.c @@ -95,7 +95,7 @@ static int armada_drm_bind(struct device *dev) } /* Remove early framebuffers */ - ret = drm_aperture_remove_framebuffers(false, "armada-drm-fb"); + ret = drm_aperture_remove_framebuffers(false, &armada_drm_driver); if (ret) { dev_err(dev, "[" DRM_NAME ":%s] can't kick out simple-fb: %d\n", __func__, ret); @@ -130,8 +130,6 @@ static int armada_drm_bind(struct device *dev) if (ret) goto err_comp; - priv->drm.irq_enabled = true; - drm_mode_config_reset(&priv->drm); ret = armada_fbdev_init(&priv->drm); diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c index d3e3e5fdc390..424250535fed 100644 --- a/drivers/gpu/drm/armada/armada_overlay.c +++ b/drivers/gpu/drm/armada/armada_overlay.c @@ -247,8 +247,6 @@ static void armada_drm_overlay_plane_atomic_disable(struct drm_plane *plane, } static const struct drm_plane_helper_funcs armada_overlay_plane_helper_funcs = { - .prepare_fb = armada_drm_plane_prepare_fb, - .cleanup_fb = armada_drm_plane_cleanup_fb, .atomic_check = armada_drm_plane_atomic_check, .atomic_update = armada_drm_overlay_plane_atomic_update, .atomic_disable = armada_drm_overlay_plane_atomic_disable, diff --git a/drivers/gpu/drm/armada/armada_plane.c b/drivers/gpu/drm/armada/armada_plane.c index 40209e49f34a..959d7f0a5108 100644 --- a/drivers/gpu/drm/armada/armada_plane.c +++ b/drivers/gpu/drm/armada/armada_plane.c @@ -78,33 +78,6 @@ void armada_drm_plane_calc(struct drm_plane_state *state, u32 addrs[2][3], } } -int armada_drm_plane_prepare_fb(struct drm_plane *plane, - struct drm_plane_state *state) -{ - DRM_DEBUG_KMS("[PLANE:%d:%s] [FB:%d]\n", - plane->base.id, plane->name, - state->fb ? state->fb->base.id : 0); - - /* - * Take a reference on the new framebuffer - we want to - * hold on to it while the hardware is displaying it. - */ - if (state->fb) - drm_framebuffer_get(state->fb); - return 0; -} - -void armada_drm_plane_cleanup_fb(struct drm_plane *plane, - struct drm_plane_state *old_state) -{ - DRM_DEBUG_KMS("[PLANE:%d:%s] [FB:%d]\n", - plane->base.id, plane->name, - old_state->fb ? 
old_state->fb->base.id : 0); - - if (old_state->fb) - drm_framebuffer_put(old_state->fb); -} - int armada_drm_plane_atomic_check(struct drm_plane *plane, struct drm_atomic_state *state) { @@ -282,8 +255,6 @@ static void armada_drm_primary_plane_atomic_disable(struct drm_plane *plane, } static const struct drm_plane_helper_funcs armada_primary_plane_helper_funcs = { - .prepare_fb = armada_drm_plane_prepare_fb, - .cleanup_fb = armada_drm_plane_cleanup_fb, .atomic_check = armada_drm_plane_atomic_check, .atomic_update = armada_drm_primary_plane_atomic_update, .atomic_disable = armada_drm_primary_plane_atomic_disable, diff --git a/drivers/gpu/drm/armada/armada_plane.h b/drivers/gpu/drm/armada/armada_plane.h index 51dab8d8da22..368415c609a6 100644 --- a/drivers/gpu/drm/armada/armada_plane.h +++ b/drivers/gpu/drm/armada/armada_plane.h @@ -21,8 +21,6 @@ struct armada_plane_state { void armada_drm_plane_calc(struct drm_plane_state *state, u32 addrs[2][3], u16 pitches[3], bool interlaced); -int armada_drm_plane_prepare_fb(struct drm_plane *plane, - struct drm_plane_state *state); void armada_drm_plane_cleanup_fb(struct drm_plane *plane, struct drm_plane_state *old_state); int armada_drm_plane_atomic_check(struct drm_plane *plane, diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c b/drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c index 098f96d4d50d..827e62c1daba 100644 --- a/drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c +++ b/drivers/gpu/drm/aspeed/aspeed_gfx_crtc.c @@ -220,7 +220,6 @@ static const struct drm_simple_display_pipe_funcs aspeed_gfx_funcs = { .enable = aspeed_gfx_pipe_enable, .disable = aspeed_gfx_pipe_disable, .update = aspeed_gfx_pipe_update, - .prepare_fb = drm_gem_simple_display_pipe_prepare_fb, .enable_vblank = aspeed_gfx_enable_vblank, .disable_vblank = aspeed_gfx_disable_vblank, }; diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c index 5aa452b4efe6..86d5cd7b6318 100644 --- a/drivers/gpu/drm/ast/ast_drv.c +++ b/drivers/gpu/drm/ast/ast_drv.c @@ -100,7 +100,7 @@ static int ast_remove_conflicting_framebuffers(struct pci_dev *pdev) primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW; #endif - return drm_aperture_remove_conflicting_framebuffers(base, size, primary, "astdrmfb"); + return drm_aperture_remove_conflicting_framebuffers(base, size, primary, &ast_driver); } static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c index 36d9575aa27b..f5d58c3088fe 100644 --- a/drivers/gpu/drm/ast/ast_mode.c +++ b/drivers/gpu/drm/ast/ast_mode.c @@ -612,8 +612,7 @@ ast_primary_plane_helper_atomic_disable(struct drm_plane *plane, } static const struct drm_plane_helper_funcs ast_primary_plane_helper_funcs = { - .prepare_fb = drm_gem_vram_plane_helper_prepare_fb, - .cleanup_fb = drm_gem_vram_plane_helper_cleanup_fb, + DRM_GEM_VRAM_PLANE_HELPER_FUNCS, .atomic_check = ast_primary_plane_helper_atomic_check, .atomic_update = ast_primary_plane_helper_atomic_update, .atomic_disable = ast_primary_plane_helper_atomic_disable, @@ -1293,6 +1292,18 @@ static enum drm_mode_status ast_mode_valid(struct drm_connector *connector, return flags; } +static enum drm_connector_status ast_connector_detect(struct drm_connector + *connector, bool force) +{ + int r; + + r = ast_get_modes(connector); + if (r < 0) + return connector_status_disconnected; + + return connector_status_connected; +} + static void ast_connector_destroy(struct drm_connector *connector) { struct ast_connector 
*ast_connector = to_ast_connector(connector); @@ -1307,6 +1318,7 @@ static const struct drm_connector_helper_funcs ast_connector_helper_funcs = { static const struct drm_connector_funcs ast_connector_funcs = { .reset = drm_atomic_helper_connector_reset, + .detect = ast_connector_detect, .fill_modes = drm_helper_probe_single_connector_modes, .destroy = ast_connector_destroy, .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, @@ -1334,7 +1346,8 @@ static int ast_connector_init(struct drm_device *dev) connector->interlace_allowed = 0; connector->doublescan_allowed = 0; - connector->polled = DRM_CONNECTOR_POLL_CONNECT; + connector->polled = DRM_CONNECTOR_POLL_CONNECT | + DRM_CONNECTOR_POLL_DISCONNECT; drm_connector_attach_encoder(connector, encoder); @@ -1403,6 +1416,8 @@ int ast_mode_config_init(struct ast_private *ast) drm_mode_config_reset(dev); + drm_kms_helper_poll_init(dev); + return 0; } diff --git a/drivers/gpu/drm/bochs/Kconfig b/drivers/gpu/drm/bochs/Kconfig deleted file mode 100644 index 7bcdf294fed8..000000000000 --- a/drivers/gpu/drm/bochs/Kconfig +++ /dev/null @@ -1,11 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only -config DRM_BOCHS - tristate "DRM Support for bochs dispi vga interface (qemu stdvga)" - depends on DRM && PCI && MMU - select DRM_KMS_HELPER - select DRM_VRAM_HELPER - select DRM_TTM - select DRM_TTM_HELPER - help - Choose this option for qemu. - If M is selected the module will be called bochs-drm. diff --git a/drivers/gpu/drm/bochs/Makefile b/drivers/gpu/drm/bochs/Makefile deleted file mode 100644 index 55473371300f..000000000000 --- a/drivers/gpu/drm/bochs/Makefile +++ /dev/null @@ -1,4 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only -bochs-drm-y := bochs_drv.o bochs_mm.o bochs_kms.o bochs_hw.o - -obj-$(CONFIG_DRM_BOCHS) += bochs-drm.o diff --git a/drivers/gpu/drm/bochs/bochs.h b/drivers/gpu/drm/bochs/bochs.h deleted file mode 100644 index e9645c612aff..000000000000 --- a/drivers/gpu/drm/bochs/bochs.h +++ /dev/null @@ -1,98 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ - -#include -#include - -#include -#include -#include -#include -#include -#include -#include - -/* ---------------------------------------------------------------------- */ - -#define VBE_DISPI_IOPORT_INDEX 0x01CE -#define VBE_DISPI_IOPORT_DATA 0x01CF - -#define VBE_DISPI_INDEX_ID 0x0 -#define VBE_DISPI_INDEX_XRES 0x1 -#define VBE_DISPI_INDEX_YRES 0x2 -#define VBE_DISPI_INDEX_BPP 0x3 -#define VBE_DISPI_INDEX_ENABLE 0x4 -#define VBE_DISPI_INDEX_BANK 0x5 -#define VBE_DISPI_INDEX_VIRT_WIDTH 0x6 -#define VBE_DISPI_INDEX_VIRT_HEIGHT 0x7 -#define VBE_DISPI_INDEX_X_OFFSET 0x8 -#define VBE_DISPI_INDEX_Y_OFFSET 0x9 -#define VBE_DISPI_INDEX_VIDEO_MEMORY_64K 0xa - -#define VBE_DISPI_ID0 0xB0C0 -#define VBE_DISPI_ID1 0xB0C1 -#define VBE_DISPI_ID2 0xB0C2 -#define VBE_DISPI_ID3 0xB0C3 -#define VBE_DISPI_ID4 0xB0C4 -#define VBE_DISPI_ID5 0xB0C5 - -#define VBE_DISPI_DISABLED 0x00 -#define VBE_DISPI_ENABLED 0x01 -#define VBE_DISPI_GETCAPS 0x02 -#define VBE_DISPI_8BIT_DAC 0x20 -#define VBE_DISPI_LFB_ENABLED 0x40 -#define VBE_DISPI_NOCLEARMEM 0x80 - -/* ---------------------------------------------------------------------- */ - -enum bochs_types { - BOCHS_QEMU_STDVGA, - BOCHS_UNKNOWN, -}; - -struct bochs_device { - /* hw */ - void __iomem *mmio; - int ioports; - void __iomem *fb_map; - unsigned long fb_base; - unsigned long fb_size; - unsigned long qext_size; - - /* mode */ - u16 xres; - u16 yres; - u16 yres_virtual; - u32 stride; - u32 bpp; - struct edid *edid; - - /* drm */ - struct 
drm_device *dev; - struct drm_simple_display_pipe pipe; - struct drm_connector connector; -}; - -/* ---------------------------------------------------------------------- */ - -/* bochs_hw.c */ -int bochs_hw_init(struct drm_device *dev); -void bochs_hw_fini(struct drm_device *dev); - -void bochs_hw_blank(struct bochs_device *bochs, bool blank); -void bochs_hw_setmode(struct bochs_device *bochs, - struct drm_display_mode *mode); -void bochs_hw_setformat(struct bochs_device *bochs, - const struct drm_format_info *format); -void bochs_hw_setbase(struct bochs_device *bochs, - int x, int y, int stride, u64 addr); -int bochs_hw_load_edid(struct bochs_device *bochs); - -/* bochs_mm.c */ -int bochs_mm_init(struct bochs_device *bochs); -void bochs_mm_fini(struct bochs_device *bochs); - -/* bochs_kms.c */ -int bochs_kms_init(struct bochs_device *bochs); - -/* bochs_fbdev.c */ -extern const struct drm_mode_config_funcs bochs_mode_funcs; diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c deleted file mode 100644 index c828cadbabff..000000000000 --- a/drivers/gpu/drm/bochs/bochs_drv.c +++ /dev/null @@ -1,205 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - */ - -#include -#include - -#include -#include -#include -#include - -#include "bochs.h" - -static int bochs_modeset = -1; -module_param_named(modeset, bochs_modeset, int, 0444); -MODULE_PARM_DESC(modeset, "enable/disable kernel modesetting"); - -/* ---------------------------------------------------------------------- */ -/* drm interface */ - -static void bochs_unload(struct drm_device *dev) -{ - struct bochs_device *bochs = dev->dev_private; - - bochs_mm_fini(bochs); -} - -static int bochs_load(struct drm_device *dev) -{ - struct bochs_device *bochs; - int ret; - - bochs = drmm_kzalloc(dev, sizeof(*bochs), GFP_KERNEL); - if (bochs == NULL) - return -ENOMEM; - dev->dev_private = bochs; - bochs->dev = dev; - - ret = bochs_hw_init(dev); - if (ret) - goto err; - - ret = bochs_mm_init(bochs); - if (ret) - goto err; - - ret = bochs_kms_init(bochs); - if (ret) - goto err; - - return 0; - -err: - bochs_unload(dev); - return ret; -} - -DEFINE_DRM_GEM_FOPS(bochs_fops); - -static const struct drm_driver bochs_driver = { - .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, - .fops = &bochs_fops, - .name = "bochs-drm", - .desc = "bochs dispi vga interface (qemu stdvga)", - .date = "20130925", - .major = 1, - .minor = 0, - DRM_GEM_VRAM_DRIVER, - .release = bochs_unload, -}; - -/* ---------------------------------------------------------------------- */ -/* pm interface */ - -#ifdef CONFIG_PM_SLEEP -static int bochs_pm_suspend(struct device *dev) -{ - struct drm_device *drm_dev = dev_get_drvdata(dev); - - return drm_mode_config_helper_suspend(drm_dev); -} - -static int bochs_pm_resume(struct device *dev) -{ - struct drm_device *drm_dev = dev_get_drvdata(dev); - - return drm_mode_config_helper_resume(drm_dev); -} -#endif - -static const struct dev_pm_ops bochs_pm_ops = { - SET_SYSTEM_SLEEP_PM_OPS(bochs_pm_suspend, - bochs_pm_resume) -}; - -/* ---------------------------------------------------------------------- */ -/* pci interface */ - -static int bochs_pci_probe(struct pci_dev *pdev, - const struct pci_device_id *ent) -{ - struct drm_device *dev; - unsigned long fbsize; - int ret; - - fbsize = pci_resource_len(pdev, 0); - if (fbsize < 4 * 1024 * 1024) { - DRM_ERROR("less than 4 MB video memory, ignoring device\n"); - return -ENOMEM; - } - - ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, 
"bochsdrmfb"); - if (ret) - return ret; - - dev = drm_dev_alloc(&bochs_driver, &pdev->dev); - if (IS_ERR(dev)) - return PTR_ERR(dev); - - ret = pci_enable_device(pdev); - if (ret) - goto err_free_dev; - - pci_set_drvdata(pdev, dev); - - ret = bochs_load(dev); - if (ret) - goto err_free_dev; - - ret = drm_dev_register(dev, 0); - if (ret) - goto err_unload; - - drm_fbdev_generic_setup(dev, 32); - return ret; - -err_unload: - bochs_unload(dev); -err_free_dev: - drm_dev_put(dev); - return ret; -} - -static void bochs_pci_remove(struct pci_dev *pdev) -{ - struct drm_device *dev = pci_get_drvdata(pdev); - - drm_dev_unplug(dev); - drm_atomic_helper_shutdown(dev); - bochs_hw_fini(dev); - drm_dev_put(dev); -} - -static const struct pci_device_id bochs_pci_tbl[] = { - { - .vendor = 0x1234, - .device = 0x1111, - .subvendor = PCI_SUBVENDOR_ID_REDHAT_QUMRANET, - .subdevice = PCI_SUBDEVICE_ID_QEMU, - .driver_data = BOCHS_QEMU_STDVGA, - }, - { - .vendor = 0x1234, - .device = 0x1111, - .subvendor = PCI_ANY_ID, - .subdevice = PCI_ANY_ID, - .driver_data = BOCHS_UNKNOWN, - }, - { /* end of list */ } -}; - -static struct pci_driver bochs_pci_driver = { - .name = "bochs-drm", - .id_table = bochs_pci_tbl, - .probe = bochs_pci_probe, - .remove = bochs_pci_remove, - .driver.pm = &bochs_pm_ops, -}; - -/* ---------------------------------------------------------------------- */ -/* module init/exit */ - -static int __init bochs_init(void) -{ - if (vgacon_text_force() && bochs_modeset == -1) - return -EINVAL; - - if (bochs_modeset == 0) - return -EINVAL; - - return pci_register_driver(&bochs_pci_driver); -} - -static void __exit bochs_exit(void) -{ - pci_unregister_driver(&bochs_pci_driver); -} - -module_init(bochs_init); -module_exit(bochs_exit); - -MODULE_DEVICE_TABLE(pci, bochs_pci_tbl); -MODULE_AUTHOR("Gerd Hoffmann "); -MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/bochs/bochs_hw.c b/drivers/gpu/drm/bochs/bochs_hw.c deleted file mode 100644 index 7d3426d8cc69..000000000000 --- a/drivers/gpu/drm/bochs/bochs_hw.c +++ /dev/null @@ -1,323 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - */ - -#include - -#include -#include - -#include