linux/drivers/gpu/drm/nouveau/nouveau_vga.c
bf44e8cecc ("vgaarb: don't pass a cookie to vga_client_register") by Christoph Hellwig
The VGA arbitration is entirely based on pci_dev structures, so just pass
that back to the set_vga_decode callback.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Link: https://patchwork.freedesktop.org/patch/msgid/20210716061634.2446357-8-hch@lst.de
Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Christian König <christian.koenig@amd.com>
2021-07-21 10:29:10 +02:00
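For context, after this change a DRM driver hands the VGA arbiter a callback that takes the struct pci_dev directly instead of an opaque cookie. A minimal sketch of that pattern, mirroring nouveau_vga_set_decode() and the vga_client_register() call in the file below (driver_set_decode and the elided hardware step are hypothetical, not part of this file):

/* Hypothetical sketch of the post-series registration pattern. */
static unsigned int driver_set_decode(struct pci_dev *pdev, bool state)
{
	/* program the hardware here, then report what is still decoded */
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

/* ...and during driver init: */
vga_client_register(pdev, driver_set_decode);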

// SPDX-License-Identifier: MIT
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>

#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>

#include "nouveau_drv.h"
#include "nouveau_acpi.h"
#include "nouveau_fbcon.h"
#include "nouveau_vga.h"

static unsigned int
nouveau_vga_set_decode(struct pci_dev *pdev, bool state)
{
	struct nouveau_drm *drm = nouveau_drm(pci_get_drvdata(pdev));
	struct nvif_object *device = &drm->client.device.object;

	if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE &&
	    drm->client.device.info.chipset >= 0x4c)
		nvif_wr32(device, 0x088060, state);
	else
	if (drm->client.device.info.chipset >= 0x40)
		nvif_wr32(device, 0x088054, state);
	else
		nvif_wr32(device, 0x001854, state);

	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

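/*
 * vga_switcheroo set_gpu_state callback: resume nouveau when the mux
 * switches to it, suspend it when switching away. On Optimus/_DSM
 * systems the "off" request is ignored here.
 */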
static void
nouveau_switcheroo_set_state(struct pci_dev *pdev,
			     enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	if ((nouveau_is_optimus() || nouveau_is_v1_dsm()) && state == VGA_SWITCHEROO_OFF)
		return;

	if (state == VGA_SWITCHEROO_ON) {
		pr_err("VGA switcheroo: switched nouveau on\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		nouveau_pmops_resume(&pdev->dev);
		dev->switch_power_state = DRM_SWITCH_POWER_ON;
	} else {
		pr_err("VGA switcheroo: switched nouveau off\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		nouveau_switcheroo_optimus_dsm();
		nouveau_pmops_suspend(&pdev->dev);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

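/* vga_switcheroo reprobe callback: re-poll outputs after a switch. */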
static void
nouveau_switcheroo_reprobe(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	drm_fb_helper_output_poll_changed(dev);
}

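/* A switch is only allowed while nothing holds the DRM device open. */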
static bool
nouveau_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
	return atomic_read(&dev->open_count) == 0;
}

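/* vga_switcheroo client hooks registered in nouveau_vga_init(). */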
static const struct vga_switcheroo_client_ops
nouveau_switcheroo_ops = {
	.set_gpu_state = nouveau_switcheroo_set_state,
	.reprobe = nouveau_switcheroo_reprobe,
	.can_switch = nouveau_switcheroo_can_switch,
};

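/*
 * Register the device with the VGA arbiter and, unless it is a
 * Thunderbolt-attached eGPU, with vga_switcheroo.
 */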
void
nouveau_vga_init(struct nouveau_drm *drm)
{
	struct drm_device *dev = drm->dev;
	bool runtime = nouveau_pmops_runtime();
	struct pci_dev *pdev;

	/* only relevant for PCI devices */
	if (!dev_is_pci(dev->dev))
		return;
	pdev = to_pci_dev(dev->dev);

	vga_client_register(pdev, nouveau_vga_set_decode);

	/* don't register Thunderbolt eGPU with vga_switcheroo */
	if (pci_is_thunderbolt_attached(pdev))
		return;

	vga_switcheroo_register_client(pdev, &nouveau_switcheroo_ops, runtime);

	if (runtime && nouveau_is_v1_dsm() && !nouveau_is_optimus())
		vga_switcheroo_init_domain_pm_ops(drm->dev->dev, &drm->vga_pm_domain);
}

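/* Undo nouveau_vga_init(): unregister from vgaarb and vga_switcheroo. */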
void
nouveau_vga_fini(struct nouveau_drm *drm)
{
	struct drm_device *dev = drm->dev;
	bool runtime = nouveau_pmops_runtime();
	struct pci_dev *pdev;

	/* only relevant for PCI devices */
	if (!dev_is_pci(dev->dev))
		return;
	pdev = to_pci_dev(dev->dev);

	vga_client_unregister(pdev);

	if (pci_is_thunderbolt_attached(pdev))
		return;

	vga_switcheroo_unregister_client(pdev);
	if (runtime && nouveau_is_v1_dsm() && !nouveau_is_optimus())
		vga_switcheroo_fini_domain_pm_ops(drm->dev->dev);
}

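/*
 * Called on last close of the DRM device: process any switcheroo switch
 * that was deferred while clients still had the device open.
 */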
void
nouveau_vga_lastclose(struct drm_device *dev)
{
	vga_switcheroo_process_delayed_switch();
}