linux/drivers/gpu/drm/nouveau/nouveau_svm.c

Merge tag 'drm-next-2020-08-06' of git://anongit.freedesktop.org/drm/drm

Pull drm updates from Dave Airlie:
 "New xilinx displayport driver, AMD support for two new GPUs (more
  header files), i915 initial support for RocketLake and some work on
  their DG1 (discrete chip).

  The core also grew some lockdep annotations to try and constrain what
  drivers do with dma-fences, and added some documentation on why the
  idea of indefinite fences doesn't work.
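
   As a rough, simplified sketch (not code from this pull), an annotated
   fence-signalling critical section looks something like the following,
   where "fence" stands in for a driver-owned struct dma_fence that is
   about to be signalled; lockdep can then flag waits, allocations or
   locks on this path that could deadlock against dma_fence_wait():

     bool cookie = dma_fence_begin_signalling();
     /* only work that is safe on the signalling path goes here */
     dma_fence_signal(fence);
     dma_fence_end_signalling(cookie);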

  The long list is below.

  I do have some fixes trees outstanding, but I'll follow up with those
  later.

  core:
   - add user def flag to cmd line modes
   - dma_fence_wait added might_sleep
   - dma-fence lockdep annotations
   - indefinite fences are bad documentation
   - gem CMA functions used in more drivers
   - struct mutex removal
   - more drm_ debug macro usage
   - set/drop master api fixes
   - fix for drm/mm hole size comparison
   - drm/mm remove invalid entry optimization
   - optimise drm/mm hole handling
   - VRR debugfs added
   - uncompressed AFBC modifier support
   - multiple display id blocks in EDID
   - multiple driver sg handling fixes
   - __drm_atomic_helper_crtc_reset in all drivers
   - managed vram helpers

  ttm:
   - ttm_mem_reg handling cleanup
   - remove bo offset field
   - drop CMA memtype flag
   - drop mappable flag

  xilinx:
   - New Xilinx ZynqMP DisplayPort Subsystem driver

  nouveau:
   - add CRC support
   - start using NVIDIA published class header files
   - convert all push buffer emission to new macros
   - Proper push buffer space management for EVO/NVD channels.
   - firmware loading fixes
   - 2MiB system memory pages support on Pascal and newer

  vkms:
   - larger cursor support

  i915:
   - Rocketlake platform enablement
   - Early DG1 enablement
   - Numerous GEM refactorings
   - DP MST fixes
   - FBC, PSR, Cursor, Color, Gamma fixes
   - TGL, RKL, EHL workaround updates
   - TGL 8K display support fixes
   - SDVO/HDMI/DVI fixes

  amdgpu:
   - Initial support for Sienna Cichlid GPU
   - Initial support for Navy Flounder GPU
   - SI UVD/VCE support
   - expose rotation property
   - Add support for unique id on Arcturus
   - Enable runtime PM on vega10 boards that support BACO
   - Skip BAR resizing if the BIOS already did it
   - Major swSMU code cleanup
   - Fixes for DCN bandwidth calculations

  amdkfd:
   - Track SDMA usage per process
   - SMI events interface

  radeon:
   - Default to on chip GART for AGP boards on all arches
   - Runtime PM reference count fixes

  msm:
   - headers regenerated causing churn
   - a650/a640 display and GPU enablement
   - dpu dither support for 6bpc panels
   - dpu cursor fix
   - dsi/mdp5 enablement for sdm630/sdm636/sdm660

  tegra:
   - video capture prep support
   - reflection support

  mediatek:
   - convert mtk_dsi to bridge API

  meson:
   - FBC support

  sun4i:
   - iommu support

  rockchip:
   - register locking fix
   - per-pixel alpha support for PX30 VOP

  mgag200:
   - ported to simple and shmem helpers
   - device init cleanups
   - use managed pci functions
   - dropped hw cursor support

  ast:
   - use managed pci functions
   - use managed VRAM helpers
   - rework cursor support

  malidp:
   - dev_groups support

  hibmc:
   - refactor hibmc_drv_vdac

  vc4:
   - create TXP CRTC

  imx:
   - error path fixes and cleanups

  etnaviv:
   - clock handling and error handling cleanups
   - use pin_user_pages"

* tag 'drm-next-2020-08-06' of git://anongit.freedesktop.org/drm/drm: (1747 commits)
  drm/msm: use kthread_create_worker instead of kthread_run
  drm/msm/mdp5: Add MDP5 configuration for SDM636/660
  drm/msm/dsi: Add DSI configuration for SDM660
  drm/msm/mdp5: Add MDP5 configuration for SDM630
  drm/msm/dsi: Add phy configuration for SDM630/636/660
  drm/msm/a6xx: add A640/A650 hwcg
  drm/msm/a6xx: hwcg tables in gpulist
  drm/msm/dpu: add SM8250 to hw catalog
  drm/msm/dpu: add SM8150 to hw catalog
  drm/msm/dpu: intf timing path for displayport
  drm/msm/dpu: set missing flush bits for INTF_2 and INTF_3
  drm/msm/dpu: don't use INTF_INPUT_CTRL feature on sdm845
  drm/msm/dpu: move some sspp caps to dpu_caps
  drm/msm/dpu: update UBWC config for sm8150 and sm8250
  drm/msm/dpu: use right setup_blend_config for sm8150 and sm8250
  drm/msm/a6xx: set ubwc config for A640 and A650
  drm/msm/adreno: un-open-code some packets
  drm/msm: sync generated headers
  drm/msm/a6xx: add build_bw_table for A640/A650
  drm/msm/a6xx: fix crashstate capture for A650
  ...
2020-08-05 19:50:06 -07:00


/*
* Copyright 2018 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "nouveau_svm.h"
#include "nouveau_drv.h"
#include "nouveau_chan.h"
#include "nouveau_dmem.h"
#include <nvif/notify.h>
#include <nvif/object.h>
#include <nvif/vmm.h>
#include <nvif/class.h>
#include <nvif/clb069.h>
#include <nvif/ifc00d.h>
#include <linux/sched/mm.h>
#include <linux/sort.h>
#include <linux/hmm.h>
struct nouveau_svm {
struct nouveau_drm *drm;
struct mutex mutex;
struct list_head inst;
struct nouveau_svm_fault_buffer {
int id;
struct nvif_object object;
u32 entries;
u32 getaddr;
u32 putaddr;
u32 get;
u32 put;
struct nvif_notify notify;
struct nouveau_svm_fault {
u64 inst;
u64 addr;
u64 time;
u32 engine;
u8 gpc;
u8 hub;
u8 access;
u8 client;
u8 fault;
struct nouveau_svmm *svmm;
} **fault;
int fault_nr;
} buffer[1];
};
#define SVM_DBG(s,f,a...) NV_DEBUG((s)->drm, "svm: "f"\n", ##a)
#define SVM_ERR(s,f,a...) NV_WARN((s)->drm, "svm: "f"\n", ##a)
struct nouveau_pfnmap_args {
struct nvif_ioctl_v0 i;
struct nvif_ioctl_mthd_v0 m;
struct nvif_vmm_pfnmap_v0 p;
};
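/* Note: the layout above mirrors the nvif ioctl argument stream (object
 * ioctl header, method header, then the PFNMAP payload), so code that
 * only holds the p.phys[] array can recover the full argument block via
 * container_of() -- see nouveau_pfns_to_args() below.
 */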
struct nouveau_ivmm {
struct nouveau_svmm *svmm;
u64 inst;
struct list_head head;
};
static struct nouveau_ivmm *
nouveau_ivmm_find(struct nouveau_svm *svm, u64 inst)
{
struct nouveau_ivmm *ivmm;
list_for_each_entry(ivmm, &svm->inst, head) {
if (ivmm->inst == inst)
return ivmm;
}
return NULL;
}
#define SVMM_DBG(s,f,a...) \
NV_DEBUG((s)->vmm->cli->drm, "svm-%p: "f"\n", (s), ##a)
#define SVMM_ERR(s,f,a...) \
NV_WARN((s)->vmm->cli->drm, "svm-%p: "f"\n", (s), ##a)
int
nouveau_svmm_bind(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct nouveau_cli *cli = nouveau_cli(file_priv);
struct drm_nouveau_svm_bind *args = data;
unsigned target, cmd, priority;
unsigned long addr, end, size;
struct mm_struct *mm;
args->va_start &= PAGE_MASK;
args->va_end &= PAGE_MASK;
/* Sanity check arguments */
if (args->reserved0 || args->reserved1)
return -EINVAL;
if (args->header & (~NOUVEAU_SVM_BIND_VALID_MASK))
return -EINVAL;
if (args->va_start >= args->va_end)
return -EINVAL;
if (!args->npages)
return -EINVAL;
cmd = args->header >> NOUVEAU_SVM_BIND_COMMAND_SHIFT;
cmd &= NOUVEAU_SVM_BIND_COMMAND_MASK;
switch (cmd) {
case NOUVEAU_SVM_BIND_COMMAND__MIGRATE:
break;
default:
return -EINVAL;
}
priority = args->header >> NOUVEAU_SVM_BIND_PRIORITY_SHIFT;
priority &= NOUVEAU_SVM_BIND_PRIORITY_MASK;
/* FIXME: support CPU targets, i.e. all target values < GPU_VRAM */
target = args->header >> NOUVEAU_SVM_BIND_TARGET_SHIFT;
target &= NOUVEAU_SVM_BIND_TARGET_MASK;
switch (target) {
case NOUVEAU_SVM_BIND_TARGET__GPU_VRAM:
break;
default:
return -EINVAL;
}
/*
* FIXME: for now refuse a non-zero stride; we need to change the migrate
* kernel function to handle strides without creating a mess within
* each device driver.
*/
if (args->stride)
return -EINVAL;
size = ((unsigned long)args->npages) << PAGE_SHIFT;
if ((args->va_start + size) <= args->va_start)
return -EINVAL;
if ((args->va_start + size) > args->va_end)
return -EINVAL;
/*
* OK, we are asked to do something sane; for now we only support migrate
* commands, but we will add things like memory policy (what to do on
* page fault) and maybe some other commands.
*/
mm = get_task_mm(current);
mmap_read_lock(mm);
if (!cli->svm.svmm) {
mmap_read_unlock(mm);
return -EINVAL;
}
for (addr = args->va_start, end = args->va_start + size; addr < end;) {
struct vm_area_struct *vma;
unsigned long next;
vma = find_vma_intersection(mm, addr, end);
if (!vma)
break;
addr = max(addr, vma->vm_start);
next = min(vma->vm_end, end);
/* This is a best effort so we ignore errors */
nouveau_dmem_migrate_vma(cli->drm, cli->svm.svmm, vma, addr,
next);
addr = next;
}
/*
* FIXME: return the number of pages we have migrated; again, we need to
* update the migrate API to return that information so that we can
* report it to user space.
*/
args->result = 0;
mmap_read_unlock(mm);
mmput(mm);
return 0;
}
/* Unlink channel instance from SVMM. */
void
nouveau_svmm_part(struct nouveau_svmm *svmm, u64 inst)
{
struct nouveau_ivmm *ivmm;
if (svmm) {
mutex_lock(&svmm->vmm->cli->drm->svm->mutex);
ivmm = nouveau_ivmm_find(svmm->vmm->cli->drm->svm, inst);
if (ivmm) {
list_del(&ivmm->head);
kfree(ivmm);
}
mutex_unlock(&svmm->vmm->cli->drm->svm->mutex);
}
}
/* Link channel instance to SVMM. */
int
nouveau_svmm_join(struct nouveau_svmm *svmm, u64 inst)
{
struct nouveau_ivmm *ivmm;
if (svmm) {
if (!(ivmm = kmalloc(sizeof(*ivmm), GFP_KERNEL)))
return -ENOMEM;
ivmm->svmm = svmm;
ivmm->inst = inst;
mutex_lock(&svmm->vmm->cli->drm->svm->mutex);
list_add(&ivmm->head, &svmm->vmm->cli->drm->svm->inst);
mutex_unlock(&svmm->vmm->cli->drm->svm->mutex);
}
return 0;
}
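/* The join/part helpers above maintain the svm->inst list, which maps a
 * channel's instance pointer back to its SVMM; the fault handler uses
 * nouveau_ivmm_find() on that list to translate fault->inst into the
 * address space a fault belongs to.
 */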
/* Invalidate SVMM address-range on GPU. */
void
nouveau_svmm_invalidate(struct nouveau_svmm *svmm, u64 start, u64 limit)
{
if (limit > start) {
bool super = svmm->vmm->vmm.object.client->super;
svmm->vmm->vmm.object.client->super = true;
nvif_object_mthd(&svmm->vmm->vmm.object, NVIF_VMM_V0_PFNCLR,
&(struct nvif_vmm_pfnclr_v0) {
.addr = start,
.size = limit - start,
}, sizeof(struct nvif_vmm_pfnclr_v0));
svmm->vmm->vmm.object.client->super = super;
}
}
static int
nouveau_svmm_invalidate_range_start(struct mmu_notifier *mn,
const struct mmu_notifier_range *update)
{
struct nouveau_svmm *svmm =
container_of(mn, struct nouveau_svmm, notifier);
unsigned long start = update->start;
unsigned long limit = update->end;
if (!mmu_notifier_range_blockable(update))
return -EAGAIN;
SVMM_DBG(svmm, "invalidate %016lx-%016lx", start, limit);
mutex_lock(&svmm->mutex);
if (unlikely(!svmm->vmm))
goto out;
/*
* Ignore invalidation callbacks for device private pages since
* the invalidation is handled as part of the migration process.
*/
if (update->event == MMU_NOTIFY_MIGRATE &&
update->migrate_pgmap_owner == svmm->vmm->cli->drm->dev)
goto out;
if (limit > svmm->unmanaged.start && start < svmm->unmanaged.limit) {
if (start < svmm->unmanaged.start) {
nouveau_svmm_invalidate(svmm, start,
svmm->unmanaged.limit);
}
start = svmm->unmanaged.limit;
}
nouveau_svmm_invalidate(svmm, start, limit);
out:
mutex_unlock(&svmm->mutex);
return 0;
}
static void nouveau_svmm_free_notifier(struct mmu_notifier *mn)
{
kfree(container_of(mn, struct nouveau_svmm, notifier));
}
static const struct mmu_notifier_ops nouveau_mn_ops = {
.invalidate_range_start = nouveau_svmm_invalidate_range_start,
.free_notifier = nouveau_svmm_free_notifier,
};
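/* These ops are registered against the process mm in nouveau_svmm_init()
 * below.  Note that .free_notifier kfree()s the containing nouveau_svmm,
 * which is why nouveau_svmm_fini() only clears svmm->vmm and then drops
 * its reference with mmu_notifier_put() rather than freeing svmm itself.
 */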
void
nouveau_svmm_fini(struct nouveau_svmm **psvmm)
{
struct nouveau_svmm *svmm = *psvmm;
if (svmm) {
mutex_lock(&svmm->mutex);
svmm->vmm = NULL;
mutex_unlock(&svmm->mutex);
mmu_notifier_put(&svmm->notifier);
*psvmm = NULL;
}
}
int
nouveau_svmm_init(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct nouveau_cli *cli = nouveau_cli(file_priv);
struct nouveau_svmm *svmm;
struct drm_nouveau_svm_init *args = data;
int ret;
/* Allocate tracking for SVM-enabled VMM. */
if (!(svmm = kzalloc(sizeof(*svmm), GFP_KERNEL)))
return -ENOMEM;
svmm->vmm = &cli->svm;
svmm->unmanaged.start = args->unmanaged_addr;
svmm->unmanaged.limit = args->unmanaged_addr + args->unmanaged_size;
mutex_init(&svmm->mutex);
/* Check that SVM isn't already enabled for the client. */
mutex_lock(&cli->mutex);
if (cli->svm.cli) {
ret = -EBUSY;
goto out_free;
}
/* Allocate a new GPU VMM that can support SVM (managed by the
* client, with replayable faults enabled).
*
* All future channel/memory allocations will make use of this
* VMM instead of the standard one.
*/
ret = nvif_vmm_ctor(&cli->mmu, "svmVmm",
cli->vmm.vmm.object.oclass, true,
args->unmanaged_addr, args->unmanaged_size,
&(struct gp100_vmm_v0) {
.fault_replay = true,
}, sizeof(struct gp100_vmm_v0), &cli->svm.vmm);
if (ret)
goto out_free;
mmap_write_lock(current->mm);
svmm->notifier.ops = &nouveau_mn_ops;
ret = __mmu_notifier_register(&svmm->notifier, current->mm);
if (ret)
goto out_mm_unlock;
/* Note, ownership of svmm transfers to mmu_notifier */
cli->svm.svmm = svmm;
cli->svm.cli = cli;
mmap_write_unlock(current->mm);
mutex_unlock(&cli->mutex);
return 0;
out_mm_unlock:
mmap_write_unlock(current->mm);
out_free:
mutex_unlock(&cli->mutex);
kfree(svmm);
return ret;
}
/* Issue fault replay for GPU to retry accesses that faulted previously. */
static void
nouveau_svm_fault_replay(struct nouveau_svm *svm)
{
SVM_DBG(svm, "replay");
WARN_ON(nvif_object_mthd(&svm->drm->client.vmm.vmm.object,
GP100_VMM_VN_FAULT_REPLAY,
&(struct gp100_vmm_fault_replay_vn) {},
sizeof(struct gp100_vmm_fault_replay_vn)));
}
/* Cancel a replayable fault that could not be handled.
*
* Cancelling the fault will trigger recovery to reset the engine
* and kill the offending channel (ie. GPU SIGSEGV).
*/
static void
nouveau_svm_fault_cancel(struct nouveau_svm *svm,
u64 inst, u8 hub, u8 gpc, u8 client)
{
SVM_DBG(svm, "cancel %016llx %d %02x %02x", inst, hub, gpc, client);
WARN_ON(nvif_object_mthd(&svm->drm->client.vmm.vmm.object,
GP100_VMM_VN_FAULT_CANCEL,
&(struct gp100_vmm_fault_cancel_v0) {
.hub = hub,
.gpc = gpc,
.client = client,
.inst = inst,
}, sizeof(struct gp100_vmm_fault_cancel_v0)));
}
static void
nouveau_svm_fault_cancel_fault(struct nouveau_svm *svm,
struct nouveau_svm_fault *fault)
{
nouveau_svm_fault_cancel(svm, fault->inst,
fault->hub,
fault->gpc,
fault->client);
}
static int
nouveau_svm_fault_cmp(const void *a, const void *b)
{
const struct nouveau_svm_fault *fa = *(struct nouveau_svm_fault **)a;
const struct nouveau_svm_fault *fb = *(struct nouveau_svm_fault **)b;
int ret;
if ((ret = (s64)fa->inst - fb->inst))
return ret;
if ((ret = (s64)fa->addr - fb->addr))
return ret;
/*XXX: atomic? */
return (fa->access == 0 || fa->access == 3) -
(fb->access == 0 || fb->access == 3);
}
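/* Example of the order nouveau_svm_fault_cmp() produces for a single
 * instance: { READ 0x1000, WRITE 0x1000, PREFETCH 0x2000 } sorts to
 * WRITE 0x1000, READ 0x1000, PREFETCH 0x2000 (writes ahead of
 * reads/prefetches on the same address), so servicing the first fault
 * seen for an address also covers the weaker accesses that follow it.
 */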
static void
nouveau_svm_fault_cache(struct nouveau_svm *svm,
struct nouveau_svm_fault_buffer *buffer, u32 offset)
{
struct nvif_object *memory = &buffer->object;
const u32 instlo = nvif_rd32(memory, offset + 0x00);
const u32 insthi = nvif_rd32(memory, offset + 0x04);
const u32 addrlo = nvif_rd32(memory, offset + 0x08);
const u32 addrhi = nvif_rd32(memory, offset + 0x0c);
const u32 timelo = nvif_rd32(memory, offset + 0x10);
const u32 timehi = nvif_rd32(memory, offset + 0x14);
const u32 engine = nvif_rd32(memory, offset + 0x18);
const u32 info = nvif_rd32(memory, offset + 0x1c);
const u64 inst = (u64)insthi << 32 | instlo;
const u8 gpc = (info & 0x1f000000) >> 24;
const u8 hub = (info & 0x00100000) >> 20;
const u8 client = (info & 0x00007f00) >> 8;
struct nouveau_svm_fault *fault;
/* XXX: I think we're supposed to spin waiting */
if (WARN_ON(!(info & 0x80000000)))
return;
nvif_mask(memory, offset + 0x1c, 0x80000000, 0x00000000);
if (!buffer->fault[buffer->fault_nr]) {
fault = kmalloc(sizeof(*fault), GFP_KERNEL);
if (WARN_ON(!fault)) {
nouveau_svm_fault_cancel(svm, inst, hub, gpc, client);
return;
}
buffer->fault[buffer->fault_nr] = fault;
}
fault = buffer->fault[buffer->fault_nr++];
fault->inst = inst;
fault->addr = (u64)addrhi << 32 | addrlo;
fault->time = (u64)timehi << 32 | timelo;
fault->engine = engine;
fault->gpc = gpc;
fault->hub = hub;
fault->access = (info & 0x000f0000) >> 16;
fault->client = client;
fault->fault = (info & 0x0000001f);
SVM_DBG(svm, "fault %016llx %016llx %02x",
fault->inst, fault->addr, fault->access);
}
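/* For reference, the 32-byte fault buffer entry decoded above (only the
 * fields the reads above actually use):
 *
 *   0x00 instlo   0x04 insthi   0x08 addrlo   0x0c addrhi
 *   0x10 timelo   0x14 timehi   0x18 engine   0x1c info
 *
 * info: bit 31 = valid, bits 28:24 = gpc, bit 20 = hub,
 *       bits 19:16 = access, bits 14:8 = client, bits 4:0 = fault type.
 */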
struct svm_notifier {
struct mmu_interval_notifier notifier;
struct nouveau_svmm *svmm;
};
static bool nouveau_svm_range_invalidate(struct mmu_interval_notifier *mni,
const struct mmu_notifier_range *range,
unsigned long cur_seq)
{
struct svm_notifier *sn =
container_of(mni, struct svm_notifier, notifier);
/*
* serializes the update to mni->invalidate_seq done by caller and
* prevents invalidation of the PTE from progressing while HW is being
* programmed. This is very hacky and only works because the normal
* notifier that does invalidation is always called after the range
* notifier.
*/
if (mmu_notifier_range_blockable(range))
mutex_lock(&sn->svmm->mutex);
else if (!mutex_trylock(&sn->svmm->mutex))
return false;
mmu_interval_set_seq(mni, cur_seq);
mutex_unlock(&sn->svmm->mutex);
return true;
}
static const struct mmu_interval_notifier_ops nouveau_svm_mni_ops = {
.invalidate = nouveau_svm_range_invalidate,
};
static void nouveau_hmm_convert_pfn(struct nouveau_drm *drm,
struct hmm_range *range,
struct nouveau_pfnmap_args *args)
{
struct page *page;
/*
* The address prepared here is passed through nvif_object_ioctl()
* to an eventual DMA map in something like gp100_vmm_pgt_pfn()
*
* This is all just encoding the internal hmm representation into a
* different nouveau internal representation.
*/
if (!(range->hmm_pfns[0] & HMM_PFN_VALID)) {
args->p.phys[0] = 0;
return;
}
page = hmm_pfn_to_page(range->hmm_pfns[0]);
/*
* Only map compound pages to the GPU if the CPU is also mapping the
* page as a compound page. Otherwise, the PTE protections might not be
* consistent (e.g., CPU only maps part of a compound page).
* Note that the underlying page might still be larger than the
* CPU mapping (e.g., a PUD sized compound page partially mapped with
* a PMD sized page table entry).
*/
if (hmm_pfn_to_map_order(range->hmm_pfns[0])) {
unsigned long addr = args->p.addr;
args->p.page = hmm_pfn_to_map_order(range->hmm_pfns[0]) +
PAGE_SHIFT;
args->p.size = 1UL << args->p.page;
args->p.addr &= ~(args->p.size - 1);
page -= (addr - args->p.addr) >> PAGE_SHIFT;
}
if (is_device_private_page(page))
args->p.phys[0] = nouveau_dmem_page_addr(page) |
NVIF_VMM_PFNMAP_V0_V |
NVIF_VMM_PFNMAP_V0_VRAM;
else
args->p.phys[0] = page_to_phys(page) |
NVIF_VMM_PFNMAP_V0_V |
NVIF_VMM_PFNMAP_V0_HOST;
if (range->hmm_pfns[0] & HMM_PFN_WRITE)
args->p.phys[0] |= NVIF_VMM_PFNMAP_V0_W;
}
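/* Two example outcomes of the conversion above (assuming 4KiB base
 * pages): a writable system page that the CPU maps as a 2MiB compound
 * page is encoded as
 *   page_to_phys(page) | NVIF_VMM_PFNMAP_V0_V | NVIF_VMM_PFNMAP_V0_HOST | NVIF_VMM_PFNMAP_V0_W
 * with args->p.page bumped to 21 and args->p.addr aligned down to 2MiB,
 * while a read-only device-private (VRAM) page becomes
 *   nouveau_dmem_page_addr(page) | NVIF_VMM_PFNMAP_V0_V | NVIF_VMM_PFNMAP_V0_VRAM
 * (with args->p.page left at PAGE_SHIFT when no compound mapping is
 * involved).
 */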
static int nouveau_range_fault(struct nouveau_svmm *svmm,
struct nouveau_drm *drm,
struct nouveau_pfnmap_args *args, u32 size,
unsigned long hmm_flags,
struct svm_notifier *notifier)
{
unsigned long timeout =
jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
/* Have HMM fault pages within the fault window to the GPU. */
unsigned long hmm_pfns[1];
struct hmm_range range = {
.notifier = &notifier->notifier,
.start = notifier->notifier.interval_tree.start,
.end = notifier->notifier.interval_tree.last + 1,
.default_flags = hmm_flags,
.hmm_pfns = hmm_pfns,
.dev_private_owner = drm->dev,
};
struct mm_struct *mm = notifier->notifier.mm;
int ret;
while (true) {
if (time_after(jiffies, timeout))
return -EBUSY;
range.notifier_seq = mmu_interval_read_begin(range.notifier);
mmap_read_lock(mm);
ret = hmm_range_fault(&range);
mmap_read_unlock(mm);
if (ret) {
if (ret == -EBUSY)
continue;
return ret;
}
mutex_lock(&svmm->mutex);
if (mmu_interval_read_retry(range.notifier,
range.notifier_seq)) {
mutex_unlock(&svmm->mutex);
continue;
}
break;
}
nouveau_hmm_convert_pfn(drm, &range, args);
svmm->vmm->vmm.object.client->super = true;
ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, size, NULL);
svmm->vmm->vmm.object.client->super = false;
mutex_unlock(&svmm->mutex);
return ret;
}
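/* The loop in nouveau_range_fault() is the usual mmu_interval_notifier
 * collision-retry pattern: sample the sequence with
 * mmu_interval_read_begin(), fault the pages with hmm_range_fault(),
 * then take svmm->mutex and recheck with mmu_interval_read_retry().
 * If an invalidation ran in between, the result is discarded and the
 * whole thing is retried; otherwise the mapping is pushed to the GPU
 * while the mutex is still held, so nouveau_svm_range_invalidate()
 * cannot slip in between the recheck and the nvif_object_ioctl() call.
 */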
static int
nouveau_svm_fault(struct nvif_notify *notify)
{
struct nouveau_svm_fault_buffer *buffer =
container_of(notify, typeof(*buffer), notify);
struct nouveau_svm *svm =
container_of(buffer, typeof(*svm), buffer[buffer->id]);
struct nvif_object *device = &svm->drm->client.device.object;
struct nouveau_svmm *svmm;
struct {
struct nouveau_pfnmap_args i;
u64 phys[1];
} args;
unsigned long hmm_flags;
u64 inst, start, limit;
int fi, fn;
int replay = 0, ret;
/* Parse available fault buffer entries into a cache, and update
* the GET pointer so HW can reuse the entries.
*/
SVM_DBG(svm, "fault handler");
if (buffer->get == buffer->put) {
buffer->put = nvif_rd32(device, buffer->putaddr);
buffer->get = nvif_rd32(device, buffer->getaddr);
if (buffer->get == buffer->put)
return NVIF_NOTIFY_KEEP;
}
buffer->fault_nr = 0;
SVM_DBG(svm, "get %08x put %08x", buffer->get, buffer->put);
while (buffer->get != buffer->put) {
nouveau_svm_fault_cache(svm, buffer, buffer->get * 0x20);
if (++buffer->get == buffer->entries)
buffer->get = 0;
}
nvif_wr32(device, buffer->getaddr, buffer->get);
SVM_DBG(svm, "%d fault(s) pending", buffer->fault_nr);
/* Sort parsed faults by instance pointer to prevent unnecessary
* instance to SVMM translations, followed by address and access
* type to reduce the amount of work when handling the faults.
*/
sort(buffer->fault, buffer->fault_nr, sizeof(*buffer->fault),
nouveau_svm_fault_cmp, NULL);
/* Lookup SVMM structure for each unique instance pointer. */
mutex_lock(&svm->mutex);
for (fi = 0, svmm = NULL; fi < buffer->fault_nr; fi++) {
if (!svmm || buffer->fault[fi]->inst != inst) {
struct nouveau_ivmm *ivmm =
nouveau_ivmm_find(svm, buffer->fault[fi]->inst);
svmm = ivmm ? ivmm->svmm : NULL;
inst = buffer->fault[fi]->inst;
SVM_DBG(svm, "inst %016llx -> svm-%p", inst, svmm);
}
buffer->fault[fi]->svmm = svmm;
}
mutex_unlock(&svm->mutex);
/* Process list of faults. */
args.i.i.version = 0;
args.i.i.type = NVIF_IOCTL_V0_MTHD;
args.i.m.version = 0;
args.i.m.method = NVIF_VMM_V0_PFNMAP;
args.i.p.version = 0;
for (fi = 0; fn = fi + 1, fi < buffer->fault_nr; fi = fn) {
struct svm_notifier notifier;
struct mm_struct *mm;
/* Cancel any faults from non-SVM channels. */
if (!(svmm = buffer->fault[fi]->svmm)) {
nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
continue;
}
SVMM_DBG(svmm, "addr %016llx", buffer->fault[fi]->addr);
/* We try and group handling of faults within a small
* window into a single update.
*/
start = buffer->fault[fi]->addr;
limit = start + PAGE_SIZE;
if (start < svmm->unmanaged.limit)
limit = min_t(u64, limit, svmm->unmanaged.start);
/*
* Prepare the GPU-side update of all pages within the
* fault window, determining required pages and access
* permissions based on pending faults.
*/
args.i.p.addr = start;
args.i.p.page = PAGE_SHIFT;
args.i.p.size = PAGE_SIZE;
/*
* Determine required permissions based on GPU fault
* access flags.
* XXX: atomic?
*/
switch (buffer->fault[fi]->access) {
case 0: /* READ. */
hmm_flags = HMM_PFN_REQ_FAULT;
break;
case 3: /* PREFETCH. */
hmm_flags = 0;
break;
default:
hmm_flags = HMM_PFN_REQ_FAULT | HMM_PFN_REQ_WRITE;
break;
}
mm = svmm->notifier.mm;
if (!mmget_not_zero(mm)) {
nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
continue;
}
notifier.svmm = svmm;
ret = mmu_interval_notifier_insert(&notifier.notifier, mm,
args.i.p.addr, args.i.p.size,
&nouveau_svm_mni_ops);
if (!ret) {
ret = nouveau_range_fault(svmm, svm->drm, &args.i,
sizeof(args), hmm_flags, &notifier);
mmu_interval_notifier_remove(&notifier.notifier);
}
mmput(mm);
limit = args.i.p.addr + args.i.p.size;
for (fn = fi; ++fn < buffer->fault_nr; ) {
/* It's okay to skip over duplicate addresses from the
* same SVMM as faults are ordered by access type such
* that only the first one needs to be handled.
*
* ie. WRITE faults appear first, thus any handling of
* pending READ faults will already be satisfied.
* But if a large page is mapped, make sure subsequent
* fault addresses have sufficient access permission.
*/
if (buffer->fault[fn]->svmm != svmm ||
buffer->fault[fn]->addr >= limit ||
(buffer->fault[fi]->access == 0 /* READ. */ &&
!(args.phys[0] & NVIF_VMM_PFNMAP_V0_V)) ||
(buffer->fault[fi]->access != 0 /* READ. */ &&
buffer->fault[fi]->access != 3 /* PREFETCH. */ &&
!(args.phys[0] & NVIF_VMM_PFNMAP_V0_W)))
break;
}
/* If handling failed completely, cancel all faults. */
if (ret) {
while (fi < fn) {
struct nouveau_svm_fault *fault =
buffer->fault[fi++];
nouveau_svm_fault_cancel_fault(svm, fault);
}
} else
replay++;
}
/* Issue fault replay to the GPU. */
if (replay)
nouveau_svm_fault_replay(svm);
return NVIF_NOTIFY_KEEP;
}
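/* In short, nouveau_svm_fault() above: drain the hardware GET/PUT ring
 * into buffer->fault[], sort the entries, resolve each unique instance
 * pointer to its SVMM, fault the backing pages through HMM and push the
 * resulting mappings to the GPU, then ask the GPU to replay whatever was
 * handled.  Entries that cannot be resolved or serviced are cancelled
 * instead, which kills the offending channel.
 */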
static struct nouveau_pfnmap_args *
nouveau_pfns_to_args(void *pfns)
{
return container_of(pfns, struct nouveau_pfnmap_args, p.phys);
}
u64 *
nouveau_pfns_alloc(unsigned long npages)
{
struct nouveau_pfnmap_args *args;
args = kzalloc(struct_size(args, p.phys, npages), GFP_KERNEL);
if (!args)
return NULL;
args->i.type = NVIF_IOCTL_V0_MTHD;
args->m.method = NVIF_VMM_V0_PFNMAP;
args->p.page = PAGE_SHIFT;
return args->p.phys;
}
void
nouveau_pfns_free(u64 *pfns)
{
struct nouveau_pfnmap_args *args = nouveau_pfns_to_args(pfns);
kfree(args);
}
void
nouveau_pfns_map(struct nouveau_svmm *svmm, struct mm_struct *mm,
unsigned long addr, u64 *pfns, unsigned long npages)
{
struct nouveau_pfnmap_args *args = nouveau_pfns_to_args(pfns);
int ret;
args->p.addr = addr;
args->p.size = npages << PAGE_SHIFT;
mutex_lock(&svmm->mutex);
svmm->vmm->vmm.object.client->super = true;
ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, sizeof(*args) +
npages * sizeof(args->p.phys[0]), NULL);
svmm->vmm->vmm.object.client->super = false;
mutex_unlock(&svmm->mutex);
}
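/* Sketch of how the three helpers above are meant to be used together
 * (the real caller is the migration code in nouveau_dmem.c):
 *
 *	u64 *pfns = nouveau_pfns_alloc(npages);
 *	... fill pfns[i] with NVIF_VMM_PFNMAP_V0_* encoded addresses ...
 *	nouveau_pfns_map(svmm, mm, addr, pfns, npages);
 *	nouveau_pfns_free(pfns);
 */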
static void
nouveau_svm_fault_buffer_fini(struct nouveau_svm *svm, int id)
{
struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id];
nvif_notify_put(&buffer->notify);
}
static int
nouveau_svm_fault_buffer_init(struct nouveau_svm *svm, int id)
{
struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id];
struct nvif_object *device = &svm->drm->client.device.object;
buffer->get = nvif_rd32(device, buffer->getaddr);
buffer->put = nvif_rd32(device, buffer->putaddr);
SVM_DBG(svm, "get %08x put %08x (init)", buffer->get, buffer->put);
return nvif_notify_get(&buffer->notify);
}
static void
nouveau_svm_fault_buffer_dtor(struct nouveau_svm *svm, int id)
{
struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id];
int i;
if (buffer->fault) {
for (i = 0; i < buffer->entries && buffer->fault[i]; i++)
kfree(buffer->fault[i]);
kvfree(buffer->fault);
}
nouveau_svm_fault_buffer_fini(svm, id);
nvif_notify_dtor(&buffer->notify);
nvif_object_dtor(&buffer->object);
}
static int
nouveau_svm_fault_buffer_ctor(struct nouveau_svm *svm, s32 oclass, int id)
{
struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id];
struct nouveau_drm *drm = svm->drm;
struct nvif_object *device = &drm->client.device.object;
struct nvif_clb069_v0 args = {};
int ret;
buffer->id = id;
ret = nvif_object_ctor(device, "svmFaultBuffer", 0, oclass, &args,
sizeof(args), &buffer->object);
if (ret < 0) {
SVM_ERR(svm, "Fault buffer allocation failed: %d", ret);
return ret;
}
nvif_object_map(&buffer->object, NULL, 0);
buffer->entries = args.entries;
buffer->getaddr = args.get;
buffer->putaddr = args.put;
ret = nvif_notify_ctor(&buffer->object, "svmFault", nouveau_svm_fault,
true, NVB069_V0_NTFY_FAULT, NULL, 0, 0,
&buffer->notify);
if (ret)
return ret;
buffer->fault = kvzalloc(sizeof(*buffer->fault) * buffer->entries, GFP_KERNEL);
if (!buffer->fault)
return -ENOMEM;
return nouveau_svm_fault_buffer_init(svm, id);
}
void
nouveau_svm_resume(struct nouveau_drm *drm)
{
struct nouveau_svm *svm = drm->svm;
if (svm)
nouveau_svm_fault_buffer_init(svm, 0);
}
void
nouveau_svm_suspend(struct nouveau_drm *drm)
{
struct nouveau_svm *svm = drm->svm;
if (svm)
nouveau_svm_fault_buffer_fini(svm, 0);
}
void
nouveau_svm_fini(struct nouveau_drm *drm)
{
struct nouveau_svm *svm = drm->svm;
if (svm) {
nouveau_svm_fault_buffer_dtor(svm, 0);
kfree(drm->svm);
drm->svm = NULL;
}
}
void
nouveau_svm_init(struct nouveau_drm *drm)
{
static const struct nvif_mclass buffers[] = {
{ VOLTA_FAULT_BUFFER_A, 0 },
{ MAXWELL_FAULT_BUFFER_A, 0 },
{}
};
struct nouveau_svm *svm;
int ret;
/* Disable on Volta and newer until channel recovery is fixed,
* otherwise clients will have a trivial way to trash the GPU
* for everyone.
*/
if (drm->client.device.info.family > NV_DEVICE_INFO_V0_PASCAL)
return;
if (!(drm->svm = svm = kzalloc(sizeof(*drm->svm), GFP_KERNEL)))
return;
drm->svm->drm = drm;
mutex_init(&drm->svm->mutex);
INIT_LIST_HEAD(&drm->svm->inst);
ret = nvif_mclass(&drm->client.device.object, buffers);
if (ret < 0) {
SVM_DBG(svm, "No supported fault buffer class");
nouveau_svm_fini(drm);
return;
}
ret = nouveau_svm_fault_buffer_ctor(svm, buffers[ret].oclass, 0);
if (ret) {
nouveau_svm_fini(drm);
return;
}
SVM_DBG(svm, "Initialised");
}