a50ecc54ff
The circular locking dependency captured below can lead to a deadlock. The idea of the fix is to move read_user_wptr outside of the acquire_queue...release_queue critical section.

[ 63.477482] WARNING: possible circular locking dependency detected
[ 63.484091] 4.12.0-kfd-ozeng #3 Not tainted
[ 63.488531] ------------------------------------------------------
[ 63.495146] HelloWorldLoop/2526 is trying to acquire lock:
[ 63.501011]  (&mm->mmap_sem){++++++}, at: [<ffffffff911898ce>] __might_fault+0x3e/0x90
[ 63.509472] but task is already holding lock:
[ 63.515716]  (&adev->srbm_mutex){+.+...}, at: [<ffffffffc0484feb>] lock_srbm+0x2b/0x50 [amdgpu]
[ 63.525099] which lock already depends on the new lock.
[ 63.533841] the existing dependency chain (in reverse order) is:
[ 63.541839] -> #2 (&adev->srbm_mutex){+.+...}:
[ 63.548178]        lock_acquire+0x6d/0x90
[ 63.552461]        __mutex_lock+0x70/0x8c0
[ 63.556826]        mutex_lock_nested+0x16/0x20
[ 63.561603]        gfx_v8_0_kiq_resume+0x1039/0x14a0 [amdgpu]
[ 63.567817]        gfx_v8_0_hw_init+0x204d/0x2210 [amdgpu]
[ 63.573675]        amdgpu_device_init+0xdea/0x1790 [amdgpu]
[ 63.579640]        amdgpu_driver_load_kms+0x63/0x220 [amdgpu]
[ 63.585743]        drm_dev_register+0x145/0x1e0
[ 63.590605]        amdgpu_pci_probe+0x11e/0x160 [amdgpu]
[ 63.596266]        local_pci_probe+0x40/0xa0
[ 63.600803]        pci_device_probe+0x134/0x150
[ 63.605650]        driver_probe_device+0x2a1/0x460
[ 63.610785]        __driver_attach+0xdc/0xe0
[ 63.615321]        bus_for_each_dev+0x5f/0x90
[ 63.619984]        driver_attach+0x19/0x20
[ 63.624337]        bus_add_driver+0x40/0x270
[ 63.628908]        driver_register+0x5b/0xe0
[ 63.633446]        __pci_register_driver+0x5b/0x60
[ 63.638586]        rtsx_pci_switch_output_voltage+0x1d/0x20 [rtsx_pci]
[ 63.645564]        do_one_initcall+0x4c/0x1b0
[ 63.650205]        do_init_module+0x56/0x1ea
[ 63.654767]        load_module+0x208c/0x27d0
[ 63.659335]        SYSC_finit_module+0x96/0xd0
[ 63.664058]        SyS_finit_module+0x9/0x10
[ 63.668629]        entry_SYSCALL_64_fastpath+0x1f/0xbe
[ 63.674088] -> #1 (reservation_ww_class_mutex){+.+.+.}:
[ 63.681257]        lock_acquire+0x6d/0x90
[ 63.685551]        __ww_mutex_lock.constprop.11+0x8c/0xed0
[ 63.691426]        ww_mutex_lock+0x67/0x70
[ 63.695802]        amdgpu_verify_access+0x6d/0x100 [amdgpu]
[ 63.701743]        ttm_bo_mmap+0x8e/0x100 [ttm]
[ 63.706615]        amdgpu_bo_mmap+0xd/0x60 [amdgpu]
[ 63.711814]        amdgpu_mmap+0x35/0x40 [amdgpu]
[ 63.716904]        mmap_region+0x3b5/0x5a0
[ 63.721255]        do_mmap+0x400/0x4d0
[ 63.725260]        vm_mmap_pgoff+0xb0/0xf0
[ 63.729625]        SyS_mmap_pgoff+0x19e/0x260
[ 63.734292]        SyS_mmap+0x1d/0x20
[ 63.738199]        entry_SYSCALL_64_fastpath+0x1f/0xbe
[ 63.743681] -> #0 (&mm->mmap_sem){++++++}:
[ 63.749641]        __lock_acquire+0x1401/0x1420
[ 63.754491]        lock_acquire+0x6d/0x90
[ 63.758750]        __might_fault+0x6b/0x90
[ 63.763176]        kgd_hqd_load+0x24f/0x270 [amdgpu]
[ 63.768432]        load_mqd+0x4b/0x50 [amdkfd]
[ 63.773192]        create_queue_nocpsch+0x535/0x620 [amdkfd]
[ 63.779237]        pqm_create_queue+0x34d/0x4f0 [amdkfd]
[ 63.784835]        kfd_ioctl_create_queue+0x282/0x670 [amdkfd]
[ 63.790973]        kfd_ioctl+0x310/0x4d0 [amdkfd]
[ 63.795944]        do_vfs_ioctl+0x90/0x6e0
[ 63.800268]        SyS_ioctl+0x74/0x80
[ 63.804207]        entry_SYSCALL_64_fastpath+0x1f/0xbe
[ 63.809607] other info that might help us debug this:
[ 63.818026] Chain exists of:
                 &mm->mmap_sem --> reservation_ww_class_mutex --> &adev->srbm_mutex
[ 63.830382]  Possible unsafe locking scenario:
[ 63.836605]        CPU0                    CPU1
[ 63.841364]        ----                    ----
[ 63.846123]   lock(&adev->srbm_mutex);
[ 63.850061]                                lock(reservation_ww_class_mutex);
[ 63.857475]                                lock(&adev->srbm_mutex);
[ 63.864084]   lock(&mm->mmap_sem);
[ 63.867657]  *** DEADLOCK ***
[ 63.873884] 3 locks held by HelloWorldLoop/2526:
[ 63.878739]  #0:  (&process->mutex){+.+.+.}, at: [<ffffffffc06e1a9a>] kfd_ioctl_create_queue+0x24a/0x670 [amdkfd]
[ 63.889543]  #1:  (&dqm->lock){+.+...}, at: [<ffffffffc06eedeb>] create_queue_nocpsch+0x3b/0x620 [amdkfd]
[ 63.899684]  #2:  (&adev->srbm_mutex){+.+...}, at: [<ffffffffc0484feb>] lock_srbm+0x2b/0x50 [amdgpu]
[ 63.909500] stack backtrace:
[ 63.914187] CPU: 3 PID: 2526 Comm: HelloWorldLoop Not tainted 4.12.0-kfd-ozeng #3
[ 63.922184] Hardware name: AMD Carrizo/Gardenia, BIOS WGA5819N_Weekly_15_08_1 08/19/2015
[ 63.930865] Call Trace:
[ 63.933464]  dump_stack+0x85/0xc9
[ 63.936999]  print_circular_bug+0x1f9/0x207
[ 63.941442]  __lock_acquire+0x1401/0x1420
[ 63.945745]  ? lock_srbm+0x2b/0x50 [amdgpu]
[ 63.950185]  lock_acquire+0x6d/0x90
[ 63.953885]  ? __might_fault+0x3e/0x90
[ 63.957899]  __might_fault+0x6b/0x90
[ 63.961699]  ? __might_fault+0x3e/0x90
[ 63.965755]  kgd_hqd_load+0x24f/0x270 [amdgpu]
[ 63.970577]  load_mqd+0x4b/0x50 [amdkfd]
[ 63.974745]  create_queue_nocpsch+0x535/0x620 [amdkfd]
[ 63.980242]  pqm_create_queue+0x34d/0x4f0 [amdkfd]
[ 63.985320]  kfd_ioctl_create_queue+0x282/0x670 [amdkfd]
[ 63.991021]  kfd_ioctl+0x310/0x4d0 [amdkfd]
[ 63.995499]  ? kfd_ioctl_destroy_queue+0x70/0x70 [amdkfd]
[ 64.001234]  do_vfs_ioctl+0x90/0x6e0
[ 64.005065]  ? up_read+0x1a/0x40
[ 64.008496]  SyS_ioctl+0x74/0x80
[ 64.011955]  entry_SYSCALL_64_fastpath+0x1f/0xbe
[ 64.016863] RIP: 0033:0x7f4b3bd35f07
[ 64.020696] RSP: 002b:00007ffe7689ec38 EFLAGS: 00000246 ORIG_RAX: 0000000000000010
[ 64.028786] RAX: ffffffffffffffda RBX: 00000000002a2000 RCX: 00007f4b3bd35f07
[ 64.036414] RDX: 00007ffe7689ecb0 RSI: 00000000c0584b02 RDI: 0000000000000005
[ 64.044045] RBP: 00007f4a3212d000 R08: 00007f4b3c919000 R09: 0000000000080000
[ 64.051674] R10: 00007f4b376b64b8 R11: 0000000000000246 R12: 00007f4a3212d000
[ 64.059324] R13: 0000000000000015 R14: 0000000000000064 R15: 00007ffe7689ef50

Signed-off-by: Oak Zeng <Oak.Zeng@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
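The resulting change in kgd_hqd_load() below, condensed for reference (the surrounding HQD register writes are omitted):

    /* read_user_wptr() may fault and take mm->mmap_sem, so it must not
     * run while adev->srbm_mutex is held. Drop the queue selection
     * around the user copy, then reacquire it before writing the HQD.
     */
    release_queue(kgd);
    valid_wptr = read_user_wptr(mm, wptr, wptr_val);
    acquire_queue(kgd, pipe_id, queue_id);
    if (valid_wptr)
            WREG32(mmCP_HQD_PQ_WPTR, (wptr_val << wptr_shift) & wptr_mask);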
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/module.h>
#include <linux/fdtable.h>
#include <linux/uaccess.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_ucode.h"
#include "gfx_v8_0.h"
#include "gca/gfx_8_0_sh_mask.h"
#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_enum.h"
#include "oss/oss_3_0_sh_mask.h"
#include "oss/oss_3_0_d.h"
#include "gmc/gmc_8_1_sh_mask.h"
#include "gmc/gmc_8_1_d.h"
#include "vi_structs.h"
#include "vid.h"

enum hqd_dequeue_request_type {
	NO_ACTION = 0,
	DRAIN_PIPE,
	RESET_WAVES
};

struct cik_sdma_rlc_registers;

/*
 * Register access functions
 */

static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
		uint32_t sh_mem_config,
		uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit,
		uint32_t sh_mem_bases);
static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
		unsigned int vmid);
static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
		uint32_t hpd_size, uint64_t hpd_gpu_addr);
static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
			uint32_t queue_id, uint32_t __user *wptr,
			uint32_t wptr_shift, uint32_t wptr_mask,
			struct mm_struct *mm);
static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd);
static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
		uint32_t pipe_id, uint32_t queue_id);
static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
			enum kfd_preempt_type reset_type,
			unsigned int utimeout, uint32_t pipe_id,
			uint32_t queue_id);
static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
				unsigned int utimeout);
static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid);
static int kgd_address_watch_disable(struct kgd_dev *kgd);
static int kgd_address_watch_execute(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					uint32_t cntl_val,
					uint32_t addr_hi,
					uint32_t addr_lo);
static int kgd_wave_control_execute(struct kgd_dev *kgd,
					uint32_t gfx_index_val,
					uint32_t sq_cmd);
static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					unsigned int reg_offset);

static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
		uint8_t vmid);
static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
		uint8_t vmid);
static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);
static void set_scratch_backing_va(struct kgd_dev *kgd,
					uint64_t va, uint32_t vmid);

/* Because of REG_GET_FIELD() being used, we put this function in the
 * asic specific file.
 */
static int get_tile_config(struct kgd_dev *kgd,
		struct tile_config *config)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;

	config->gb_addr_config = adev->gfx.config.gb_addr_config;
	config->num_banks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
				MC_ARB_RAMCFG, NOOFBANK);
	config->num_ranks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
				MC_ARB_RAMCFG, NOOFRANKS);

	config->tile_config_ptr = adev->gfx.config.tile_mode_array;
	config->num_tile_configs =
			ARRAY_SIZE(adev->gfx.config.tile_mode_array);
	config->macro_tile_config_ptr =
			adev->gfx.config.macrotile_mode_array;
	config->num_macro_tile_configs =
			ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);

	return 0;
}

static const struct kfd2kgd_calls kfd2kgd = {
	.init_gtt_mem_allocation = alloc_gtt_mem,
	.free_gtt_mem = free_gtt_mem,
	.get_vmem_size = get_vmem_size,
	.get_gpu_clock_counter = get_gpu_clock_counter,
	.get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
	.alloc_pasid = amdgpu_vm_alloc_pasid,
	.free_pasid = amdgpu_vm_free_pasid,
	.program_sh_mem_settings = kgd_program_sh_mem_settings,
	.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
	.init_pipeline = kgd_init_pipeline,
	.init_interrupts = kgd_init_interrupts,
	.hqd_load = kgd_hqd_load,
	.hqd_sdma_load = kgd_hqd_sdma_load,
	.hqd_is_occupied = kgd_hqd_is_occupied,
	.hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
	.hqd_destroy = kgd_hqd_destroy,
	.hqd_sdma_destroy = kgd_hqd_sdma_destroy,
	.address_watch_disable = kgd_address_watch_disable,
	.address_watch_execute = kgd_address_watch_execute,
	.wave_control_execute = kgd_wave_control_execute,
	.address_watch_get_offset = kgd_address_watch_get_offset,
	.get_atc_vmid_pasid_mapping_pasid =
			get_atc_vmid_pasid_mapping_pasid,
	.get_atc_vmid_pasid_mapping_valid =
			get_atc_vmid_pasid_mapping_valid,
	.write_vmid_invalidate_request = write_vmid_invalidate_request,
	.get_fw_version = get_fw_version,
	.set_scratch_backing_va = set_scratch_backing_va,
	.get_tile_config = get_tile_config,
};

struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void)
{
	return (struct kfd2kgd_calls *)&kfd2kgd;
}

static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
	return (struct amdgpu_device *)kgd;
}

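/* SRBM_GFX_CNTL selects which MEC/pipe/queue/VMID instance subsequent
 * banked CP register accesses refer to; adev->srbm_mutex serializes that
 * selection. Nothing that may fault (e.g. a user-space copy) may run
 * between lock_srbm() and unlock_srbm(); see kgd_hqd_load() below.
 */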
static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
			uint32_t queue, uint32_t vmid)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue);

	mutex_lock(&adev->srbm_mutex);
	WREG32(mmSRBM_GFX_CNTL, value);
}

static void unlock_srbm(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	WREG32(mmSRBM_GFX_CNTL, 0);
	mutex_unlock(&adev->srbm_mutex);
}

static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
				uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	lock_srbm(kgd, mec, pipe, queue_id, 0);
}

static void release_queue(struct kgd_dev *kgd)
{
	unlock_srbm(kgd);
}

static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
					uint32_t sh_mem_config,
					uint32_t sh_mem_ape1_base,
					uint32_t sh_mem_ape1_limit,
					uint32_t sh_mem_bases)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	lock_srbm(kgd, 0, 0, 0, vmid);

	WREG32(mmSH_MEM_CONFIG, sh_mem_config);
	WREG32(mmSH_MEM_APE1_BASE, sh_mem_ape1_base);
	WREG32(mmSH_MEM_APE1_LIMIT, sh_mem_ape1_limit);
	WREG32(mmSH_MEM_BASES, sh_mem_bases);

	unlock_srbm(kgd);
}

static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
					unsigned int vmid)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	/*
	 * We have to assume that there is no outstanding mapping.
	 * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
	 * a mapping is in progress or because a mapping finished
	 * and the SW cleared it.
	 * So the protocol is to always wait & clear.
	 */
	uint32_t pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
			ATC_VMID0_PASID_MAPPING__VALID_MASK;

	WREG32(mmATC_VMID0_PASID_MAPPING + vmid, pasid_mapping);

	while (!(RREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS) & (1U << vmid)))
		cpu_relax();
	WREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS, 1U << vmid);

	/* Mapping vmid to pasid also for IH block */
	WREG32(mmIH_VMID_0_LUT + vmid, pasid_mapping);

	return 0;
}

static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
				uint32_t hpd_size, uint64_t hpd_gpu_addr)
{
	/* amdgpu owns the per-pipe state */
	return 0;
}

static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t mec;
	uint32_t pipe;

	mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	lock_srbm(kgd, mec, pipe, 0, 0);

	WREG32(mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK);

	unlock_srbm(kgd);

	return 0;
}

static inline uint32_t get_sdma_base_addr(struct cik_sdma_rlc_registers *m)
{
	return 0;
}

static inline struct vi_mqd *get_mqd(void *mqd)
{
	return (struct vi_mqd *)mqd;
}

static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
{
	return (struct cik_sdma_rlc_registers *)mqd;
}

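/* Program an HQD from its MQD image and activate it. Note that the
 * user-space write pointer is read outside the acquire_queue()/
 * release_queue() critical section because read_user_wptr() may fault;
 * see the locking note above lock_srbm() and the commit message.
 */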
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
			uint32_t queue_id, uint32_t __user *wptr,
			uint32_t wptr_shift, uint32_t wptr_mask,
			struct mm_struct *mm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct vi_mqd *m;
	uint32_t *mqd_hqd;
	uint32_t reg, wptr_val, data;
	bool valid_wptr = false;

	m = get_mqd(mqd);

	acquire_queue(kgd, pipe_id, queue_id);

	/* HIQ is set during driver init period with vmid set to 0 */
	if (m->cp_hqd_vmid == 0) {
		uint32_t value, mec, pipe;

		mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
		pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

		pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n",
			mec, pipe, queue_id);
		value = RREG32(mmRLC_CP_SCHEDULERS);
		value = REG_SET_FIELD(value, RLC_CP_SCHEDULERS, scheduler1,
			((mec << 5) | (pipe << 3) | queue_id | 0x80));
		WREG32(mmRLC_CP_SCHEDULERS, value);
	}

	/* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */
	mqd_hqd = &m->cp_mqd_base_addr_lo;

	for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_HQD_EOP_CONTROL; reg++)
		WREG32(reg, mqd_hqd[reg - mmCP_MQD_BASE_ADDR]);

	/* Tonga errata: EOP RPTR/WPTR should be left unmodified.
	 * This is safe since EOP RPTR==WPTR for any inactive HQD
	 * on ASICs that do not support context-save.
	 * EOP writes/reads can start anywhere in the ring.
	 */
	if (get_amdgpu_device(kgd)->asic_type != CHIP_TONGA) {
		WREG32(mmCP_HQD_EOP_RPTR, m->cp_hqd_eop_rptr);
		WREG32(mmCP_HQD_EOP_WPTR, m->cp_hqd_eop_wptr);
		WREG32(mmCP_HQD_EOP_WPTR_MEM, m->cp_hqd_eop_wptr_mem);
	}

	for (reg = mmCP_HQD_EOP_EVENTS; reg <= mmCP_HQD_ERROR; reg++)
		WREG32(reg, mqd_hqd[reg - mmCP_MQD_BASE_ADDR]);

	/* Copy userspace write pointer value to register.
	 * Activate doorbell logic to monitor subsequent changes.
	 */
	data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
			     CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
	WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, data);

	/* read_user_ptr may take the mm->mmap_sem.
	 * release srbm_mutex to avoid circular dependency between
	 * srbm_mutex->mm_sem->reservation_ww_class_mutex->srbm_mutex.
	 */
	release_queue(kgd);
	valid_wptr = read_user_wptr(mm, wptr, wptr_val);
	acquire_queue(kgd, pipe_id, queue_id);
	if (valid_wptr)
		WREG32(mmCP_HQD_PQ_WPTR, (wptr_val << wptr_shift) & wptr_mask);

	data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
	WREG32(mmCP_HQD_ACTIVE, data);

	release_queue(kgd);

	return 0;
}

static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd)
{
	return 0;
}

static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
				uint32_t pipe_id, uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t act;
	bool retval = false;
	uint32_t low, high;

	acquire_queue(kgd, pipe_id, queue_id);
	act = RREG32(mmCP_HQD_ACTIVE);
	if (act) {
		low = lower_32_bits(queue_address >> 8);
		high = upper_32_bits(queue_address >> 8);

		if (low == RREG32(mmCP_HQD_PQ_BASE) &&
				high == RREG32(mmCP_HQD_PQ_BASE_HI))
			retval = true;
	}
	release_queue(kgd);
	return retval;
}

static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct cik_sdma_rlc_registers *m;
	uint32_t sdma_base_addr;
	uint32_t sdma_rlc_rb_cntl;

	m = get_sdma_mqd(mqd);
	sdma_base_addr = get_sdma_base_addr(m);

	sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);

	if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
		return true;

	return false;
}

static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
			enum kfd_preempt_type reset_type,
			unsigned int utimeout, uint32_t pipe_id,
			uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t temp;
	enum hqd_dequeue_request_type type;
	unsigned long flags, end_jiffies;
	int retry;
	struct vi_mqd *m = get_mqd(mqd);

	acquire_queue(kgd, pipe_id, queue_id);

	if (m->cp_hqd_vmid == 0)
		WREG32_FIELD(RLC_CP_SCHEDULERS, scheduler1, 0);

	switch (reset_type) {
	case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN:
		type = DRAIN_PIPE;
		break;
	case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
		type = RESET_WAVES;
		break;
	default:
		type = DRAIN_PIPE;
		break;
	}

	/* Workaround: If IQ timer is active and the wait time is close to or
	 * equal to 0, dequeueing is not safe. Wait until either the wait time
	 * is larger or timer is cleared. Also, ensure that IQ_REQ_PEND is
	 * cleared before continuing. Also, ensure wait times are set to at
	 * least 0x3.
	 */
	local_irq_save(flags);
	preempt_disable();
	retry = 5000; /* wait for 500 usecs at maximum */
	while (true) {
		temp = RREG32(mmCP_HQD_IQ_TIMER);
		if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, PROCESSING_IQ)) {
			pr_debug("HW is processing IQ\n");
			goto loop;
		}
		if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, ACTIVE)) {
			if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, RETRY_TYPE)
					== 3) /* SEM-rearm is safe */
				break;
			/* Wait time 3 is safe for CP, but our MMIO read/write
			 * time is close to 1 microsecond, so check for 10 to
			 * leave more buffer room
			 */
			if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, WAIT_TIME)
					>= 10)
				break;
			pr_debug("IQ timer is active\n");
		} else
			break;
loop:
		if (!retry) {
			pr_err("CP HQD IQ timer status time out\n");
			break;
		}
		ndelay(100);
		--retry;
	}
	retry = 1000;
	while (true) {
		temp = RREG32(mmCP_HQD_DEQUEUE_REQUEST);
		if (!(temp & CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_MASK))
			break;
		pr_debug("Dequeue request is pending\n");

		if (!retry) {
			pr_err("CP HQD dequeue request time out\n");
			break;
		}
		ndelay(100);
		--retry;
	}
	local_irq_restore(flags);
	preempt_enable();

	WREG32(mmCP_HQD_DEQUEUE_REQUEST, type);

	end_jiffies = (utimeout * HZ / 1000) + jiffies;
	while (true) {
		temp = RREG32(mmCP_HQD_ACTIVE);
		if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
			break;
		if (time_after(jiffies, end_jiffies)) {
			pr_err("cp queue preemption time out.\n");
			release_queue(kgd);
			return -ETIME;
		}
		usleep_range(500, 1000);
	}

	release_queue(kgd);
	return 0;
}

static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
				unsigned int utimeout)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct cik_sdma_rlc_registers *m;
	uint32_t sdma_base_addr;
	uint32_t temp;
	int timeout = utimeout;

	m = get_sdma_mqd(mqd);
	sdma_base_addr = get_sdma_base_addr(m);

	temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
	temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp);

	while (true) {
		temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
		if (temp & SDMA0_STATUS_REG__RB_CMD_IDLE__SHIFT)
			break;
		if (timeout <= 0)
			return -ETIME;
		msleep(20);
		timeout -= 20;
	}

	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, 0);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, 0);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, 0);

	return 0;
}

static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
							uint8_t vmid)
{
	uint32_t reg;
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
	return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK;
}

static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
							uint8_t vmid)
{
	uint32_t reg;
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
	return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK;
}

static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid)
{
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
}

static int kgd_address_watch_disable(struct kgd_dev *kgd)
{
	return 0;
}

static int kgd_address_watch_execute(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					uint32_t cntl_val,
					uint32_t addr_hi,
					uint32_t addr_lo)
{
	return 0;
}

static int kgd_wave_control_execute(struct kgd_dev *kgd,
					uint32_t gfx_index_val,
					uint32_t sq_cmd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t data = 0;

	mutex_lock(&adev->grbm_idx_mutex);

	WREG32(mmGRBM_GFX_INDEX, gfx_index_val);
	WREG32(mmSQ_CMD, sq_cmd);

	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
		INSTANCE_BROADCAST_WRITES, 1);
	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
		SH_BROADCAST_WRITES, 1);
	data = REG_SET_FIELD(data, GRBM_GFX_INDEX,
		SE_BROADCAST_WRITES, 1);

	WREG32(mmGRBM_GFX_INDEX, data);
	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}

static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					unsigned int reg_offset)
{
	return 0;
}

static void set_scratch_backing_va(struct kgd_dev *kgd,
					uint64_t va, uint32_t vmid)
{
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	lock_srbm(kgd, 0, 0, 0, vmid);
	WREG32(mmSH_HIDDEN_PRIVATE_BASE_VMID, va);
	unlock_srbm(kgd);
}

static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
{
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
	const union amdgpu_firmware_header *hdr;

	BUG_ON(kgd == NULL);

	switch (type) {
	case KGD_ENGINE_PFP:
		hdr = (const union amdgpu_firmware_header *)
							adev->gfx.pfp_fw->data;
		break;

	case KGD_ENGINE_ME:
		hdr = (const union amdgpu_firmware_header *)
							adev->gfx.me_fw->data;
		break;

	case KGD_ENGINE_CE:
		hdr = (const union amdgpu_firmware_header *)
							adev->gfx.ce_fw->data;
		break;

	case KGD_ENGINE_MEC1:
		hdr = (const union amdgpu_firmware_header *)
							adev->gfx.mec_fw->data;
		break;

	case KGD_ENGINE_MEC2:
		hdr = (const union amdgpu_firmware_header *)
							adev->gfx.mec2_fw->data;
		break;

	case KGD_ENGINE_RLC:
		hdr = (const union amdgpu_firmware_header *)
							adev->gfx.rlc_fw->data;
		break;

	case KGD_ENGINE_SDMA1:
		hdr = (const union amdgpu_firmware_header *)
						adev->sdma.instance[0].fw->data;
		break;

	case KGD_ENGINE_SDMA2:
		hdr = (const union amdgpu_firmware_header *)
						adev->sdma.instance[1].fw->data;
		break;

	default:
		return 0;
	}

	if (hdr == NULL)
		return 0;

	/* Only 12 bit in use */
	return hdr->common.ucode_version;
}