Merge branch 'drm-next-4.15' of git://people.freedesktop.org/~agd5f/linux into drm-next
First feature pull for 4.15. Highlights:

- Per VM BO support
- Lots of powerplay cleanups
- Powerplay support for CI
- pasid mgr for kfd
- interrupt infrastructure for recoverable page faults
- SR-IOV fixes
- initial GPU reset for vega10
- prime mmap support
- ttm page table debugging improvements
- lots of bug fixes

* 'drm-next-4.15' of git://people.freedesktop.org/~agd5f/linux: (232 commits)
  drm/amdgpu: clarify license in amdgpu_trace_points.c
  drm/amdgpu: Add gem_prime_mmap support
  drm/amd/powerplay: delete dead code in smumgr
  drm/amd/powerplay: delete SMUM_FIELD_MASK
  drm/amd/powerplay: delete SMUM_WAIT_INDIRECT_FIELD
  drm/amd/powerplay: delete SMUM_READ_FIELD
  drm/amd/powerplay: delete SMUM_SET_FIELD
  drm/amd/powerplay: delete SMUM_READ_VFPF_INDIRECT_FIELD
  drm/amd/powerplay: delete SMUM_WRITE_VFPF_INDIRECT_FIELD
  drm/amd/powerplay: delete SMUM_WRITE_FIELD
  drm/amd/powerplay: delete SMU_WRITE_INDIRECT_FIELD
  drm/amd/powerplay: move macros to hwmgr.h
  drm/amd/powerplay: move PHM_WAIT_VFPF_INDIRECT_FIELD to hwmgr.h
  drm/amd/powerplay: move SMUM_WAIT_VFPF_INDIRECT_FIELD_UNEQUAL to hwmgr.h
  drm/amd/powerplay: move SMUM_WAIT_INDIRECT_FIELD_UNEQUAL to hwmgr.h
  drm/amd/powerplay: add new helper functions in hwmgr.h
  drm/amd/powerplay: use SMU_IND_INDEX/DATA_11 pair
  drm/amd/powerplay: refine powerplay code.
  drm/amd/powerplay: delete dead code in hwmgr.h
  drm/amd/powerplay: refine interface in struct pp_smumgr_func
  ...
commit 754270c7c5
@@ -184,6 +184,7 @@ config DRM_AMDGPU
 	select BACKLIGHT_CLASS_DEVICE
 	select BACKLIGHT_LCD_SUPPORT
 	select INTERVAL_TREE
+	select CHASH
 	help
 	  Choose this option if you have a recent AMD Radeon graphics card.
 
@@ -191,6 +192,8 @@ config DRM_AMDGPU
 
 source "drivers/gpu/drm/amd/amdgpu/Kconfig"
 
+source "drivers/gpu/drm/amd/lib/Kconfig"
+
 source "drivers/gpu/drm/nouveau/Kconfig"
 
 source "drivers/gpu/drm/i915/Kconfig"
@@ -50,6 +50,7 @@ obj-$(CONFIG_DRM_ARM) += arm/
 obj-$(CONFIG_DRM_TTM) += ttm/
 obj-$(CONFIG_DRM_TDFX) += tdfx/
 obj-$(CONFIG_DRM_R128) += r128/
+obj-y += amd/lib/
 obj-$(CONFIG_HSA_AMD) += amd/amdkfd/
 obj-$(CONFIG_DRM_RADEON)+= radeon/
 obj-$(CONFIG_DRM_AMDGPU)+= amd/amdgpu/
 
@@ -133,5 +133,3 @@ include $(FULL_AMD_PATH)/powerplay/Makefile
 amdgpu-y += $(AMD_POWERPLAY_FILES)
 
 obj-$(CONFIG_DRM_AMDGPU)+= amdgpu.o
-
-CFLAGS_amdgpu_trace_points.o := -I$(src)
@@ -65,6 +65,7 @@
 #include "amdgpu_uvd.h"
 #include "amdgpu_vce.h"
 #include "amdgpu_vcn.h"
+#include "amdgpu_mn.h"
 
 #include "gpu_scheduler.h"
 #include "amdgpu_virt.h"
@@ -91,7 +92,7 @@ extern int amdgpu_dpm;
 extern int amdgpu_fw_load_type;
 extern int amdgpu_aspm;
 extern int amdgpu_runtime_pm;
-extern unsigned amdgpu_ip_block_mask;
+extern uint amdgpu_ip_block_mask;
 extern int amdgpu_bapm;
 extern int amdgpu_deep_color;
 extern int amdgpu_vm_size;
@@ -104,14 +105,14 @@ extern int amdgpu_sched_jobs;
 extern int amdgpu_sched_hw_submission;
 extern int amdgpu_no_evict;
 extern int amdgpu_direct_gma_size;
-extern unsigned amdgpu_pcie_gen_cap;
-extern unsigned amdgpu_pcie_lane_cap;
-extern unsigned amdgpu_cg_mask;
-extern unsigned amdgpu_pg_mask;
-extern unsigned amdgpu_sdma_phase_quantum;
+extern uint amdgpu_pcie_gen_cap;
+extern uint amdgpu_pcie_lane_cap;
+extern uint amdgpu_cg_mask;
+extern uint amdgpu_pg_mask;
+extern uint amdgpu_sdma_phase_quantum;
 extern char *amdgpu_disable_cu;
 extern char *amdgpu_virtual_display;
-extern unsigned amdgpu_pp_feature_mask;
+extern uint amdgpu_pp_feature_mask;
 extern int amdgpu_vram_page_split;
 extern int amdgpu_ngg;
 extern int amdgpu_prim_buf_per_se;
@@ -178,6 +179,7 @@ struct amdgpu_cs_parser;
 struct amdgpu_job;
 struct amdgpu_irq_src;
 struct amdgpu_fpriv;
+struct amdgpu_bo_va_mapping;
 
 enum amdgpu_cp_irq {
 	AMDGPU_CP_IRQ_GFX_EOP = 0,
@@ -292,14 +294,25 @@ struct amdgpu_buffer_funcs {
 
 /* provided by hw blocks that can write ptes, e.g., sdma */
 struct amdgpu_vm_pte_funcs {
+	/* number of dw to reserve per operation */
+	unsigned	copy_pte_num_dw;
+
 	/* copy pte entries from GART */
 	void (*copy_pte)(struct amdgpu_ib *ib,
 			 uint64_t pe, uint64_t src,
 			 unsigned count);
+
 	/* write pte one entry at a time with addr mapping */
 	void (*write_pte)(struct amdgpu_ib *ib, uint64_t pe,
 			  uint64_t value, unsigned count,
 			  uint32_t incr);
+
+	/* maximum nums of PTEs/PDEs in a single operation */
+	uint32_t	set_max_nums_pte_pde;
+
+	/* number of dw to reserve per operation */
+	unsigned	set_pte_pde_num_dw;
+
 	/* for linear pte/pde updates without addr mapping */
 	void (*set_pte_pde)(struct amdgpu_ib *ib,
 			    uint64_t pe,
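Review note: the new *_num_dw fields let the VM code size a command buffer from the engine's own per-operation cost instead of a hard-coded estimate. A minimal sketch of the intended use, assuming the caller already knows the command count; the 64-dword overhead and the surrounding names are illustrative, not lines from this hunk:

	/* sketch: reserve IB space for "count" PTE copy commands plus ring overhead */
	unsigned ndw = 64;	/* assumed fixed overhead */
	ndw += count * adev->vm_manager.vm_pte_funcs->copy_pte_num_dw;
	r = amdgpu_job_alloc_with_ib(adev, ndw * 4, &job);	/* dwords are 4 bytes */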
@@ -332,6 +345,7 @@ struct amdgpu_gart_funcs {
 struct amdgpu_ih_funcs {
 	/* ring read/write ptr handling, called from interrupt context */
 	u32 (*get_wptr)(struct amdgpu_device *adev);
+	bool (*prescreen_iv)(struct amdgpu_device *adev);
 	void (*decode_iv)(struct amdgpu_device *adev,
 			  struct amdgpu_iv_entry *entry);
 	void (*set_rptr)(struct amdgpu_device *adev);
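The prescreen_iv hook gives the IH block a chance to drop high-frequency interrupt vectors (for instance repeated retry page faults) before they are decoded and dispatched, which is part of the recoverable-page-fault infrastructure named in the merge description. A sketch of how a processing loop might consume the hook; the loop below is illustrative, not a hunk from this merge:

	/* sketch: let the ASIC-specific hook filter each IV before decode/dispatch */
	while (adev->irq.ih.rptr != wptr) {
		if (!amdgpu_ih_prescreen_iv(adev)) {
			adev->irq.ih.rptr &= adev->irq.ih.ptr_mask;
			continue;	/* entry dropped by the hook */
		}
		amdgpu_ih_decode_iv(adev, &entry);
		adev->irq.ih.rptr &= adev->irq.ih.ptr_mask;
		amdgpu_irq_dispatch(adev, &entry);
	}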
@@ -399,6 +413,7 @@ void amdgpu_gem_prime_unpin(struct drm_gem_object *obj);
 struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *);
 void *amdgpu_gem_prime_vmap(struct drm_gem_object *obj);
 void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
+int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
 int amdgpu_gem_debugfs_init(struct amdgpu_device *adev);
 
 /* sub-allocation manager, it has to be protected by another lock.
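With gem_prime_mmap wired up, userspace can map an exported amdgpu BO through its dma-buf file descriptor. A hedged userspace sketch using libdrm; drm_fd, bo_handle and size are placeholders, not values from this diff:

	int prime_fd;
	void *ptr;

	if (drmPrimeHandleToFD(drm_fd, bo_handle, DRM_CLOEXEC, &prime_fd) == 0) {
		/* this mmap() on the dma-buf fd is what the new hook serves */
		ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
			   MAP_SHARED, prime_fd, 0);
	}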
@@ -455,9 +470,10 @@ struct amdgpu_sa_bo {
  */
 void amdgpu_gem_force_release(struct amdgpu_device *adev);
 int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
			     int alignment, u32 initial_domain,
			     u64 flags, bool kernel,
-			     struct drm_gem_object **obj);
+			     struct reservation_object *resv,
+			     struct drm_gem_object **obj);
 
 int amdgpu_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
@@ -731,8 +747,8 @@ struct amdgpu_ctx_mgr {
 struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id);
 int amdgpu_ctx_put(struct amdgpu_ctx *ctx);
 
-uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
-			      struct dma_fence *fence);
+int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
+			 struct dma_fence *fence, uint64_t *seq);
 struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
				       struct amdgpu_ring *ring, uint64_t seq);
 
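amdgpu_ctx_add_fence() previously returned the sequence number as a uint64_t, which left no room to report a failed wait on the fence slot being recycled. The new signature separates the two; the caller-side pattern, taken from the amdgpu_cs_submit() hunk further down:

	uint64_t seq;

	r = amdgpu_ctx_add_fence(p->ctx, ring, p->fence, &seq);
	if (r)
		return r;	/* e.g. interrupted while waiting on the old slot */
	cs->out.handle = seq;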
@@ -1014,7 +1030,6 @@ struct amdgpu_gfx {
 	/* reset mask */
 	uint32_t			grbm_soft_reset;
 	uint32_t			srbm_soft_reset;
-	bool				in_reset;
 	/* s3/s4 mask */
 	bool				in_suspend;
 	/* NGG */
@@ -1056,6 +1071,7 @@ struct amdgpu_cs_parser {
 	/* buffer objects */
 	struct ww_acquire_ctx		ticket;
 	struct amdgpu_bo_list		*bo_list;
+	struct amdgpu_mn		*mn;
 	struct amdgpu_bo_list_entry	vm_pd;
 	struct list_head		validated;
 	struct dma_fence		*fence;
@@ -1183,6 +1199,9 @@ struct amdgpu_firmware {
 
 	/* gpu info firmware data pointer */
 	const struct firmware *gpu_info_fw;
+
+	void *fw_buf_ptr;
+	uint64_t fw_buf_mc;
 };
 
 /*
@@ -1196,20 +1215,6 @@ void amdgpu_benchmark(struct amdgpu_device *adev, int test_number);
  */
 void amdgpu_test_moves(struct amdgpu_device *adev);
 
-/*
- * MMU Notifier
- */
-#if defined(CONFIG_MMU_NOTIFIER)
-int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr);
-void amdgpu_mn_unregister(struct amdgpu_bo *bo);
-#else
-static inline int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
-{
-	return -ENODEV;
-}
-static inline void amdgpu_mn_unregister(struct amdgpu_bo *bo) {}
-#endif
-
 /*
  * Debugfs
  */
@@ -1592,6 +1597,7 @@ struct amdgpu_device {
 
 	/* record last mm index being written through WREG32*/
 	unsigned long last_mm_index;
+	bool				in_sriov_reset;
 };
 
 static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
@@ -1759,6 +1765,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_ring_init_cond_exec(r) (r)->funcs->init_cond_exec((r))
 #define amdgpu_ring_patch_cond_exec(r,o) (r)->funcs->patch_cond_exec((r),(o))
 #define amdgpu_ih_get_wptr(adev) (adev)->irq.ih_funcs->get_wptr((adev))
+#define amdgpu_ih_prescreen_iv(adev) (adev)->irq.ih_funcs->prescreen_iv((adev))
 #define amdgpu_ih_decode_iv(adev, iv) (adev)->irq.ih_funcs->decode_iv((adev), (iv))
 #define amdgpu_ih_set_rptr(adev) (adev)->irq.ih_funcs->set_rptr((adev))
 #define amdgpu_display_vblank_get_counter(adev, crtc) (adev)->mode_info.funcs->vblank_get_counter((adev), (crtc))
@@ -1791,18 +1798,6 @@ void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
				  u64 num_vis_bytes);
 void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
 bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
-int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages);
-int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
-			      uint32_t flags);
-bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
-struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm);
-bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
-				  unsigned long end);
-bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
-				       int *last_invalidated);
-bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
-uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
-				 struct ttm_mem_reg *mem);
 void amdgpu_vram_location(struct amdgpu_device *adev, struct amdgpu_mc *mc, u64 base);
 void amdgpu_gart_location(struct amdgpu_device *adev, struct amdgpu_mc *mc);
 void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size);
@@ -1885,10 +1880,9 @@ static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; }
 static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { }
 #endif
 
-struct amdgpu_bo_va_mapping *
-amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
-		       uint64_t addr, struct amdgpu_bo **bo);
-int amdgpu_cs_sysvm_access_required(struct amdgpu_cs_parser *parser);
+int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
+			   uint64_t addr, struct amdgpu_bo **bo,
+			   struct amdgpu_bo_va_mapping **mapping);
 
 #include "amdgpu_object.h"
 #endif
@@ -169,6 +169,8 @@ static const struct kfd2kgd_calls kfd2kgd = {
 	.get_vmem_size = get_vmem_size,
 	.get_gpu_clock_counter = get_gpu_clock_counter,
 	.get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
+	.alloc_pasid = amdgpu_vm_alloc_pasid,
+	.free_pasid = amdgpu_vm_free_pasid,
 	.program_sh_mem_settings = kgd_program_sh_mem_settings,
 	.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
 	.init_pipeline = kgd_init_pipeline,
 
@@ -128,6 +128,8 @@ static const struct kfd2kgd_calls kfd2kgd = {
 	.get_vmem_size = get_vmem_size,
 	.get_gpu_clock_counter = get_gpu_clock_counter,
 	.get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
+	.alloc_pasid = amdgpu_vm_alloc_pasid,
+	.free_pasid = amdgpu_vm_free_pasid,
 	.program_sh_mem_settings = kgd_program_sh_mem_settings,
 	.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
 	.init_pipeline = kgd_init_pipeline,
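These two new entries expose amdgpu's PASID manager to amdkfd, matching the "pasid mgr for kfd" item in the merge description. A hedged sketch of a KFD-side caller; the 16-bit width and the error handling are assumptions for illustration, not code from this diff:

	/* sketch: allocate a PASID for a new KFD process, free it on teardown */
	unsigned int pasid = kfd2kgd->alloc_pasid(16);	/* assumed: bits of PASID space */
	if (!pasid)
		return -ENOMEM;
	/* ... bind the pasid to the process's VM ... */
	kfd2kgd->free_pasid(pasid);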
@@ -45,7 +45,6 @@ struct amdgpu_cgs_device {
 static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
				    enum cgs_gpu_mem_type type,
				    uint64_t size, uint64_t align,
-				    uint64_t min_offset, uint64_t max_offset,
				    cgs_handle_t *handle)
 {
 	CGS_FUNC_ADEV;
@@ -53,13 +52,6 @@ static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
 	int ret = 0;
 	uint32_t domain = 0;
 	struct amdgpu_bo *obj;
-	struct ttm_placement placement;
-	struct ttm_place place;
-
-	if (min_offset > max_offset) {
-		BUG_ON(1);
-		return -EINVAL;
-	}
 
 	/* fail if the alignment is not a power of 2 */
 	if (((align != 1) && (align & (align - 1)))
@@ -73,41 +65,19 @@ static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
 		flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
			AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
 		domain = AMDGPU_GEM_DOMAIN_VRAM;
-		if (max_offset > adev->mc.real_vram_size)
-			return -EINVAL;
-		place.fpfn = min_offset >> PAGE_SHIFT;
-		place.lpfn = max_offset >> PAGE_SHIFT;
-		place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
-			TTM_PL_FLAG_VRAM;
 		break;
 	case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB:
 	case CGS_GPU_MEM_TYPE__INVISIBLE_FB:
 		flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
			AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
 		domain = AMDGPU_GEM_DOMAIN_VRAM;
-		if (adev->mc.visible_vram_size < adev->mc.real_vram_size) {
-			place.fpfn =
-				max(min_offset, adev->mc.visible_vram_size) >> PAGE_SHIFT;
-			place.lpfn =
-				min(max_offset, adev->mc.real_vram_size) >> PAGE_SHIFT;
-			place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
-				TTM_PL_FLAG_VRAM;
-		}
-
 		break;
 	case CGS_GPU_MEM_TYPE__GART_CACHEABLE:
 		domain = AMDGPU_GEM_DOMAIN_GTT;
-		place.fpfn = min_offset >> PAGE_SHIFT;
-		place.lpfn = max_offset >> PAGE_SHIFT;
-		place.flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
 		break;
 	case CGS_GPU_MEM_TYPE__GART_WRITECOMBINE:
 		flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
 		domain = AMDGPU_GEM_DOMAIN_GTT;
-		place.fpfn = min_offset >> PAGE_SHIFT;
-		place.lpfn = max_offset >> PAGE_SHIFT;
-		place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT |
-			TTM_PL_FLAG_UNCACHED;
 		break;
 	default:
 		return -EINVAL;
@@ -116,15 +86,8 @@ static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
 
 	*handle = 0;
 
-	placement.placement = &place;
-	placement.num_placement = 1;
-	placement.busy_placement = &place;
-	placement.num_busy_placement = 1;
-
-	ret = amdgpu_bo_create_restricted(adev, size, PAGE_SIZE,
-					  true, domain, flags,
-					  NULL, &placement, NULL,
-					  0, &obj);
+	ret = amdgpu_bo_create(adev, size, align, true, domain, flags,
+			       NULL, NULL, 0, &obj);
 	if (ret) {
 		DRM_ERROR("(%d) bo create failed\n", ret);
 		return ret;
@@ -155,19 +118,14 @@ static int amdgpu_cgs_gmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t h
				   uint64_t *mcaddr)
 {
 	int r;
-	u64 min_offset, max_offset;
 	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;
 
 	WARN_ON_ONCE(obj->placement.num_placement > 1);
 
-	min_offset = obj->placements[0].fpfn << PAGE_SHIFT;
-	max_offset = obj->placements[0].lpfn << PAGE_SHIFT;
-
 	r = amdgpu_bo_reserve(obj, true);
 	if (unlikely(r != 0))
		return r;
-	r = amdgpu_bo_pin_restricted(obj, obj->preferred_domains,
-				     min_offset, max_offset, mcaddr);
+	r = amdgpu_bo_pin(obj, obj->preferred_domains, mcaddr);
 	amdgpu_bo_unreserve(obj);
 	return r;
 }
@@ -675,6 +633,85 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
 
 	if (!adev->pm.fw) {
 		switch (adev->asic_type) {
+		case CHIP_TAHITI:
+			strcpy(fw_name, "radeon/tahiti_smc.bin");
+			break;
+		case CHIP_PITCAIRN:
+			if ((adev->pdev->revision == 0x81) &&
+			    ((adev->pdev->device == 0x6810) ||
+			     (adev->pdev->device == 0x6811))) {
+				info->is_kicker = true;
+				strcpy(fw_name, "radeon/pitcairn_k_smc.bin");
+			} else {
+				strcpy(fw_name, "radeon/pitcairn_smc.bin");
+			}
+			break;
+		case CHIP_VERDE:
+			if (((adev->pdev->device == 0x6820) &&
+			     ((adev->pdev->revision == 0x81) ||
+			      (adev->pdev->revision == 0x83))) ||
+			    ((adev->pdev->device == 0x6821) &&
+			     ((adev->pdev->revision == 0x83) ||
+			      (adev->pdev->revision == 0x87))) ||
+			    ((adev->pdev->revision == 0x87) &&
+			     ((adev->pdev->device == 0x6823) ||
+			      (adev->pdev->device == 0x682b)))) {
+				info->is_kicker = true;
+				strcpy(fw_name, "radeon/verde_k_smc.bin");
+			} else {
+				strcpy(fw_name, "radeon/verde_smc.bin");
+			}
+			break;
+		case CHIP_OLAND:
+			if (((adev->pdev->revision == 0x81) &&
+			     ((adev->pdev->device == 0x6600) ||
+			      (adev->pdev->device == 0x6604) ||
+			      (adev->pdev->device == 0x6605) ||
+			      (adev->pdev->device == 0x6610))) ||
+			    ((adev->pdev->revision == 0x83) &&
+			     (adev->pdev->device == 0x6610))) {
+				info->is_kicker = true;
+				strcpy(fw_name, "radeon/oland_k_smc.bin");
+			} else {
+				strcpy(fw_name, "radeon/oland_smc.bin");
+			}
+			break;
+		case CHIP_HAINAN:
+			if (((adev->pdev->revision == 0x81) &&
+			     (adev->pdev->device == 0x6660)) ||
+			    ((adev->pdev->revision == 0x83) &&
+			     ((adev->pdev->device == 0x6660) ||
+			      (adev->pdev->device == 0x6663) ||
+			      (adev->pdev->device == 0x6665) ||
+			      (adev->pdev->device == 0x6667)))) {
+				info->is_kicker = true;
+				strcpy(fw_name, "radeon/hainan_k_smc.bin");
+			} else if ((adev->pdev->revision == 0xc3) &&
+				   (adev->pdev->device == 0x6665)) {
+				info->is_kicker = true;
+				strcpy(fw_name, "radeon/banks_k_2_smc.bin");
+			} else {
+				strcpy(fw_name, "radeon/hainan_smc.bin");
+			}
+			break;
+		case CHIP_BONAIRE:
+			if ((adev->pdev->revision == 0x80) ||
+			    (adev->pdev->revision == 0x81) ||
+			    (adev->pdev->device == 0x665f)) {
+				info->is_kicker = true;
+				strcpy(fw_name, "radeon/bonaire_k_smc.bin");
+			} else {
+				strcpy(fw_name, "radeon/bonaire_smc.bin");
+			}
+			break;
+		case CHIP_HAWAII:
+			if (adev->pdev->revision == 0x80) {
+				info->is_kicker = true;
+				strcpy(fw_name, "radeon/hawaii_k_smc.bin");
+			} else {
+				strcpy(fw_name, "radeon/hawaii_smc.bin");
+			}
+			break;
 		case CHIP_TOPAZ:
 			if (((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x81)) ||
 			    ((adev->pdev->device == 0x6900) && (adev->pdev->revision == 0x83)) ||
|
|||||||
case CGS_SYSTEM_INFO_PCIE_SUB_SYS_VENDOR_ID:
|
case CGS_SYSTEM_INFO_PCIE_SUB_SYS_VENDOR_ID:
|
||||||
sys_info->value = adev->pdev->subsystem_vendor;
|
sys_info->value = adev->pdev->subsystem_vendor;
|
||||||
break;
|
break;
|
||||||
|
case CGS_SYSTEM_INFO_PCIE_BUS_DEVFN:
|
||||||
|
sys_info->value = adev->pdev->devfn;
|
||||||
|
break;
|
||||||
default:
|
default:
|
||||||
return -ENODEV;
|
return -ENODEV;
|
||||||
}
|
}
|
||||||
|
@ -346,10 +346,8 @@ static void amdgpu_connector_free_edid(struct drm_connector *connector)
|
|||||||
{
|
{
|
||||||
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
|
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
|
||||||
|
|
||||||
if (amdgpu_connector->edid) {
|
kfree(amdgpu_connector->edid);
|
||||||
kfree(amdgpu_connector->edid);
|
amdgpu_connector->edid = NULL;
|
||||||
amdgpu_connector->edid = NULL;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static int amdgpu_connector_ddc_get_modes(struct drm_connector *connector)
|
static int amdgpu_connector_ddc_get_modes(struct drm_connector *connector)
|
||||||
|
@@ -473,11 +473,16 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
			return -EPERM;
 
		/* Check if we have user pages and nobody bound the BO already */
-		if (lobj->user_pages && bo->tbo.ttm->state != tt_bound) {
-			size_t size = sizeof(struct page *);
-
-			size *= bo->tbo.ttm->num_pages;
-			memcpy(bo->tbo.ttm->pages, lobj->user_pages, size);
+		if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm) &&
+		    lobj->user_pages) {
+			amdgpu_ttm_placement_from_domain(bo,
+							 AMDGPU_GEM_DOMAIN_CPU);
+			r = ttm_bo_validate(&bo->tbo, &bo->placement, true,
+					    false);
+			if (r)
+				return r;
+			amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
+						     lobj->user_pages);
			binding_userptr = true;
		}
 
@@ -502,7 +507,6 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_bo_list_entry *e;
	struct list_head duplicates;
-	bool need_mmap_lock = false;
	unsigned i, tries = 10;
	int r;
 
@@ -510,9 +514,9 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 
	p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle);
	if (p->bo_list) {
-		need_mmap_lock = p->bo_list->first_userptr !=
-			p->bo_list->num_entries;
		amdgpu_bo_list_get_list(p->bo_list, &p->validated);
+		if (p->bo_list->first_userptr != p->bo_list->num_entries)
+			p->mn = amdgpu_mn_get(p->adev);
	}
 
	INIT_LIST_HEAD(&duplicates);
@@ -521,9 +525,6 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
	if (p->uf_entry.robj)
		list_add(&p->uf_entry.tv.head, &p->validated);
 
-	if (need_mmap_lock)
-		down_read(&current->mm->mmap_sem);
-
	while (1) {
		struct list_head need_pages;
		unsigned i;
|
|||||||
INIT_LIST_HEAD(&need_pages);
|
INIT_LIST_HEAD(&need_pages);
|
||||||
for (i = p->bo_list->first_userptr;
|
for (i = p->bo_list->first_userptr;
|
||||||
i < p->bo_list->num_entries; ++i) {
|
i < p->bo_list->num_entries; ++i) {
|
||||||
|
struct amdgpu_bo *bo;
|
||||||
|
|
||||||
e = &p->bo_list->array[i];
|
e = &p->bo_list->array[i];
|
||||||
|
bo = e->robj;
|
||||||
|
|
||||||
if (amdgpu_ttm_tt_userptr_invalidated(e->robj->tbo.ttm,
|
if (amdgpu_ttm_tt_userptr_invalidated(bo->tbo.ttm,
|
||||||
&e->user_invalidated) && e->user_pages) {
|
&e->user_invalidated) && e->user_pages) {
|
||||||
|
|
||||||
/* We acquired a page array, but somebody
|
/* We acquired a page array, but somebody
|
||||||
* invalidated it. Free it and try again
|
* invalidated it. Free it and try again
|
||||||
*/
|
*/
|
||||||
release_pages(e->user_pages,
|
release_pages(e->user_pages,
|
||||||
e->robj->tbo.ttm->num_pages,
|
bo->tbo.ttm->num_pages,
|
||||||
false);
|
false);
|
||||||
kvfree(e->user_pages);
|
kvfree(e->user_pages);
|
||||||
e->user_pages = NULL;
|
e->user_pages = NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (e->robj->tbo.ttm->state != tt_bound &&
|
if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm) &&
|
||||||
!e->user_pages) {
|
!e->user_pages) {
|
||||||
list_del(&e->tv.head);
|
list_del(&e->tv.head);
|
||||||
list_add(&e->tv.head, &need_pages);
|
list_add(&e->tv.head, &need_pages);
|
||||||
@ -636,9 +639,6 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
|
|||||||
|
|
||||||
amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
|
amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
|
||||||
p->bytes_moved_vis);
|
p->bytes_moved_vis);
|
||||||
fpriv->vm.last_eviction_counter =
|
|
||||||
atomic64_read(&p->adev->num_evictions);
|
|
||||||
|
|
||||||
if (p->bo_list) {
|
if (p->bo_list) {
|
||||||
struct amdgpu_bo *gds = p->bo_list->gds_obj;
|
struct amdgpu_bo *gds = p->bo_list->gds_obj;
|
||||||
struct amdgpu_bo *gws = p->bo_list->gws_obj;
|
struct amdgpu_bo *gws = p->bo_list->gws_obj;
|
||||||
@ -679,9 +679,6 @@ error_validate:
|
|||||||
|
|
||||||
error_free_pages:
|
error_free_pages:
|
||||||
|
|
||||||
if (need_mmap_lock)
|
|
||||||
up_read(¤t->mm->mmap_sem);
|
|
||||||
|
|
||||||
if (p->bo_list) {
|
if (p->bo_list) {
|
||||||
for (i = p->bo_list->first_userptr;
|
for (i = p->bo_list->first_userptr;
|
||||||
i < p->bo_list->num_entries; ++i) {
|
i < p->bo_list->num_entries; ++i) {
|
||||||
@ -728,11 +725,7 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
|
|||||||
{
|
{
|
||||||
unsigned i;
|
unsigned i;
|
||||||
|
|
||||||
if (!error)
|
if (error && backoff)
|
||||||
ttm_eu_fence_buffer_objects(&parser->ticket,
|
|
||||||
&parser->validated,
|
|
||||||
parser->fence);
|
|
||||||
else if (backoff)
|
|
||||||
ttm_eu_backoff_reservation(&parser->ticket,
|
ttm_eu_backoff_reservation(&parser->ticket,
|
||||||
&parser->validated);
|
&parser->validated);
|
||||||
|
|
||||||
@ -768,10 +761,6 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
|
|||||||
if (r)
|
if (r)
|
||||||
return r;
|
return r;
|
||||||
|
|
||||||
r = amdgpu_sync_fence(adev, &p->job->sync, vm->last_dir_update);
|
|
||||||
if (r)
|
|
||||||
return r;
|
|
||||||
|
|
||||||
r = amdgpu_vm_clear_freed(adev, vm, NULL);
|
r = amdgpu_vm_clear_freed(adev, vm, NULL);
|
||||||
if (r)
|
if (r)
|
||||||
return r;
|
return r;
|
||||||
@ -825,7 +814,13 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
|
|||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
r = amdgpu_vm_clear_moved(adev, vm, &p->job->sync);
|
r = amdgpu_vm_handle_moved(adev, vm);
|
||||||
|
if (r)
|
||||||
|
return r;
|
||||||
|
|
||||||
|
r = amdgpu_sync_fence(adev, &p->job->sync, vm->last_update);
|
||||||
|
if (r)
|
||||||
|
return r;
|
||||||
|
|
||||||
if (amdgpu_vm_debug && p->bo_list) {
|
if (amdgpu_vm_debug && p->bo_list) {
|
||||||
/* Invalidate all BOs to test for userspace bugs */
|
/* Invalidate all BOs to test for userspace bugs */
|
||||||
@ -835,7 +830,7 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
|
|||||||
if (!bo)
|
if (!bo)
|
||||||
continue;
|
continue;
|
||||||
|
|
||||||
amdgpu_vm_bo_invalidate(adev, bo);
|
amdgpu_vm_bo_invalidate(adev, bo, false);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -860,7 +855,7 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
|
|||||||
}
|
}
|
||||||
|
|
||||||
if (p->job->vm) {
|
if (p->job->vm) {
|
||||||
p->job->vm_pd_addr = amdgpu_bo_gpu_offset(vm->root.bo);
|
p->job->vm_pd_addr = amdgpu_bo_gpu_offset(vm->root.base.bo);
|
||||||
|
|
||||||
r = amdgpu_bo_vm_update_pte(p);
|
r = amdgpu_bo_vm_update_pte(p);
|
||||||
if (r)
|
if (r)
|
||||||
@ -928,11 +923,11 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
|
|||||||
uint64_t offset;
|
uint64_t offset;
|
||||||
uint8_t *kptr;
|
uint8_t *kptr;
|
||||||
|
|
||||||
m = amdgpu_cs_find_mapping(parser, chunk_ib->va_start,
|
r = amdgpu_cs_find_mapping(parser, chunk_ib->va_start,
|
||||||
&aobj);
|
&aobj, &m);
|
||||||
if (!aobj) {
|
if (r) {
|
||||||
DRM_ERROR("IB va_start is invalid\n");
|
DRM_ERROR("IB va_start is invalid\n");
|
||||||
return -EINVAL;
|
return r;
|
||||||
}
|
}
|
||||||
|
|
||||||
if ((chunk_ib->va_start + chunk_ib->ib_bytes) >
|
if ((chunk_ib->va_start + chunk_ib->ib_bytes) >
|
||||||
@ -1133,14 +1128,31 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
|
|||||||
struct amdgpu_ring *ring = p->job->ring;
|
struct amdgpu_ring *ring = p->job->ring;
|
||||||
struct amd_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
|
struct amd_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
|
||||||
struct amdgpu_job *job;
|
struct amdgpu_job *job;
|
||||||
|
unsigned i;
|
||||||
|
uint64_t seq;
|
||||||
|
|
||||||
int r;
|
int r;
|
||||||
|
|
||||||
|
amdgpu_mn_lock(p->mn);
|
||||||
|
if (p->bo_list) {
|
||||||
|
for (i = p->bo_list->first_userptr;
|
||||||
|
i < p->bo_list->num_entries; ++i) {
|
||||||
|
struct amdgpu_bo *bo = p->bo_list->array[i].robj;
|
||||||
|
|
||||||
|
if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) {
|
||||||
|
amdgpu_mn_unlock(p->mn);
|
||||||
|
return -ERESTARTSYS;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
job = p->job;
|
job = p->job;
|
||||||
p->job = NULL;
|
p->job = NULL;
|
||||||
|
|
||||||
r = amd_sched_job_init(&job->base, &ring->sched, entity, p->filp);
|
r = amd_sched_job_init(&job->base, &ring->sched, entity, p->filp);
|
||||||
if (r) {
|
if (r) {
|
||||||
amdgpu_job_free(job);
|
amdgpu_job_free(job);
|
||||||
|
amdgpu_mn_unlock(p->mn);
|
||||||
return r;
|
return r;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1148,14 +1160,28 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
|
|||||||
job->fence_ctx = entity->fence_context;
|
job->fence_ctx = entity->fence_context;
|
||||||
p->fence = dma_fence_get(&job->base.s_fence->finished);
|
p->fence = dma_fence_get(&job->base.s_fence->finished);
|
||||||
|
|
||||||
|
r = amdgpu_ctx_add_fence(p->ctx, ring, p->fence, &seq);
|
||||||
|
if (r) {
|
||||||
|
dma_fence_put(p->fence);
|
||||||
|
dma_fence_put(&job->base.s_fence->finished);
|
||||||
|
amdgpu_job_free(job);
|
||||||
|
amdgpu_mn_unlock(p->mn);
|
||||||
|
return r;
|
||||||
|
}
|
||||||
|
|
||||||
amdgpu_cs_post_dependencies(p);
|
amdgpu_cs_post_dependencies(p);
|
||||||
|
|
||||||
cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, p->fence);
|
cs->out.handle = seq;
|
||||||
job->uf_sequence = cs->out.handle;
|
job->uf_sequence = seq;
|
||||||
|
|
||||||
amdgpu_job_free_resources(job);
|
amdgpu_job_free_resources(job);
|
||||||
|
|
||||||
trace_amdgpu_cs_ioctl(job);
|
trace_amdgpu_cs_ioctl(job);
|
||||||
amd_sched_entity_push_job(&job->base);
|
amd_sched_entity_push_job(&job->base);
|
||||||
|
|
||||||
|
ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
|
||||||
|
amdgpu_mn_unlock(p->mn);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1383,6 +1409,7 @@ static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev,
|
|||||||
array[i] = fence;
|
array[i] = fence;
|
||||||
} else { /* NULL, the fence has been already signaled */
|
} else { /* NULL, the fence has been already signaled */
|
||||||
r = 1;
|
r = 1;
|
||||||
|
first = i;
|
||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -1462,78 +1489,36 @@ err_free_fences:
|
|||||||
* virtual memory address. Returns allocation structure when found, NULL
|
* virtual memory address. Returns allocation structure when found, NULL
|
||||||
* otherwise.
|
* otherwise.
|
||||||
*/
|
*/
|
||||||
struct amdgpu_bo_va_mapping *
|
int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
|
||||||
amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
|
uint64_t addr, struct amdgpu_bo **bo,
|
||||||
uint64_t addr, struct amdgpu_bo **bo)
|
struct amdgpu_bo_va_mapping **map)
|
||||||
{
|
{
|
||||||
|
struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
|
||||||
|
struct amdgpu_vm *vm = &fpriv->vm;
|
||||||
struct amdgpu_bo_va_mapping *mapping;
|
struct amdgpu_bo_va_mapping *mapping;
|
||||||
unsigned i;
|
int r;
|
||||||
|
|
||||||
if (!parser->bo_list)
|
|
||||||
return NULL;
|
|
||||||
|
|
||||||
addr /= AMDGPU_GPU_PAGE_SIZE;
|
addr /= AMDGPU_GPU_PAGE_SIZE;
|
||||||
|
|
||||||
for (i = 0; i < parser->bo_list->num_entries; i++) {
|
mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
|
||||||
struct amdgpu_bo_list_entry *lobj;
|
if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
|
||||||
|
return -EINVAL;
|
||||||
|
|
||||||
lobj = &parser->bo_list->array[i];
|
*bo = mapping->bo_va->base.bo;
|
||||||
if (!lobj->bo_va)
|
*map = mapping;
|
||||||
continue;
|
|
||||||
|
|
||||||
list_for_each_entry(mapping, &lobj->bo_va->valids, list) {
|
/* Double check that the BO is reserved by this CS */
|
||||||
if (mapping->start > addr ||
|
if (READ_ONCE((*bo)->tbo.resv->lock.ctx) != &parser->ticket)
|
||||||
addr > mapping->last)
|
return -EINVAL;
|
||||||
continue;
|
|
||||||
|
|
||||||
*bo = lobj->bo_va->base.bo;
|
r = amdgpu_ttm_bind(&(*bo)->tbo, &(*bo)->tbo.mem);
|
||||||
return mapping;
|
if (unlikely(r))
|
||||||
}
|
return r;
|
||||||
|
|
||||||
list_for_each_entry(mapping, &lobj->bo_va->invalids, list) {
|
if ((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
|
||||||
if (mapping->start > addr ||
|
|
||||||
addr > mapping->last)
|
|
||||||
continue;
|
|
||||||
|
|
||||||
*bo = lobj->bo_va->base.bo;
|
|
||||||
return mapping;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return NULL;
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
|
||||||
* amdgpu_cs_sysvm_access_required - make BOs accessible by the system VM
|
|
||||||
*
|
|
||||||
* @parser: command submission parser context
|
|
||||||
*
|
|
||||||
* Helper for UVD/VCE VM emulation, make sure BOs are accessible by the system VM.
|
|
||||||
*/
|
|
||||||
int amdgpu_cs_sysvm_access_required(struct amdgpu_cs_parser *parser)
|
|
||||||
{
|
|
||||||
unsigned i;
|
|
||||||
int r;
|
|
||||||
|
|
||||||
if (!parser->bo_list)
|
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
for (i = 0; i < parser->bo_list->num_entries; i++) {
|
(*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
|
||||||
struct amdgpu_bo *bo = parser->bo_list->array[i].robj;
|
amdgpu_ttm_placement_from_domain(*bo, (*bo)->allowed_domains);
|
||||||
|
return ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, false, false);
|
||||||
r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
|
|
||||||
if (unlikely(r))
|
|
||||||
return r;
|
|
||||||
|
|
||||||
if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)
|
|
||||||
continue;
|
|
||||||
|
|
||||||
bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
|
|
||||||
amdgpu_ttm_placement_from_domain(bo, bo->allowed_domains);
|
|
||||||
r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
|
|
||||||
if (unlikely(r))
|
|
||||||
return r;
|
|
||||||
}
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
}
|
||||||
|
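The rewrite replaces an O(n) walk over the submission's BO list with a lookup in the per-VM interval tree (amdgpu_vm_bo_lookup_mapping), so the cost no longer grows with the number of BOs, and the reservation double-check rejects a mapping whose BO is not actually part of the current CS. The new calling convention, as used by amdgpu_cs_ib_fill() in the hunk above:

	struct amdgpu_bo *aobj;
	struct amdgpu_bo_va_mapping *m;

	r = amdgpu_cs_find_mapping(parser, chunk_ib->va_start, &aobj, &m);
	if (r) {
		DRM_ERROR("IB va_start is invalid\n");
		return r;
	}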
@@ -246,8 +246,8 @@ int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
	return 0;
 }
 
-uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
-			      struct dma_fence *fence)
+int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
+			 struct dma_fence *fence, uint64_t* handler)
 {
	struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx];
	uint64_t seq = cring->sequence;
@@ -258,9 +258,9 @@ uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
	other = cring->fences[idx];
	if (other) {
		signed long r;
-		r = dma_fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
+		r = dma_fence_wait_timeout(other, true, MAX_SCHEDULE_TIMEOUT);
		if (r < 0)
-			DRM_ERROR("Error (%ld) waiting for fence!\n", r);
+			return r;
	}
 
	dma_fence_get(fence);
@@ -271,8 +271,10 @@ uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
	spin_unlock(&ctx->ring_lock);
 
	dma_fence_put(other);
+	if (handler)
+		*handler = seq;
 
-	return seq;
+	return 0;
 }
 
 struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
@@ -65,6 +65,7 @@ MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin");
 static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
 static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);
 static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev);
+static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev);
 
 static const char *amdgpu_asic_name[] = {
	"TAHITI",
@@ -402,6 +403,15 @@ void amdgpu_pci_config_reset(struct amdgpu_device *adev)
  */
 static int amdgpu_doorbell_init(struct amdgpu_device *adev)
 {
+	/* No doorbell on SI hardware generation */
+	if (adev->asic_type < CHIP_BONAIRE) {
+		adev->doorbell.base = 0;
+		adev->doorbell.size = 0;
+		adev->doorbell.num_doorbells = 0;
+		adev->doorbell.ptr = NULL;
+		return 0;
+	}
+
	/* doorbell bar mapping */
	adev->doorbell.base = pci_resource_start(adev->pdev, 2);
	adev->doorbell.size = pci_resource_len(adev->pdev, 2);
@@ -887,6 +897,20 @@ static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
	return r;
 }
 
+static ssize_t amdgpu_atombios_get_vbios_version(struct device *dev,
+						 struct device_attribute *attr,
+						 char *buf)
+{
+	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct amdgpu_device *adev = ddev->dev_private;
+	struct atom_context *ctx = adev->mode_info.atom_context;
+
+	return snprintf(buf, PAGE_SIZE, "%s\n", ctx->vbios_version);
+}
+
+static DEVICE_ATTR(vbios_version, 0444, amdgpu_atombios_get_vbios_version,
+		   NULL);
+
 /**
  * amdgpu_atombios_fini - free the driver info and callbacks for atombios
  *
@@ -906,6 +930,7 @@ static void amdgpu_atombios_fini(struct amdgpu_device *adev)
	adev->mode_info.atom_context = NULL;
	kfree(adev->mode_info.atom_card_info);
	adev->mode_info.atom_card_info = NULL;
+	device_remove_file(adev->dev, &dev_attr_vbios_version);
 }
 
 /**
@@ -922,6 +947,7 @@ static int amdgpu_atombios_init(struct amdgpu_device *adev)
 {
	struct card_info *atom_card_info =
	    kzalloc(sizeof(struct card_info), GFP_KERNEL);
+	int ret;
 
	if (!atom_card_info)
		return -ENOMEM;
@@ -958,6 +984,13 @@ static int amdgpu_atombios_init(struct amdgpu_device *adev)
		amdgpu_atombios_scratch_regs_init(adev);
		amdgpu_atombios_allocate_fb_scratch(adev);
	}
+
+	ret = device_create_file(adev->dev, &dev_attr_vbios_version);
+	if (ret) {
+		DRM_ERROR("Failed to create device file for VBIOS version\n");
+		return ret;
+	}
+
	return 0;
 }
|
|||||||
adev->ip_blocks[i].status.late_initialized = false;
|
adev->ip_blocks[i].status.late_initialized = false;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (amdgpu_sriov_vf(adev)) {
|
if (amdgpu_sriov_vf(adev))
|
||||||
amdgpu_bo_free_kernel(&adev->virt.csa_obj, &adev->virt.csa_vmid0_addr, NULL);
|
|
||||||
amdgpu_virt_release_full_gpu(adev, false);
|
amdgpu_virt_release_full_gpu(adev, false);
|
||||||
}
|
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
@ -2051,9 +2082,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
|
|||||||
DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
|
DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
|
||||||
DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
|
DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
|
||||||
|
|
||||||
if (adev->asic_type >= CHIP_BONAIRE)
|
/* doorbell bar mapping */
|
||||||
/* doorbell bar mapping */
|
amdgpu_doorbell_init(adev);
|
||||||
amdgpu_doorbell_init(adev);
|
|
||||||
|
|
||||||
/* io port mapping */
|
/* io port mapping */
|
||||||
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
|
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
|
||||||
@ -2201,6 +2231,10 @@ int amdgpu_device_init(struct amdgpu_device *adev,
|
|||||||
if (r)
|
if (r)
|
||||||
DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
|
DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
|
||||||
|
|
||||||
|
r = amdgpu_debugfs_vbios_dump_init(adev);
|
||||||
|
if (r)
|
||||||
|
DRM_ERROR("Creating vbios dump debugfs failed (%d).\n", r);
|
||||||
|
|
||||||
if ((amdgpu_testing & 1)) {
|
if ((amdgpu_testing & 1)) {
|
||||||
if (adev->accel_working)
|
if (adev->accel_working)
|
||||||
amdgpu_test_moves(adev);
|
amdgpu_test_moves(adev);
|
||||||
@ -2276,8 +2310,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
|
|||||||
adev->rio_mem = NULL;
|
adev->rio_mem = NULL;
|
||||||
iounmap(adev->rmmio);
|
iounmap(adev->rmmio);
|
||||||
adev->rmmio = NULL;
|
adev->rmmio = NULL;
|
||||||
if (adev->asic_type >= CHIP_BONAIRE)
|
amdgpu_doorbell_fini(adev);
|
||||||
amdgpu_doorbell_fini(adev);
|
|
||||||
amdgpu_debugfs_regs_cleanup(adev);
|
amdgpu_debugfs_regs_cleanup(adev);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2546,7 +2579,8 @@ static bool amdgpu_need_full_reset(struct amdgpu_device *adev)
|
|||||||
if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
|
if ((adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) ||
|
||||||
(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
|
(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) ||
|
||||||
(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
|
(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_ACP) ||
|
||||||
(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)) {
|
(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) ||
|
||||||
|
adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) {
|
||||||
if (adev->ip_blocks[i].status.hang) {
|
if (adev->ip_blocks[i].status.hang) {
|
||||||
DRM_INFO("Some block need full reset!\n");
|
DRM_INFO("Some block need full reset!\n");
|
||||||
return true;
|
return true;
|
||||||
@ -2654,7 +2688,7 @@ int amdgpu_sriov_gpu_reset(struct amdgpu_device *adev, struct amdgpu_job *job)
|
|||||||
|
|
||||||
mutex_lock(&adev->virt.lock_reset);
|
mutex_lock(&adev->virt.lock_reset);
|
||||||
atomic_inc(&adev->gpu_reset_counter);
|
atomic_inc(&adev->gpu_reset_counter);
|
||||||
adev->gfx.in_reset = true;
|
adev->in_sriov_reset = true;
|
||||||
|
|
||||||
/* block TTM */
|
/* block TTM */
|
||||||
resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
|
resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
|
||||||
@ -2765,7 +2799,7 @@ give_up_reset:
|
|||||||
dev_info(adev->dev, "GPU reset successed!\n");
|
dev_info(adev->dev, "GPU reset successed!\n");
|
||||||
}
|
}
|
||||||
|
|
||||||
adev->gfx.in_reset = false;
|
adev->in_sriov_reset = false;
|
||||||
mutex_unlock(&adev->virt.lock_reset);
|
mutex_unlock(&adev->virt.lock_reset);
|
||||||
return r;
|
return r;
|
||||||
}
|
}
|
||||||
@ -3463,10 +3497,7 @@ static ssize_t amdgpu_debugfs_sensor_read(struct file *f, char __user *buf,
|
|||||||
|
|
||||||
valuesize = sizeof(values);
|
valuesize = sizeof(values);
|
||||||
if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
|
if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor)
|
||||||
r = adev->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, idx, &values[0], &valuesize);
|
r = amdgpu_dpm_read_sensor(adev, idx, &values[0], &valuesize);
|
||||||
else if (adev->pm.funcs && adev->pm.funcs->read_sensor)
|
|
||||||
r = adev->pm.funcs->read_sensor(adev, idx, &values[0],
|
|
||||||
&valuesize);
|
|
||||||
else
|
else
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
|
||||||
@ -3754,6 +3785,28 @@ int amdgpu_debugfs_init(struct drm_minor *minor)
|
|||||||
{
|
{
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static int amdgpu_debugfs_get_vbios_dump(struct seq_file *m, void *data)
|
||||||
|
{
|
||||||
|
struct drm_info_node *node = (struct drm_info_node *) m->private;
|
||||||
|
struct drm_device *dev = node->minor->dev;
|
||||||
|
struct amdgpu_device *adev = dev->dev_private;
|
||||||
|
|
||||||
|
seq_write(m, adev->bios, adev->bios_size);
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
static const struct drm_info_list amdgpu_vbios_dump_list[] = {
|
||||||
|
{"amdgpu_vbios",
|
||||||
|
amdgpu_debugfs_get_vbios_dump,
|
||||||
|
0, NULL},
|
||||||
|
};
|
||||||
|
|
||||||
|
static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev)
|
||||||
|
{
|
||||||
|
return amdgpu_debugfs_add_files(adev,
|
||||||
|
amdgpu_vbios_dump_list, 1);
|
||||||
|
}
|
||||||
#else
|
#else
|
||||||
static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev)
|
static int amdgpu_debugfs_test_ib_ring_init(struct amdgpu_device *adev)
|
||||||
{
|
{
|
||||||
@ -3763,5 +3816,9 @@ static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
|
|||||||
{
|
{
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
static int amdgpu_debugfs_vbios_dump_init(struct amdgpu_device *adev)
|
||||||
|
{
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { }
|
static void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { }
|
||||||
#endif
|
#endif
|
||||||
|
@@ -960,8 +960,10 @@ u8 amdgpu_encode_pci_lane_width(u32 lanes)
 }
 
 struct amd_vce_state*
-amdgpu_get_vce_clock_state(struct amdgpu_device *adev, unsigned idx)
+amdgpu_get_vce_clock_state(void *handle, u32 idx)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
	if (idx < adev->pm.dpm.num_of_vce_states)
		return &adev->pm.dpm.vce_states[idx];
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
@@ -241,177 +241,119 @@ enum amdgpu_pcie_gen {
 	AMDGPU_PCIE_GEN_INVALID = 0xffff
 };
 
-struct amdgpu_dpm_funcs {
-	int (*get_temperature)(struct amdgpu_device *adev);
-	int (*pre_set_power_state)(struct amdgpu_device *adev);
-	int (*set_power_state)(struct amdgpu_device *adev);
-	void (*post_set_power_state)(struct amdgpu_device *adev);
-	void (*display_configuration_changed)(struct amdgpu_device *adev);
-	u32 (*get_sclk)(struct amdgpu_device *adev, bool low);
-	u32 (*get_mclk)(struct amdgpu_device *adev, bool low);
-	void (*print_power_state)(struct amdgpu_device *adev, struct amdgpu_ps *ps);
-	void (*debugfs_print_current_performance_level)(struct amdgpu_device *adev, struct seq_file *m);
-	int (*force_performance_level)(struct amdgpu_device *adev, enum amd_dpm_forced_level level);
-	bool (*vblank_too_short)(struct amdgpu_device *adev);
-	void (*powergate_uvd)(struct amdgpu_device *adev, bool gate);
-	void (*powergate_vce)(struct amdgpu_device *adev, bool gate);
-	void (*enable_bapm)(struct amdgpu_device *adev, bool enable);
-	void (*set_fan_control_mode)(struct amdgpu_device *adev, u32 mode);
-	u32 (*get_fan_control_mode)(struct amdgpu_device *adev);
-	int (*set_fan_speed_percent)(struct amdgpu_device *adev, u32 speed);
-	int (*get_fan_speed_percent)(struct amdgpu_device *adev, u32 *speed);
-	int (*force_clock_level)(struct amdgpu_device *adev, enum pp_clock_type type, uint32_t mask);
-	int (*print_clock_levels)(struct amdgpu_device *adev, enum pp_clock_type type, char *buf);
-	int (*get_sclk_od)(struct amdgpu_device *adev);
-	int (*set_sclk_od)(struct amdgpu_device *adev, uint32_t value);
-	int (*get_mclk_od)(struct amdgpu_device *adev);
-	int (*set_mclk_od)(struct amdgpu_device *adev, uint32_t value);
-	int (*check_state_equal)(struct amdgpu_device *adev,
-				struct amdgpu_ps *cps,
-				struct amdgpu_ps *rps,
-				bool *equal);
-	int (*read_sensor)(struct amdgpu_device *adev, int idx, void *value,
-			   int *size);
-
-	struct amd_vce_state* (*get_vce_clock_state)(struct amdgpu_device *adev, unsigned idx);
-	int (*reset_power_profile_state)(struct amdgpu_device *adev,
-			struct amd_pp_profile *request);
-	int (*get_power_profile_state)(struct amdgpu_device *adev,
-			struct amd_pp_profile *query);
-	int (*set_power_profile_state)(struct amdgpu_device *adev,
-			struct amd_pp_profile *request);
-	int (*switch_power_profile)(struct amdgpu_device *adev,
-			enum amd_pp_profile_type type);
-};
-
-#define amdgpu_dpm_pre_set_power_state(adev) (adev)->pm.funcs->pre_set_power_state((adev))
-#define amdgpu_dpm_set_power_state(adev) (adev)->pm.funcs->set_power_state((adev))
-#define amdgpu_dpm_post_set_power_state(adev) (adev)->pm.funcs->post_set_power_state((adev))
-#define amdgpu_dpm_display_configuration_changed(adev) (adev)->pm.funcs->display_configuration_changed((adev))
-#define amdgpu_dpm_print_power_state(adev, ps) (adev)->pm.funcs->print_power_state((adev), (ps))
-#define amdgpu_dpm_vblank_too_short(adev) (adev)->pm.funcs->vblank_too_short((adev))
-#define amdgpu_dpm_enable_bapm(adev, e) (adev)->pm.funcs->enable_bapm((adev), (e))
+#define amdgpu_dpm_pre_set_power_state(adev) \
+		((adev)->powerplay.pp_funcs->pre_set_power_state((adev)->powerplay.pp_handle))
+
+#define amdgpu_dpm_set_power_state(adev) \
+		((adev)->powerplay.pp_funcs->set_power_state((adev)->powerplay.pp_handle))
+
+#define amdgpu_dpm_post_set_power_state(adev) \
+		((adev)->powerplay.pp_funcs->post_set_power_state((adev)->powerplay.pp_handle))
+
+#define amdgpu_dpm_display_configuration_changed(adev) \
+		((adev)->powerplay.pp_funcs->display_configuration_changed((adev)->powerplay.pp_handle))
+
+#define amdgpu_dpm_print_power_state(adev, ps) \
+		((adev)->powerplay.pp_funcs->print_power_state((adev)->powerplay.pp_handle, (ps)))
+
+#define amdgpu_dpm_vblank_too_short(adev) \
+		((adev)->powerplay.pp_funcs->vblank_too_short((adev)->powerplay.pp_handle))
+
+#define amdgpu_dpm_enable_bapm(adev, e) \
+		((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e)))
 
 #define amdgpu_dpm_read_sensor(adev, idx, value, size) \
-	((adev)->pp_enabled ? \
-		(adev)->powerplay.pp_funcs->read_sensor(adev->powerplay.pp_handle, (idx), (value), (size)) : \
-		(adev)->pm.funcs->read_sensor((adev), (idx), (value), (size)))
+		((adev)->powerplay.pp_funcs->read_sensor((adev)->powerplay.pp_handle, (idx), (value), (size)))
 
 #define amdgpu_dpm_get_temperature(adev) \
-	((adev)->pp_enabled ? \
-		(adev)->powerplay.pp_funcs->get_temperature((adev)->powerplay.pp_handle) : \
-		(adev)->pm.funcs->get_temperature((adev)))
+		((adev)->powerplay.pp_funcs->get_temperature((adev)->powerplay.pp_handle))
 
 #define amdgpu_dpm_set_fan_control_mode(adev, m) \
-	((adev)->pp_enabled ? \
-		(adev)->powerplay.pp_funcs->set_fan_control_mode((adev)->powerplay.pp_handle, (m)) : \
-		(adev)->pm.funcs->set_fan_control_mode((adev), (m)))
+		((adev)->powerplay.pp_funcs->set_fan_control_mode((adev)->powerplay.pp_handle, (m)))
 
 #define amdgpu_dpm_get_fan_control_mode(adev) \
-	((adev)->pp_enabled ? \
-		(adev)->powerplay.pp_funcs->get_fan_control_mode((adev)->powerplay.pp_handle) : \
-		(adev)->pm.funcs->get_fan_control_mode((adev)))
+		((adev)->powerplay.pp_funcs->get_fan_control_mode((adev)->powerplay.pp_handle))
 
 #define amdgpu_dpm_set_fan_speed_percent(adev, s) \
-	((adev)->pp_enabled ? \
-		(adev)->powerplay.pp_funcs->set_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \
-		(adev)->pm.funcs->set_fan_speed_percent((adev), (s)))
+		((adev)->powerplay.pp_funcs->set_fan_speed_percent((adev)->powerplay.pp_handle, (s)))
 
 #define amdgpu_dpm_get_fan_speed_percent(adev, s) \
-	((adev)->pp_enabled ? \
-		(adev)->powerplay.pp_funcs->get_fan_speed_percent((adev)->powerplay.pp_handle, (s)) : \
-		(adev)->pm.funcs->get_fan_speed_percent((adev), (s)))
+		((adev)->powerplay.pp_funcs->get_fan_speed_percent((adev)->powerplay.pp_handle, (s)))
 
 #define amdgpu_dpm_get_fan_speed_rpm(adev, s) \
-	((adev)->pp_enabled ? \
-		(adev)->powerplay.pp_funcs->get_fan_speed_rpm((adev)->powerplay.pp_handle, (s)) : \
-		-EINVAL)
+		((adev)->powerplay.pp_funcs->get_fan_speed_rpm)((adev)->powerplay.pp_handle, (s))
 
 #define amdgpu_dpm_get_sclk(adev, l) \
-	((adev)->pp_enabled ? \
-		(adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (l)) : \
-		(adev)->pm.funcs->get_sclk((adev), (l)))
+		((adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (l)))
 
 #define amdgpu_dpm_get_mclk(adev, l) \
-	((adev)->pp_enabled ? \
-		(adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (l)) : \
-		(adev)->pm.funcs->get_mclk((adev), (l)))
+		((adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (l)))
 
 #define amdgpu_dpm_force_performance_level(adev, l) \
-	((adev)->pp_enabled ? \
-		(adev)->powerplay.pp_funcs->force_performance_level((adev)->powerplay.pp_handle, (l)) : \
-		(adev)->pm.funcs->force_performance_level((adev), (l)))
+		((adev)->powerplay.pp_funcs->force_performance_level((adev)->powerplay.pp_handle, (l)))
 
 #define amdgpu_dpm_powergate_uvd(adev, g) \
-	((adev)->pp_enabled ? \
-		(adev)->powerplay.pp_funcs->powergate_uvd((adev)->powerplay.pp_handle, (g)) : \
-		(adev)->pm.funcs->powergate_uvd((adev), (g)))
+		((adev)->powerplay.pp_funcs->powergate_uvd((adev)->powerplay.pp_handle, (g)))
 
 #define amdgpu_dpm_powergate_vce(adev, g) \
-	((adev)->pp_enabled ? \
-		(adev)->powerplay.pp_funcs->powergate_vce((adev)->powerplay.pp_handle, (g)) : \
-		(adev)->pm.funcs->powergate_vce((adev), (g)))
+		((adev)->powerplay.pp_funcs->powergate_vce((adev)->powerplay.pp_handle, (g)))
 
 #define amdgpu_dpm_get_current_power_state(adev) \
-	(adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle)
+		((adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle))
 
 #define amdgpu_dpm_get_pp_num_states(adev, data) \
-	(adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, data)
+		((adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, data))
 
 #define amdgpu_dpm_get_pp_table(adev, table) \
-	(adev)->powerplay.pp_funcs->get_pp_table((adev)->powerplay.pp_handle, table)
+		((adev)->powerplay.pp_funcs->get_pp_table((adev)->powerplay.pp_handle, table))
 
 #define amdgpu_dpm_set_pp_table(adev, buf, size) \
-	(adev)->powerplay.pp_funcs->set_pp_table((adev)->powerplay.pp_handle, buf, size)
+		((adev)->powerplay.pp_funcs->set_pp_table((adev)->powerplay.pp_handle, buf, size))
 
 #define amdgpu_dpm_print_clock_levels(adev, type, buf) \
-	(adev)->powerplay.pp_funcs->print_clock_levels((adev)->powerplay.pp_handle, type, buf)
+		((adev)->powerplay.pp_funcs->print_clock_levels((adev)->powerplay.pp_handle, type, buf))
 
 #define amdgpu_dpm_force_clock_level(adev, type, level) \
-	(adev)->powerplay.pp_funcs->force_clock_level((adev)->powerplay.pp_handle, type, level)
+		((adev)->powerplay.pp_funcs->force_clock_level((adev)->powerplay.pp_handle, type, level))
 
 #define amdgpu_dpm_get_sclk_od(adev) \
-	(adev)->powerplay.pp_funcs->get_sclk_od((adev)->powerplay.pp_handle)
+		((adev)->powerplay.pp_funcs->get_sclk_od((adev)->powerplay.pp_handle))
 
 #define amdgpu_dpm_set_sclk_od(adev, value) \
-	(adev)->powerplay.pp_funcs->set_sclk_od((adev)->powerplay.pp_handle, value)
+		((adev)->powerplay.pp_funcs->set_sclk_od((adev)->powerplay.pp_handle, value))
 
 #define amdgpu_dpm_get_mclk_od(adev) \
 		((adev)->powerplay.pp_funcs->get_mclk_od((adev)->powerplay.pp_handle))
 
 #define amdgpu_dpm_set_mclk_od(adev, value) \
 		((adev)->powerplay.pp_funcs->set_mclk_od((adev)->powerplay.pp_handle, value))
 
-#define amdgpu_dpm_dispatch_task(adev, event_id, input, output) \
-	(adev)->powerplay.pp_funcs->dispatch_tasks((adev)->powerplay.pp_handle, (event_id), (input), (output))
+#define amdgpu_dpm_dispatch_task(adev, task_id, input, output) \
+		((adev)->powerplay.pp_funcs->dispatch_tasks)((adev)->powerplay.pp_handle, (task_id), (input), (output))
 
-#define amgdpu_dpm_check_state_equal(adev, cps, rps, equal) (adev)->pm.funcs->check_state_equal((adev), (cps),(rps),(equal))
+#define amdgpu_dpm_check_state_equal(adev, cps, rps, equal) \
+		((adev)->powerplay.pp_funcs->check_state_equal((adev)->powerplay.pp_handle, (cps), (rps), (equal)))
 
 #define amdgpu_dpm_get_vce_clock_state(adev, i) \
-	((adev)->pp_enabled ? \
-		(adev)->powerplay.pp_funcs->get_vce_clock_state((adev)->powerplay.pp_handle, (i)) : \
-		(adev)->pm.funcs->get_vce_clock_state((adev), (i)))
+		((adev)->powerplay.pp_funcs->get_vce_clock_state((adev)->powerplay.pp_handle, (i)))
 
 #define amdgpu_dpm_get_performance_level(adev) \
-	((adev)->pp_enabled ? \
-		(adev)->powerplay.pp_funcs->get_performance_level((adev)->powerplay.pp_handle) : \
-		(adev)->pm.dpm.forced_level)
+		((adev)->powerplay.pp_funcs->get_performance_level((adev)->powerplay.pp_handle))
 
 #define amdgpu_dpm_reset_power_profile_state(adev, request) \
 		((adev)->powerplay.pp_funcs->reset_power_profile_state(\
			(adev)->powerplay.pp_handle, request))
 
 #define amdgpu_dpm_get_power_profile_state(adev, query) \
 		((adev)->powerplay.pp_funcs->get_power_profile_state(\
			(adev)->powerplay.pp_handle, query))
 
 #define amdgpu_dpm_set_power_profile_state(adev, request) \
 		((adev)->powerplay.pp_funcs->set_power_profile_state(\
			(adev)->powerplay.pp_handle, request))
 
 #define amdgpu_dpm_switch_power_profile(adev, type) \
 		((adev)->powerplay.pp_funcs->switch_power_profile(\
			(adev)->powerplay.pp_handle, type))
 
 struct amdgpu_dpm {
@@ -485,7 +427,6 @@ struct amdgpu_pm {
 	struct amdgpu_dpm       dpm;
 	const struct firmware	*fw;	/* SMC firmware */
 	uint32_t                fw_version;
-	const struct amdgpu_dpm_funcs *funcs;
 	uint32_t                pcie_gen_mask;
 	uint32_t                pcie_mlw_mask;
 	struct amd_pp_display_configuration pm_display_cfg;/* set by DAL */
@@ -551,6 +492,6 @@ u16 amdgpu_get_pcie_lane_support(struct amdgpu_device *adev,
 u8 amdgpu_encode_pci_lane_width(u32 lanes);
 
 struct amd_vce_state*
-amdgpu_get_vce_clock_state(struct amdgpu_device *adev, unsigned idx);
+amdgpu_get_vce_clock_state(void *handle, u32 idx);
 
 #endif
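
All of the dpm wrappers above now funnel through the single powerplay function table, so a caller guards one vtable pointer instead of the old pp_enabled ternary. A minimal sketch of the resulting call shape follows; the surrounding function name is illustrative, not from this commit, and the NULL checks mirror the pattern callers such as the pm sysfs code are expected to use:

static int example_read_temperature(struct amdgpu_device *adev, int *temp)
{
	/* one function table, one guard: no pp_enabled fallback path left */
	if (!adev->powerplay.pp_funcs ||
	    !adev->powerplay.pp_funcs->get_temperature)
		return -EINVAL;

	*temp = amdgpu_dpm_get_temperature(adev);
	return 0;
}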
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -69,9 +69,10 @@
  * - 3.17.0 - Add AMDGPU_NUM_VRAM_CPU_PAGE_FAULTS.
  * - 3.18.0 - Export gpu always on cu bitmap
  * - 3.19.0 - Add support for UVD MJPEG decode
+ * - 3.20.0 - Add support for local BOs
  */
 #define KMS_DRIVER_MAJOR	3
-#define KMS_DRIVER_MINOR	19
+#define KMS_DRIVER_MINOR	20
 #define KMS_DRIVER_PATCHLEVEL	0
 
 int amdgpu_vram_limit = 0;
@@ -91,7 +92,7 @@ int amdgpu_dpm = -1;
 int amdgpu_fw_load_type = -1;
 int amdgpu_aspm = -1;
 int amdgpu_runtime_pm = -1;
-unsigned amdgpu_ip_block_mask = 0xffffffff;
+uint amdgpu_ip_block_mask = 0xffffffff;
 int amdgpu_bapm = -1;
 int amdgpu_deep_color = 0;
 int amdgpu_vm_size = -1;
@@ -106,14 +107,14 @@ int amdgpu_sched_jobs = 32;
 int amdgpu_sched_hw_submission = 2;
 int amdgpu_no_evict = 0;
 int amdgpu_direct_gma_size = 0;
-unsigned amdgpu_pcie_gen_cap = 0;
-unsigned amdgpu_pcie_lane_cap = 0;
-unsigned amdgpu_cg_mask = 0xffffffff;
-unsigned amdgpu_pg_mask = 0xffffffff;
-unsigned amdgpu_sdma_phase_quantum = 32;
+uint amdgpu_pcie_gen_cap = 0;
+uint amdgpu_pcie_lane_cap = 0;
+uint amdgpu_cg_mask = 0xffffffff;
+uint amdgpu_pg_mask = 0xffffffff;
+uint amdgpu_sdma_phase_quantum = 32;
 char *amdgpu_disable_cu = NULL;
 char *amdgpu_virtual_display = NULL;
-unsigned amdgpu_pp_feature_mask = 0xffffffff;
+uint amdgpu_pp_feature_mask = 0xffffffff;
 int amdgpu_ngg = 0;
 int amdgpu_prim_buf_per_se = 0;
 int amdgpu_pos_buf_per_se = 0;
@@ -608,6 +609,8 @@ amdgpu_pci_remove(struct pci_dev *pdev)
 
 	drm_dev_unregister(dev);
 	drm_dev_unref(dev);
+	pci_disable_device(pdev);
+	pci_set_drvdata(pdev, NULL);
 }
 
 static void
@@ -852,6 +855,7 @@ static struct drm_driver kms_driver = {
 	.gem_prime_import_sg_table = amdgpu_gem_prime_import_sg_table,
 	.gem_prime_vmap = amdgpu_gem_prime_vmap,
 	.gem_prime_vunmap = amdgpu_gem_prime_vunmap,
+	.gem_prime_mmap = amdgpu_gem_prime_mmap,
 
 	.name = DRIVER_NAME,
 	.desc = DRIVER_DESC,
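
The unsigned to uint switch above keeps these variable declarations textually in step with the module_param_named(..., uint, ...) registrations that appear later in this file. A hedged, self-contained illustration of that pairing, with a made-up parameter name:

#include <linux/moduleparam.h>

uint example_feature_mask = 0xffffffff;	/* declared as 'uint' ... */
/* ... to match the 'uint' type keyword spelled in the registration: */
module_param_named(example_feature_mask, example_feature_mask, uint, 0444);
MODULE_PARM_DESC(example_feature_mask, "illustrative feature mask (0xffffffff = enable all)");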
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -149,7 +149,7 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
 			       AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
 			       AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
 			       AMDGPU_GEM_CREATE_VRAM_CLEARED,
-			       true, &gobj);
+			       true, NULL, &gobj);
 	if (ret) {
 		pr_err("failed to allocate framebuffer (%d)\n", aligned_size);
 		return -ENOMEM;
@@ -303,10 +303,10 @@ static int amdgpu_fbdev_destroy(struct drm_device *dev, struct amdgpu_fbdev *rfb
 	if (rfb->obj) {
 		amdgpufb_destroy_pinned_object(rfb->obj);
 		rfb->obj = NULL;
+		drm_framebuffer_unregister_private(&rfb->base);
+		drm_framebuffer_cleanup(&rfb->base);
 	}
 	drm_fb_helper_fini(&rfbdev->helper);
-	drm_framebuffer_unregister_private(&rfb->base);
-	drm_framebuffer_cleanup(&rfb->base);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -44,11 +44,12 @@ void amdgpu_gem_object_free(struct drm_gem_object *gobj)
 }
 
 int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
 			     int alignment, u32 initial_domain,
 			     u64 flags, bool kernel,
+			     struct reservation_object *resv,
 			     struct drm_gem_object **obj)
 {
-	struct amdgpu_bo *robj;
+	struct amdgpu_bo *bo;
 	int r;
 
 	*obj = NULL;
@@ -59,7 +60,7 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
 
 retry:
 	r = amdgpu_bo_create(adev, size, alignment, kernel, initial_domain,
-			     flags, NULL, NULL, 0, &robj);
+			     flags, NULL, resv, 0, &bo);
 	if (r) {
 		if (r != -ERESTARTSYS) {
 			if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
@@ -71,7 +72,7 @@ retry:
 		}
 		return r;
 	}
-	*obj = &robj->gem_base;
+	*obj = &bo->gem_base;
 
 	return 0;
 }
@@ -112,7 +113,17 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj,
 	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
 	struct amdgpu_vm *vm = &fpriv->vm;
 	struct amdgpu_bo_va *bo_va;
+	struct mm_struct *mm;
 	int r;
 
+	mm = amdgpu_ttm_tt_get_usermm(abo->tbo.ttm);
+	if (mm && mm != current->mm)
+		return -EPERM;
+
+	if (abo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID &&
+	    abo->tbo.resv != vm->root.base.bo->tbo.resv)
+		return -EPERM;
+
 	r = amdgpu_bo_reserve(abo, false);
 	if (r)
 		return r;
@@ -127,35 +138,6 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj,
 	return 0;
 }
 
-static int amdgpu_gem_vm_check(void *param, struct amdgpu_bo *bo)
-{
-	/* if anything is swapped out don't swap it in here,
-	   just abort and wait for the next CS */
-	if (!amdgpu_bo_gpu_accessible(bo))
-		return -ERESTARTSYS;
-
-	if (bo->shadow && !amdgpu_bo_gpu_accessible(bo->shadow))
-		return -ERESTARTSYS;
-
-	return 0;
-}
-
-static bool amdgpu_gem_vm_ready(struct amdgpu_device *adev,
-				struct amdgpu_vm *vm,
-				struct list_head *list)
-{
-	struct ttm_validate_buffer *entry;
-
-	list_for_each_entry(entry, list, head) {
-		struct amdgpu_bo *bo =
-			container_of(entry->bo, struct amdgpu_bo, tbo);
-		if (amdgpu_gem_vm_check(NULL, bo))
-			return false;
-	}
-
-	return !amdgpu_vm_validate_pt_bos(adev, vm, amdgpu_gem_vm_check, NULL);
-}
-
 void amdgpu_gem_object_close(struct drm_gem_object *obj,
 			     struct drm_file *file_priv)
 {
@@ -165,13 +147,14 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
 	struct amdgpu_vm *vm = &fpriv->vm;
 
 	struct amdgpu_bo_list_entry vm_pd;
-	struct list_head list;
+	struct list_head list, duplicates;
 	struct ttm_validate_buffer tv;
 	struct ww_acquire_ctx ticket;
 	struct amdgpu_bo_va *bo_va;
 	int r;
 
 	INIT_LIST_HEAD(&list);
+	INIT_LIST_HEAD(&duplicates);
 
 	tv.bo = &bo->tbo;
 	tv.shared = true;
@@ -179,7 +162,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
 
 	amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
 
-	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
+	r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
 	if (r) {
 		dev_err(adev->dev, "leaking bo va because "
 			"we fail to reserve bo (%d)\n", r);
@@ -189,7 +172,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
 	if (bo_va && --bo_va->ref_count == 0) {
 		amdgpu_vm_bo_rmv(adev, bo_va);
 
-		if (amdgpu_gem_vm_ready(adev, vm, &list)) {
+		if (amdgpu_vm_ready(vm)) {
 			struct dma_fence *fence = NULL;
 
 			r = amdgpu_vm_clear_freed(adev, vm, &fence);
@@ -214,18 +197,22 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
 			    struct drm_file *filp)
 {
 	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_fpriv *fpriv = filp->driver_priv;
+	struct amdgpu_vm *vm = &fpriv->vm;
 	union drm_amdgpu_gem_create *args = data;
+	uint64_t flags = args->in.domain_flags;
 	uint64_t size = args->in.bo_size;
+	struct reservation_object *resv = NULL;
 	struct drm_gem_object *gobj;
 	uint32_t handle;
-	bool kernel = false;
 	int r;
 
 	/* reject invalid gem flags */
-	if (args->in.domain_flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+	if (flags & ~(AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
 		      AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
 		      AMDGPU_GEM_CREATE_CPU_GTT_USWC |
-		      AMDGPU_GEM_CREATE_VRAM_CLEARED))
+		      AMDGPU_GEM_CREATE_VRAM_CLEARED |
+		      AMDGPU_GEM_CREATE_VM_ALWAYS_VALID))
 		return -EINVAL;
 
 	/* reject invalid gem domains */
@@ -240,7 +227,7 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
 	/* create a gem object to contain this object in */
 	if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
 	    AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
-		kernel = true;
+		flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
 		if (args->in.domains == AMDGPU_GEM_DOMAIN_GDS)
 			size = size << AMDGPU_GDS_SHIFT;
 		else if (args->in.domains == AMDGPU_GEM_DOMAIN_GWS)
@@ -252,10 +239,25 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
 	}
 	size = roundup(size, PAGE_SIZE);
 
+	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
+		r = amdgpu_bo_reserve(vm->root.base.bo, false);
+		if (r)
+			return r;
+
+		resv = vm->root.base.bo->tbo.resv;
+	}
+
 	r = amdgpu_gem_object_create(adev, size, args->in.alignment,
 				     (u32)(0xffffffff & args->in.domains),
-				     args->in.domain_flags,
-				     kernel, &gobj);
+				     flags, false, resv, &gobj);
+	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
+		if (!r) {
+			struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
+
+			abo->parent = amdgpu_bo_ref(vm->root.base.bo);
+		}
+		amdgpu_bo_unreserve(vm->root.base.bo);
+	}
 	if (r)
 		return r;
 
@@ -297,9 +299,8 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
 	}
 
 	/* create a gem object to contain this object in */
-	r = amdgpu_gem_object_create(adev, args->size, 0,
-				     AMDGPU_GEM_DOMAIN_CPU, 0,
-				     0, &gobj);
+	r = amdgpu_gem_object_create(adev, args->size, 0, AMDGPU_GEM_DOMAIN_CPU,
+				     0, 0, NULL, &gobj);
 	if (r)
 		return r;
 
@@ -317,8 +318,6 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
 	}
 
 	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
-		down_read(&current->mm->mmap_sem);
-
 		r = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm,
 						 bo->tbo.ttm->pages);
 		if (r)
@@ -333,8 +332,6 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
 		amdgpu_bo_unreserve(bo);
 		if (r)
 			goto free_pages;
-
-		up_read(&current->mm->mmap_sem);
 	}
 
 	r = drm_gem_handle_create(filp, gobj, &handle);
@@ -511,10 +508,10 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
 				    struct list_head *list,
 				    uint32_t operation)
 {
-	int r = -ERESTARTSYS;
+	int r;
 
-	if (!amdgpu_gem_vm_ready(adev, vm, list))
-		goto error;
+	if (!amdgpu_vm_ready(vm))
+		return;
 
 	r = amdgpu_vm_update_directories(adev, vm);
 	if (r)
@@ -551,7 +548,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	struct amdgpu_bo_list_entry vm_pd;
 	struct ttm_validate_buffer tv;
 	struct ww_acquire_ctx ticket;
-	struct list_head list;
+	struct list_head list, duplicates;
 	uint64_t va_flags;
 	int r = 0;
 
@@ -587,6 +584,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	}
 
 	INIT_LIST_HEAD(&list);
+	INIT_LIST_HEAD(&duplicates);
 	if ((args->operation != AMDGPU_VA_OP_CLEAR) &&
 	    !(args->flags & AMDGPU_VM_PAGE_PRT)) {
 		gobj = drm_gem_object_lookup(filp, args->handle);
@@ -603,7 +601,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 
 	amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd);
 
-	r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
+	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
 	if (r)
 		goto error_unref;
 
@@ -669,6 +667,7 @@ error_unref:
 int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *filp)
 {
+	struct amdgpu_device *adev = dev->dev_private;
 	struct drm_amdgpu_gem_op *args = data;
 	struct drm_gem_object *gobj;
 	struct amdgpu_bo *robj;
@@ -716,6 +715,9 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
 		if (robj->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
 			robj->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
 
+		if (robj->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
+			amdgpu_vm_bo_invalidate(adev, robj, true);
+
 		amdgpu_bo_unreserve(robj);
 		break;
 	default:
@@ -745,8 +747,7 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
 	r = amdgpu_gem_object_create(adev, args->size, 0,
 				     AMDGPU_GEM_DOMAIN_VRAM,
 				     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
-				     ttm_bo_type_device,
-				     &gobj);
+				     false, NULL, &gobj);
 	if (r)
 		return -ENOMEM;
 
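
From the submitter's side, a per-VM ("local") BO is requested by setting AMDGPU_GEM_CREATE_VM_ALWAYS_VALID at allocation time; the kernel then makes the BO share the VM root page-directory reservation, as implemented above. A hedged user-space sketch of the ioctl call, with error handling elided and the fd assumed to be an already-open render node:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/amdgpu_drm.h>

static uint32_t example_create_local_bo(int fd, uint64_t size)
{
	union drm_amdgpu_gem_create args;

	memset(&args, 0, sizeof(args));
	args.in.bo_size = size;
	args.in.alignment = 4096;
	args.in.domains = AMDGPU_GEM_DOMAIN_VRAM;
	/* the new flag: BO stays valid in this VM, no per-CS validation */
	args.in.domain_flags = AMDGPU_GEM_CREATE_VM_ALWAYS_VALID;

	if (ioctl(fd, DRM_IOCTL_AMDGPU_GEM_CREATE, &args))
		return 0;	/* illustrative: caller treats 0 as failure */
	return args.out.handle;
}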
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -260,8 +260,13 @@ int amdgpu_gfx_compute_mqd_sw_init(struct amdgpu_device *adev,
 	/* create MQD for KIQ */
 	ring = &adev->gfx.kiq.ring;
 	if (!ring->mqd_obj) {
+		/* Originally the KIQ MQD was put in the GTT domain, but for
+		 * SRIOV the VRAM domain is a must, otherwise the hypervisor
+		 * triggers a SAVE_VF failure after the driver is unloaded,
+		 * i.e. after the MQD is deallocated and gart_unbind is
+		 * called. To avoid diverging code paths, use the VRAM domain
+		 * for the KIQ MQD on both SRIOV and bare-metal.
+		 */
 		r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
-					    AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
+					    AMDGPU_GEM_DOMAIN_VRAM, &ring->mqd_obj,
 					    &ring->mqd_gpu_addr, &ring->mqd_ptr);
 		if (r) {
 			dev_warn(adev->dev, "failed to create ring mqd ob (%d)", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.c
@@ -169,6 +169,12 @@ restart_ih:
 	while (adev->irq.ih.rptr != wptr) {
 		u32 ring_index = adev->irq.ih.rptr >> 2;
 
+		/* Prescreening of high-frequency interrupts */
+		if (!amdgpu_ih_prescreen_iv(adev)) {
+			adev->irq.ih.rptr &= adev->irq.ih.ptr_mask;
+			continue;
+		}
+
 		/* Before dispatching irq to IP blocks, send it to amdkfd */
 		amdgpu_amdkfd_interrupt(adev,
 				(const void *) &adev->irq.ih.ring[ring_index]);
@@ -190,3 +196,79 @@ restart_ih:
 
 	return IRQ_HANDLED;
 }
+
+/**
+ * amdgpu_ih_add_fault - Add a page fault record
+ *
+ * @adev: amdgpu device pointer
+ * @key: 64-bit encoding of PASID and address
+ *
+ * This should be called when a retry page fault interrupt is
+ * received. If this is a new page fault, it will be added to a hash
+ * table. The return value indicates whether this is a new fault, or
+ * a fault that was already known and is already being handled.
+ *
+ * If there are too many pending page faults, this will fail. Retry
+ * interrupts should be ignored in this case until there is enough
+ * free space.
+ *
+ * Returns 0 if the fault was added, 1 if the fault was already known,
+ * -ENOSPC if there are too many pending faults.
+ */
+int amdgpu_ih_add_fault(struct amdgpu_device *adev, u64 key)
+{
+	unsigned long flags;
+	int r = -ENOSPC;
+
+	if (WARN_ON_ONCE(!adev->irq.ih.faults))
+		/* Should be allocated in <IP>_ih_sw_init on GPUs that
+		 * support retry faults and require retry filtering.
+		 */
+		return r;
+
+	spin_lock_irqsave(&adev->irq.ih.faults->lock, flags);
+
+	/* Only let the hash table fill up to 50% for best performance */
+	if (adev->irq.ih.faults->count >= (1 << (AMDGPU_PAGEFAULT_HASH_BITS-1)))
+		goto unlock_out;
+
+	r = chash_table_copy_in(&adev->irq.ih.faults->hash, key, NULL);
+	if (!r)
+		adev->irq.ih.faults->count++;
+
+	/* chash_table_copy_in should never fail unless we're losing count */
+	WARN_ON_ONCE(r < 0);
+
+unlock_out:
+	spin_unlock_irqrestore(&adev->irq.ih.faults->lock, flags);
+	return r;
+}
+
+/**
+ * amdgpu_ih_clear_fault - Remove a page fault record
+ *
+ * @adev: amdgpu device pointer
+ * @key: 64-bit encoding of PASID and address
+ *
+ * This should be called when a page fault has been handled. Any
+ * future interrupt with this key will be processed as a new
+ * page fault.
+ */
+void amdgpu_ih_clear_fault(struct amdgpu_device *adev, u64 key)
+{
+	unsigned long flags;
+	int r;
+
+	if (!adev->irq.ih.faults)
+		return;
+
+	spin_lock_irqsave(&adev->irq.ih.faults->lock, flags);
+
+	r = chash_table_remove(&adev->irq.ih.faults->hash, key, NULL);
+	if (!WARN_ON_ONCE(r < 0)) {
+		adev->irq.ih.faults->count--;
+		WARN_ON_ONCE(adev->irq.ih.faults->count < 0);
+	}
+
+	spin_unlock_irqrestore(&adev->irq.ih.faults->lock, flags);
+}
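
To illustrate how the add/clear pair above is meant to be consumed, here is a hedged sketch of an IP-specific prescreen path. The function name and the key packing are illustrative only; the real prescreen (amdgpu_ih_prescreen_iv, called from the ring loop above) and the real key encoding live in the IP-specific IH code:

static bool example_ih_prescreen_iv(struct amdgpu_device *adev,
				    u16 pasid, u64 addr)
{
	/* assumption: 16-bit PASID in the high bits, page number below */
	u64 key = ((u64)pasid << 48) | (addr >> PAGE_SHIFT);
	int r = amdgpu_ih_add_fault(adev, key);

	if (r == 1)		/* fault already pending: drop this IV */
		return false;
	if (r == -ENOSPC)	/* filter full: drop retry IVs for now */
		return false;
	return true;		/* new fault: pass it on for handling */
}

Once the fault has actually been serviced, the handler would call amdgpu_ih_clear_fault(adev, key) with the same key so a later retry is treated as a new fault.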
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ih.h
@@ -24,6 +24,8 @@
 #ifndef __AMDGPU_IH_H__
 #define __AMDGPU_IH_H__
 
+#include <linux/chash.h>
+
 struct amdgpu_device;
 /*
  * vega10+ IH clients
@@ -69,6 +71,13 @@ enum amdgpu_ih_clientid
 
 #define AMDGPU_IH_CLIENTID_LEGACY 0
 
+#define AMDGPU_PAGEFAULT_HASH_BITS 8
+struct amdgpu_retryfault_hashtable {
+	DECLARE_CHASH_TABLE(hash, AMDGPU_PAGEFAULT_HASH_BITS, 8, 0);
+	spinlock_t	lock;
+	int		count;
+};
+
 /*
  * R6xx+ IH ring
  */
@@ -87,6 +96,7 @@ struct amdgpu_ih_ring {
 	bool			use_doorbell;
 	bool			use_bus_addr;
 	dma_addr_t		rb_dma_addr; /* only used when use_bus_addr = true */
+	struct amdgpu_retryfault_hashtable *faults;
 };
 
 #define AMDGPU_IH_SRC_DATA_MAX_SIZE_DW 4
@@ -109,5 +119,7 @@ int amdgpu_ih_ring_init(struct amdgpu_device *adev, unsigned ring_size,
 			bool use_bus_addr);
 void amdgpu_ih_ring_fini(struct amdgpu_device *adev);
 int amdgpu_ih_process(struct amdgpu_device *adev);
+int amdgpu_ih_add_fault(struct amdgpu_device *adev, u64 key);
+void amdgpu_ih_clear_fault(struct amdgpu_device *adev, u64 key);
 
 #endif
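
With AMDGPU_PAGEFAULT_HASH_BITS set to 8 the table has 1 << 8 = 256 slots, and amdgpu_ih_add_fault() stops accepting new entries at half of that, i.e. 128 pending faults, to keep lookups cheap. The header deliberately leaves the key format to the caller; a purely hypothetical packing, not taken from this commit, might look like:

/* hypothetical helper: 16-bit PASID field is an assumption */
static inline u64 example_fault_key(u32 pasid, u64 addr)
{
	return ((addr >> PAGE_SHIFT) << 16) | (pasid & 0xffff);
}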
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -825,7 +825,7 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
 	}
 
 	r = amdgpu_vm_init(adev, &fpriv->vm,
-			   AMDGPU_VM_CONTEXT_GFX);
+			   AMDGPU_VM_CONTEXT_GFX, 0);
 	if (r) {
 		kfree(fpriv);
 		goto out_suspend;
@@ -841,8 +841,11 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
 
 	if (amdgpu_sriov_vf(adev)) {
 		r = amdgpu_map_static_csa(adev, &fpriv->vm, &fpriv->csa_va);
-		if (r)
+		if (r) {
+			amdgpu_vm_fini(adev, &fpriv->vm);
+			kfree(fpriv);
 			goto out_suspend;
+		}
 	}
 
 	mutex_init(&fpriv->bo_list_lock);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -50,8 +50,10 @@ struct amdgpu_mn {
 	struct hlist_node	node;
 
 	/* objects protected by lock */
-	struct mutex		lock;
+	struct rw_semaphore	lock;
 	struct rb_root_cached	objects;
+	struct mutex		read_lock;
+	atomic_t		recursion;
 };
 
 struct amdgpu_mn_node {
@@ -74,7 +76,7 @@ static void amdgpu_mn_destroy(struct work_struct *work)
 	struct amdgpu_bo *bo, *next_bo;
 
 	mutex_lock(&adev->mn_lock);
-	mutex_lock(&rmn->lock);
+	down_write(&rmn->lock);
 	hash_del(&rmn->node);
 	rbtree_postorder_for_each_entry_safe(node, next_node,
 					     &rmn->objects.rb_root, it.rb) {
@@ -84,7 +86,7 @@ static void amdgpu_mn_destroy(struct work_struct *work)
 		}
 		kfree(node);
 	}
-	mutex_unlock(&rmn->lock);
+	up_write(&rmn->lock);
 	mutex_unlock(&adev->mn_lock);
 	mmu_notifier_unregister_no_release(&rmn->mn, rmn->mm);
 	kfree(rmn);
@@ -106,6 +108,53 @@ static void amdgpu_mn_release(struct mmu_notifier *mn,
 	schedule_work(&rmn->work);
 }
 
+
+/**
+ * amdgpu_mn_lock - take the write side lock for this mn
+ */
+void amdgpu_mn_lock(struct amdgpu_mn *mn)
+{
+	if (mn)
+		down_write(&mn->lock);
+}
+
+/**
+ * amdgpu_mn_unlock - drop the write side lock for this mn
+ */
+void amdgpu_mn_unlock(struct amdgpu_mn *mn)
+{
+	if (mn)
+		up_write(&mn->lock);
+}
+
+/**
+ * amdgpu_mn_read_lock - take the rmn read lock
+ *
+ * @rmn: our notifier
+ *
+ * Take the rmn read side lock.
+ */
+static void amdgpu_mn_read_lock(struct amdgpu_mn *rmn)
+{
+	mutex_lock(&rmn->read_lock);
+	if (atomic_inc_return(&rmn->recursion) == 1)
+		down_read_non_owner(&rmn->lock);
+	mutex_unlock(&rmn->read_lock);
+}
+
+/**
+ * amdgpu_mn_read_unlock - drop the rmn read lock
+ *
+ * @rmn: our notifier
+ *
+ * Drop the rmn read side lock.
+ */
+static void amdgpu_mn_read_unlock(struct amdgpu_mn *rmn)
+{
+	if (atomic_dec_return(&rmn->recursion) == 0)
+		up_read_non_owner(&rmn->lock);
+}
+
 /**
  * amdgpu_mn_invalidate_node - unmap all BOs of a node
  *
@@ -126,23 +175,12 @@ static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
 		if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start, end))
 			continue;
 
-		r = amdgpu_bo_reserve(bo, true);
-		if (r) {
-			DRM_ERROR("(%ld) failed to reserve user bo\n", r);
-			continue;
-		}
-
 		r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
 			true, false, MAX_SCHEDULE_TIMEOUT);
 		if (r <= 0)
 			DRM_ERROR("(%ld) failed to wait for user bo\n", r);
 
-		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
-		r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
-		if (r)
-			DRM_ERROR("(%ld) failed to validate user bo\n", r);
-
-		amdgpu_bo_unreserve(bo);
+		amdgpu_ttm_tt_mark_user_pages(bo->tbo.ttm);
 	}
 }
 
@@ -168,7 +206,7 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
 	/* notification is exclusive, but interval is inclusive */
 	end -= 1;
 
-	mutex_lock(&rmn->lock);
+	amdgpu_mn_read_lock(rmn);
 
 	it = interval_tree_iter_first(&rmn->objects, start, end);
 	while (it) {
@@ -179,13 +217,32 @@ static void amdgpu_mn_invalidate_range_start(struct mmu_notifier *mn,
 
 		amdgpu_mn_invalidate_node(node, start, end);
 	}
+}
 
-	mutex_unlock(&rmn->lock);
+/**
+ * amdgpu_mn_invalidate_range_end - callback to notify about mm change
+ *
+ * @mn: our notifier
+ * @mm: the mm this callback is about
+ * @start: start of updated range
+ * @end: end of updated range
+ *
+ * Release the lock again to allow new command submissions.
+ */
+static void amdgpu_mn_invalidate_range_end(struct mmu_notifier *mn,
+					   struct mm_struct *mm,
+					   unsigned long start,
+					   unsigned long end)
+{
+	struct amdgpu_mn *rmn = container_of(mn, struct amdgpu_mn, mn);
+
+	amdgpu_mn_read_unlock(rmn);
 }
 
 static const struct mmu_notifier_ops amdgpu_mn_ops = {
 	.release = amdgpu_mn_release,
 	.invalidate_range_start = amdgpu_mn_invalidate_range_start,
+	.invalidate_range_end = amdgpu_mn_invalidate_range_end,
 };
 
 /**
@@ -195,7 +252,7 @@ static const struct mmu_notifier_ops amdgpu_mn_ops = {
  *
  * Creates a notifier context for current->mm.
  */
-static struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
+struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
 {
 	struct mm_struct *mm = current->mm;
 	struct amdgpu_mn *rmn;
@@ -220,8 +277,10 @@ static struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
 	rmn->adev = adev;
 	rmn->mm = mm;
 	rmn->mn.ops = &amdgpu_mn_ops;
-	mutex_init(&rmn->lock);
+	init_rwsem(&rmn->lock);
 	rmn->objects = RB_ROOT_CACHED;
+	mutex_init(&rmn->read_lock);
+	atomic_set(&rmn->recursion, 0);
 
 	r = __mmu_notifier_register(&rmn->mn, mm);
 	if (r)
@@ -267,7 +326,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
 
 	INIT_LIST_HEAD(&bos);
 
-	mutex_lock(&rmn->lock);
+	down_write(&rmn->lock);
 
 	while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) {
 		kfree(node);
@@ -281,7 +340,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
 	if (!node) {
 		node = kmalloc(sizeof(struct amdgpu_mn_node), GFP_KERNEL);
 		if (!node) {
-			mutex_unlock(&rmn->lock);
+			up_write(&rmn->lock);
 			return -ENOMEM;
 		}
 	}
@@ -296,7 +355,7 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
 
 	interval_tree_insert(&node->it, &rmn->objects);
 
-	mutex_unlock(&rmn->lock);
+	up_write(&rmn->lock);
 
 	return 0;
 }
@@ -322,7 +381,7 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo)
 		return;
 	}
 
-	mutex_lock(&rmn->lock);
+	down_write(&rmn->lock);
 
 	/* save the next list entry for later */
 	head = bo->mn_list.next;
@@ -337,6 +396,7 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo)
 		kfree(node);
 	}
 
-	mutex_unlock(&rmn->lock);
+	up_write(&rmn->lock);
 	mutex_unlock(&adev->mn_lock);
 }
 
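
The read-lock pattern above is worth spelling out: the first invalidate_range_start takes the semaphore's read side, nested notifier callbacks on the same path only bump the counter, and the matching invalidate_range_end drops the read side once the counter returns to zero. A minimal user-space analogue of that recursion-counted read lock, with all names illustrative:

#include <pthread.h>
#include <stdatomic.h>

struct example_mn {
	pthread_rwlock_t lock;		/* plays the role of rmn->lock */
	pthread_mutex_t read_lock;	/* serializes first-taker logic */
	atomic_int recursion;		/* nesting depth of readers */
};

static void example_read_lock(struct example_mn *mn)
{
	pthread_mutex_lock(&mn->read_lock);
	/* fetch_add returns the old value: 0 means we are the first */
	if (atomic_fetch_add(&mn->recursion, 1) == 0)
		pthread_rwlock_rdlock(&mn->lock);
	pthread_mutex_unlock(&mn->read_lock);
}

static void example_read_unlock(struct example_mn *mn)
{
	/* old value 1 means we are the last reader out */
	if (atomic_fetch_sub(&mn->recursion, 1) == 1)
		pthread_rwlock_unlock(&mn->lock);
}

The non_owner variants in the kernel code are needed because the lock can be released from a different context than the one that took it; the analogue above glosses over that detail.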
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/psm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h
@@ -1,5 +1,5 @@
 /*
- * Copyright 2015 Advanced Micro Devices, Inc.
+ * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -19,20 +19,34 @@
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
+ * Authors: Christian König
 */
-#include "eventmgr.h"
-#include "eventinit.h"
-#include "eventmanagement.h"
-#include "eventmanager.h"
-#include "power_state.h"
-#include "hardwaremanager.h"
-
-int psm_get_ui_state(struct pp_eventmgr *eventmgr, enum PP_StateUILabel ui_label, unsigned long *state_id);
-
-int psm_get_state_by_classification(struct pp_eventmgr *eventmgr, enum PP_StateClassificationFlag flag, unsigned long *state_id);
-
-int psm_set_states(struct pp_eventmgr *eventmgr, unsigned long *state_id);
-
-int psm_adjust_power_state_dynamic(struct pp_eventmgr *eventmgr, bool skip);
-
-int psm_adjust_power_state_static(struct pp_eventmgr *eventmgr, bool skip);
+#ifndef __AMDGPU_MN_H__
+#define __AMDGPU_MN_H__
+
+/*
+ * MMU Notifier
+ */
+struct amdgpu_mn;
+
+#if defined(CONFIG_MMU_NOTIFIER)
+void amdgpu_mn_lock(struct amdgpu_mn *mn);
+void amdgpu_mn_unlock(struct amdgpu_mn *mn);
+struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev);
+int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr);
+void amdgpu_mn_unregister(struct amdgpu_bo *bo);
+#else
+static inline void amdgpu_mn_lock(struct amdgpu_mn *mn) {}
+static inline void amdgpu_mn_unlock(struct amdgpu_mn *mn) {}
+static inline struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
+{
+	return NULL;
+}
+static inline int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
+{
+	return -ENODEV;
+}
+static inline void amdgpu_mn_unregister(struct amdgpu_bo *bo) {}
+#endif
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -64,11 +64,12 @@ bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
 	return false;
 }
 
-static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
-				      struct ttm_placement *placement,
-				      struct ttm_place *places,
-				      u32 domain, u64 flags)
+void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
 {
+	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
+	struct ttm_placement *placement = &abo->placement;
+	struct ttm_place *places = abo->placements;
+	u64 flags = abo->flags;
 	u32 c = 0;
 
 	if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
@@ -151,27 +152,6 @@ static void amdgpu_ttm_placement_init(struct amdgpu_device *adev,
 	placement->busy_placement = places;
 }
 
-void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
-{
-	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
-
-	amdgpu_ttm_placement_init(adev, &abo->placement, abo->placements,
-				  domain, abo->flags);
-}
-
-static void amdgpu_fill_placement_to_bo(struct amdgpu_bo *bo,
-					struct ttm_placement *placement)
-{
-	BUG_ON(placement->num_placement > (AMDGPU_GEM_DOMAIN_MAX + 1));
-
-	memcpy(bo->placements, placement->placement,
-	       placement->num_placement * sizeof(struct ttm_place));
-	bo->placement.num_placement = placement->num_placement;
-	bo->placement.num_busy_placement = placement->num_busy_placement;
-	bo->placement.placement = bo->placements;
-	bo->placement.busy_placement = bo->placements;
-}
-
 /**
  * amdgpu_bo_create_reserved - create reserved BO for kernel use
  *
@@ -303,14 +283,13 @@ void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr,
 		*cpu_addr = NULL;
 }
 
-int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
-				unsigned long size, int byte_align,
-				bool kernel, u32 domain, u64 flags,
-				struct sg_table *sg,
-				struct ttm_placement *placement,
-				struct reservation_object *resv,
-				uint64_t init_value,
-				struct amdgpu_bo **bo_ptr)
+static int amdgpu_bo_do_create(struct amdgpu_device *adev,
+			       unsigned long size, int byte_align,
+			       bool kernel, u32 domain, u64 flags,
+			       struct sg_table *sg,
+			       struct reservation_object *resv,
+			       uint64_t init_value,
+			       struct amdgpu_bo **bo_ptr)
 {
 	struct amdgpu_bo *bo;
 	enum ttm_bo_type type;
@@ -384,10 +363,11 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
 	bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
 #endif
 
-	amdgpu_fill_placement_to_bo(bo, placement);
-	/* Kernel allocation are uninterruptible */
+	bo->tbo.bdev = &adev->mman.bdev;
+	amdgpu_ttm_placement_from_domain(bo, domain);
 
 	initial_bytes_moved = atomic64_read(&adev->num_bytes_moved);
+	/* Kernel allocation are uninterruptible */
 	r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, type,
 				 &bo->placement, page_align, !kernel, NULL,
 				 acc_size, sg, resv, &amdgpu_ttm_bo_destroy);
@@ -442,27 +422,17 @@ static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
 				   unsigned long size, int byte_align,
 				   struct amdgpu_bo *bo)
 {
-	struct ttm_placement placement = {0};
-	struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
 	int r;
 
 	if (bo->shadow)
 		return 0;
 
-	memset(&placements, 0, sizeof(placements));
-	amdgpu_ttm_placement_init(adev, &placement, placements,
-				  AMDGPU_GEM_DOMAIN_GTT,
-				  AMDGPU_GEM_CREATE_CPU_GTT_USWC |
-				  AMDGPU_GEM_CREATE_SHADOW);
-
-	r = amdgpu_bo_create_restricted(adev, size, byte_align, true,
-					AMDGPU_GEM_DOMAIN_GTT,
-					AMDGPU_GEM_CREATE_CPU_GTT_USWC |
-					AMDGPU_GEM_CREATE_SHADOW,
-					NULL, &placement,
-					bo->tbo.resv,
-					0,
-					&bo->shadow);
+	r = amdgpu_bo_do_create(adev, size, byte_align, true,
+				AMDGPU_GEM_DOMAIN_GTT,
+				AMDGPU_GEM_CREATE_CPU_GTT_USWC |
+				AMDGPU_GEM_CREATE_SHADOW,
+				NULL, bo->tbo.resv, 0,
+				&bo->shadow);
 	if (!r) {
 		bo->shadow->parent = amdgpu_bo_ref(bo);
 		mutex_lock(&adev->shadow_list_lock);
@@ -484,18 +454,11 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
 		     uint64_t init_value,
 		     struct amdgpu_bo **bo_ptr)
 {
-	struct ttm_placement placement = {0};
-	struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
 	uint64_t parent_flags = flags & ~AMDGPU_GEM_CREATE_SHADOW;
 	int r;
 
-	memset(&placements, 0, sizeof(placements));
-	amdgpu_ttm_placement_init(adev, &placement, placements,
-				  domain, parent_flags);
-
-	r = amdgpu_bo_create_restricted(adev, size, byte_align, kernel, domain,
-					parent_flags, sg, &placement, resv,
-					init_value, bo_ptr);
+	r = amdgpu_bo_do_create(adev, size, byte_align, kernel, domain,
+				parent_flags, sg, resv, init_value, bo_ptr);
 	if (r)
 		return r;
 
@@ -672,7 +635,6 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 	int r, i;
-	unsigned fpfn, lpfn;
 
 	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
 		return -EPERM;
@@ -704,22 +666,16 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
 	}
 
 	bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
+	/* force to pin into visible video ram */
+	if (!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS))
+		bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
 	amdgpu_ttm_placement_from_domain(bo, domain);
 	for (i = 0; i < bo->placement.num_placement; i++) {
-		/* force to pin into visible video ram */
-		if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
-		    !(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS) &&
+		unsigned fpfn, lpfn;
+
+		fpfn = min_offset >> PAGE_SHIFT;
-		    (!max_offset || max_offset >
(!max_offset || max_offset >
|
lpfn = max_offset >> PAGE_SHIFT;
|
||||||
adev->mc.visible_vram_size)) {
|
|
||||||
if (WARN_ON_ONCE(min_offset >
|
|
||||||
adev->mc.visible_vram_size))
|
|
||||||
return -EINVAL;
|
|
||||||
fpfn = min_offset >> PAGE_SHIFT;
|
|
||||||
lpfn = adev->mc.visible_vram_size >> PAGE_SHIFT;
|
|
||||||
} else {
|
|
||||||
fpfn = min_offset >> PAGE_SHIFT;
|
|
||||||
lpfn = max_offset >> PAGE_SHIFT;
|
|
||||||
}
|
|
||||||
if (fpfn > bo->placements[i].fpfn)
|
if (fpfn > bo->placements[i].fpfn)
|
||||||
bo->placements[i].fpfn = fpfn;
|
bo->placements[i].fpfn = fpfn;
|
||||||
if (!bo->placements[i].lpfn ||
|
if (!bo->placements[i].lpfn ||
|
||||||
@ -929,7 +885,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
|
|||||||
return;
|
return;
|
||||||
|
|
||||||
abo = container_of(bo, struct amdgpu_bo, tbo);
|
abo = container_of(bo, struct amdgpu_bo, tbo);
|
||||||
amdgpu_vm_bo_invalidate(adev, abo);
|
amdgpu_vm_bo_invalidate(adev, abo, evict);
|
||||||
|
|
||||||
amdgpu_bo_kunmap(abo);
|
amdgpu_bo_kunmap(abo);
|
||||||
|
|
||||||
|
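With amdgpu_bo_create_restricted() folded into the static amdgpu_bo_do_create(), callers no longer build a ttm_placement by hand; the helper derives the placement from the domain via amdgpu_ttm_placement_from_domain(). A minimal sketch of an allocation through the remaining public entry point, with the parameter order taken from the hunks above (size, alignment and flags here are illustrative only):

    struct amdgpu_bo *bo = NULL;
    int r;

    /* kernel=false, GTT domain, no special flags, no sg/resv sharing */
    r = amdgpu_bo_create(adev, 4096, PAGE_SIZE, false,
                         AMDGPU_GEM_DOMAIN_GTT, 0,
                         NULL, NULL, 0, &bo);
    if (r)
            return r;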
@@ -35,6 +35,7 @@
 
 /* bo virtual addresses in a vm */
 struct amdgpu_bo_va_mapping {
+	struct amdgpu_bo_va		*bo_va;
 	struct list_head		list;
 	struct rb_node			rb;
 	uint64_t			start;
@@ -49,12 +50,17 @@ struct amdgpu_bo_va {
 	struct amdgpu_vm_bo_base	base;
 
 	/* protected by bo being reserved */
-	struct dma_fence	        *last_pt_update;
 	unsigned			ref_count;
 
+	/* all other members protected by the VM PD being reserved */
+	struct dma_fence	        *last_pt_update;
+
 	/* mappings for this bo_va */
 	struct list_head		invalids;
 	struct list_head		valids;
+
+	/* If the mappings are cleared or filled */
+	bool				cleared;
 };
 
 struct amdgpu_bo {
@@ -189,14 +195,6 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
 			struct reservation_object *resv,
 			uint64_t init_value,
 			struct amdgpu_bo **bo_ptr);
-int amdgpu_bo_create_restricted(struct amdgpu_device *adev,
-				unsigned long size, int byte_align,
-				bool kernel, u32 domain, u64 flags,
-				struct sg_table *sg,
-				struct ttm_placement *placement,
-				struct reservation_object *resv,
-				uint64_t init_value,
-				struct amdgpu_bo **bo_ptr);
 int amdgpu_bo_create_reserved(struct amdgpu_device *adev,
 			      unsigned long size, int align,
 			      u32 domain, struct amdgpu_bo **bo_ptr,
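The reshuffled amdgpu_bo_va comments split the members into two lock domains: ref_count stays under the BO's own reservation, while last_pt_update, the valids/invalids lists and the new cleared flag move under the VM page directory reservation. A hedged sketch of what honoring that rule looks like (the pd variable and the surrounding fence update are illustrative, not taken from this patch):

    /* pd is the VM page directory BO; holding its reservation protects
     * bo_va->last_pt_update, the mapping lists and bo_va->cleared */
    r = amdgpu_bo_reserve(pd, true);
    if (r)
            return r;
    dma_fence_put(bo_va->last_pt_update);
    bo_va->last_pt_update = dma_fence_get(fence);
    amdgpu_bo_unreserve(pd);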
@@ -74,7 +74,7 @@ void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
 			adev->pm.dpm.ac_power = true;
 		else
 			adev->pm.dpm.ac_power = false;
-		if (adev->pm.funcs->enable_bapm)
+		if (adev->powerplay.pp_funcs->enable_bapm)
 			amdgpu_dpm_enable_bapm(adev, adev->pm.dpm.ac_power);
 		mutex_unlock(&adev->pm.mutex);
 	}
@@ -88,9 +88,9 @@ static ssize_t amdgpu_get_dpm_state(struct device *dev,
 	struct amdgpu_device *adev = ddev->dev_private;
 	enum amd_pm_state_type pm;
 
-	if (adev->pp_enabled) {
+	if (adev->powerplay.pp_funcs->get_current_power_state)
 		pm = amdgpu_dpm_get_current_power_state(adev);
-	} else
+	else
 		pm = adev->pm.dpm.user_state;
 
 	return snprintf(buf, PAGE_SIZE, "%s\n",
@@ -119,7 +119,7 @@ static ssize_t amdgpu_set_dpm_state(struct device *dev,
 	}
 
 	if (adev->pp_enabled) {
-		amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_ENABLE_USER_STATE, &state, NULL);
+		amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_ENABLE_USER_STATE, &state, NULL);
 	} else {
 		mutex_lock(&adev->pm.mutex);
 		adev->pm.dpm.user_state = state;
@@ -140,13 +140,17 @@ static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev,
 {
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = ddev->dev_private;
-	enum amd_dpm_forced_level level;
+	enum amd_dpm_forced_level level = 0xff;
 
 	if ((adev->flags & AMD_IS_PX) &&
 	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
 		return snprintf(buf, PAGE_SIZE, "off\n");
 
-	level = amdgpu_dpm_get_performance_level(adev);
+	if (adev->powerplay.pp_funcs->get_performance_level)
+		level = amdgpu_dpm_get_performance_level(adev);
+	else
+		level = adev->pm.dpm.forced_level;
+
 	return snprintf(buf, PAGE_SIZE, "%s\n",
 			(level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" :
 			(level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" :
@@ -167,7 +171,7 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = ddev->dev_private;
 	enum amd_dpm_forced_level level;
-	enum amd_dpm_forced_level current_level;
+	enum amd_dpm_forced_level current_level = 0xff;
 	int ret = 0;
 
 	/* Can't force performance level when the card is off */
@@ -175,7 +179,8 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
 	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
 		return -EINVAL;
 
-	current_level = amdgpu_dpm_get_performance_level(adev);
+	if (adev->powerplay.pp_funcs->get_performance_level)
+		current_level = amdgpu_dpm_get_performance_level(adev);
 
 	if (strncmp("low", buf, strlen("low")) == 0) {
 		level = AMD_DPM_FORCED_LEVEL_LOW;
@@ -203,9 +208,7 @@ static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev,
 	if (current_level == level)
 		return count;
 
-	if (adev->pp_enabled)
-		amdgpu_dpm_force_performance_level(adev, level);
-	else {
+	if (adev->powerplay.pp_funcs->force_performance_level) {
 		mutex_lock(&adev->pm.mutex);
 		if (adev->pm.dpm.thermal_active) {
 			count = -EINVAL;
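These amdgpu_pm.c hunks all converge on one dispatch pattern: instead of branching on the global adev->pp_enabled flag and then again on a legacy adev->pm.funcs table, each handler tests the one adev->powerplay.pp_funcs callback it needs and falls back to cached dpm state when it is absent. A minimal sketch of the shape, assuming illustrative example_* names:

    /* sketch of the NULL-check guard used throughout these handlers */
    static enum amd_pm_state_type example_current_state(struct amdgpu_device *adev)
    {
            if (adev->powerplay.pp_funcs->get_current_power_state)
                    return amdgpu_dpm_get_current_power_state(adev);

            return adev->pm.dpm.user_state; /* legacy fallback */
    }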
@@ -233,7 +236,7 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev,
 	struct pp_states_info data;
 	int i, buf_len;
 
-	if (adev->pp_enabled)
+	if (adev->powerplay.pp_funcs->get_pp_num_states)
 		amdgpu_dpm_get_pp_num_states(adev, &data);
 
 	buf_len = snprintf(buf, PAGE_SIZE, "states: %d\n", data.nums);
@@ -257,8 +260,8 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
 	enum amd_pm_state_type pm = 0;
 	int i = 0;
 
-	if (adev->pp_enabled) {
+	if (adev->powerplay.pp_funcs->get_current_power_state
+		 && adev->powerplay.pp_funcs->get_pp_num_states) {
 		pm = amdgpu_dpm_get_current_power_state(adev);
 		amdgpu_dpm_get_pp_num_states(adev, &data);
 
@@ -280,25 +283,10 @@ static ssize_t amdgpu_get_pp_force_state(struct device *dev,
 {
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = ddev->dev_private;
-	struct pp_states_info data;
-	enum amd_pm_state_type pm = 0;
-	int i;
-
-	if (adev->pp_force_state_enabled && adev->pp_enabled) {
-		pm = amdgpu_dpm_get_current_power_state(adev);
-		amdgpu_dpm_get_pp_num_states(adev, &data);
-
-		for (i = 0; i < data.nums; i++) {
-			if (pm == data.states[i])
-				break;
-		}
-
-		if (i == data.nums)
-			i = -EINVAL;
-
-		return snprintf(buf, PAGE_SIZE, "%d\n", i);
-
-	} else
+
+	if (adev->pp_force_state_enabled)
+		return amdgpu_get_pp_cur_state(dev, attr, buf);
+	else
 		return snprintf(buf, PAGE_SIZE, "\n");
 }
 
@@ -330,7 +318,7 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
 		if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
 		    state != POWER_STATE_TYPE_DEFAULT) {
 			amdgpu_dpm_dispatch_task(adev,
-					AMD_PP_EVENT_ENABLE_USER_STATE, &state, NULL);
+					AMD_PP_TASK_ENABLE_USER_STATE, &state, NULL);
 			adev->pp_force_state_enabled = true;
 		}
 	}
@@ -347,7 +335,7 @@ static ssize_t amdgpu_get_pp_table(struct device *dev,
 	char *table = NULL;
 	int size;
 
-	if (adev->pp_enabled)
+	if (adev->powerplay.pp_funcs->get_pp_table)
 		size = amdgpu_dpm_get_pp_table(adev, &table);
 	else
 		return 0;
@@ -368,7 +356,7 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = ddev->dev_private;
 
-	if (adev->pp_enabled)
+	if (adev->powerplay.pp_funcs->set_pp_table)
 		amdgpu_dpm_set_pp_table(adev, buf, count);
 
 	return count;
@@ -380,14 +368,11 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev,
 {
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = ddev->dev_private;
-	ssize_t size = 0;
 
-	if (adev->pp_enabled)
-		size = amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf);
-	else if (adev->pm.funcs->print_clock_levels)
-		size = adev->pm.funcs->print_clock_levels(adev, PP_SCLK, buf);
-
-	return size;
+	if (adev->powerplay.pp_funcs->print_clock_levels)
+		return amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf);
+	else
+		return snprintf(buf, PAGE_SIZE, "\n");
 }
 
 static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
@@ -416,10 +401,9 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev,
 		mask |= 1 << level;
 	}
 
-	if (adev->pp_enabled)
+	if (adev->powerplay.pp_funcs->force_clock_level)
 		amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask);
-	else if (adev->pm.funcs->force_clock_level)
-		adev->pm.funcs->force_clock_level(adev, PP_SCLK, mask);
+
 fail:
 	return count;
 }
@@ -430,14 +414,11 @@ static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev,
 {
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = ddev->dev_private;
-	ssize_t size = 0;
 
-	if (adev->pp_enabled)
-		size = amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf);
-	else if (adev->pm.funcs->print_clock_levels)
-		size = adev->pm.funcs->print_clock_levels(adev, PP_MCLK, buf);
-
-	return size;
+	if (adev->powerplay.pp_funcs->print_clock_levels)
+		return amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf);
+	else
+		return snprintf(buf, PAGE_SIZE, "\n");
 }
 
 static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
@@ -465,11 +446,9 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev,
 		}
 		mask |= 1 << level;
 	}
-
-	if (adev->pp_enabled)
+	if (adev->powerplay.pp_funcs->force_clock_level)
 		amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask);
-	else if (adev->pm.funcs->force_clock_level)
-		adev->pm.funcs->force_clock_level(adev, PP_MCLK, mask);
+
 fail:
 	return count;
 }
@@ -480,14 +459,11 @@ static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev,
 {
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = ddev->dev_private;
-	ssize_t size = 0;
 
-	if (adev->pp_enabled)
-		size = amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf);
-	else if (adev->pm.funcs->print_clock_levels)
-		size = adev->pm.funcs->print_clock_levels(adev, PP_PCIE, buf);
-
-	return size;
+	if (adev->powerplay.pp_funcs->print_clock_levels)
+		return amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf);
+	else
+		return snprintf(buf, PAGE_SIZE, "\n");
 }
 
 static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
@@ -515,11 +491,9 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev,
 		}
 		mask |= 1 << level;
 	}
-
-	if (adev->pp_enabled)
+	if (adev->powerplay.pp_funcs->force_clock_level)
 		amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask);
-	else if (adev->pm.funcs->force_clock_level)
-		adev->pm.funcs->force_clock_level(adev, PP_PCIE, mask);
+
 fail:
 	return count;
 }
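The pp_dpm_sclk/mclk/pcie store handlers share one convention: user space writes space-separated level indices and the handler folds them into a bitmask for force_clock_level(). A hedged sketch of that folding step (the helper name and array input are illustrative; the real handlers parse the buffer inline with kstrtol):

    /* "0 2" written to pp_dpm_sclk becomes mask 0b101 */
    static uint32_t example_levels_to_mask(const long *levels, unsigned count)
    {
            uint32_t mask = 0;
            unsigned i;

            for (i = 0; i < count; i++)
                    mask |= 1u << levels[i];
            return mask;
    }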
@@ -532,10 +506,8 @@ static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
 	struct amdgpu_device *adev = ddev->dev_private;
 	uint32_t value = 0;
 
-	if (adev->pp_enabled)
+	if (adev->powerplay.pp_funcs->get_sclk_od)
 		value = amdgpu_dpm_get_sclk_od(adev);
-	else if (adev->pm.funcs->get_sclk_od)
-		value = adev->pm.funcs->get_sclk_od(adev);
 
 	return snprintf(buf, PAGE_SIZE, "%d\n", value);
 }
@@ -556,12 +528,12 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
 		count = -EINVAL;
 		goto fail;
 	}
+	if (adev->powerplay.pp_funcs->set_sclk_od)
+		amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
 
 	if (adev->pp_enabled) {
-		amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
-		amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_READJUST_POWER_STATE, NULL, NULL);
-	} else if (adev->pm.funcs->set_sclk_od) {
-		adev->pm.funcs->set_sclk_od(adev, (uint32_t)value);
+		amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL, NULL);
+	} else {
 		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
 		amdgpu_pm_compute_clocks(adev);
 	}
@@ -578,10 +550,8 @@ static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
 	struct amdgpu_device *adev = ddev->dev_private;
 	uint32_t value = 0;
 
-	if (adev->pp_enabled)
+	if (adev->powerplay.pp_funcs->get_mclk_od)
 		value = amdgpu_dpm_get_mclk_od(adev);
-	else if (adev->pm.funcs->get_mclk_od)
-		value = adev->pm.funcs->get_mclk_od(adev);
 
 	return snprintf(buf, PAGE_SIZE, "%d\n", value);
 }
@@ -602,12 +572,12 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
 		count = -EINVAL;
 		goto fail;
 	}
+	if (adev->powerplay.pp_funcs->set_mclk_od)
+		amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
 
 	if (adev->pp_enabled) {
-		amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
-		amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_READJUST_POWER_STATE, NULL, NULL);
-	} else if (adev->pm.funcs->set_mclk_od) {
-		adev->pm.funcs->set_mclk_od(adev, (uint32_t)value);
+		amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL, NULL);
+	} else {
 		adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
 		amdgpu_pm_compute_clocks(adev);
 	}
@@ -621,14 +591,11 @@ static ssize_t amdgpu_get_pp_power_profile(struct device *dev,
 {
 	struct drm_device *ddev = dev_get_drvdata(dev);
 	struct amdgpu_device *adev = ddev->dev_private;
-	int ret = 0;
+	int ret = 0xff;
 
-	if (adev->pp_enabled)
+	if (adev->powerplay.pp_funcs->get_power_profile_state)
 		ret = amdgpu_dpm_get_power_profile_state(
 				adev, query);
-	else if (adev->pm.funcs->get_power_profile_state)
-		ret = adev->pm.funcs->get_power_profile_state(
-				adev, query);
 
 	if (ret)
 		return ret;
@@ -675,15 +642,12 @@ static ssize_t amdgpu_set_pp_power_profile(struct device *dev,
 	char *sub_str, buf_cpy[128], *tmp_str;
 	const char delimiter[3] = {' ', '\n', '\0'};
 	long int value;
-	int ret = 0;
+	int ret = 0xff;
 
 	if (strncmp("reset", buf, strlen("reset")) == 0) {
-		if (adev->pp_enabled)
+		if (adev->powerplay.pp_funcs->reset_power_profile_state)
 			ret = amdgpu_dpm_reset_power_profile_state(
 					adev, request);
-		else if (adev->pm.funcs->reset_power_profile_state)
-			ret = adev->pm.funcs->reset_power_profile_state(
-					adev, request);
 		if (ret) {
 			count = -EINVAL;
 			goto fail;
@@ -692,12 +656,10 @@ static ssize_t amdgpu_set_pp_power_profile(struct device *dev,
 	}
 
 	if (strncmp("set", buf, strlen("set")) == 0) {
-		if (adev->pp_enabled)
+		if (adev->powerplay.pp_funcs->set_power_profile_state)
 			ret = amdgpu_dpm_set_power_profile_state(
 					adev, request);
-		else if (adev->pm.funcs->set_power_profile_state)
-			ret = adev->pm.funcs->set_power_profile_state(
-					adev, request);
 		if (ret) {
 			count = -EINVAL;
 			goto fail;
@@ -745,13 +707,8 @@ static ssize_t amdgpu_set_pp_power_profile(struct device *dev,
 
 		loop++;
 	}
+	if (adev->powerplay.pp_funcs->set_power_profile_state)
+		ret = amdgpu_dpm_set_power_profile_state(adev, request);
 
-	if (adev->pp_enabled)
-		ret = amdgpu_dpm_set_power_profile_state(
-				adev, request);
-	else if (adev->pm.funcs->set_power_profile_state)
-		ret = adev->pm.funcs->set_power_profile_state(
-				adev, request);
-
 	if (ret)
 		count = -EINVAL;
@@ -831,7 +788,7 @@ static ssize_t amdgpu_hwmon_show_temp(struct device *dev,
 	    (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
 		return -EINVAL;
 
-	if (!adev->pp_enabled && !adev->pm.funcs->get_temperature)
+	if (!adev->powerplay.pp_funcs->get_temperature)
 		temp = 0;
 	else
 		temp = amdgpu_dpm_get_temperature(adev);
@@ -862,7 +819,7 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
 	struct amdgpu_device *adev = dev_get_drvdata(dev);
 	u32 pwm_mode = 0;
 
-	if (!adev->pp_enabled && !adev->pm.funcs->get_fan_control_mode)
+	if (!adev->powerplay.pp_funcs->get_fan_control_mode)
 		return -EINVAL;
 
 	pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
@@ -879,7 +836,7 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
 	int err;
 	int value;
 
-	if (!adev->pp_enabled && !adev->pm.funcs->set_fan_control_mode)
+	if (!adev->powerplay.pp_funcs->set_fan_control_mode)
 		return -EINVAL;
 
 	err = kstrtoint(buf, 10, &value);
@@ -919,9 +876,11 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
 
 	value = (value * 100) / 255;
 
-	err = amdgpu_dpm_set_fan_speed_percent(adev, value);
-	if (err)
-		return err;
+	if (adev->powerplay.pp_funcs->set_fan_speed_percent) {
+		err = amdgpu_dpm_set_fan_speed_percent(adev, value);
+		if (err)
+			return err;
+	}
 
 	return count;
 }
@@ -932,11 +891,13 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
 {
 	struct amdgpu_device *adev = dev_get_drvdata(dev);
 	int err;
-	u32 speed;
+	u32 speed = 0;
 
-	err = amdgpu_dpm_get_fan_speed_percent(adev, &speed);
-	if (err)
-		return err;
+	if (adev->powerplay.pp_funcs->get_fan_speed_percent) {
+		err = amdgpu_dpm_get_fan_speed_percent(adev, &speed);
+		if (err)
+			return err;
+	}
 
 	speed = (speed * 255) / 100;
 
@@ -949,11 +910,13 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
 {
 	struct amdgpu_device *adev = dev_get_drvdata(dev);
 	int err;
-	u32 speed;
+	u32 speed = 0;
 
-	err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
-	if (err)
-		return err;
+	if (adev->powerplay.pp_funcs->get_fan_speed_rpm) {
+		err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
+		if (err)
+			return err;
+	}
 
 	return sprintf(buf, "%i\n", speed);
 }
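The hwmon pwm1 files speak the standard 0–255 PWM range while the dpm fan helpers take a percentage, hence the two integer scalings visible in these handlers. As a small sketch of the round trip (helper names are illustrative; note both directions truncate):

    static inline u32 example_pwm_to_percent(u32 pwm) { return (pwm * 100) / 255; }
    static inline u32 example_percent_to_pwm(u32 pct) { return (pct * 255) / 100; }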
@@ -1008,21 +971,21 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
 		return 0;
 
 	/* mask fan attributes if we have no bindings for this asic to expose */
-	if ((!adev->pm.funcs->get_fan_speed_percent &&
+	if ((!adev->powerplay.pp_funcs->get_fan_speed_percent &&
 	     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
-	    (!adev->pm.funcs->get_fan_control_mode &&
+	    (!adev->powerplay.pp_funcs->get_fan_control_mode &&
 	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
 		effective_mode &= ~S_IRUGO;
 
-	if ((!adev->pm.funcs->set_fan_speed_percent &&
+	if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
 	     attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
-	    (!adev->pm.funcs->set_fan_control_mode &&
+	    (!adev->powerplay.pp_funcs->set_fan_control_mode &&
 	     attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
 		effective_mode &= ~S_IWUSR;
 
 	/* hide max/min values if we can't both query and manage the fan */
-	if ((!adev->pm.funcs->set_fan_speed_percent &&
-	     !adev->pm.funcs->get_fan_speed_percent) &&
+	if ((!adev->powerplay.pp_funcs->set_fan_speed_percent &&
+	     !adev->powerplay.pp_funcs->get_fan_speed_percent) &&
 	    (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
 	     attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
 		return 0;
@@ -1055,7 +1018,7 @@ void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
 	if (!adev->pm.dpm_enabled)
 		return;
 
-	if (adev->pm.funcs->get_temperature) {
+	if (adev->powerplay.pp_funcs->get_temperature) {
 		int temp = amdgpu_dpm_get_temperature(adev);
 
 		if (temp < adev->pm.dpm.thermal.min_temp)
@@ -1087,7 +1050,7 @@ static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
 		true : false;
 
 	/* check if the vblank period is too short to adjust the mclk */
-	if (single_display && adev->pm.funcs->vblank_too_short) {
+	if (single_display && adev->powerplay.pp_funcs->vblank_too_short) {
 		if (amdgpu_dpm_vblank_too_short(adev))
 			single_display = false;
 	}
@@ -1216,7 +1179,7 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
 	struct amdgpu_ps *ps;
 	enum amd_pm_state_type dpm_state;
 	int ret;
-	bool equal;
+	bool equal = false;
 
 	/* if dpm init failed */
 	if (!adev->pm.dpm_enabled)
@@ -1236,7 +1199,7 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
 	else
 		return;
 
-	if (amdgpu_dpm == 1) {
+	if (amdgpu_dpm == 1 && adev->powerplay.pp_funcs->print_power_state) {
 		printk("switching from power state:\n");
 		amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
 		printk("switching to power state:\n");
@@ -1245,15 +1208,17 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
 
 	/* update whether vce is active */
 	ps->vce_active = adev->pm.dpm.vce_active;
-
-	amdgpu_dpm_display_configuration_changed(adev);
+	if (adev->powerplay.pp_funcs->display_configuration_changed)
+		amdgpu_dpm_display_configuration_changed(adev);
 
 	ret = amdgpu_dpm_pre_set_power_state(adev);
 	if (ret)
 		return;
 
-	if ((0 != amgdpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal)))
-		equal = false;
+	if (adev->powerplay.pp_funcs->check_state_equal) {
+		if (0 != amdgpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal))
+			equal = false;
+	}
 
 	if (equal)
 		return;
@@ -1264,7 +1229,7 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
 	adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
 	adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
 
-	if (adev->pm.funcs->force_performance_level) {
+	if (adev->powerplay.pp_funcs->force_performance_level) {
 		if (adev->pm.dpm.thermal_active) {
 			enum amd_dpm_forced_level level = adev->pm.dpm.forced_level;
 			/* force low perf level for thermal */
@@ -1280,7 +1245,7 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
 
 void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
 {
-	if (adev->pp_enabled || adev->pm.funcs->powergate_uvd) {
+	if (adev->powerplay.pp_funcs->powergate_uvd) {
 		/* enable/disable UVD */
 		mutex_lock(&adev->pm.mutex);
 		amdgpu_dpm_powergate_uvd(adev, !enable);
@@ -1302,7 +1267,7 @@ void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
 
 void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
 {
-	if (adev->pp_enabled || adev->pm.funcs->powergate_vce) {
+	if (adev->powerplay.pp_funcs->powergate_vce) {
 		/* enable/disable VCE */
 		mutex_lock(&adev->pm.mutex);
 		amdgpu_dpm_powergate_vce(adev, !enable);
@@ -1337,8 +1302,7 @@ void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
 {
 	int i;
 
-	if (adev->pp_enabled)
-		/* TO DO */
+	if (adev->powerplay.pp_funcs->print_power_state == NULL)
 		return;
 
 	for (i = 0; i < adev->pm.dpm.num_ps; i++)
@@ -1353,10 +1317,8 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
 	if (adev->pm.sysfs_initialized)
 		return 0;
 
-	if (!adev->pp_enabled) {
-		if (adev->pm.funcs->get_temperature == NULL)
-			return 0;
-	}
+	if (adev->powerplay.pp_funcs->get_temperature == NULL)
+		return 0;
 
 	adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev,
 								   DRIVER_NAME, adev,
@@ -1496,7 +1458,7 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
 	}
 
 	if (adev->pp_enabled) {
-		amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE, NULL, NULL);
+		amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL, NULL);
 	} else {
 		mutex_lock(&adev->pm.mutex);
 		adev->pm.dpm.new_active_crtcs = 0;
@@ -1634,8 +1596,8 @@ static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data)
 		return amdgpu_debugfs_pm_info_pp(m, adev);
 	} else {
 		mutex_lock(&adev->pm.mutex);
-		if (adev->pm.funcs->debugfs_print_current_performance_level)
-			adev->pm.funcs->debugfs_print_current_performance_level(adev, m);
+		if (adev->powerplay.pp_funcs->debugfs_print_current_performance_level)
+			adev->powerplay.pp_funcs->debugfs_print_current_performance_level(adev, m);
 		else
 			seq_printf(m, "Debugfs support not implemented for this asic\n");
 		mutex_unlock(&adev->pm.mutex);
@@ -87,17 +87,28 @@ static int amdgpu_pp_early_init(void *handle)
 	case CHIP_OLAND:
 	case CHIP_HAINAN:
 		amd_pp->ip_funcs = &si_dpm_ip_funcs;
+		amd_pp->pp_funcs = &si_dpm_funcs;
 		break;
 #endif
 #ifdef CONFIG_DRM_AMDGPU_CIK
 	case CHIP_BONAIRE:
 	case CHIP_HAWAII:
-		amd_pp->ip_funcs = &ci_dpm_ip_funcs;
+		if (amdgpu_dpm == -1) {
+			amd_pp->ip_funcs = &ci_dpm_ip_funcs;
+			amd_pp->pp_funcs = &ci_dpm_funcs;
+		} else {
+			adev->pp_enabled = true;
+			if (amdgpu_create_pp_handle(adev))
+				return -EINVAL;
+			amd_pp->ip_funcs = &pp_ip_funcs;
+			amd_pp->pp_funcs = &pp_dpm_funcs;
+		}
 		break;
 	case CHIP_KABINI:
 	case CHIP_MULLINS:
 	case CHIP_KAVERI:
 		amd_pp->ip_funcs = &kv_dpm_ip_funcs;
+		amd_pp->pp_funcs = &kv_dpm_funcs;
 		break;
 #endif
 	default:
@@ -128,7 +139,7 @@ static int amdgpu_pp_late_init(void *handle)
 
 	if (adev->pp_enabled && adev->pm.dpm_enabled) {
 		amdgpu_pm_sysfs_init(adev);
-		amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_COMPLETE_INIT, NULL, NULL);
+		amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_COMPLETE_INIT, NULL, NULL);
 	}
 
 	return ret;
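amdgpu_pp_early_init now hands out a pp_funcs table alongside ip_funcs for every backend, which is what lets the amdgpu_pm.c hunks above test individual callbacks, and on Bonaire/Hawaii the amdgpu_dpm module parameter selects legacy CI dpm (-1) versus powerplay. A hedged sketch of what such a table looks like (the struct name amd_pm_funcs matches the pp_funcs pointer as far as I can tell; the entries are illustrative):

    static const struct amd_pm_funcs example_dpm_funcs = {
            .get_temperature = example_get_temperature,
            .force_performance_level = example_force_performance_level,
            /* callbacks a backend does not implement stay NULL and are
             * skipped by the NULL checks at the call sites */
    };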
@@ -57,6 +57,40 @@ void amdgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
 	ttm_bo_kunmap(&bo->dma_buf_vmap);
 }
 
+int amdgpu_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+	unsigned asize = amdgpu_bo_size(bo);
+	int ret;
+
+	if (!vma->vm_file)
+		return -ENODEV;
+
+	if (adev == NULL)
+		return -ENODEV;
+
+	/* Check for valid size. */
+	if (asize < vma->vm_end - vma->vm_start)
+		return -EINVAL;
+
+	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
+	    (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
+		return -EPERM;
+	}
+	vma->vm_pgoff += amdgpu_bo_mmap_offset(bo) >> PAGE_SHIFT;
+
+	/* prime mmap does not need to check access, so allow here */
+	ret = drm_vma_node_allow(&obj->vma_node, vma->vm_file->private_data);
+	if (ret)
+		return ret;
+
+	ret = ttm_bo_mmap(vma->vm_file, vma, &adev->mman.bdev);
+	drm_vma_node_revoke(&obj->vma_node, vma->vm_file->private_data);
+
+	return ret;
+}
+
 struct drm_gem_object *
 amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
 				 struct dma_buf_attachment *attach,
@@ -136,7 +170,8 @@ struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
 {
 	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
 
-	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
+	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
+	    bo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
 		return ERR_PTR(-EPERM);
 
 	return drm_gem_prime_export(dev, gobj, flags);
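The new amdgpu_gem_prime_mmap() means an exported dma-buf fd can be mapped directly from user space. A hedged userspace sketch of what that enables (error handling trimmed; the fd is assumed to come from a prior prime export):

    #include <sys/mman.h>

    /* dmabuf_fd came from DRM_IOCTL_PRIME_HANDLE_TO_FD; offset 0 is
     * translated inside the driver via amdgpu_bo_mmap_offset() */
    void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
                     dmabuf_fd, 0);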
@@ -57,21 +57,23 @@ static int psp_sw_init(void *handle)
 		psp->prep_cmd_buf = psp_v3_1_prep_cmd_buf;
 		psp->ring_init = psp_v3_1_ring_init;
 		psp->ring_create = psp_v3_1_ring_create;
+		psp->ring_stop = psp_v3_1_ring_stop;
 		psp->ring_destroy = psp_v3_1_ring_destroy;
 		psp->cmd_submit = psp_v3_1_cmd_submit;
 		psp->compare_sram_data = psp_v3_1_compare_sram_data;
 		psp->smu_reload_quirk = psp_v3_1_smu_reload_quirk;
+		psp->mode1_reset = psp_v3_1_mode1_reset;
 		break;
 	case CHIP_RAVEN:
-#if 0
 		psp->init_microcode = psp_v10_0_init_microcode;
-#endif
 		psp->prep_cmd_buf = psp_v10_0_prep_cmd_buf;
 		psp->ring_init = psp_v10_0_ring_init;
 		psp->ring_create = psp_v10_0_ring_create;
+		psp->ring_stop = psp_v10_0_ring_stop;
 		psp->ring_destroy = psp_v10_0_ring_destroy;
 		psp->cmd_submit = psp_v10_0_cmd_submit;
 		psp->compare_sram_data = psp_v10_0_compare_sram_data;
+		psp->mode1_reset = psp_v10_0_mode1_reset;
 		break;
 	default:
 		return -EINVAL;
@@ -90,6 +92,12 @@ static int psp_sw_init(void *handle)
 
 static int psp_sw_fini(void *handle)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	release_firmware(adev->psp.sos_fw);
+	adev->psp.sos_fw = NULL;
+	release_firmware(adev->psp.asd_fw);
+	adev->psp.asd_fw = NULL;
 	return 0;
 }
 
@@ -253,15 +261,18 @@ static int psp_asd_load(struct psp_context *psp)
 
 static int psp_hw_start(struct psp_context *psp)
 {
+	struct amdgpu_device *adev = psp->adev;
 	int ret;
 
-	ret = psp_bootloader_load_sysdrv(psp);
-	if (ret)
-		return ret;
+	if (!amdgpu_sriov_vf(adev) || !adev->in_sriov_reset) {
+		ret = psp_bootloader_load_sysdrv(psp);
+		if (ret)
+			return ret;
 
-	ret = psp_bootloader_load_sos(psp);
-	if (ret)
-		return ret;
+		ret = psp_bootloader_load_sos(psp);
+		if (ret)
+			return ret;
+	}
 
 	ret = psp_ring_create(psp, PSP_RING_TYPE__KM);
 	if (ret)
@@ -453,6 +464,16 @@ static int psp_hw_fini(void *handle)
 
 static int psp_suspend(void *handle)
 {
+	int ret;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	struct psp_context *psp = &adev->psp;
+
+	ret = psp_ring_stop(psp, PSP_RING_TYPE__KM);
+	if (ret) {
+		DRM_ERROR("PSP ring stop failed\n");
+		return ret;
+	}
+
 	return 0;
 }
 
@@ -487,6 +508,22 @@ failed:
 	return ret;
 }
 
+static bool psp_check_reset(void* handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	if (adev->flags & AMD_IS_APU)
+		return true;
+
+	return false;
+}
+
+static int psp_reset(void* handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	return psp_mode1_reset(&adev->psp);
+}
+
 static bool psp_check_fw_loading_status(struct amdgpu_device *adev,
 					enum AMDGPU_UCODE_ID ucode_type)
 {
@@ -530,8 +567,9 @@ const struct amd_ip_funcs psp_ip_funcs = {
 	.suspend = psp_suspend,
 	.resume = psp_resume,
 	.is_idle = NULL,
+	.check_soft_reset = psp_check_reset,
 	.wait_for_idle = NULL,
-	.soft_reset = NULL,
+	.soft_reset = psp_reset,
 	.set_clockgating_state = psp_set_clockgating_state,
 	.set_powergating_state = psp_set_powergating_state,
 };
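psp_check_reset()/psp_reset() plug PSP into the generic IP soft-reset machinery: on APUs check_soft_reset reports the block as reset-worthy, and soft_reset routes to PSP mode1 reset. A hedged sketch of how the core is assumed to drive such hooks (the ip_blocks indexing follows the amdgpu layout of this era, but treat it as illustrative):

    const struct amd_ip_funcs *funcs = adev->ip_blocks[i].version->funcs;

    if (funcs->check_soft_reset && funcs->check_soft_reset(adev))
            funcs->soft_reset(adev); /* lands in psp_reset() for this block */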
@@ -66,6 +66,8 @@ struct psp_context
 			    struct psp_gfx_cmd_resp *cmd);
 	int (*ring_init)(struct psp_context *psp, enum psp_ring_type ring_type);
 	int (*ring_create)(struct psp_context *psp, enum psp_ring_type ring_type);
+	int (*ring_stop)(struct psp_context *psp,
+			 enum psp_ring_type ring_type);
 	int (*ring_destroy)(struct psp_context *psp,
 			    enum psp_ring_type ring_type);
 	int (*cmd_submit)(struct psp_context *psp, struct amdgpu_firmware_info *ucode,
@@ -74,6 +76,7 @@ struct psp_context
 			  struct amdgpu_firmware_info *ucode,
 			  enum AMDGPU_UCODE_ID ucode_type);
 	bool (*smu_reload_quirk)(struct psp_context *psp);
+	int (*mode1_reset)(struct psp_context *psp);
 
 	/* fence buffer */
 	struct amdgpu_bo		*fw_pri_bo;
@@ -123,6 +126,7 @@ struct amdgpu_psp_funcs {
 #define psp_prep_cmd_buf(ucode, type) (psp)->prep_cmd_buf((ucode), (type))
 #define psp_ring_init(psp, type) (psp)->ring_init((psp), (type))
 #define psp_ring_create(psp, type) (psp)->ring_create((psp), (type))
+#define psp_ring_stop(psp, type) (psp)->ring_stop((psp), (type))
 #define psp_ring_destroy(psp, type) ((psp)->ring_destroy((psp), (type)))
 #define psp_cmd_submit(psp, ucode, cmd_mc, fence_mc, index) \
 		(psp)->cmd_submit((psp), (ucode), (cmd_mc), (fence_mc), (index))
@@ -136,6 +140,8 @@ struct amdgpu_psp_funcs {
 		((psp)->bootloader_load_sos ? (psp)->bootloader_load_sos((psp)) : 0)
#define psp_smu_reload_quirk(psp) \
 		((psp)->smu_reload_quirk ? (psp)->smu_reload_quirk((psp)) : false)
+#define psp_mode1_reset(psp) \
+		((psp)->mode1_reset ? (psp)->mode1_reset((psp)) : false)
 
 extern const struct amd_ip_funcs psp_ip_funcs;
 
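The new psp_mode1_reset() macro follows the header's existing convention for optional hooks: the function pointer is NULL-checked at the call site and a benign default is returned, so chips that lack the callback need no special-casing. A minimal sketch of the pattern, with illustrative names:

    /* same shape as psp_mode1_reset()/psp_smu_reload_quirk() above */
    #define example_optional_call(ctx) \
            ((ctx)->hook ? (ctx)->hook((ctx)) : 0)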
@@ -14,62 +14,6 @@
 #define AMDGPU_JOB_GET_TIMELINE_NAME(job) \
 	 job->base.s_fence->finished.ops->get_timeline_name(&job->base.s_fence->finished)
 
-TRACE_EVENT(amdgpu_ttm_tt_populate,
-	    TP_PROTO(struct amdgpu_device *adev, uint64_t dma_address, uint64_t phys_address),
-	    TP_ARGS(adev, dma_address, phys_address),
-	    TP_STRUCT__entry(
-				__field(uint16_t, domain)
-				__field(uint8_t, bus)
-				__field(uint8_t, slot)
-				__field(uint8_t, func)
-				__field(uint64_t, dma)
-				__field(uint64_t, phys)
-			    ),
-	    TP_fast_assign(
-			   __entry->domain = pci_domain_nr(adev->pdev->bus);
-			   __entry->bus = adev->pdev->bus->number;
-			   __entry->slot = PCI_SLOT(adev->pdev->devfn);
-			   __entry->func = PCI_FUNC(adev->pdev->devfn);
-			   __entry->dma = dma_address;
-			   __entry->phys = phys_address;
-			   ),
-	    TP_printk("%04x:%02x:%02x.%x: 0x%llx => 0x%llx",
-		      (unsigned)__entry->domain,
-		      (unsigned)__entry->bus,
-		      (unsigned)__entry->slot,
-		      (unsigned)__entry->func,
-		      (unsigned long long)__entry->dma,
-		      (unsigned long long)__entry->phys)
-);
-
-TRACE_EVENT(amdgpu_ttm_tt_unpopulate,
-	    TP_PROTO(struct amdgpu_device *adev, uint64_t dma_address, uint64_t phys_address),
-	    TP_ARGS(adev, dma_address, phys_address),
-	    TP_STRUCT__entry(
-				__field(uint16_t, domain)
-				__field(uint8_t, bus)
-				__field(uint8_t, slot)
-				__field(uint8_t, func)
-				__field(uint64_t, dma)
-				__field(uint64_t, phys)
-			    ),
-	    TP_fast_assign(
-			   __entry->domain = pci_domain_nr(adev->pdev->bus);
-			   __entry->bus = adev->pdev->bus->number;
-			   __entry->slot = PCI_SLOT(adev->pdev->devfn);
-			   __entry->func = PCI_FUNC(adev->pdev->devfn);
-			   __entry->dma = dma_address;
-			   __entry->phys = phys_address;
-			   ),
-	    TP_printk("%04x:%02x:%02x.%x: 0x%llx => 0x%llx",
-		      (unsigned)__entry->domain,
-		      (unsigned)__entry->bus,
-		      (unsigned)__entry->slot,
-		      (unsigned)__entry->func,
-		      (unsigned long long)__entry->dma,
-		      (unsigned long long)__entry->phys)
-);
-
 TRACE_EVENT(amdgpu_mm_rreg,
 	    TP_PROTO(unsigned did, uint32_t reg, uint32_t value),
 	    TP_ARGS(did, reg, value),
@@ -473,5 +417,5 @@ TRACE_EVENT(amdgpu_ttm_bo_move,
 
 /* This part must be outside protection */
 #undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/amd/amdgpu
 #include <trace/define_trace.h>
@@ -1,4 +1,23 @@
 /* Copyright Red Hat Inc 2010.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
  * Author : Dave Airlie <airlied@redhat.com>
  */
 #include <drm/drmP.h>
@ -42,6 +42,7 @@
|
|||||||
#include <linux/swap.h>
|
#include <linux/swap.h>
|
||||||
#include <linux/pagemap.h>
|
#include <linux/pagemap.h>
|
||||||
#include <linux/debugfs.h>
|
#include <linux/debugfs.h>
|
||||||
|
#include <linux/iommu.h>
|
||||||
#include "amdgpu.h"
|
#include "amdgpu.h"
|
||||||
#include "amdgpu_trace.h"
|
#include "amdgpu_trace.h"
|
||||||
#include "bif/bif_4_1_d.h"
|
#include "bif/bif_4_1_d.h"
|
||||||
@@ -608,6 +609,7 @@ struct amdgpu_ttm_tt {
	spinlock_t              guptasklock;
	struct list_head        guptasks;
	atomic_t                mmu_invalidations;
+	uint32_t                last_set_pages;
	struct list_head        list;
 };
 
@@ -621,6 +623,8 @@ int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
	if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
		flags |= FOLL_WRITE;
 
+	down_read(&current->mm->mmap_sem);
+
	if (gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) {
		/* check that we only use anonymous memory
		   to prevent problems with writeback */
@@ -628,8 +632,10 @@ int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
		struct vm_area_struct *vma;
 
		vma = find_vma(gtt->usermm, gtt->userptr);
-		if (!vma || vma->vm_file || vma->vm_end < end)
+		if (!vma || vma->vm_file || vma->vm_end < end) {
+			up_read(&current->mm->mmap_sem);
			return -EPERM;
+		}
	}
 
	do {
@@ -656,42 +662,44 @@ int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
 
	} while (pinned < ttm->num_pages);
 
+	up_read(&current->mm->mmap_sem);
	return 0;
 
 release_pages:
	release_pages(pages, pinned, 0);
+	up_read(&current->mm->mmap_sem);
	return r;
 }
 
-static void amdgpu_trace_dma_map(struct ttm_tt *ttm)
+void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
 {
-	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	unsigned i;
 
-	if (unlikely(trace_amdgpu_ttm_tt_populate_enabled())) {
-		for (i = 0; i < ttm->num_pages; i++) {
-			trace_amdgpu_ttm_tt_populate(
-				adev,
-				gtt->ttm.dma_address[i],
-				page_to_phys(ttm->pages[i]));
-		}
+	gtt->last_set_pages = atomic_read(&gtt->mmu_invalidations);
+	for (i = 0; i < ttm->num_pages; ++i) {
+		if (ttm->pages[i])
+			put_page(ttm->pages[i]);
+
+		ttm->pages[i] = pages ? pages[i] : NULL;
	}
 }
 
-static void amdgpu_trace_dma_unmap(struct ttm_tt *ttm)
+void amdgpu_ttm_tt_mark_user_pages(struct ttm_tt *ttm)
 {
-	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	unsigned i;
 
-	if (unlikely(trace_amdgpu_ttm_tt_unpopulate_enabled())) {
-		for (i = 0; i < ttm->num_pages; i++) {
-			trace_amdgpu_ttm_tt_unpopulate(
-				adev,
-				gtt->ttm.dma_address[i],
-				page_to_phys(ttm->pages[i]));
-		}
+	for (i = 0; i < ttm->num_pages; ++i) {
+		struct page *page = ttm->pages[i];
+
+		if (!page)
+			continue;
+
+		if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
+			set_page_dirty(page);
+
+		mark_page_accessed(page);
	}
 }
 
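
Note: amdgpu_ttm_tt_get_user_pages() now takes mmap_sem itself and releases it on every exit path, so both find_vma() and the pinning loop run under the lock. A hedged sketch of how a caller is expected to combine it with the new set_user_pages() helper (caller name and error handling illustrative):

    /* Sketch: pin the user pages, then publish them to the TT.
     * set_user_pages() also snapshots the MMU-invalidation counter
     * into gtt->last_set_pages, as shown in the hunk above. */
    static int example_bind_userptr(struct ttm_tt *ttm, struct page **pages)
    {
            int r = amdgpu_ttm_tt_get_user_pages(ttm, pages);

            if (r)
                    return r;
            amdgpu_ttm_tt_set_user_pages(ttm, pages);
            return 0;
    }
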
@@ -721,8 +729,6 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
	drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
					 gtt->ttm.dma_address, ttm->num_pages);
 
-	amdgpu_trace_dma_map(ttm);
-
	return 0;
 
 release_sg:
@@ -734,7 +740,6 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
 {
	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
-	struct sg_page_iter sg_iter;
 
	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
	enum dma_data_direction direction = write ?
@@ -747,16 +752,7 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
	/* free the sg table and pages again */
	dma_unmap_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
 
-	for_each_sg_page(ttm->sg->sgl, &sg_iter, ttm->sg->nents, 0) {
-		struct page *page = sg_page_iter_page(&sg_iter);
-		if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
-			set_page_dirty(page);
-
-		mark_page_accessed(page);
-		put_page(page);
-	}
-
-	amdgpu_trace_dma_unmap(ttm);
+	amdgpu_ttm_tt_mark_user_pages(ttm);
 
	sg_free_table(ttm->sg);
 }
@@ -818,7 +814,6 @@ int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem)
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg tmp;
-
	struct ttm_placement placement;
	struct ttm_place placements;
	int r;
@@ -834,7 +829,7 @@ int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem)
	placement.busy_placement = &placements;
	placements.fpfn = 0;
	placements.lpfn = adev->mc.gart_size >> PAGE_SHIFT;
-	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
+	placements.flags = bo->mem.placement | TTM_PL_FLAG_TT;
 
	r = ttm_bo_mem_space(bo, &placement, &tmp, true, false);
	if (unlikely(r))
@@ -941,8 +936,6 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
 {
	struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
-	unsigned i;
-	int r;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
 
	if (ttm->state != tt_unpopulated)
@@ -962,52 +955,26 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
						 gtt->ttm.dma_address, ttm->num_pages);
		ttm->state = tt_unbound;
-		r = 0;
-		goto trace_mappings;
+		return 0;
	}
 
 #ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
-		r = ttm_dma_populate(&gtt->ttm, adev->dev);
-		goto trace_mappings;
+		return ttm_dma_populate(&gtt->ttm, adev->dev);
	}
 #endif
 
-	r = ttm_pool_populate(ttm);
-	if (r) {
-		return r;
-	}
-
-	for (i = 0; i < ttm->num_pages; i++) {
-		gtt->ttm.dma_address[i] = pci_map_page(adev->pdev, ttm->pages[i],
						       0, PAGE_SIZE,
						       PCI_DMA_BIDIRECTIONAL);
-		if (pci_dma_mapping_error(adev->pdev, gtt->ttm.dma_address[i])) {
-			while (i--) {
-				pci_unmap_page(adev->pdev, gtt->ttm.dma_address[i],
					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-				gtt->ttm.dma_address[i] = 0;
-			}
-			ttm_pool_unpopulate(ttm);
-			return -EFAULT;
-		}
-	}
-
-	r = 0;
-trace_mappings:
-	if (likely(!r))
-		amdgpu_trace_dma_map(ttm);
-	return r;
+	return ttm_populate_and_map_pages(adev->dev, &gtt->ttm);
 }
 
 static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
 {
	struct amdgpu_device *adev;
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
-	unsigned i;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
 
	if (gtt && gtt->userptr) {
+		amdgpu_ttm_tt_set_user_pages(ttm, NULL);
		kfree(ttm->sg);
		ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
		return;
@@ -1018,8 +985,6 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
 
	adev = amdgpu_ttm_adev(ttm->bdev);
 
-	amdgpu_trace_dma_unmap(ttm);
-
 #ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		ttm_dma_unpopulate(&gtt->ttm, adev->dev);
@@ -1027,14 +992,7 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
	}
 #endif
 
-	for (i = 0; i < ttm->num_pages; i++) {
-		if (gtt->ttm.dma_address[i]) {
-			pci_unmap_page(adev->pdev, gtt->ttm.dma_address[i],
				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-		}
-	}
-
-	ttm_pool_unpopulate(ttm);
+	ttm_unmap_and_unpopulate_pages(adev->dev, &gtt->ttm);
 }
 
 int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
@@ -1051,6 +1009,7 @@ int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
	spin_lock_init(&gtt->guptasklock);
	INIT_LIST_HEAD(&gtt->guptasks);
	atomic_set(&gtt->mmu_invalidations, 0);
+	gtt->last_set_pages = 0;
 
	return 0;
 }
@@ -1103,6 +1062,16 @@ bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
	return prev_invalidated != *last_invalidated;
 }
 
+bool amdgpu_ttm_tt_userptr_needs_pages(struct ttm_tt *ttm)
+{
+	struct amdgpu_ttm_tt *gtt = (void *)ttm;
+
+	if (gtt == NULL || !gtt->userptr)
+		return false;
+
+	return atomic_read(&gtt->mmu_invalidations) != gtt->last_set_pages;
+}
+
 bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
 {
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
@@ -1202,14 +1171,14 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
	}
 
	spin_lock_irqsave(&adev->mmio_idx_lock, flags);
-	WREG32(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000);
-	WREG32(mmMM_INDEX_HI, aligned_pos >> 31);
+	WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000);
+	WREG32_NO_KIQ(mmMM_INDEX_HI, aligned_pos >> 31);
	if (!write || mask != 0xffffffff)
-		value = RREG32(mmMM_DATA);
+		value = RREG32_NO_KIQ(mmMM_DATA);
	if (write) {
		value &= ~mask;
		value |= (*(uint32_t *)buf << shift) & mask;
-		WREG32(mmMM_DATA, value);
+		WREG32_NO_KIQ(mmMM_DATA, value);
	}
	spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
	if (!write) {
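
Note: the MM_INDEX/MM_INDEX_HI/MM_DATA trio is an indirect window into VRAM — the low 31 bits of the byte offset (with bit 31 set to select the MM aperture) go into MM_INDEX, the upper bits into MM_INDEX_HI, and MM_DATA then reads or writes the addressed dword. The _NO_KIQ accessors perform the MMIO access directly instead of routing it through the KIQ-mediated path used under SR-IOV. The read side, restated as a standalone sketch mirroring the code above:

    static uint32_t vram_read_dword(struct amdgpu_device *adev, uint64_t pos)
    {
            unsigned long flags;
            uint32_t value;

            spin_lock_irqsave(&adev->mmio_idx_lock, flags);
            WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
            WREG32_NO_KIQ(mmMM_INDEX_HI, pos >> 31);
            value = RREG32_NO_KIQ(mmMM_DATA);   /* dword at byte offset pos */
            spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
            return value;
    }
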
@@ -1557,8 +1526,8 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
		       struct dma_fence **fence)
 {
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
-	uint32_t max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
+	/* max_bytes applies to SDMA_OP_PTEPDE as well as SDMA_OP_CONST_FILL*/
+	uint32_t max_bytes = 8 *
+			adev->vm_manager.vm_pte_funcs->set_max_nums_pte_pde;
	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
 
	struct drm_mm_node *mm_node;
@@ -1590,8 +1559,8 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
		++mm_node;
	}
 
-	/* 10 double words for each SDMA_OP_PTEPDE cmd */
-	num_dw = num_loops * 10;
+	/* num of dwords for each SDMA_OP_PTEPDE cmd */
+	num_dw = num_loops * adev->vm_manager.vm_pte_funcs->set_pte_pde_num_dw;
 
	/* for IB padding */
	num_dw += 64;
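
Worked example of the sizing above (numbers illustrative, not taken from the patch): with 64-bit page-table entries, each SDMA_OP_PTEPDE entry writes 8 bytes, so one command can fill at most 8 * set_max_nums_pte_pde bytes; a fill of byte_count bytes then needs roughly:

    /* illustrative arithmetic only; the real code walks drm_mm nodes */
    uint32_t max_bytes = 8 * set_max_nums_pte_pde;          /* bytes per cmd */
    unsigned num_loops = DIV_ROUND_UP(byte_count, max_bytes);
    unsigned num_dw    = num_loops * set_pte_pde_num_dw + 64; /* + IB padding */
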
@@ -1697,9 +1666,9 @@ static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
		return result;
 
	spin_lock_irqsave(&adev->mmio_idx_lock, flags);
-	WREG32(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
-	WREG32(mmMM_INDEX_HI, *pos >> 31);
-	value = RREG32(mmMM_DATA);
+	WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
+	WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31);
+	value = RREG32_NO_KIQ(mmMM_DATA);
	spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
 
	r = put_user(value, (uint32_t *)buf);
@@ -1715,10 +1684,50 @@ static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
	return result;
 }
 
+static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf,
+				     size_t size, loff_t *pos)
+{
+	struct amdgpu_device *adev = file_inode(f)->i_private;
+	ssize_t result = 0;
+	int r;
+
+	if (size & 0x3 || *pos & 0x3)
+		return -EINVAL;
+
+	if (*pos >= adev->mc.mc_vram_size)
+		return -ENXIO;
+
+	while (size) {
+		unsigned long flags;
+		uint32_t value;
+
+		if (*pos >= adev->mc.mc_vram_size)
+			return result;
+
+		r = get_user(value, (uint32_t *)buf);
+		if (r)
+			return r;
+
+		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
+		WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
+		WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31);
+		WREG32_NO_KIQ(mmMM_DATA, value);
+		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
+
+		result += 4;
+		buf += 4;
+		*pos += 4;
+		size -= 4;
+	}
+
+	return result;
+}
+
 static const struct file_operations amdgpu_ttm_vram_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_ttm_vram_read,
-	.llseek = default_llseek
+	.write = amdgpu_ttm_vram_write,
+	.llseek = default_llseek,
 };
 
 #ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
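
With the new write handler, the amdgpu_vram debugfs file becomes read/write. A hypothetical userspace probe (path and minor number illustrative; accesses must be 4-byte aligned, as the handler enforces):

    #include <fcntl.h>
    #include <stdint.h>
    #include <unistd.h>

    /* hypothetical helper: poke one dword of VRAM on card 0 */
    int poke_vram(off_t offset, uint32_t value)
    {
            int fd = open("/sys/kernel/debug/dri/0/amdgpu_vram", O_RDWR);
            if (fd < 0)
                    return -1;
            ssize_t n = pwrite(fd, &value, sizeof(value), offset);
            close(fd);
            return n == (ssize_t)sizeof(value) ? 0 : -1;
    }
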
@@ -1770,6 +1779,53 @@ static const struct file_operations amdgpu_ttm_gtt_fops = {
 
 #endif
 
+static ssize_t amdgpu_iova_to_phys_read(struct file *f, char __user *buf,
+				   size_t size, loff_t *pos)
+{
+	struct amdgpu_device *adev = file_inode(f)->i_private;
+	int r;
+	uint64_t phys;
+	struct iommu_domain *dom;
+
+	// always return 8 bytes
+	if (size != 8)
+		return -EINVAL;
+
+	// only accept page addresses
+	if (*pos & 0xFFF)
+		return -EINVAL;
+
+	dom = iommu_get_domain_for_dev(adev->dev);
+	if (dom)
+		phys = iommu_iova_to_phys(dom, *pos);
+	else
+		phys = *pos;
+
+	r = copy_to_user(buf, &phys, 8);
+	if (r)
+		return -EFAULT;
+
+	return 8;
+}
+
+static const struct file_operations amdgpu_ttm_iova_fops = {
+	.owner = THIS_MODULE,
+	.read = amdgpu_iova_to_phys_read,
+	.llseek = default_llseek
+};
+
+static const struct {
+	char *name;
+	const struct file_operations *fops;
+	int domain;
+} ttm_debugfs_entries[] = {
+	{ "amdgpu_vram", &amdgpu_ttm_vram_fops, TTM_PL_VRAM },
+#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
+	{ "amdgpu_gtt", &amdgpu_ttm_gtt_fops, TTM_PL_TT },
+#endif
+	{ "amdgpu_iova", &amdgpu_ttm_iova_fops, TTM_PL_SYSTEM },
+};
+
 #endif
 
 static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
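
The new amdgpu_iova file translates one DMA (IOVA) address per read: seek to a page-aligned IOVA, read exactly 8 bytes, and get the backing physical address (or the IOVA itself when no IOMMU domain is attached). A hypothetical userspace sketch:

    #include <fcntl.h>
    #include <stdint.h>
    #include <unistd.h>

    /* returns the physical address behind a page-aligned IOVA, or 0 */
    uint64_t iova_to_phys(uint64_t iova)
    {
            uint64_t phys = 0;
            int fd = open("/sys/kernel/debug/dri/0/amdgpu_iova", O_RDONLY);
            if (fd < 0)
                    return 0;
            if (pread(fd, &phys, sizeof(phys), iova) != (ssize_t)sizeof(phys))
                    phys = 0;
            close(fd);
            return phys;
    }
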
@@ -1780,22 +1836,21 @@ static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
	struct drm_minor *minor = adev->ddev->primary;
	struct dentry *ent, *root = minor->debugfs_root;
 
-	ent = debugfs_create_file("amdgpu_vram", S_IFREG | S_IRUGO, root,
				  adev, &amdgpu_ttm_vram_fops);
-	if (IS_ERR(ent))
-		return PTR_ERR(ent);
-	i_size_write(ent->d_inode, adev->mc.mc_vram_size);
-	adev->mman.vram = ent;
-
-#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
-	ent = debugfs_create_file("amdgpu_gtt", S_IFREG | S_IRUGO, root,
				  adev, &amdgpu_ttm_gtt_fops);
-	if (IS_ERR(ent))
-		return PTR_ERR(ent);
-	i_size_write(ent->d_inode, adev->mc.gart_size);
-	adev->mman.gtt = ent;
-
-#endif
+	for (count = 0; count < ARRAY_SIZE(ttm_debugfs_entries); count++) {
+		ent = debugfs_create_file(
+				ttm_debugfs_entries[count].name,
+				S_IFREG | S_IRUGO, root,
+				adev,
+				ttm_debugfs_entries[count].fops);
+		if (IS_ERR(ent))
+			return PTR_ERR(ent);
+		if (ttm_debugfs_entries[count].domain == TTM_PL_VRAM)
+			i_size_write(ent->d_inode, adev->mc.mc_vram_size);
+		else if (ttm_debugfs_entries[count].domain == TTM_PL_TT)
+			i_size_write(ent->d_inode, adev->mc.gart_size);
+		adev->mman.debugfs_entries[count] = ent;
+	}
 
	count = ARRAY_SIZE(amdgpu_ttm_debugfs_list);
 
 #ifdef CONFIG_SWIOTLB
@@ -1805,7 +1860,6 @@ static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
 
	return amdgpu_debugfs_add_files(adev, amdgpu_ttm_debugfs_list, count);
 #else
-
	return 0;
 #endif
 }
@@ -1813,14 +1867,9 @@ static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
 static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev)
 {
 #if defined(CONFIG_DEBUG_FS)
+	unsigned i;
 
-	debugfs_remove(adev->mman.vram);
-	adev->mman.vram = NULL;
-
-#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
-	debugfs_remove(adev->mman.gtt);
-	adev->mman.gtt = NULL;
-#endif
-
+	for (i = 0; i < ARRAY_SIZE(ttm_debugfs_entries); i++)
+		debugfs_remove(adev->mman.debugfs_entries[i]);
 #endif
 }
@@ -24,6 +24,7 @@
 #ifndef __AMDGPU_TTM_H__
 #define __AMDGPU_TTM_H__
 
+#include "amdgpu.h"
 #include "gpu_scheduler.h"
 
 #define AMDGPU_PL_GDS		(TTM_PL_PRIV + 0)
@@ -45,8 +46,7 @@ struct amdgpu_mman {
	bool					initialized;
 
 #if defined(CONFIG_DEBUG_FS)
-	struct dentry				*vram;
-	struct dentry				*gtt;
+	struct dentry				*debugfs_entries[8];
 #endif
 
	/* buffer handling */
@@ -82,4 +82,20 @@ bool amdgpu_ttm_is_bound(struct ttm_tt *ttm);
 int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem);
 int amdgpu_ttm_recover_gart(struct amdgpu_device *adev);
 
+int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages);
+void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages);
+void amdgpu_ttm_tt_mark_user_pages(struct ttm_tt *ttm);
+int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
+			      uint32_t flags);
+bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
+struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm);
+bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
+				  unsigned long end);
+bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
+				       int *last_invalidated);
+bool amdgpu_ttm_tt_userptr_needs_pages(struct ttm_tt *ttm);
+bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
+uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
+				 struct ttm_mem_reg *mem);
+
 #endif
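
Among the newly exported helpers, amdgpu_ttm_tt_userptr_needs_pages() reports whether the MMU notifier invalidated anything since amdgpu_ttm_tt_set_user_pages() last ran, by comparing mmu_invalidations against the last_set_pages snapshot. A hedged sketch of a CS-time re-validation (the entry structure `e` is hypothetical):

    /* hypothetical CS path: re-pin only when the snapshot went stale */
    if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) {
            r = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm, e->user_pages);
            if (r)
                    return r;
            amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm, e->user_pages);
    }
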
@@ -270,12 +270,8 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type)
		else
			return AMDGPU_FW_LOAD_SMU;
	case CHIP_VEGA10:
-		if (!load_type)
-			return AMDGPU_FW_LOAD_DIRECT;
-		else
-			return AMDGPU_FW_LOAD_PSP;
	case CHIP_RAVEN:
-		if (load_type != 2)
+		if (!load_type)
			return AMDGPU_FW_LOAD_DIRECT;
		else
			return AMDGPU_FW_LOAD_PSP;
@@ -364,8 +360,6 @@ static int amdgpu_ucode_patch_jt(struct amdgpu_firmware_info *ucode,
 int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
 {
	struct amdgpu_bo **bo = &adev->firmware.fw_buf;
-	uint64_t fw_mc_addr;
-	void *fw_buf_ptr = NULL;
	uint64_t fw_offset = 0;
	int i, err;
	struct amdgpu_firmware_info *ucode = NULL;
@@ -376,37 +370,39 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
		return 0;
	}
 
-	err = amdgpu_bo_create(adev, adev->firmware.fw_size, PAGE_SIZE, true,
-			amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
-			AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
-			NULL, NULL, 0, bo);
-	if (err) {
-		dev_err(adev->dev, "(%d) Firmware buffer allocate failed\n", err);
-		goto failed;
-	}
+	if (!amdgpu_sriov_vf(adev) || !adev->in_sriov_reset) {
+		err = amdgpu_bo_create(adev, adev->firmware.fw_size, PAGE_SIZE, true,
+				amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
+				AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS,
+				NULL, NULL, 0, bo);
+		if (err) {
+			dev_err(adev->dev, "(%d) Firmware buffer allocate failed\n", err);
+			goto failed;
+		}
 
-	err = amdgpu_bo_reserve(*bo, false);
-	if (err) {
-		dev_err(adev->dev, "(%d) Firmware buffer reserve failed\n", err);
-		goto failed_reserve;
-	}
+		err = amdgpu_bo_reserve(*bo, false);
+		if (err) {
+			dev_err(adev->dev, "(%d) Firmware buffer reserve failed\n", err);
+			goto failed_reserve;
+		}
 
-	err = amdgpu_bo_pin(*bo, amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
-				&fw_mc_addr);
-	if (err) {
-		dev_err(adev->dev, "(%d) Firmware buffer pin failed\n", err);
-		goto failed_pin;
-	}
+		err = amdgpu_bo_pin(*bo, amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
+					&adev->firmware.fw_buf_mc);
+		if (err) {
+			dev_err(adev->dev, "(%d) Firmware buffer pin failed\n", err);
+			goto failed_pin;
+		}
 
-	err = amdgpu_bo_kmap(*bo, &fw_buf_ptr);
-	if (err) {
-		dev_err(adev->dev, "(%d) Firmware buffer kmap failed\n", err);
-		goto failed_kmap;
-	}
+		err = amdgpu_bo_kmap(*bo, &adev->firmware.fw_buf_ptr);
+		if (err) {
+			dev_err(adev->dev, "(%d) Firmware buffer kmap failed\n", err);
+			goto failed_kmap;
+		}
 
-	amdgpu_bo_unreserve(*bo);
+		amdgpu_bo_unreserve(*bo);
+	}
 
-	memset(fw_buf_ptr, 0, adev->firmware.fw_size);
+	memset(adev->firmware.fw_buf_ptr, 0, adev->firmware.fw_size);
 
	/*
	 * if SMU loaded firmware, it needn't add SMC, UVD, and VCE
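
The restructuring above creates, pins and kmaps the firmware BO only on first init; on an SR-IOV function-level reset (adev->in_sriov_reset) the cached fw_buf_mc/fw_buf_ptr are reused and the buffer is merely re-zeroed before the microcode is copied back in. The control shape, reduced to an illustrative outline:

    /* illustrative outline of the guard introduced above */
    if (!amdgpu_sriov_vf(adev) || !adev->in_sriov_reset) {
            /* first init: create + reserve + pin + kmap, caching
             * adev->firmware.fw_buf_mc and adev->firmware.fw_buf_ptr */
    }
    /* both paths: reuse the cached CPU mapping */
    memset(adev->firmware.fw_buf_ptr, 0, adev->firmware.fw_size);
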
@@ -425,14 +421,14 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
		ucode = &adev->firmware.ucode[i];
		if (ucode->fw) {
			header = (const struct common_firmware_header *)ucode->fw->data;
-			amdgpu_ucode_init_single_fw(adev, ucode, fw_mc_addr + fw_offset,
-						    (void *)((uint8_t *)fw_buf_ptr + fw_offset));
+			amdgpu_ucode_init_single_fw(adev, ucode, adev->firmware.fw_buf_mc + fw_offset,
+						    adev->firmware.fw_buf_ptr + fw_offset);
			if (i == AMDGPU_UCODE_ID_CP_MEC1 &&
			    adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
				const struct gfx_firmware_header_v1_0 *cp_hdr;
				cp_hdr = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
-				amdgpu_ucode_patch_jt(ucode, fw_mc_addr + fw_offset,
-						      fw_buf_ptr + fw_offset);
+				amdgpu_ucode_patch_jt(ucode, adev->firmware.fw_buf_mc + fw_offset,
+						      adev->firmware.fw_buf_ptr + fw_offset);
				fw_offset += ALIGN(le32_to_cpu(cp_hdr->jt_size) << 2, PAGE_SIZE);
			}
			fw_offset += ALIGN(ucode->ucode_size, PAGE_SIZE);
@@ -269,6 +269,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
 
 int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
 {
+	int i;
	kfree(adev->uvd.saved_bo);
 
	amd_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity);
@@ -279,6 +280,9 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
 
	amdgpu_ring_fini(&adev->uvd.ring);
 
+	for (i = 0; i < AMDGPU_MAX_UVD_ENC_RINGS; ++i)
+		amdgpu_ring_fini(&adev->uvd.ring_enc[i]);
+
	release_firmware(adev->uvd.fw);
 
	return 0;
@@ -410,10 +414,10 @@ static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx)
	uint64_t addr = amdgpu_uvd_get_addr_from_ctx(ctx);
	int r = 0;
 
-	mapping = amdgpu_cs_find_mapping(ctx->parser, addr, &bo);
-	if (mapping == NULL) {
+	r = amdgpu_cs_find_mapping(ctx->parser, addr, &bo, &mapping);
+	if (r) {
		DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
-		return -EINVAL;
+		return r;
	}
 
	if (!ctx->parser->adev->uvd.address_64_bit) {
@@ -737,10 +741,10 @@ static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
	uint64_t addr = amdgpu_uvd_get_addr_from_ctx(ctx);
	int r;
 
-	mapping = amdgpu_cs_find_mapping(ctx->parser, addr, &bo);
-	if (mapping == NULL) {
+	r = amdgpu_cs_find_mapping(ctx->parser, addr, &bo, &mapping);
+	if (r) {
		DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
-		return -EINVAL;
+		return r;
	}
 
	start = amdgpu_bo_gpu_offset(bo);
@@ -917,10 +921,6 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
		return -EINVAL;
	}
 
-	r = amdgpu_cs_sysvm_access_required(parser);
-	if (r)
-		return r;
-
	ctx.parser = parser;
	ctx.buf_sizes = buf_sizes;
	ctx.ib_idx = ib_idx;
@@ -559,6 +559,7 @@ static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx,
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	uint64_t addr;
+	int r;
 
	if (index == 0xffffffff)
		index = 0;
@@ -567,11 +568,11 @@ static int amdgpu_vce_cs_reloc(struct amdgpu_cs_parser *p, uint32_t ib_idx,
	       ((uint64_t)amdgpu_get_ib_value(p, ib_idx, hi)) << 32;
	addr += ((uint64_t)size) * ((uint64_t)index);
 
-	mapping = amdgpu_cs_find_mapping(p, addr, &bo);
-	if (mapping == NULL) {
+	r = amdgpu_cs_find_mapping(p, addr, &bo, &mapping);
+	if (r) {
		DRM_ERROR("Can't find BO for addr 0x%010Lx %d %d %d %d\n",
			  addr, lo, hi, size, index);
-		return -EINVAL;
+		return r;
	}
 
	if ((addr + (uint64_t)size) >
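
All three call sites (both UVD passes above and this VCE reloc) switch from a NULL-returning lookup to an int-returning amdgpu_cs_find_mapping() that hands back the BO and the mapping through out-parameters, so the real error code propagates. The new convention, restated once:

    struct amdgpu_bo_va_mapping *mapping;
    struct amdgpu_bo *bo;
    int r;

    r = amdgpu_cs_find_mapping(p, addr, &bo, &mapping);
    if (r)
            return r;   /* propagate r rather than a blanket -EINVAL */
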
@@ -652,10 +653,6 @@ int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx)
	p->job->vm = NULL;
	ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
 
-	r = amdgpu_cs_sysvm_access_required(p);
-	if (r)
-		return r;
-
	while (idx < ib->length_dw) {
		uint32_t len = amdgpu_get_ib_value(p, ib_idx, idx);
		uint32_t cmd = amdgpu_get_ib_value(p, ib_idx, idx + 1);
[File diff suppressed because it is too large]
@@ -25,6 +25,7 @@
 #define __AMDGPU_VM_H__
 
 #include <linux/rbtree.h>
+#include <linux/idr.h>
 
 #include "gpu_scheduler.h"
 #include "amdgpu_sync.h"
@@ -105,17 +106,24 @@ struct amdgpu_vm_bo_base {
 
	/* protected by spinlock */
	struct list_head		vm_status;
+
+	/* protected by the BO being reserved */
+	bool				moved;
 };
 
 struct amdgpu_vm_pt {
-	struct amdgpu_bo	*bo;
+	struct amdgpu_vm_bo_base	base;
	uint64_t		addr;
 
	/* array of page tables, one for each directory entry */
	struct amdgpu_vm_pt	*entries;
	unsigned		last_entry_used;
 };
 
+#define AMDGPU_VM_FAULT(pasid, addr) (((u64)(pasid) << 48) | (addr))
+#define AMDGPU_VM_FAULT_PASID(fault) ((u64)(fault) >> 48)
+#define AMDGPU_VM_FAULT_ADDR(fault)  ((u64)(fault) & 0xfffffffff000ULL)
+
 struct amdgpu_vm {
	/* tree of virtual addresses mapped */
	struct rb_root_cached va;
@@ -123,19 +131,21 @@ struct amdgpu_vm {
	/* protecting invalidated */
	spinlock_t		status_lock;
 
+	/* BOs who needs a validation */
+	struct list_head	evicted;
+
+	/* PT BOs which relocated and their parent need an update */
+	struct list_head	relocated;
+
	/* BOs moved, but not yet updated in the PT */
	struct list_head	moved;
 
-	/* BOs cleared in the PT because of a move */
-	struct list_head	cleared;
-
	/* BO mappings freed, but not yet updated in the PT */
	struct list_head	freed;
 
	/* contains the page directory */
	struct amdgpu_vm_pt	root;
-	struct dma_fence	*last_dir_update;
-	uint64_t		last_eviction_counter;
+	struct dma_fence	*last_update;
 
	/* protecting freed */
	spinlock_t		freed_lock;
@@ -143,8 +153,9 @@ struct amdgpu_vm {
	/* Scheduler entity for page table updates */
	struct amd_sched_entity	entity;
 
-	/* client id */
+	/* client id and PASID (TODO: replace client_id with PASID) */
	u64                     client_id;
+	unsigned int		pasid;
	/* dedicated to vm */
	struct amdgpu_vm_id	*reserved_vmid[AMDGPU_MAX_VMHUBS];
 
@@ -153,6 +164,9 @@ struct amdgpu_vm {
 
	/* Flag to indicate ATS support from PTE for GFX9 */
	bool                    pte_support_ats;
+
+	/* Up to 128 pending page faults */
+	DECLARE_KFIFO(faults, u64, 128);
 };
 
 struct amdgpu_vm_id {
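
The fault key defined earlier packs a 16-bit PASID above a page-aligned 48-bit address, so one u64 fits the kfifo declared here. A worked example (values illustrative):

    /* pasid 0x1234, faulting page 0x7fffffe00000: */
    u64 fault = AMDGPU_VM_FAULT(0x1234, 0x7fffffe00000ULL);
    /* fault == 0x12347fffffe00000 */

    unsigned int pasid = AMDGPU_VM_FAULT_PASID(fault); /* 0x1234 */
    u64 addr = AMDGPU_VM_FAULT_ADDR(fault);            /* 0x7fffffe00000 */

    /* queued from the fault path; kfifo_put() returns 0 when the
     * 128-entry fifo is full */
    if (!kfifo_put(&vm->faults, fault))
            /* fifo full: drop or escalate (policy not defined here) */;
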
@@ -215,16 +229,25 @@ struct amdgpu_vm_manager {
	 * BIT1[= 0] Compute updated by SDMA [= 1] by CPU
	 */
	int					vm_update_mode;
+
+	/* PASID to VM mapping, will be used in interrupt context to
+	 * look up VM of a page fault
+	 */
+	struct idr				pasid_idr;
+	spinlock_t				pasid_lock;
 };
 
+int amdgpu_vm_alloc_pasid(unsigned int bits);
+void amdgpu_vm_free_pasid(unsigned int pasid);
 void amdgpu_vm_manager_init(struct amdgpu_device *adev);
 void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-		   int vm_context);
+		   int vm_context, unsigned int pasid);
 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
 void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
			 struct list_head *validated,
			 struct amdgpu_bo_list_entry *entry);
+bool amdgpu_vm_ready(struct amdgpu_vm *vm);
 int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
			      int (*callback)(void *p, struct amdgpu_bo *bo),
			      void *param);
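
A hedged sketch of the lookup the pasid_idr comment describes — resolving a faulting PASID to its VM from interrupt context and queueing the fault (locking shape assumed from the fields above, not quoted from the suppressed amdgpu_vm.c diff):

    struct amdgpu_vm *vm;
    unsigned long flags;

    spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
    vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
    if (vm)
            kfifo_put(&vm->faults, AMDGPU_VM_FAULT(pasid, addr));
    spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
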
@@ -243,13 +266,13 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev,
 int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm,
			  struct dma_fence **fence);
-int amdgpu_vm_clear_moved(struct amdgpu_device *adev, struct amdgpu_vm *vm,
-			  struct amdgpu_sync *sync);
+int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
+			   struct amdgpu_vm *vm);
 int amdgpu_vm_bo_update(struct amdgpu_device *adev,
			struct amdgpu_bo_va *bo_va,
			bool clear);
 void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
-			     struct amdgpu_bo *bo);
+			     struct amdgpu_bo *bo, bool evicted);
 struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
				       struct amdgpu_bo *bo);
 struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
@@ -269,6 +292,8 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
 int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
				struct amdgpu_vm *vm,
				uint64_t saddr, uint64_t size);
+struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
+							 uint64_t addr);
 void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
		      struct amdgpu_bo_va *bo_va);
 void amdgpu_vm_set_fragment_size(struct amdgpu_device *adev,
@@ -1343,8 +1343,11 @@ struct atom_context *amdgpu_atom_parse(struct card_info *card, void *bios)
	idx = 0x80;
 
	str = CSTR(idx);
-	if (*str != '\0')
+	if (*str != '\0') {
		pr_info("ATOM BIOS: %s\n", str);
+		strlcpy(ctx->vbios_version, str, sizeof(ctx->vbios_version));
+	}
 
	return ctx;
 }
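
strlcpy() bounds the copy to the destination size and always NUL-terminates, so a BIOS string longer than 19 characters is truncated rather than overflowing the field added to struct atom_context below. Illustratively (the sample string is hypothetical):

    char vbios_version[20];

    /* an ATOM part-number string such as "113-D0500100-102" fits;
     * anything longer is cut at 19 chars + NUL */
    strlcpy(vbios_version, str, sizeof(vbios_version));
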
@@ -140,6 +140,7 @@ struct atom_context {
	int io_mode;
	uint32_t *scratch;
	int scratch_size_bytes;
+	char vbios_version[20];
 };
 
 extern int amdgpu_atom_debug;
@@ -307,7 +307,6 @@ static int ci_set_power_limit(struct amdgpu_device *adev, u32 n);
 static int ci_set_overdrive_target_tdp(struct amdgpu_device *adev,
				       u32 target_tdp);
 static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate);
-static void ci_dpm_set_dpm_funcs(struct amdgpu_device *adev);
 static void ci_dpm_set_irq_funcs(struct amdgpu_device *adev);
 
 static PPSMC_Result amdgpu_ci_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
@@ -883,8 +882,9 @@ static int ci_power_control_set_level(struct amdgpu_device *adev)
	return ret;
 }
 
-static void ci_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate)
+static void ci_dpm_powergate_uvd(void *handle, bool gate)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);
 
	pi->uvd_power_gated = gate;
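
Every ci_dpm hunk that follows applies the same mechanical conversion: the powerplay-facing entry points take an opaque void *handle instead of struct amdgpu_device *, and recover the device with a cast as their first statement. The pattern, stated once (function name hypothetical):

    /* hypothetical example of the conversion applied below */
    static int ci_dpm_example(void *handle)
    {
            struct amdgpu_device *adev = (struct amdgpu_device *)handle;
            struct ci_power_info *pi = ci_get_pi(adev);

            /* ... body unchanged, still operating on adev/pi ... */
            return 0;
    }
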
@@ -901,8 +901,9 @@ static void ci_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate)
	}
 }
 
-static bool ci_dpm_vblank_too_short(struct amdgpu_device *adev)
+static bool ci_dpm_vblank_too_short(void *handle)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
	u32 switch_limit = adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 300;
 
@@ -1210,11 +1211,12 @@ static int ci_fan_ctrl_stop_smc_fan_control(struct amdgpu_device *adev)
	}
 }
 
-static int ci_dpm_get_fan_speed_percent(struct amdgpu_device *adev,
+static int ci_dpm_get_fan_speed_percent(void *handle,
					u32 *speed)
 {
	u32 duty, duty100;
	u64 tmp64;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
	if (adev->pm.no_fan)
		return -ENOENT;
@@ -1237,12 +1239,13 @@ static int ci_dpm_get_fan_speed_percent(struct amdgpu_device *adev,
	return 0;
 }
 
-static int ci_dpm_set_fan_speed_percent(struct amdgpu_device *adev,
+static int ci_dpm_set_fan_speed_percent(void *handle,
					u32 speed)
 {
	u32 tmp;
	u32 duty, duty100;
	u64 tmp64;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);
 
	if (adev->pm.no_fan)
@@ -1271,8 +1274,10 @@ static int ci_dpm_set_fan_speed_percent(struct amdgpu_device *adev,
	return 0;
 }
 
-static void ci_dpm_set_fan_control_mode(struct amdgpu_device *adev, u32 mode)
+static void ci_dpm_set_fan_control_mode(void *handle, u32 mode)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
	switch (mode) {
	case AMD_FAN_CTRL_NONE:
		if (adev->pm.dpm.fan.ucode_fan_control)
@@ -1292,8 +1297,9 @@ static void ci_dpm_set_fan_control_mode(struct amdgpu_device *adev, u32 mode)
	}
 }
 
-static u32 ci_dpm_get_fan_control_mode(struct amdgpu_device *adev)
+static u32 ci_dpm_get_fan_control_mode(void *handle)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);
 
	if (pi->fan_is_controlled_by_smc)
@@ -4378,9 +4384,10 @@ static u32 ci_get_lowest_enabled_level(struct amdgpu_device *adev,
 }
 
 
-static int ci_dpm_force_performance_level(struct amdgpu_device *adev,
+static int ci_dpm_force_performance_level(void *handle,
					  enum amd_dpm_forced_level level)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 tmp, levels, i;
	int ret;
@@ -5291,8 +5298,9 @@ static void ci_update_requested_ps(struct amdgpu_device *adev,
	adev->pm.dpm.requested_ps = &pi->requested_rps;
 }
 
-static int ci_dpm_pre_set_power_state(struct amdgpu_device *adev)
+static int ci_dpm_pre_set_power_state(void *handle)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);
	struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps;
	struct amdgpu_ps *new_ps = &requested_ps;
@@ -5304,8 +5312,9 @@ static int ci_dpm_pre_set_power_state(struct amdgpu_device *adev)
	return 0;
 }
 
-static void ci_dpm_post_set_power_state(struct amdgpu_device *adev)
+static void ci_dpm_post_set_power_state(void *handle)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);
	struct amdgpu_ps *new_ps = &pi->requested_rps;
 
@@ -5479,8 +5488,9 @@ static void ci_dpm_disable(struct amdgpu_device *adev)
	ci_update_current_ps(adev, boot_ps);
 }
 
-static int ci_dpm_set_power_state(struct amdgpu_device *adev)
+static int ci_dpm_set_power_state(void *handle)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);
	struct amdgpu_ps *new_ps = &pi->requested_rps;
	struct amdgpu_ps *old_ps = &pi->current_rps;
@@ -5551,8 +5561,10 @@ static void ci_dpm_reset_asic(struct amdgpu_device *adev)
 }
 #endif
 
-static void ci_dpm_display_configuration_changed(struct amdgpu_device *adev)
+static void ci_dpm_display_configuration_changed(void *handle)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
	ci_program_display_gap(adev);
 }
 
@@ -6105,9 +6117,10 @@ static int ci_dpm_init(struct amdgpu_device *adev)
 }
 
 static void
-ci_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
+ci_dpm_debugfs_print_current_performance_level(void *handle,
					       struct seq_file *m)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);
	struct amdgpu_ps *rps = &pi->current_rps;
	u32 sclk = ci_get_average_sclk_freq(adev);
@@ -6131,12 +6144,13 @@ ci_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
	seq_printf(m, "GPU load: %u %%\n", activity_percent);
 }
 
-static void ci_dpm_print_power_state(struct amdgpu_device *adev,
-				     struct amdgpu_ps *rps)
+static void ci_dpm_print_power_state(void *handle, void *current_ps)
 {
+	struct amdgpu_ps *rps = (struct amdgpu_ps *)current_ps;
	struct ci_ps *ps = ci_get_ps(rps);
	struct ci_pl *pl;
	int i;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
	amdgpu_dpm_print_class_info(rps->class, rps->class2);
	amdgpu_dpm_print_cap_info(rps->caps);
@@ -6158,20 +6172,23 @@ static inline bool ci_are_power_levels_equal(const struct ci_pl *ci_cpl1,
		(ci_cpl1->pcie_lane == ci_cpl2->pcie_lane));
 }
 
-static int ci_check_state_equal(struct amdgpu_device *adev,
-				struct amdgpu_ps *cps,
-				struct amdgpu_ps *rps,
+static int ci_check_state_equal(void *handle,
+				void *current_ps,
+				void *request_ps,
				bool *equal)
 {
	struct ci_ps *ci_cps;
	struct ci_ps *ci_rps;
	int i;
+	struct amdgpu_ps *cps = (struct amdgpu_ps *)current_ps;
+	struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
	if (adev == NULL || cps == NULL || rps == NULL || equal == NULL)
		return -EINVAL;
 
-	ci_cps = ci_get_ps(cps);
-	ci_rps = ci_get_ps(rps);
+	ci_cps = ci_get_ps((struct amdgpu_ps *)cps);
+	ci_rps = ci_get_ps((struct amdgpu_ps *)rps);
 
	if (ci_cps == NULL) {
		*equal = false;
@@ -6199,8 +6216,9 @@ static int ci_check_state_equal(struct amdgpu_device *adev,
	return 0;
 }
 
-static u32 ci_dpm_get_sclk(struct amdgpu_device *adev, bool low)
+static u32 ci_dpm_get_sclk(void *handle, bool low)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
 
@@ -6210,8 +6228,9 @@ static u32 ci_dpm_get_sclk(struct amdgpu_device *adev, bool low)
	return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
 }
 
-static u32 ci_dpm_get_mclk(struct amdgpu_device *adev, bool low)
+static u32 ci_dpm_get_mclk(void *handle, bool low)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
 
@@ -6222,10 +6241,11 @@ static u32 ci_dpm_get_mclk(struct amdgpu_device *adev, bool low)
 }
 
 /* get temperature in millidegrees */
-static int ci_dpm_get_temp(struct amdgpu_device *adev)
+static int ci_dpm_get_temp(void *handle)
 {
	u32 temp;
	int actual_temp = 0;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
	temp = (RREG32_SMC(ixCG_MULT_THERMAL_STATUS) & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >>
		CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT;
@@ -6261,7 +6281,6 @@ static int ci_dpm_early_init(void *handle)
 {
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	ci_dpm_set_dpm_funcs(adev);
	ci_dpm_set_irq_funcs(adev);
 
	return 0;
@@ -6551,9 +6570,10 @@ static int ci_dpm_set_powergating_state(void *handle,
	return 0;
 }
 
-static int ci_dpm_print_clock_levels(struct amdgpu_device *adev,
+static int ci_dpm_print_clock_levels(void *handle,
				     enum pp_clock_type type, char *buf)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
	struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
@@ -6618,9 +6638,10 @@ static int ci_dpm_print_clock_levels(struct amdgpu_device *adev,
	return size;
 }
 
-static int ci_dpm_force_clock_level(struct amdgpu_device *adev,
+static int ci_dpm_force_clock_level(void *handle,
				    enum pp_clock_type type, uint32_t mask)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);
 
	if (adev->pm.dpm.forced_level & (AMD_DPM_FORCED_LEVEL_AUTO |
@@ -6664,8 +6685,9 @@ static int ci_dpm_force_clock_level(struct amdgpu_device *adev,
	return 0;
 }
 
-static int ci_dpm_get_sclk_od(struct amdgpu_device *adev)
+static int ci_dpm_get_sclk_od(void *handle)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_single_dpm_table *sclk_table = &(pi->dpm_table.sclk_table);
	struct ci_single_dpm_table *golden_sclk_table =
@@ -6680,8 +6702,9 @@ static int ci_dpm_get_sclk_od(struct amdgpu_device *adev)
	return value;
 }
 
-static int ci_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
+static int ci_dpm_set_sclk_od(void *handle, uint32_t value)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_ps *ps = ci_get_ps(adev->pm.dpm.requested_ps);
	struct ci_single_dpm_table *golden_sclk_table =
@@ -6698,8 +6721,9 @@ static int ci_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
	return 0;
 }
 
-static int ci_dpm_get_mclk_od(struct amdgpu_device *adev)
+static int ci_dpm_get_mclk_od(void *handle)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_single_dpm_table *mclk_table = &(pi->dpm_table.mclk_table);
	struct ci_single_dpm_table *golden_mclk_table =
@@ -6714,8 +6738,9 @@ static int ci_dpm_get_mclk_od(struct amdgpu_device *adev)
	return value;
 }
 
-static int ci_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value)
+static int ci_dpm_set_mclk_od(void *handle, uint32_t value)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_ps *ps = ci_get_ps(adev->pm.dpm.requested_ps);
	struct ci_single_dpm_table *golden_mclk_table =
@@ -6732,9 +6757,10 @@ static int ci_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value)
	return 0;
 }
 
-static int ci_dpm_get_power_profile_state(struct amdgpu_device *adev,
+static int ci_dpm_get_power_profile_state(void *handle,
					  struct amd_pp_profile *query)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);
 
	if (!pi || !query)
@@ -6851,9 +6877,10 @@ static int ci_set_power_profile_state(struct amdgpu_device *adev,
	return result;
 }
 
-static int ci_dpm_set_power_profile_state(struct amdgpu_device *adev,
+static int ci_dpm_set_power_profile_state(void *handle,
					  struct amd_pp_profile *request)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);
|
struct ci_power_info *pi = ci_get_pi(adev);
|
||||||
int ret = -1;
|
int ret = -1;
|
||||||
|
|
||||||
@ -6906,9 +6933,10 @@ static int ci_dpm_set_power_profile_state(struct amdgpu_device *adev,
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int ci_dpm_reset_power_profile_state(struct amdgpu_device *adev,
|
static int ci_dpm_reset_power_profile_state(void *handle,
|
||||||
struct amd_pp_profile *request)
|
struct amd_pp_profile *request)
|
||||||
{
|
{
|
||||||
|
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||||
struct ci_power_info *pi = ci_get_pi(adev);
|
struct ci_power_info *pi = ci_get_pi(adev);
|
||||||
|
|
||||||
if (!pi || !request)
|
if (!pi || !request)
|
||||||
@ -6927,9 +6955,10 @@ static int ci_dpm_reset_power_profile_state(struct amdgpu_device *adev,
|
|||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int ci_dpm_switch_power_profile(struct amdgpu_device *adev,
|
static int ci_dpm_switch_power_profile(void *handle,
|
||||||
enum amd_pp_profile_type type)
|
enum amd_pp_profile_type type)
|
||||||
{
|
{
|
||||||
|
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||||
struct ci_power_info *pi = ci_get_pi(adev);
|
struct ci_power_info *pi = ci_get_pi(adev);
|
||||||
struct amd_pp_profile request = {0};
|
struct amd_pp_profile request = {0};
|
||||||
|
|
||||||
@ -6944,11 +6973,12 @@ static int ci_dpm_switch_power_profile(struct amdgpu_device *adev,
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int ci_dpm_read_sensor(struct amdgpu_device *adev, int idx,
|
static int ci_dpm_read_sensor(void *handle, int idx,
|
||||||
void *value, int *size)
|
void *value, int *size)
|
||||||
{
|
{
|
||||||
u32 activity_percent = 50;
|
u32 activity_percent = 50;
|
||||||
int ret;
|
int ret;
|
||||||
|
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||||
|
|
||||||
/* size must be at least 4 bytes for all sensors */
|
/* size must be at least 4 bytes for all sensors */
|
||||||
if (*size < 4)
|
if (*size < 4)
|
||||||
@ -7003,7 +7033,7 @@ const struct amd_ip_funcs ci_dpm_ip_funcs = {
|
|||||||
.set_powergating_state = ci_dpm_set_powergating_state,
|
.set_powergating_state = ci_dpm_set_powergating_state,
|
||||||
};
|
};
|
||||||
|
|
||||||
static const struct amdgpu_dpm_funcs ci_dpm_funcs = {
|
const struct amd_pm_funcs ci_dpm_funcs = {
|
||||||
.get_temperature = &ci_dpm_get_temp,
|
.get_temperature = &ci_dpm_get_temp,
|
||||||
.pre_set_power_state = &ci_dpm_pre_set_power_state,
|
.pre_set_power_state = &ci_dpm_pre_set_power_state,
|
||||||
.set_power_state = &ci_dpm_set_power_state,
|
.set_power_state = &ci_dpm_set_power_state,
|
||||||
@ -7035,12 +7065,6 @@ static const struct amdgpu_dpm_funcs ci_dpm_funcs = {
|
|||||||
.read_sensor = ci_dpm_read_sensor,
|
.read_sensor = ci_dpm_read_sensor,
|
||||||
};
|
};
|
||||||
|
|
||||||
static void ci_dpm_set_dpm_funcs(struct amdgpu_device *adev)
|
|
||||||
{
|
|
||||||
if (adev->pm.funcs == NULL)
|
|
||||||
adev->pm.funcs = &ci_dpm_funcs;
|
|
||||||
}
|
|
||||||
|
|
||||||
static const struct amdgpu_irq_src_funcs ci_dpm_irq_funcs = {
|
static const struct amdgpu_irq_src_funcs ci_dpm_irq_funcs = {
|
||||||
.set = ci_dpm_set_interrupt_state,
|
.set = ci_dpm_set_interrupt_state,
|
||||||
.process = ci_dpm_process_interrupt,
|
.process = ci_dpm_process_interrupt,
|
||||||
@@ -26,5 +26,6 @@
 
 extern const struct amd_ip_funcs ci_dpm_ip_funcs;
 extern const struct amd_ip_funcs kv_dpm_ip_funcs;
+extern const struct amd_pm_funcs ci_dpm_funcs;
+extern const struct amd_pm_funcs kv_dpm_funcs;
 #endif
@@ -228,6 +228,19 @@ static u32 cik_ih_get_wptr(struct amdgpu_device *adev)
  * [127:96] - reserved
  */
 
+/**
+ * cik_ih_prescreen_iv - prescreen an interrupt vector
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Returns true if the interrupt vector should be further processed.
+ */
+static bool cik_ih_prescreen_iv(struct amdgpu_device *adev)
+{
+	/* Process all interrupts */
+	return true;
+}
+
 /**
  * cik_ih_decode_iv - decode an interrupt vector
  *
@@ -433,6 +446,7 @@ static const struct amd_ip_funcs cik_ih_ip_funcs = {
 
 static const struct amdgpu_ih_funcs cik_ih_funcs = {
 	.get_wptr = cik_ih_get_wptr,
+	.prescreen_iv = cik_ih_prescreen_iv,
 	.decode_iv = cik_ih_decode_iv,
 	.set_rptr = cik_ih_set_rptr
 };
@@ -1387,8 +1387,13 @@ static void cik_sdma_set_buffer_funcs(struct amdgpu_device *adev)
 }
 
 static const struct amdgpu_vm_pte_funcs cik_sdma_vm_pte_funcs = {
+	.copy_pte_num_dw = 7,
 	.copy_pte = cik_sdma_vm_copy_pte,
+
 	.write_pte = cik_sdma_vm_write_pte,
+
+	.set_max_nums_pte_pde = 0x1fffff >> 3,
+	.set_pte_pde_num_dw = 10,
 	.set_pte_pde = cik_sdma_vm_set_pte_pde,
 };
 
@@ -207,6 +207,19 @@ static u32 cz_ih_get_wptr(struct amdgpu_device *adev)
 	return (wptr & adev->irq.ih.ptr_mask);
 }
 
+/**
+ * cz_ih_prescreen_iv - prescreen an interrupt vector
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Returns true if the interrupt vector should be further processed.
+ */
+static bool cz_ih_prescreen_iv(struct amdgpu_device *adev)
+{
+	/* Process all interrupts */
+	return true;
+}
+
 /**
  * cz_ih_decode_iv - decode an interrupt vector
  *
@@ -414,6 +427,7 @@ static const struct amd_ip_funcs cz_ih_ip_funcs = {
 
 static const struct amdgpu_ih_funcs cz_ih_funcs = {
 	.get_wptr = cz_ih_get_wptr,
+	.prescreen_iv = cz_ih_prescreen_iv,
 	.decode_iv = cz_ih_decode_iv,
 	.set_rptr = cz_ih_set_rptr
 };
@@ -125,24 +125,39 @@ MODULE_FIRMWARE("amdgpu/fiji_mec2.bin");
 MODULE_FIRMWARE("amdgpu/fiji_rlc.bin");
 
 MODULE_FIRMWARE("amdgpu/polaris11_ce.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_ce_2.bin");
 MODULE_FIRMWARE("amdgpu/polaris11_pfp.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_pfp_2.bin");
 MODULE_FIRMWARE("amdgpu/polaris11_me.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_me_2.bin");
 MODULE_FIRMWARE("amdgpu/polaris11_mec.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_mec_2.bin");
 MODULE_FIRMWARE("amdgpu/polaris11_mec2.bin");
+MODULE_FIRMWARE("amdgpu/polaris11_mec2_2.bin");
 MODULE_FIRMWARE("amdgpu/polaris11_rlc.bin");
 
 MODULE_FIRMWARE("amdgpu/polaris10_ce.bin");
+MODULE_FIRMWARE("amdgpu/polaris10_ce_2.bin");
 MODULE_FIRMWARE("amdgpu/polaris10_pfp.bin");
+MODULE_FIRMWARE("amdgpu/polaris10_pfp_2.bin");
 MODULE_FIRMWARE("amdgpu/polaris10_me.bin");
+MODULE_FIRMWARE("amdgpu/polaris10_me_2.bin");
 MODULE_FIRMWARE("amdgpu/polaris10_mec.bin");
+MODULE_FIRMWARE("amdgpu/polaris10_mec_2.bin");
 MODULE_FIRMWARE("amdgpu/polaris10_mec2.bin");
+MODULE_FIRMWARE("amdgpu/polaris10_mec2_2.bin");
 MODULE_FIRMWARE("amdgpu/polaris10_rlc.bin");
 
 MODULE_FIRMWARE("amdgpu/polaris12_ce.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_ce_2.bin");
 MODULE_FIRMWARE("amdgpu/polaris12_pfp.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_pfp_2.bin");
 MODULE_FIRMWARE("amdgpu/polaris12_me.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_me_2.bin");
 MODULE_FIRMWARE("amdgpu/polaris12_mec.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_mec_2.bin");
 MODULE_FIRMWARE("amdgpu/polaris12_mec2.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_mec2_2.bin");
 MODULE_FIRMWARE("amdgpu/polaris12_rlc.bin");
 
 static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
@@ -918,8 +933,17 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
 		BUG();
 	}
 
-	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
-	err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
+	if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
+		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp_2.bin", chip_name);
+		err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
+		if (err == -ENOENT) {
+			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
+			err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
+		}
+	} else {
+		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
+		err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
+	}
 	if (err)
 		goto out;
 	err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
@@ -929,8 +953,17 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
 	adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
 	adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
 
-	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
-	err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
+	if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
+		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me_2.bin", chip_name);
+		err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
+		if (err == -ENOENT) {
+			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
+			err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
+		}
+	} else {
+		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
+		err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
+	}
 	if (err)
 		goto out;
 	err = amdgpu_ucode_validate(adev->gfx.me_fw);
@@ -941,8 +974,17 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
 
 	adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
 
-	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
-	err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
+	if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
+		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce_2.bin", chip_name);
+		err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
+		if (err == -ENOENT) {
+			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
+			err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
+		}
+	} else {
+		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
+		err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
+	}
 	if (err)
 		goto out;
 	err = amdgpu_ucode_validate(adev->gfx.ce_fw);
@@ -1012,8 +1054,17 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
 	for (i = 0 ; i < (rlc_hdr->reg_list_size_bytes >> 2); i++)
 		adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);
 
-	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
-	err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
+	if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
+		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec_2.bin", chip_name);
+		err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
+		if (err == -ENOENT) {
+			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
+			err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
+		}
+	} else {
+		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
+		err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
+	}
 	if (err)
 		goto out;
 	err = amdgpu_ucode_validate(adev->gfx.mec_fw);
@@ -1025,8 +1076,17 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
 
 	if ((adev->asic_type != CHIP_STONEY) &&
 	    (adev->asic_type != CHIP_TOPAZ)) {
-		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
-		err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
+		if (adev->asic_type >= CHIP_POLARIS10 && adev->asic_type <= CHIP_POLARIS12) {
+			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2_2.bin", chip_name);
+			err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
+			if (err == -ENOENT) {
+				snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
+				err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
+			}
+		} else {
+			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
+			err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
+		}
 		if (!err) {
 			err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
 			if (err)
@@ -2053,6 +2113,7 @@ static int gfx_v8_0_sw_fini(void *handle)
 	amdgpu_gfx_compute_mqd_sw_fini(adev);
 	amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq);
 	amdgpu_gfx_kiq_fini(adev);
+	amdgpu_bo_free_kernel(&adev->virt.csa_obj, &adev->virt.csa_vmid0_addr, NULL);
 
 	gfx_v8_0_mec_fini(adev);
 	gfx_v8_0_rlc_fini(adev);
@@ -4577,12 +4638,10 @@ static int gfx_v8_0_mqd_init(struct amdgpu_ring *ring)
 	mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
 	mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
 	mqd->compute_misc_reserved = 0x00000003;
-	if (!(adev->flags & AMD_IS_APU)) {
-		mqd->dynamic_cu_mask_addr_lo = lower_32_bits(ring->mqd_gpu_addr
-			+ offsetof(struct vi_mqd_allocation, dynamic_cu_mask));
-		mqd->dynamic_cu_mask_addr_hi = upper_32_bits(ring->mqd_gpu_addr
-			+ offsetof(struct vi_mqd_allocation, dynamic_cu_mask));
-	}
+	mqd->dynamic_cu_mask_addr_lo = lower_32_bits(ring->mqd_gpu_addr
+				     + offsetof(struct vi_mqd_allocation, dynamic_cu_mask));
+	mqd->dynamic_cu_mask_addr_hi = upper_32_bits(ring->mqd_gpu_addr
+				     + offsetof(struct vi_mqd_allocation, dynamic_cu_mask));
 	eop_base_addr = ring->eop_gpu_addr >> 8;
 	mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
 	mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
@@ -4753,7 +4812,7 @@ static int gfx_v8_0_kiq_init_queue(struct amdgpu_ring *ring)
 
 	gfx_v8_0_kiq_setting(ring);
 
-	if (adev->gfx.in_reset) { /* for GPU_RESET case */
+	if (adev->in_sriov_reset) { /* for GPU_RESET case */
 		/* reset MQD to a clean status */
 		if (adev->gfx.mec.mqd_backup[mqd_idx])
 			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation));
@@ -4790,7 +4849,7 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring)
 	struct vi_mqd *mqd = ring->mqd_ptr;
 	int mqd_idx = ring - &adev->gfx.compute_ring[0];
 
-	if (!adev->gfx.in_reset && !adev->gfx.in_suspend) {
+	if (!adev->in_sriov_reset && !adev->gfx.in_suspend) {
 		memset((void *)mqd, 0, sizeof(struct vi_mqd_allocation));
 		((struct vi_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
 		((struct vi_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
@@ -4802,7 +4861,7 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring)
 
 		if (adev->gfx.mec.mqd_backup[mqd_idx])
 			memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct vi_mqd_allocation));
-	} else if (adev->gfx.in_reset) { /* for GPU_RESET case */
+	} else if (adev->in_sriov_reset) { /* for GPU_RESET case */
 		/* reset MQD to a clean status */
 		if (adev->gfx.mec.mqd_backup[mqd_idx])
 			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation));
@@ -4975,12 +5034,69 @@ static int gfx_v8_0_hw_init(void *handle)
 	return r;
 }
 
+static int gfx_v8_0_kcq_disable(struct amdgpu_ring *kiq_ring,struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = kiq_ring->adev;
+	uint32_t scratch, tmp = 0;
+	int r, i;
+
+	r = amdgpu_gfx_scratch_get(adev, &scratch);
+	if (r) {
+		DRM_ERROR("Failed to get scratch reg (%d).\n", r);
+		return r;
+	}
+	WREG32(scratch, 0xCAFEDEAD);
+
+	r = amdgpu_ring_alloc(kiq_ring, 10);
+	if (r) {
+		DRM_ERROR("Failed to lock KIQ (%d).\n", r);
+		amdgpu_gfx_scratch_free(adev, scratch);
+		return r;
+	}
+
+	/* unmap queues */
+	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
+	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
+			PACKET3_UNMAP_QUEUES_ACTION(1) | /* RESET_QUEUES */
+			PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
+			PACKET3_UNMAP_QUEUES_ENGINE_SEL(0) |
+			PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
+	amdgpu_ring_write(kiq_ring, PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));
+	amdgpu_ring_write(kiq_ring, 0);
+	amdgpu_ring_write(kiq_ring, 0);
+	amdgpu_ring_write(kiq_ring, 0);
+	/* write to scratch for completion */
+	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
+	amdgpu_ring_write(kiq_ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
+	amdgpu_ring_write(kiq_ring, 0xDEADBEEF);
+	amdgpu_ring_commit(kiq_ring);
+
+	for (i = 0; i < adev->usec_timeout; i++) {
+		tmp = RREG32(scratch);
+		if (tmp == 0xDEADBEEF)
+			break;
+		DRM_UDELAY(1);
+	}
+	if (i >= adev->usec_timeout) {
+		DRM_ERROR("KCQ disabled failed (scratch(0x%04X)=0x%08X)\n", scratch, tmp);
+		r = -EINVAL;
+	}
+	amdgpu_gfx_scratch_free(adev, scratch);
+	return r;
+}
+
 static int gfx_v8_0_hw_fini(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	int i;
 
 	amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
 	amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
 
+	/* disable KCQ to avoid CPC touch memory not valid anymore */
+	for (i = 0; i < adev->gfx.num_compute_rings; i++)
+		gfx_v8_0_kcq_disable(&adev->gfx.kiq.ring, &adev->gfx.compute_ring[i]);
+
 	if (amdgpu_sriov_vf(adev)) {
 		pr_debug("For SRIOV client, shouldn't do anything.\n");
 		return 0;
@@ -66,38 +66,70 @@ MODULE_FIRMWARE("amdgpu/raven_rlc.bin");
 
 static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
 {
-	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE),
-	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID0), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID0)},
-	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID1_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID1_SIZE),
-	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID1), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID1)},
-	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID2_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID2_SIZE),
-	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID2), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID2)},
-	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID3_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID3_SIZE),
-	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID3), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID3)},
-	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID4_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID4_SIZE),
-	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID4), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID4)},
-	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID5_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID5_SIZE),
-	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID5), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID5)},
-	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID6_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID6_SIZE),
-	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID6), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID6)},
-	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID7_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID7_SIZE),
-	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID7), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID7)},
-	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID8_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID8_SIZE),
-	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID8), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID8)},
-	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID9_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID9_SIZE),
-	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID9), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID9)},
-	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID10_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID10_SIZE),
-	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID10), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID10)},
-	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID11_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID11_SIZE),
-	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID11), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID11)},
-	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID12_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID12_SIZE),
-	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID12), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID12)},
-	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID13_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID13_SIZE),
-	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID13), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID13)},
-	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID14_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID14_SIZE),
-	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID14), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID14)},
-	{SOC15_REG_OFFSET(GC, 0, mmGDS_VMID15_BASE), SOC15_REG_OFFSET(GC, 0, mmGDS_VMID15_SIZE),
-	 SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID15), SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID15)}
+	{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_BASE),
+	  SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE),
+	  SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID0),
+	  SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID0) },
+	{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID1_BASE),
+	  SOC15_REG_OFFSET(GC, 0, mmGDS_VMID1_SIZE),
+	  SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID1),
+	  SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID1) },
+	{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID2_BASE),
+	  SOC15_REG_OFFSET(GC, 0, mmGDS_VMID2_SIZE),
+	  SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID2),
+	  SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID2) },
+	{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID3_BASE),
+	  SOC15_REG_OFFSET(GC, 0, mmGDS_VMID3_SIZE),
+	  SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID3),
+	  SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID3) },
+	{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID4_BASE),
+	  SOC15_REG_OFFSET(GC, 0, mmGDS_VMID4_SIZE),
+	  SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID4),
+	  SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID4) },
+	{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID5_BASE),
+	  SOC15_REG_OFFSET(GC, 0, mmGDS_VMID5_SIZE),
+	  SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID5),
+	  SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID5) },
+	{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID6_BASE),
+	  SOC15_REG_OFFSET(GC, 0, mmGDS_VMID6_SIZE),
+	  SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID6),
+	  SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID6) },
+	{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID7_BASE),
+	  SOC15_REG_OFFSET(GC, 0, mmGDS_VMID7_SIZE),
+	  SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID7),
+	  SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID7) },
+	{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID8_BASE),
+	  SOC15_REG_OFFSET(GC, 0, mmGDS_VMID8_SIZE),
+	  SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID8),
+	  SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID8) },
+	{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID9_BASE),
+	  SOC15_REG_OFFSET(GC, 0, mmGDS_VMID9_SIZE),
+	  SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID9),
+	  SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID9) },
+	{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID10_BASE),
+	  SOC15_REG_OFFSET(GC, 0, mmGDS_VMID10_SIZE),
+	  SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID10),
+	  SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID10) },
+	{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID11_BASE),
+	  SOC15_REG_OFFSET(GC, 0, mmGDS_VMID11_SIZE),
+	  SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID11),
+	  SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID11) },
+	{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID12_BASE),
+	  SOC15_REG_OFFSET(GC, 0, mmGDS_VMID12_SIZE),
+	  SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID12),
+	  SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID12)},
+	{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID13_BASE),
+	  SOC15_REG_OFFSET(GC, 0, mmGDS_VMID13_SIZE),
+	  SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID13),
+	  SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID13) },
+	{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID14_BASE),
+	  SOC15_REG_OFFSET(GC, 0, mmGDS_VMID14_SIZE),
+	  SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID14),
+	  SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID14) },
+	{ SOC15_REG_OFFSET(GC, 0, mmGDS_VMID15_BASE),
+	  SOC15_REG_OFFSET(GC, 0, mmGDS_VMID15_SIZE),
+	  SOC15_REG_OFFSET(GC, 0, mmGDS_GWS_VMID15),
+	  SOC15_REG_OFFSET(GC, 0, mmGDS_OA_VMID15) }
 };
 
 static const u32 golden_settings_gc_9_0[] =
@@ -352,6 +384,25 @@ err1:
 	return r;
 }
 
+
+static void gfx_v9_0_free_microcode(struct amdgpu_device *adev)
+{
+	release_firmware(adev->gfx.pfp_fw);
+	adev->gfx.pfp_fw = NULL;
+	release_firmware(adev->gfx.me_fw);
+	adev->gfx.me_fw = NULL;
+	release_firmware(adev->gfx.ce_fw);
+	adev->gfx.ce_fw = NULL;
+	release_firmware(adev->gfx.rlc_fw);
+	adev->gfx.rlc_fw = NULL;
+	release_firmware(adev->gfx.mec_fw);
+	adev->gfx.mec_fw = NULL;
+	release_firmware(adev->gfx.mec2_fw);
+	adev->gfx.mec2_fw = NULL;
+
+	kfree(adev->gfx.rlc.register_list_format);
+}
+
 static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
 {
 	const char *chip_name;
@@ -1120,30 +1171,22 @@ static int gfx_v9_0_ngg_en(struct amdgpu_device *adev)
 {
 	struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
 	int r;
-	u32 data;
-	u32 size;
-	u32 base;
+	u32 data, base;
 
 	if (!amdgpu_ngg)
 		return 0;
 
 	/* Program buffer size */
-	data = 0;
-	size = adev->gfx.ngg.buf[NGG_PRIM].size / 256;
-	data = REG_SET_FIELD(data, WD_BUF_RESOURCE_1, INDEX_BUF_SIZE, size);
-	size = adev->gfx.ngg.buf[NGG_POS].size / 256;
-	data = REG_SET_FIELD(data, WD_BUF_RESOURCE_1, POS_BUF_SIZE, size);
+	data = REG_SET_FIELD(0, WD_BUF_RESOURCE_1, INDEX_BUF_SIZE,
+			     adev->gfx.ngg.buf[NGG_PRIM].size >> 8);
+	data = REG_SET_FIELD(data, WD_BUF_RESOURCE_1, POS_BUF_SIZE,
+			     adev->gfx.ngg.buf[NGG_POS].size >> 8);
 
 	WREG32_SOC15(GC, 0, mmWD_BUF_RESOURCE_1, data);
 
-	data = 0;
-	size = adev->gfx.ngg.buf[NGG_CNTL].size / 256;
-	data = REG_SET_FIELD(data, WD_BUF_RESOURCE_2, CNTL_SB_BUF_SIZE, size);
-	size = adev->gfx.ngg.buf[NGG_PARAM].size / 1024;
-	data = REG_SET_FIELD(data, WD_BUF_RESOURCE_2, PARAM_BUF_SIZE, size);
+	data = REG_SET_FIELD(0, WD_BUF_RESOURCE_2, CNTL_SB_BUF_SIZE,
+			     adev->gfx.ngg.buf[NGG_CNTL].size >> 8);
+	data = REG_SET_FIELD(data, WD_BUF_RESOURCE_2, PARAM_BUF_SIZE,
+			     adev->gfx.ngg.buf[NGG_PARAM].size >> 10);
 
 	WREG32_SOC15(GC, 0, mmWD_BUF_RESOURCE_2, data);
 
 	/* Program buffer base address */
@@ -1306,7 +1349,10 @@ static int gfx_v9_0_sw_init(void *handle)
 	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
 		ring = &adev->gfx.gfx_ring[i];
 		ring->ring_obj = NULL;
-		sprintf(ring->name, "gfx");
+		if (!i)
+			sprintf(ring->name, "gfx");
+		else
+			sprintf(ring->name, "gfx_%d", i);
 		ring->use_doorbell = true;
 		ring->doorbell_index = AMDGPU_DOORBELL64_GFX_RING0 << 1;
 		r = amdgpu_ring_init(adev, ring, 1024,
@@ -1346,7 +1392,7 @@ static int gfx_v9_0_sw_init(void *handle)
 		return r;
 
 	/* create MQD for all compute queues as wel as KIQ for SRIOV case */
-	r = amdgpu_gfx_compute_mqd_sw_init(adev, sizeof(struct v9_mqd));
+	r = amdgpu_gfx_compute_mqd_sw_init(adev, sizeof(struct v9_mqd_allocation));
 	if (r)
 		return r;
 
@@ -1398,9 +1444,11 @@ static int gfx_v9_0_sw_fini(void *handle)
 	amdgpu_gfx_compute_mqd_sw_fini(adev);
 	amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq);
 	amdgpu_gfx_kiq_fini(adev);
+	amdgpu_bo_free_kernel(&adev->virt.csa_obj, &adev->virt.csa_vmid0_addr, NULL);
 
 	gfx_v9_0_mec_fini(adev);
 	gfx_v9_0_ngg_fini(adev);
+	gfx_v9_0_free_microcode(adev);
 
 	return 0;
 }
@@ -1740,11 +1788,7 @@ static int gfx_v9_0_init_rlc_save_restore_list(struct amdgpu_device *adev)
 
 static void gfx_v9_0_enable_save_restore_machine(struct amdgpu_device *adev)
 {
-	u32 tmp = 0;
-
-	tmp = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL));
-	tmp |= RLC_SRM_CNTL__SRM_ENABLE_MASK;
-	WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_SRM_CNTL), tmp);
+	WREG32_FIELD15(GC, 0, RLC_SRM_CNTL, SRM_ENABLE, 1);
 }
 
 static void pwr_10_0_gfxip_control_over_cgpg(struct amdgpu_device *adev,
@@ -1822,16 +1866,11 @@ static void gfx_v9_0_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev
 	uint32_t default_data = 0;
 
 	default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
-	if (enable == true) {
-		data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
-		if (default_data != data)
-			WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
-	} else {
-		data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
-		if(default_data != data)
-			WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
-	}
+	data = REG_SET_FIELD(data, RLC_PG_CNTL,
+			     SMU_CLK_SLOWDOWN_ON_PU_ENABLE,
+			     enable ? 1 : 0);
+	if (default_data != data)
+		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
 }
 
 static void gfx_v9_0_enable_sck_slow_down_on_power_down(struct amdgpu_device *ad
@@ -1841,16 +1880,11 @@ static void gfx_v9_0_enable_sck_slow_down_on_power_down(struct amdgpu_device *ad
 	uint32_t default_data = 0;
 
 	default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
-	if (enable == true) {
-		data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
-		if(default_data != data)
-			WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
-	} else {
-		data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
-		if(default_data != data)
-			WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
-	}
+	data = REG_SET_FIELD(data, RLC_PG_CNTL,
+			     SMU_CLK_SLOWDOWN_ON_PD_ENABLE,
+			     enable ? 1 : 0);
+	if(default_data != data)
+		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
 }
 
 static void gfx_v9_0_enable_cp_power_gating(struct amdgpu_device *adev,
@@ -1860,16 +1894,11 @@ static void gfx_v9_0_enable_cp_power_gating(struct amdgpu_device *adev,
 	uint32_t default_data = 0;
 
 	default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
-	if (enable == true) {
-		data &= ~RLC_PG_CNTL__CP_PG_DISABLE_MASK;
-		if(default_data != data)
-			WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
-	} else {
-		data |= RLC_PG_CNTL__CP_PG_DISABLE_MASK;
-		if(default_data != data)
-			WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
-	}
+	data = REG_SET_FIELD(data, RLC_PG_CNTL,
+			     CP_PG_DISABLE,
+			     enable ? 0 : 1);
+	if(default_data != data)
+		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
 }
 
 static void gfx_v9_0_enable_gfx_cg_power_gating(struct amdgpu_device *adev,
@@ -1878,10 +1907,9 @@ static void gfx_v9_0_enable_gfx_cg_power_gating(struct amdgpu_device *adev,
 	uint32_t data, default_data;
 
 	default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
-	if (enable == true)
-		data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
-	else
-		data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
+	data = REG_SET_FIELD(data, RLC_PG_CNTL,
+			     GFX_POWER_GATING_ENABLE,
+			     enable ? 1 : 0);
 	if(default_data != data)
 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
 }
@@ -1892,10 +1920,9 @@ static void gfx_v9_0_enable_gfx_pipeline_powergating(struct amdgpu_device *adev,
 	uint32_t data, default_data;
 
 	default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
-	if (enable == true)
-		data |= RLC_PG_CNTL__GFX_PIPELINE_PG_ENABLE_MASK;
-	else
-		data &= ~RLC_PG_CNTL__GFX_PIPELINE_PG_ENABLE_MASK;
+	data = REG_SET_FIELD(data, RLC_PG_CNTL,
+			     GFX_PIPELINE_PG_ENABLE,
+			     enable ? 1 : 0);
 	if(default_data != data)
 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
 
@@ -1910,10 +1937,9 @@ static void gfx_v9_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *ade
 	uint32_t data, default_data;
 
 	default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
-	if (enable == true)
-		data |= RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
-	else
-		data &= ~RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
+	data = REG_SET_FIELD(data, RLC_PG_CNTL,
+			     STATIC_PER_CU_PG_ENABLE,
+			     enable ? 1 : 0);
 	if(default_data != data)
 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
 }
@@ -1924,10 +1950,9 @@ static void gfx_v9_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *ad
 	uint32_t data, default_data;
 
 	default_data = data = RREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL));
-	if (enable == true)
-		data |= RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
-	else
-		data &= ~RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
+	data = REG_SET_FIELD(data, RLC_PG_CNTL,
+			     DYN_PER_CU_PG_ENABLE,
+			     enable ? 1 : 0);
 	if(default_data != data)
 		WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_CNTL), data);
 }
@@ -1967,13 +1992,8 @@ static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
 
 void gfx_v9_0_rlc_stop(struct amdgpu_device *adev)
 {
-	u32 tmp = RREG32_SOC15(GC, 0, mmRLC_CNTL);
-
-	tmp = REG_SET_FIELD(tmp, RLC_CNTL, RLC_ENABLE_F32, 0);
-	WREG32_SOC15(GC, 0, mmRLC_CNTL, tmp);
+	WREG32_FIELD15(GC, 0, RLC_CNTL, RLC_ENABLE_F32, 0);
 
 	gfx_v9_0_enable_gui_idle_interrupt(adev, false);
 
 	gfx_v9_0_wait_for_rlc_serdes(adev);
 }
 
@@ -2045,8 +2065,10 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
 {
 	int r;
 
-	if (amdgpu_sriov_vf(adev))
+	if (amdgpu_sriov_vf(adev)) {
+		gfx_v9_0_init_csb(adev);
 		return 0;
+	}
 
 	gfx_v9_0_rlc_stop(adev);
 
@@ -2463,6 +2485,13 @@ static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
 	mqd->compute_static_thread_mgmt_se3 = 0xffffffff;
 	mqd->compute_misc_reserved = 0x00000003;
 
+	mqd->dynamic_cu_mask_addr_lo =
+		lower_32_bits(ring->mqd_gpu_addr
+		+ offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
+	mqd->dynamic_cu_mask_addr_hi =
+		upper_32_bits(ring->mqd_gpu_addr
+		+ offsetof(struct v9_mqd_allocation, dynamic_cu_mask));
+
 	eop_base_addr = ring->eop_gpu_addr >> 8;
 	mqd->cp_hqd_eop_base_addr_lo = eop_base_addr;
 	mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr);
@@ -2486,10 +2515,10 @@ static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
 				    DOORBELL_SOURCE, 0);
 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
 				    DOORBELL_HIT, 0);
-	}
-	else
+	} else {
 		tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
 				    DOORBELL_EN, 0);
+	}
 
 	mqd->cp_hqd_pq_doorbell_control = tmp;
 
@@ -2692,10 +2721,10 @@ static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
 
 	gfx_v9_0_kiq_setting(ring);
 
-	if (adev->gfx.in_reset) { /* for GPU_RESET case */
+	if (adev->in_sriov_reset) { /* for GPU_RESET case */
 		/* reset MQD to a clean status */
 		if (adev->gfx.mec.mqd_backup[mqd_idx])
-			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
+			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
 
 		/* reset ring buffer */
 		ring->wptr = 0;
@@ -2707,7 +2736,9 @@ static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
 		soc15_grbm_select(adev, 0, 0, 0, 0);
 		mutex_unlock(&adev->srbm_mutex);
 	} else {
-		memset((void *)mqd, 0, sizeof(*mqd));
+		memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
+		((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
+		((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
 		mutex_lock(&adev->srbm_mutex);
 		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
 		gfx_v9_0_mqd_init(ring);
@@ -2716,7 +2747,7 @@ static int gfx_v9_0_kiq_init_queue(struct amdgpu_ring *ring)
 		mutex_unlock(&adev->srbm_mutex);
 
 		if (adev->gfx.mec.mqd_backup[mqd_idx])
-			memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
+			memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
 	}
 
 	return 0;
@@ -2728,8 +2759,10 @@ static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
 	struct v9_mqd *mqd = ring->mqd_ptr;
 	int mqd_idx = ring - &adev->gfx.compute_ring[0];
 
-	if (!adev->gfx.in_reset && !adev->gfx.in_suspend) {
-		memset((void *)mqd, 0, sizeof(*mqd));
+	if (!adev->in_sriov_reset && !adev->gfx.in_suspend) {
+		memset((void *)mqd, 0, sizeof(struct v9_mqd_allocation));
+		((struct v9_mqd_allocation *)mqd)->dynamic_cu_mask = 0xFFFFFFFF;
+		((struct v9_mqd_allocation *)mqd)->dynamic_rb_mask = 0xFFFFFFFF;
 		mutex_lock(&adev->srbm_mutex);
 		soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
 		gfx_v9_0_mqd_init(ring);
@@ -2737,11 +2770,11 @@ static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
 		mutex_unlock(&adev->srbm_mutex);
 
 		if (adev->gfx.mec.mqd_backup[mqd_idx])
-			memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(*mqd));
+			memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct v9_mqd_allocation));
-	} else if (adev->gfx.in_reset) { /* for GPU_RESET case */
+	} else if (adev->in_sriov_reset) { /* for GPU_RESET case */
 		/* reset MQD to a clean status */
 		if (adev->gfx.mec.mqd_backup[mqd_idx])
-			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(*mqd));
+			memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct v9_mqd_allocation));
 
 		/* reset ring buffer */
 		ring->wptr = 0;
@@ -2882,12 +2915,70 @@ static int gfx_v9_0_hw_init(void *handle)
 	return r;
 }
 
+static int gfx_v9_0_kcq_disable(struct amdgpu_ring *kiq_ring,struct amdgpu_ring *ring)
+{
+	struct amdgpu_device *adev = kiq_ring->adev;
+	uint32_t scratch, tmp = 0;
+	int r, i;
+
+	r = amdgpu_gfx_scratch_get(adev, &scratch);
+	if (r) {
+		DRM_ERROR("Failed to get scratch reg (%d).\n", r);
+		return r;
+	}
+	WREG32(scratch, 0xCAFEDEAD);
+
+	r = amdgpu_ring_alloc(kiq_ring, 10);
+	if (r) {
+		DRM_ERROR("Failed to lock KIQ (%d).\n", r);
+		amdgpu_gfx_scratch_free(adev, scratch);
+		return r;
+	}
+
+	/* unmap queues */
+	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_UNMAP_QUEUES, 4));
+	amdgpu_ring_write(kiq_ring, /* Q_sel: 0, vmid: 0, engine: 0, num_Q: 1 */
+			PACKET3_UNMAP_QUEUES_ACTION(1) | /* RESET_QUEUES */
+			PACKET3_UNMAP_QUEUES_QUEUE_SEL(0) |
+			PACKET3_UNMAP_QUEUES_ENGINE_SEL(0) |
+			PACKET3_UNMAP_QUEUES_NUM_QUEUES(1));
+	amdgpu_ring_write(kiq_ring, PACKET3_UNMAP_QUEUES_DOORBELL_OFFSET0(ring->doorbell_index));
+	amdgpu_ring_write(kiq_ring, 0);
+	amdgpu_ring_write(kiq_ring, 0);
+	amdgpu_ring_write(kiq_ring, 0);
+	/* write to scratch for completion */
+	amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
+	amdgpu_ring_write(kiq_ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
+	amdgpu_ring_write(kiq_ring, 0xDEADBEEF);
+	amdgpu_ring_commit(kiq_ring);
+
+	for (i = 0; i < adev->usec_timeout; i++) {
+		tmp = RREG32(scratch);
+		if (tmp == 0xDEADBEEF)
+			break;
+		DRM_UDELAY(1);
+	}
+	if (i >= adev->usec_timeout) {
+		DRM_ERROR("KCQ disabled failed (scratch(0x%04X)=0x%08X)\n", scratch, tmp);
+		r = -EINVAL;
+	}
+	amdgpu_gfx_scratch_free(adev, scratch);
+	return r;
+}
+
+
 static int gfx_v9_0_hw_fini(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	int i;
 
 	amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
 	amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
 
+	/* disable KCQ to avoid CPC touch memory not valid anymore */
+	for (i = 0; i < adev->gfx.num_compute_rings; i++)
+		gfx_v9_0_kcq_disable(&adev->gfx.kiq.ring, &adev->gfx.compute_ring[i]);
+
 	if (amdgpu_sriov_vf(adev)) {
 		pr_debug("For SRIOV client, shouldn't do anything.\n");
 		return 0;
@@ -2930,15 +3021,10 @@ static bool gfx_v9_0_is_idle(void *handle)
 static int gfx_v9_0_wait_for_idle(void *handle)
 {
 	unsigned i;
-	u32 tmp;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	for (i = 0; i < adev->usec_timeout; i++) {
-		/* read MC_STATUS */
-		tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS) &
-			GRBM_STATUS__GUI_ACTIVE_MASK;
-
-		if (!REG_GET_FIELD(tmp, GRBM_STATUS, GUI_ACTIVE))
+		if (gfx_v9_0_is_idle(handle))
 			return 0;
 		udelay(1);
 	}
@@ -3499,7 +3585,9 @@ static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 	u32 ref_and_mask, reg_mem_engine;
 	struct nbio_hdp_flush_reg *nbio_hf_reg;
 
-	if (ring->adev->asic_type == CHIP_VEGA10)
+	if (ring->adev->flags & AMD_IS_APU)
+		nbio_hf_reg = &nbio_v7_0_hdp_flush_reg;
+	else
 		nbio_hf_reg = &nbio_v6_1_hdp_flush_reg;
 
 	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
@@ -3528,7 +3616,7 @@ static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
 static void gfx_v9_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
 {
 	gfx_v9_0_write_data_to_reg(ring, 0, true,
-				   SOC15_REG_OFFSET(HDP, 0, mmHDP_DEBUG0), 1);
+				   SOC15_REG_OFFSET(HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
 }
 
 static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
@@ -3757,6 +3845,12 @@ static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring)
 	amdgpu_ring_write_multiple(ring, (void *)&de_payload, sizeof(de_payload) >> 2);
 }
 
+static void gfx_v9_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start)
+{
+	amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
+	amdgpu_ring_write(ring, FRAME_CMD(start ? 0 : 1)); /* frame_end */
+}
+
 static void gfx_v9_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
 {
 	uint32_t dw2 = 0;
@@ -3764,6 +3858,8 @@ static void gfx_v9_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
 	if (amdgpu_sriov_vf(ring->adev))
 		gfx_v9_0_ring_emit_ce_meta(ring);
 
+	gfx_v9_0_ring_emit_tmz(ring, true);
+
 	dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */
 	if (flags & AMDGPU_HAVE_CTX_SWITCH) {
 		/* set load_global_config & load_global_uconfig */
@@ -3814,12 +3910,6 @@ static void gfx_v9_0_ring_emit_patch_cond_exec(struct amdgpu_ring *ring, unsigne
 	ring->ring[offset] = (ring->ring_size>>2) - offset + cur;
 }
 
-static void gfx_v9_0_ring_emit_tmz(struct amdgpu_ring *ring, bool start)
-{
-	amdgpu_ring_write(ring, PACKET3(PACKET3_FRAME_CONTROL, 0));
-	amdgpu_ring_write(ring, FRAME_CMD(start ? 0 : 1)); /* frame_end */
-}
-
 static void gfx_v9_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg)
 {
 	struct amdgpu_device *adev = ring->adev;
|
@@ -319,6 +319,12 @@ void gfxhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev,
 			WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
 	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
 			EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+	if (!value) {
+		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+				CRASH_ON_NO_RETRY_FAULT, 1);
+		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+				CRASH_ON_RETRY_FAULT, 1);
+	}
 	WREG32_SOC15(GC, 0, mmVM_L2_PROTECTION_FAULT_CNTL, tmp);
 }
 
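This hunk, like the matching MMHUB one further down, leans on REG_SET_FIELD, which rewrites one named bit-field inside a 32-bit register value using a mask and a shift. A small stand-alone sketch of the same mask/shift arithmetic (the field layout here is made up; the real kernel derives the MASK and __SHIFT constants from generated headers):

#include <stdint.h>
#include <assert.h>

/* Hypothetical two-bit field at bit 1. */
#define FIELD_MASK  0x00000006u
#define FIELD_SHIFT 1

static uint32_t reg_set_field(uint32_t reg, uint32_t mask,
			      unsigned int shift, uint32_t val)
{
	/* Clear the field, then merge in the new value, keeping other bits. */
	return (reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	uint32_t tmp = 0xffffffffu;

	tmp = reg_set_field(tmp, FIELD_MASK, FIELD_SHIFT, 2);
	assert(((tmp & FIELD_MASK) >> FIELD_SHIFT) == 2);
	return 0;
}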
@@ -831,7 +831,7 @@ static int gmc_v6_0_sw_init(void *handle)
 	if (r)
 		return r;
 
-	amdgpu_vm_adjust_size(adev, 64, 4);
+	amdgpu_vm_adjust_size(adev, 64, 9);
 	adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;
 
 	adev->mc.mc_mask = 0xffffffffffULL;
@@ -901,6 +901,8 @@ static int gmc_v6_0_sw_fini(void *handle)
 	gmc_v6_0_gart_fini(adev);
 	amdgpu_gem_force_release(adev);
 	amdgpu_bo_fini(adev);
+	release_firmware(adev->mc.fw);
+	adev->mc.fw = NULL;
 
 	return 0;
 }
@@ -970,7 +970,7 @@ static int gmc_v7_0_sw_init(void *handle)
 	 * Currently set to 4GB ((1 << 20) 4k pages).
 	 * Max GPUVM size for cayman and SI is 40 bits.
 	 */
-	amdgpu_vm_adjust_size(adev, 64, 4);
+	amdgpu_vm_adjust_size(adev, 64, 9);
 	adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;
 
 	/* Set the internal MC address mask
@@ -1050,6 +1050,8 @@ static int gmc_v7_0_sw_fini(void *handle)
 	gmc_v7_0_gart_fini(adev);
 	amdgpu_gem_force_release(adev);
 	amdgpu_bo_fini(adev);
+	release_firmware(adev->mc.fw);
+	adev->mc.fw = NULL;
 
 	return 0;
 }
@@ -1067,7 +1067,7 @@ static int gmc_v8_0_sw_init(void *handle)
 	 * Currently set to 4GB ((1 << 20) 4k pages).
 	 * Max GPUVM size for cayman and SI is 40 bits.
 	 */
-	amdgpu_vm_adjust_size(adev, 64, 4);
+	amdgpu_vm_adjust_size(adev, 64, 9);
 	adev->vm_manager.max_pfn = adev->vm_manager.vm_size << 18;
 
 	/* Set the internal MC address mask
@@ -1147,6 +1147,8 @@ static int gmc_v8_0_sw_fini(void *handle)
 	gmc_v8_0_gart_fini(adev);
 	amdgpu_gem_force_release(adev);
 	amdgpu_bo_fini(adev);
+	release_firmware(adev->mc.fw);
+	adev->mc.fw = NULL;
 
 	return 0;
 }
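Assuming the 4.15-era signature of amdgpu_vm_adjust_size, where the second argument is the VM size in GB and the third is the default page-table fragment size as a power-of-two exponent over 4 KB pages, raising the exponent from 4 to 9 moves GMC v6/v7/v8 from 64 KB to 2 MB fragments. The arithmetic, as a quick check:

#include <stdio.h>

int main(void)
{
	unsigned long page = 4096;             /* base GPUVM page size  */
	unsigned long old_frag = page << 4;    /* exponent 4  ->  64 KB */
	unsigned long new_frag = page << 9;    /* exponent 9  ->   2 MB */

	printf("fragment: %lu KB -> %lu KB\n",
	       old_frag / 1024, new_frag / 1024);
	return 0;
}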
@@ -32,6 +32,8 @@
 #include "vega10/DC/dce_12_0_offset.h"
 #include "vega10/DC/dce_12_0_sh_mask.h"
 #include "vega10/vega10_enum.h"
+#include "vega10/MMHUB/mmhub_1_0_offset.h"
+#include "vega10/ATHUB/athub_1_0_offset.h"
 
 #include "soc15_common.h"
 
@@ -71,13 +73,25 @@ static const u32 golden_settings_vega10_hdp[] =
 	0xf6e, 0x0fffffff, 0x00000000,
 };
 
+static const u32 golden_settings_mmhub_1_0_0[] =
+{
+	SOC15_REG_OFFSET(MMHUB, 0, mmDAGB1_WRCLI2), 0x00000007, 0xfe5fe0fa,
+	SOC15_REG_OFFSET(MMHUB, 0, mmMMEA1_DRAM_WR_CLI2GRP_MAP0), 0x00000030, 0x55555565
+};
+
+static const u32 golden_settings_athub_1_0_0[] =
+{
+	SOC15_REG_OFFSET(ATHUB, 0, mmRPB_ARB_CNTL), 0x0000ff00, 0x00000800,
+	SOC15_REG_OFFSET(ATHUB, 0, mmRPB_ARB_CNTL2), 0x00ff00ff, 0x00080008
+};
+
 static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
 					struct amdgpu_irq_src *src,
 					unsigned type,
 					enum amdgpu_interrupt_state state)
 {
 	struct amdgpu_vmhub *hub;
-	u32 tmp, reg, bits, i;
+	u32 tmp, reg, bits, i, j;
 
 	bits = VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
 		VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
@@ -89,43 +103,26 @@ static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
 
 	switch (state) {
 	case AMDGPU_IRQ_STATE_DISABLE:
-		/* MM HUB */
-		hub = &adev->vmhub[AMDGPU_MMHUB];
-		for (i = 0; i< 16; i++) {
-			reg = hub->vm_context0_cntl + i;
-			tmp = RREG32(reg);
-			tmp &= ~bits;
-			WREG32(reg, tmp);
-		}
-
-		/* GFX HUB */
-		hub = &adev->vmhub[AMDGPU_GFXHUB];
-		for (i = 0; i < 16; i++) {
-			reg = hub->vm_context0_cntl + i;
-			tmp = RREG32(reg);
-			tmp &= ~bits;
-			WREG32(reg, tmp);
+		for (j = 0; j < AMDGPU_MAX_VMHUBS; j++) {
+			hub = &adev->vmhub[j];
+			for (i = 0; i < 16; i++) {
+				reg = hub->vm_context0_cntl + i;
+				tmp = RREG32(reg);
+				tmp &= ~bits;
+				WREG32(reg, tmp);
+			}
 		}
 		break;
 	case AMDGPU_IRQ_STATE_ENABLE:
-		/* MM HUB */
-		hub = &adev->vmhub[AMDGPU_MMHUB];
-		for (i = 0; i< 16; i++) {
-			reg = hub->vm_context0_cntl + i;
-			tmp = RREG32(reg);
-			tmp |= bits;
-			WREG32(reg, tmp);
-		}
-
-		/* GFX HUB */
-		hub = &adev->vmhub[AMDGPU_GFXHUB];
-		for (i = 0; i < 16; i++) {
-			reg = hub->vm_context0_cntl + i;
-			tmp = RREG32(reg);
-			tmp |= bits;
-			WREG32(reg, tmp);
+		for (j = 0; j < AMDGPU_MAX_VMHUBS; j++) {
+			hub = &adev->vmhub[j];
+			for (i = 0; i < 16; i++) {
+				reg = hub->vm_context0_cntl + i;
+				tmp = RREG32(reg);
+				tmp |= bits;
+				WREG32(reg, tmp);
+			}
 		}
 		break;
 	default:
 		break;
 	}
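The interrupt-state hunk above replaces two copy-pasted per-hub blocks with one loop over every VM hub index; the per-hub register base lives in the hub descriptor, so the body is identical for the GFX and MM hubs. A distilled sketch of the refactor with the hubs mocked as plain arrays (names are illustrative):

#include <stdint.h>
#include <stdio.h>

#define MAX_VMHUBS 2   /* stand-in for AMDGPU_MAX_VMHUBS */

struct vmhub { uint32_t vm_context_cntl[16]; };

static struct vmhub hubs[MAX_VMHUBS];

static void set_fault_interrupts(uint32_t bits, int enable)
{
	int i, j;

	/* One loop now covers what used to be a per-hub copy. */
	for (j = 0; j < MAX_VMHUBS; j++) {
		for (i = 0; i < 16; i++) {
			uint32_t tmp = hubs[j].vm_context_cntl[i];

			tmp = enable ? (tmp | bits) : (tmp & ~bits);
			hubs[j].vm_context_cntl[i] = tmp;
		}
	}
}

int main(void)
{
	set_fault_interrupts(0x7, 1);
	printf("ctx0 of hub1: 0x%x\n", hubs[1].vm_context_cntl[0]);
	return 0;
}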
@@ -682,8 +679,17 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
 {
 	switch (adev->asic_type) {
 	case CHIP_VEGA10:
+		amdgpu_program_register_sequence(adev,
+						golden_settings_mmhub_1_0_0,
+						(const u32)ARRAY_SIZE(golden_settings_mmhub_1_0_0));
+		amdgpu_program_register_sequence(adev,
+						golden_settings_athub_1_0_0,
+						(const u32)ARRAY_SIZE(golden_settings_athub_1_0_0));
 		break;
 	case CHIP_RAVEN:
+		amdgpu_program_register_sequence(adev,
+						golden_settings_athub_1_0_0,
+						(const u32)ARRAY_SIZE(golden_settings_athub_1_0_0));
 		break;
 	default:
 		break;
@@ -713,12 +719,6 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
 	if (r)
 		return r;
 
-	/* After HDP is initialized, flush HDP.*/
-	if (adev->flags & AMD_IS_APU)
-		nbio_v7_0_hdp_flush(adev);
-	else
-		nbio_v6_1_hdp_flush(adev);
-
 	switch (adev->asic_type) {
 	case CHIP_RAVEN:
 		mmhub_v1_0_initialize_power_gating(adev);
@@ -736,13 +736,16 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
 	if (r)
 		return r;
 
-	tmp = RREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL);
-	tmp |= HDP_MISC_CNTL__FLUSH_INVALIDATE_CACHE_MASK;
-	WREG32_SOC15(HDP, 0, mmHDP_MISC_CNTL, tmp);
+	WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);
 
 	tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL);
 	WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp);
 
+	/* After HDP is initialized, flush HDP.*/
+	if (adev->flags & AMD_IS_APU)
+		nbio_v7_0_hdp_flush(adev);
+	else
+		nbio_v6_1_hdp_flush(adev);
+
 	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
 		value = false;
@@ -751,7 +754,6 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev)
 
 	gfxhub_v1_0_set_fault_enable_default(adev, value);
 	mmhub_v1_0_set_fault_enable_default(adev, value);
-
 	gmc_v9_0_gart_flush_gpu_tlb(adev, 0);
 
 	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
@@ -770,17 +772,11 @@ static int gmc_v9_0_hw_init(void *handle)
 	gmc_v9_0_init_golden_registers(adev);
 
 	if (adev->mode_info.num_crtc) {
-		u32 tmp;
-
 		/* Lockout access through VGA aperture*/
-		tmp = RREG32_SOC15(DCE, 0, mmVGA_HDP_CONTROL);
-		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
-		WREG32_SOC15(DCE, 0, mmVGA_HDP_CONTROL, tmp);
+		WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
 
 		/* disable VGA render */
-		tmp = RREG32_SOC15(DCE, 0, mmVGA_RENDER_CONTROL);
-		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
-		WREG32_SOC15(DCE, 0, mmVGA_RENDER_CONTROL, tmp);
+		WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
 	}
 
 	r = gmc_v9_0_gart_enable(adev);
@@ -822,9 +818,7 @@ static int gmc_v9_0_suspend(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	gmc_v9_0_hw_fini(adev);
-
-	return 0;
+	return gmc_v9_0_hw_fini(adev);
 }
 
 static int gmc_v9_0_resume(void *handle)
@@ -207,6 +207,19 @@ static u32 iceland_ih_get_wptr(struct amdgpu_device *adev)
 	return (wptr & adev->irq.ih.ptr_mask);
 }
 
+/**
+ * iceland_ih_prescreen_iv - prescreen an interrupt vector
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Returns true if the interrupt vector should be further processed.
+ */
+static bool iceland_ih_prescreen_iv(struct amdgpu_device *adev)
+{
+	/* Process all interrupts */
+	return true;
+}
+
 /**
  * iceland_ih_decode_iv - decode an interrupt vector
  *
@@ -412,6 +425,7 @@ static const struct amd_ip_funcs iceland_ih_ip_funcs = {
 
 static const struct amdgpu_ih_funcs iceland_ih_funcs = {
 	.get_wptr = iceland_ih_get_wptr,
+	.prescreen_iv = iceland_ih_prescreen_iv,
 	.decode_iv = iceland_ih_decode_iv,
 	.set_rptr = iceland_ih_set_rptr
 };
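The new prescreen_iv callback gives every IH implementation a filtering stage that runs before decode, part of the recoverable-page-fault interrupt plumbing in this pull; chips without that handling, like Iceland here, simply pass everything through. A minimal sketch of how such a hook slots into a handler table (the types here are illustrative, not the driver's):

#include <stdbool.h>
#include <stdio.h>

struct ih_funcs {
	bool (*prescreen_iv)(void *dev); /* filter before decoding */
	void (*decode_iv)(void *dev);
};

static bool pass_all(void *dev) { (void)dev; return true; }
static void decode(void *dev)   { (void)dev; puts("decoded"); }

static const struct ih_funcs funcs = {
	.prescreen_iv = pass_all,
	.decode_iv = decode,
};

int main(void)
{
	void *dev = NULL;

	if (funcs.prescreen_iv(dev)) /* drop the vector when this is false */
		funcs.decode_iv(dev);
	return 0;
}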
@@ -42,7 +42,6 @@
 #define KV_MINIMUM_ENGINE_CLOCK         800
 #define SMC_RAM_END                     0x40000
 
-static void kv_dpm_set_dpm_funcs(struct amdgpu_device *adev);
 static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev);
 static int kv_enable_nb_dpm(struct amdgpu_device *adev,
 			    bool enable);
@@ -64,7 +63,7 @@ static int kv_set_thermal_temperature_range(struct amdgpu_device *adev,
 					    int min_temp, int max_temp);
 static int kv_init_fps_limits(struct amdgpu_device *adev);
 
-static void kv_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate);
+static void kv_dpm_powergate_uvd(void *handle, bool gate);
 static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate);
 static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate);
 static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate);
@@ -1245,8 +1244,9 @@ static void kv_update_requested_ps(struct amdgpu_device *adev,
 	adev->pm.dpm.requested_ps = &pi->requested_rps;
 }
 
-static void kv_dpm_enable_bapm(struct amdgpu_device *adev, bool enable)
+static void kv_dpm_enable_bapm(void *handle, bool enable)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	struct kv_power_info *pi = kv_get_pi(adev);
 	int ret;
 
@@ -1672,8 +1672,9 @@ static int kv_update_acp_dpm(struct amdgpu_device *adev, bool gate)
 	return kv_enable_acp_dpm(adev, !gate);
 }
 
-static void kv_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate)
+static void kv_dpm_powergate_uvd(void *handle, bool gate)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	struct kv_power_info *pi = kv_get_pi(adev);
 	int ret;
 
@@ -1868,10 +1869,11 @@ static int kv_enable_nb_dpm(struct amdgpu_device *adev,
 	return ret;
 }
 
-static int kv_dpm_force_performance_level(struct amdgpu_device *adev,
+static int kv_dpm_force_performance_level(void *handle,
 					enum amd_dpm_forced_level level)
 {
 	int ret;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	if (level == AMD_DPM_FORCED_LEVEL_HIGH) {
 		ret = kv_force_dpm_highest(adev);
@@ -1892,8 +1894,9 @@ static int kv_dpm_force_performance_level(struct amdgpu_device *adev,
 	return 0;
 }
 
-static int kv_dpm_pre_set_power_state(struct amdgpu_device *adev)
+static int kv_dpm_pre_set_power_state(void *handle)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	struct kv_power_info *pi = kv_get_pi(adev);
 	struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps;
 	struct amdgpu_ps *new_ps = &requested_ps;
@@ -1907,8 +1910,9 @@ static int kv_dpm_pre_set_power_state(struct amdgpu_device *adev)
 	return 0;
 }
 
-static int kv_dpm_set_power_state(struct amdgpu_device *adev)
+static int kv_dpm_set_power_state(void *handle)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	struct kv_power_info *pi = kv_get_pi(adev);
 	struct amdgpu_ps *new_ps = &pi->requested_rps;
 	struct amdgpu_ps *old_ps = &pi->current_rps;
@@ -1981,8 +1985,9 @@ static int kv_dpm_set_power_state(struct amdgpu_device *adev)
 	return 0;
 }
 
-static void kv_dpm_post_set_power_state(struct amdgpu_device *adev)
+static void kv_dpm_post_set_power_state(void *handle)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	struct kv_power_info *pi = kv_get_pi(adev);
 	struct amdgpu_ps *new_ps = &pi->requested_rps;
 
@@ -2848,9 +2853,10 @@ static int kv_dpm_init(struct amdgpu_device *adev)
 }
 
 static void
-kv_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
+kv_dpm_debugfs_print_current_performance_level(void *handle,
 					       struct seq_file *m)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	struct kv_power_info *pi = kv_get_pi(adev);
 	u32 current_index =
 		(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
@@ -2875,11 +2881,12 @@ kv_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
 }
 
 static void
-kv_dpm_print_power_state(struct amdgpu_device *adev,
-			 struct amdgpu_ps *rps)
+kv_dpm_print_power_state(void *handle, void *request_ps)
 {
 	int i;
+	struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps;
 	struct kv_ps *ps = kv_get_ps(rps);
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	amdgpu_dpm_print_class_info(rps->class, rps->class2);
 	amdgpu_dpm_print_cap_info(rps->caps);
@@ -2905,13 +2912,14 @@ static void kv_dpm_fini(struct amdgpu_device *adev)
 	amdgpu_free_extended_power_table(adev);
 }
 
-static void kv_dpm_display_configuration_changed(struct amdgpu_device *adev)
+static void kv_dpm_display_configuration_changed(void *handle)
 {
 
 }
 
-static u32 kv_dpm_get_sclk(struct amdgpu_device *adev, bool low)
+static u32 kv_dpm_get_sclk(void *handle, bool low)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	struct kv_power_info *pi = kv_get_pi(adev);
 	struct kv_ps *requested_state = kv_get_ps(&pi->requested_rps);
 
@@ -2921,18 +2929,20 @@ static u32 kv_dpm_get_sclk(struct amdgpu_device *adev, bool low)
 	return requested_state->levels[requested_state->num_levels - 1].sclk;
 }
 
-static u32 kv_dpm_get_mclk(struct amdgpu_device *adev, bool low)
+static u32 kv_dpm_get_mclk(void *handle, bool low)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	struct kv_power_info *pi = kv_get_pi(adev);
 
 	return pi->sys_info.bootup_uma_clk;
 }
 
 /* get temperature in millidegrees */
-static int kv_dpm_get_temp(struct amdgpu_device *adev)
+static int kv_dpm_get_temp(void *handle)
 {
 	u32 temp;
 	int actual_temp = 0;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	temp = RREG32_SMC(0xC0300E0C);
 
@@ -2950,7 +2960,6 @@ static int kv_dpm_early_init(void *handle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
-	kv_dpm_set_dpm_funcs(adev);
 	kv_dpm_set_irq_funcs(adev);
 
 	return 0;
@@ -3222,14 +3231,17 @@ static inline bool kv_are_power_levels_equal(const struct kv_pl *kv_cpl1,
 		   (kv_cpl1->force_nbp_state == kv_cpl2->force_nbp_state));
 }
 
-static int kv_check_state_equal(struct amdgpu_device *adev,
-				struct amdgpu_ps *cps,
-				struct amdgpu_ps *rps,
+static int kv_check_state_equal(void *handle,
+				void *current_ps,
+				void *request_ps,
 				bool *equal)
 {
 	struct kv_ps *kv_cps;
 	struct kv_ps *kv_rps;
 	int i;
+	struct amdgpu_ps *cps = (struct amdgpu_ps *)current_ps;
+	struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	if (adev == NULL || cps == NULL || rps == NULL || equal == NULL)
 		return -EINVAL;
@@ -3262,9 +3274,10 @@ static int kv_check_state_equal(struct amdgpu_device *adev,
 	return 0;
 }
 
-static int kv_dpm_read_sensor(struct amdgpu_device *adev, int idx,
+static int kv_dpm_read_sensor(void *handle, int idx,
 			      void *value, int *size)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	struct kv_power_info *pi = kv_get_pi(adev);
 	uint32_t sclk;
 	u32 pl_index =
@@ -3312,7 +3325,7 @@ const struct amd_ip_funcs kv_dpm_ip_funcs = {
 	.set_powergating_state = kv_dpm_set_powergating_state,
 };
 
-static const struct amdgpu_dpm_funcs kv_dpm_funcs = {
+const struct amd_pm_funcs kv_dpm_funcs = {
 	.get_temperature = &kv_dpm_get_temp,
 	.pre_set_power_state = &kv_dpm_pre_set_power_state,
 	.set_power_state = &kv_dpm_set_power_state,
@@ -3330,12 +3343,6 @@ static const struct amdgpu_dpm_funcs kv_dpm_funcs = {
 	.read_sensor = &kv_dpm_read_sensor,
 };
 
-static void kv_dpm_set_dpm_funcs(struct amdgpu_device *adev)
-{
-	if (adev->pm.funcs == NULL)
-		adev->pm.funcs = &kv_dpm_funcs;
-}
-
 static const struct amdgpu_irq_src_funcs kv_dpm_irq_funcs = {
 	.set = kv_dpm_set_interrupt_state,
 	.process = kv_dpm_process_interrupt,
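All of these kv_dpm changes (and the matching si_dpm ones later in this diff) follow one mechanical pattern: every callback that used to take struct amdgpu_device * now takes an opaque void *handle and casts it back on entry, so the table becomes a const struct amd_pm_funcs that the common powerplay interface can call without knowing the concrete type. The idiom in isolation, with illustrative types:

#include <stdio.h>

struct device { int temp_millideg; };

/* Opaque-handle callback: cast back to the concrete type on entry. */
static int get_temp(void *handle)
{
	struct device *dev = (struct device *)handle;

	return dev->temp_millideg;
}

struct pm_funcs { int (*get_temperature)(void *handle); };

static const struct pm_funcs funcs = { .get_temperature = get_temp };

int main(void)
{
	struct device dev = { .temp_millideg = 42000 };

	/* The caller only holds the opaque handle, not the struct. */
	printf("%d\n", funcs.get_temperature(&dev));
	return 0;
}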
@@ -273,7 +273,7 @@ static const struct pctl_data pctl0_data[] = {
 	{0x135, 0x12a810},
 	{0x149, 0x7a82c}
 };
-#define PCTL0_DATA_LEN (sizeof(pctl0_data)/sizeof(pctl0_data[0]))
+#define PCTL0_DATA_LEN (ARRAY_SIZE(pctl0_data))
 
 #define PCTL0_RENG_EXEC_END_PTR 0x151
 #define PCTL0_STCTRL_REG_SAVE_RANGE0_BASE  0xa640
@@ -309,7 +309,7 @@ static const struct pctl_data pctl1_data[] = {
 	{0x1f0, 0x5000a7f6},
 	{0x1f1, 0x5000a7e4}
 };
-#define PCTL1_DATA_LEN (sizeof(pctl1_data)/sizeof(pctl1_data[0]))
+#define PCTL1_DATA_LEN (ARRAY_SIZE(pctl1_data))
 
 #define PCTL1_RENG_EXEC_END_PTR 0x1f1
 #define PCTL1_STCTRL_REG_SAVE_RANGE0_BASE  0xa000
@@ -561,6 +561,13 @@ void mmhub_v1_0_set_fault_enable_default(struct amdgpu_device *adev, bool value)
 			WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
 	tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
 			EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
+	if (!value) {
+		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+				CRASH_ON_NO_RETRY_FAULT, 1);
+		tmp = REG_SET_FIELD(tmp, VM_L2_PROTECTION_FAULT_CNTL,
+				CRASH_ON_RETRY_FAULT, 1);
+	}
+
 	WREG32_SOC15(MMHUB, 0, mmVM_L2_PROTECTION_FAULT_CNTL, tmp);
 }
 
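Replacing the open-coded sizeof division with ARRAY_SIZE is equivalent but harder to misuse, since the kernel macro additionally rejects pointers at compile time. A plain-C sketch of the basic form, without the kernel's pointer-rejection check:

#include <stdio.h>

#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

int main(void)
{
	unsigned int pctl_data[] = { 1, 2, 3, 4 };

	/* Both expressions yield the element count, 4. */
	printf("%zu == %zu\n",
	       sizeof(pctl_data) / sizeof(pctl_data[0]),
	       ARRAY_SIZE(pctl_data));
	return 0;
}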
@@ -24,7 +24,7 @@
 #ifndef __MXGPU_AI_H__
 #define __MXGPU_AI_H__
 
-#define AI_MAILBOX_TIMEDOUT	5000
+#define AI_MAILBOX_TIMEDOUT	12000
 
 enum idh_request {
 	IDH_REQ_GPU_INIT_ACCESS = 1,
@@ -23,7 +23,7 @@
 #ifndef __MXGPU_VI_H__
 #define __MXGPU_VI_H__
 
-#define VI_MAILBOX_TIMEDOUT	5000
+#define VI_MAILBOX_TIMEDOUT	12000
 #define VI_MAILBOX_RESET_TIME	12
 
 /* VI mailbox messages request */
@@ -35,6 +35,8 @@
 #include "raven1/GC/gc_9_1_offset.h"
 #include "raven1/SDMA0/sdma0_4_1_offset.h"
 
+MODULE_FIRMWARE("amdgpu/raven_asd.bin");
+
 static int
 psp_v10_0_get_fw_type(struct amdgpu_firmware_info *ucode, enum psp_gfx_fw_type *type)
 {
@@ -136,15 +138,13 @@ int psp_v10_0_prep_cmd_buf(struct amdgpu_firmware_info *ucode, struct psp_gfx_cm
 {
 	int ret;
 	uint64_t fw_mem_mc_addr = ucode->mc_addr;
-	struct common_firmware_header *header;
 
 	memset(cmd, 0, sizeof(struct psp_gfx_cmd_resp));
-	header = (struct common_firmware_header *)ucode->fw;
 
 	cmd->cmd_id = GFX_CMD_ID_LOAD_IP_FW;
 	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_lo = lower_32_bits(fw_mem_mc_addr);
 	cmd->cmd.cmd_load_ip_fw.fw_phy_addr_hi = upper_32_bits(fw_mem_mc_addr);
-	cmd->cmd.cmd_load_ip_fw.fw_size = le32_to_cpu(header->ucode_size_bytes);
+	cmd->cmd.cmd_load_ip_fw.fw_size = ucode->ucode_size;
 
 	ret = psp_v10_0_get_fw_type(ucode, &cmd->cmd.cmd_load_ip_fw.fw_type);
 	if (ret)
@@ -209,7 +209,7 @@ int psp_v10_0_ring_create(struct psp_context *psp, enum psp_ring_type ring_type)
 	return ret;
 }
 
-int psp_v10_0_ring_destroy(struct psp_context *psp, enum psp_ring_type ring_type)
+int psp_v10_0_ring_stop(struct psp_context *psp, enum psp_ring_type ring_type)
 {
 	int ret = 0;
 	struct psp_ring *ring;
@@ -229,6 +229,19 @@ int psp_v10_0_ring_destroy(struct psp_context *psp, enum psp_ring_type ring_type
 	ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
 			   0x80000000, 0x80000000, false);
 
+	return ret;
+}
+
+int psp_v10_0_ring_destroy(struct psp_context *psp, enum psp_ring_type ring_type)
+{
+	int ret = 0;
+	struct psp_ring *ring = &psp->km_ring;
+	struct amdgpu_device *adev = psp->adev;
+
+	ret = psp_v10_0_ring_stop(psp, ring_type);
+	if (ret)
+		DRM_ERROR("Fail to stop psp ring\n");
+
 	amdgpu_bo_free_kernel(&adev->firmware.rbuf,
 			      &ring->ring_mem_mc_addr,
 			      (void **)&ring->ring_mem);
@@ -245,15 +258,20 @@ int psp_v10_0_cmd_submit(struct psp_context *psp,
 	struct psp_gfx_rb_frame * write_frame = psp->km_ring.ring_mem;
 	struct psp_ring *ring = &psp->km_ring;
 	struct amdgpu_device *adev = psp->adev;
+	uint32_t ring_size_dw = ring->ring_size / 4;
+	uint32_t rb_frame_size_dw = sizeof(struct psp_gfx_rb_frame) / 4;
 
 	/* KM (GPCOM) prepare write pointer */
 	psp_write_ptr_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67);
 
 	/* Update KM RB frame pointer to new frame */
-	if ((psp_write_ptr_reg % ring->ring_size) == 0)
+	if ((psp_write_ptr_reg % ring_size_dw) == 0)
 		write_frame = ring->ring_mem;
 	else
-		write_frame = ring->ring_mem + (psp_write_ptr_reg / (sizeof(struct psp_gfx_rb_frame) / 4));
+		write_frame = ring->ring_mem + (psp_write_ptr_reg / rb_frame_size_dw);
+
+	/* Initialize KM RB frame */
+	memset(write_frame, 0, sizeof(struct psp_gfx_rb_frame));
 
 	/* Update KM RB frame */
 	write_frame->cmd_buf_addr_hi = upper_32_bits(cmd_buf_mc_addr);
@@ -263,8 +281,7 @@ int psp_v10_0_cmd_submit(struct psp_context *psp,
 	write_frame->fence_value = index;
 
 	/* Update the write Pointer in DWORDs */
-	psp_write_ptr_reg += sizeof(struct psp_gfx_rb_frame) / 4;
-	psp_write_ptr_reg = (psp_write_ptr_reg >= ring->ring_size) ? 0 : psp_write_ptr_reg;
+	psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw;
 	WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, psp_write_ptr_reg);
 
 	return 0;
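The cmd_submit fix above appears to be about units: the write pointer is kept in DWORDs, but the old wrap tests compared it against ring->ring_size, which is in bytes, so the pointer could run well past the ring before resetting. Expressing both the frame size and ring size in DWORDs and advancing with a single modulo keeps the pointer in range at every step. A runnable check of the new arithmetic:

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

int main(void)
{
	uint32_t ring_size_bytes = 4096;
	uint32_t frame_size_bytes = 64;      /* e.g. one RB frame */
	uint32_t ring_size_dw = ring_size_bytes / 4;
	uint32_t frame_size_dw = frame_size_bytes / 4;
	uint32_t wptr = 0;
	int i;

	/* Submit enough frames to wrap the ring several times. */
	for (i = 0; i < 1000; i++) {
		wptr = (wptr + frame_size_dw) % ring_size_dw;
		assert(wptr < ring_size_dw); /* never walks off the ring */
	}
	printf("final wptr (dw): %u\n", wptr);
	return 0;
}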
@@ -390,3 +407,10 @@ bool psp_v10_0_compare_sram_data(struct psp_context *psp,
 
 	return true;
 }
+
+int psp_v10_0_mode1_reset(struct psp_context *psp)
+{
+	DRM_INFO("psp mode 1 reset not supported now! \n");
+	return -EINVAL;
+}
@@ -34,6 +34,8 @@ extern int psp_v10_0_ring_init(struct psp_context *psp,
 			       enum psp_ring_type ring_type);
 extern int psp_v10_0_ring_create(struct psp_context *psp,
 				 enum psp_ring_type ring_type);
+extern int psp_v10_0_ring_stop(struct psp_context *psp,
+			       enum psp_ring_type ring_type);
 extern int psp_v10_0_ring_destroy(struct psp_context *psp,
 				  enum psp_ring_type ring_type);
 extern int psp_v10_0_cmd_submit(struct psp_context *psp,
@@ -43,4 +45,6 @@ extern int psp_v10_0_cmd_submit(struct psp_context *psp,
 extern bool psp_v10_0_compare_sram_data(struct psp_context *psp,
 					struct amdgpu_firmware_info *ucode,
 					enum AMDGPU_UCODE_ID ucode_type);
+
+extern int psp_v10_0_mode1_reset(struct psp_context *psp);
 #endif
@@ -319,7 +319,7 @@ int psp_v3_1_ring_create(struct psp_context *psp, enum psp_ring_type ring_type)
 	return ret;
 }
 
-int psp_v3_1_ring_destroy(struct psp_context *psp, enum psp_ring_type ring_type)
+int psp_v3_1_ring_stop(struct psp_context *psp, enum psp_ring_type ring_type)
 {
 	int ret = 0;
 	struct psp_ring *ring;
@@ -339,6 +339,19 @@ int psp_v3_1_ring_destroy(struct psp_context *psp, enum psp_ring_type ring_type)
 	ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64),
 			   0x80000000, 0x80000000, false);
 
+	return ret;
+}
+
+int psp_v3_1_ring_destroy(struct psp_context *psp, enum psp_ring_type ring_type)
+{
+	int ret = 0;
+	struct psp_ring *ring = &psp->km_ring;
+	struct amdgpu_device *adev = psp->adev;
+
+	ret = psp_v3_1_ring_stop(psp, ring_type);
+	if (ret)
+		DRM_ERROR("Fail to stop psp ring\n");
+
 	amdgpu_bo_free_kernel(&adev->firmware.rbuf,
 			      &ring->ring_mem_mc_addr,
 			      (void **)&ring->ring_mem);
@@ -517,3 +530,37 @@ bool psp_v3_1_smu_reload_quirk(struct psp_context *psp)
 	reg = RREG32_SOC15(NBIO, 0, mmPCIE_DATA2);
 	return (reg & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) ? true : false;
 }
+
+int psp_v3_1_mode1_reset(struct psp_context *psp)
+{
+	int ret;
+	uint32_t offset;
+	struct amdgpu_device *adev = psp->adev;
+
+	offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64);
+
+	ret = psp_wait_for(psp, offset, 0x80000000, 0x8000FFFF, false);
+
+	if (ret) {
+		DRM_INFO("psp is not working correctly before mode1 reset!\n");
+		return -EINVAL;
+	}
+
+	/*send the mode 1 reset command*/
+	WREG32(offset, 0x70000);
+
+	mdelay(1000);
+
+	offset = SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_33);
+
+	ret = psp_wait_for(psp, offset, 0x80000000, 0x80000000, false);
+
+	if (ret) {
+		DRM_INFO("psp mode 1 reset failed!\n");
+		return -EINVAL;
+	}
+
+	DRM_INFO("psp mode1 reset succeed \n");
+
+	return 0;
+}
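psp_v3_1_mode1_reset is a three-step handshake against the PSP mailbox registers: confirm the firmware is healthy, write the reset opcode, then poll a status register for completion. The control flow in isolation, with the registers mocked as plain variables (the opcode and register names are copied from the hunk above, everything else is a stand-in):

#include <stdint.h>
#include <stdio.h>

static uint32_t c2pmsg_64 = 0x80000000; /* mocked: PSP reports ready */
static uint32_t c2pmsg_33;

static int wait_for(volatile uint32_t *reg, uint32_t val, uint32_t mask)
{
	int i;

	for (i = 0; i < 1000; i++)
		if ((*reg & mask) == val)
			return 0;
	return -1;
}

int main(void)
{
	if (wait_for(&c2pmsg_64, 0x80000000, 0x8000FFFF))
		return -1;          /* PSP not healthy before the reset */

	c2pmsg_64 = 0x70000;        /* send the mode-1 reset command    */
	c2pmsg_33 = 0x80000000;     /* mocked: firmware signals done    */

	if (wait_for(&c2pmsg_33, 0x80000000, 0x80000000))
		return -1;          /* reset did not complete           */

	puts("mode1 reset complete");
	return 0;
}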
@@ -41,6 +41,8 @@ extern int psp_v3_1_ring_init(struct psp_context *psp,
 			      enum psp_ring_type ring_type);
 extern int psp_v3_1_ring_create(struct psp_context *psp,
 				enum psp_ring_type ring_type);
+extern int psp_v3_1_ring_stop(struct psp_context *psp,
+			      enum psp_ring_type ring_type);
 extern int psp_v3_1_ring_destroy(struct psp_context *psp,
 				 enum psp_ring_type ring_type);
 extern int psp_v3_1_cmd_submit(struct psp_context *psp,
@@ -51,4 +53,5 @@ extern bool psp_v3_1_compare_sram_data(struct psp_context *psp,
 				       struct amdgpu_firmware_info *ucode,
 				       enum AMDGPU_UCODE_ID ucode_type);
 extern bool psp_v3_1_smu_reload_quirk(struct psp_context *psp);
+extern int psp_v3_1_mode1_reset(struct psp_context *psp);
 #endif
@@ -1324,8 +1324,13 @@ static void sdma_v2_4_set_buffer_funcs(struct amdgpu_device *adev)
 }
 
 static const struct amdgpu_vm_pte_funcs sdma_v2_4_vm_pte_funcs = {
+	.copy_pte_num_dw = 7,
 	.copy_pte = sdma_v2_4_vm_copy_pte,
+
 	.write_pte = sdma_v2_4_vm_write_pte,
+
+	.set_max_nums_pte_pde = 0x1fffff >> 3,
+	.set_pte_pde_num_dw = 10,
 	.set_pte_pde = sdma_v2_4_vm_set_pte_pde,
 };
 
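The new .set_max_nums_pte_pde fields across these engines look derived from each engine's maximum fill size: the byte limit shifted right by 3 gives the number of 8-byte page-table entries one command can cover (0x1fffff >> 3 here, 0x3fffe0 >> 3 for SDMA v3 where the hardware limit sits below the field maximum). As arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned long fill_max_bytes = 0x1fffff; /* SDMA v2.4 limit */
	unsigned long pte_size = 8;              /* one 64-bit PTE  */

	/* 0x1fffff >> 3 == 0x1fffff / 8: PTEs writable per command. */
	printf("max PTEs: %lu\n", fill_max_bytes >> 3);
	printf("check:    %lu\n", fill_max_bytes / pte_size);
	return 0;
}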
@@ -379,8 +379,10 @@ static void sdma_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
 	struct amdgpu_device *adev = ring->adev;
 
 	if (ring->use_doorbell) {
+		u32 *wb = (u32 *)&adev->wb.wb[ring->wptr_offs];
+
 		/* XXX check if swapping is necessary on BE */
-		adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr) << 2;
+		WRITE_ONCE(*wb, (lower_32_bits(ring->wptr) << 2));
 		WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr) << 2);
 	} else {
 		int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;
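Switching the doorbell shadow update to WRITE_ONCE matters because the write-back slot is read concurrently by the hardware: WRITE_ONCE forces a single untorn store and stops the compiler from splitting, reordering within the expression, or re-materializing it. Its core is roughly a volatile store, simplified here from the kernel's definition:

#include <stdint.h>
#include <stdio.h>

/* Simplified single-store semantics of the kernel's WRITE_ONCE(). */
#define WRITE_ONCE(x, val) (*(volatile __typeof__(x) *)&(x) = (val))

static uint32_t wptr_shadow; /* stand-in for the wb[] slot */

int main(void)
{
	uint32_t wptr = 123;

	WRITE_ONCE(wptr_shadow, wptr << 2); /* one untorn 32-bit store */
	printf("shadow=%u\n", wptr_shadow);
	return 0;
}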
@@ -641,10 +643,11 @@ static void sdma_v3_0_enable(struct amdgpu_device *adev, bool enable)
 static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
 {
 	struct amdgpu_ring *ring;
-	u32 rb_cntl, ib_cntl;
+	u32 rb_cntl, ib_cntl, wptr_poll_cntl;
 	u32 rb_bufsz;
 	u32 wb_offset;
 	u32 doorbell;
+	u64 wptr_gpu_addr;
 	int i, j, r;
 
 	for (i = 0; i < adev->sdma.num_instances; i++) {
@@ -707,6 +710,20 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
 		}
 		WREG32(mmSDMA0_GFX_DOORBELL + sdma_offsets[i], doorbell);
 
+		/* setup the wptr shadow polling */
+		wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
+
+		WREG32(mmSDMA0_GFX_RB_WPTR_POLL_ADDR_LO + sdma_offsets[i],
+		       lower_32_bits(wptr_gpu_addr));
+		WREG32(mmSDMA0_GFX_RB_WPTR_POLL_ADDR_HI + sdma_offsets[i],
+		       upper_32_bits(wptr_gpu_addr));
+		wptr_poll_cntl = RREG32(mmSDMA0_GFX_RB_WPTR_POLL_CNTL + sdma_offsets[i]);
+		if (amdgpu_sriov_vf(adev))
+			wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, SDMA0_GFX_RB_WPTR_POLL_CNTL, F32_POLL_ENABLE, 1);
+		else
+			wptr_poll_cntl = REG_SET_FIELD(wptr_poll_cntl, SDMA0_GFX_RB_WPTR_POLL_CNTL, F32_POLL_ENABLE, 0);
+		WREG32(mmSDMA0_GFX_RB_WPTR_POLL_CNTL + sdma_offsets[i], wptr_poll_cntl);
+
 		/* enable DMA RB */
 		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
 		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
@@ -1713,11 +1730,11 @@ static void sdma_v3_0_emit_fill_buffer(struct amdgpu_ib *ib,
 }
 
 static const struct amdgpu_buffer_funcs sdma_v3_0_buffer_funcs = {
-	.copy_max_bytes = 0x1fffff,
+	.copy_max_bytes = 0x3fffe0, /* not 0x3fffff due to HW limitation */
 	.copy_num_dw = 7,
 	.emit_copy_buffer = sdma_v3_0_emit_copy_buffer,
 
-	.fill_max_bytes = 0x1fffff,
+	.fill_max_bytes = 0x3fffe0, /* not 0x3fffff due to HW limitation */
 	.fill_num_dw = 5,
 	.emit_fill_buffer = sdma_v3_0_emit_fill_buffer,
 };
@@ -1731,8 +1748,14 @@ static void sdma_v3_0_set_buffer_funcs(struct amdgpu_device *adev)
 }
 
 static const struct amdgpu_vm_pte_funcs sdma_v3_0_vm_pte_funcs = {
+	.copy_pte_num_dw = 7,
 	.copy_pte = sdma_v3_0_vm_copy_pte,
+
 	.write_pte = sdma_v3_0_vm_write_pte,
+
+	/* not 0x3fffff due to HW limitation */
+	.set_max_nums_pte_pde = 0x3fffe0 >> 3,
+	.set_pte_pde_num_dw = 10,
 	.set_pte_pde = sdma_v3_0_vm_set_pte_pde,
 };
 
|
|||||||
static void sdma_v4_0_set_irq_funcs(struct amdgpu_device *adev);
|
static void sdma_v4_0_set_irq_funcs(struct amdgpu_device *adev);
|
||||||
|
|
||||||
static const u32 golden_settings_sdma_4[] = {
|
static const u32 golden_settings_sdma_4[] = {
|
||||||
SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CHICKEN_BITS), 0xfe931f07, 0x02831f07,
|
SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CHICKEN_BITS), 0xfe931f07, 0x02831d07,
|
||||||
SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CLK_CTRL), 0xff000ff0, 0x3f000100,
|
SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CLK_CTRL), 0xff000ff0, 0x3f000100,
|
||||||
SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_IB_CNTL), 0x800f0100, 0x00000100,
|
SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_IB_CNTL), 0x800f0100, 0x00000100,
|
||||||
SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL), 0xfffffff7, 0x00403000,
|
SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL), 0xfffffff7, 0x00403000,
|
||||||
@ -89,7 +89,7 @@ static const u32 golden_settings_sdma_vg10[] = {
|
|||||||
|
|
||||||
static const u32 golden_settings_sdma_4_1[] =
|
static const u32 golden_settings_sdma_4_1[] =
|
||||||
{
|
{
|
||||||
SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CHICKEN_BITS), 0xfe931f07, 0x02831f07,
|
SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CHICKEN_BITS), 0xfe931f07, 0x02831d07,
|
||||||
SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CLK_CTRL), 0xffffffff, 0x3f000100,
|
SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_CLK_CTRL), 0xffffffff, 0x3f000100,
|
||||||
SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_IB_CNTL), 0x800f0111, 0x00000100,
|
SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_IB_CNTL), 0x800f0111, 0x00000100,
|
||||||
SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL), 0xfffffff7, 0x00403000,
|
SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_RB_WPTR_POLL_CNTL), 0xfffffff7, 0x00403000,
|
||||||
@ -398,7 +398,7 @@ static void sdma_v4_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
|
|||||||
{
|
{
|
||||||
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
|
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
|
||||||
SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
|
SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
|
||||||
amdgpu_ring_write(ring, SOC15_REG_OFFSET(HDP, 0, mmHDP_DEBUG0));
|
amdgpu_ring_write(ring, SOC15_REG_OFFSET(HDP, 0, mmHDP_READ_CACHE_INVALIDATE));
|
||||||
amdgpu_ring_write(ring, 1);
|
amdgpu_ring_write(ring, 1);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1264,6 +1264,11 @@ static int sdma_v4_0_sw_fini(void *handle)
|
|||||||
for (i = 0; i < adev->sdma.num_instances; i++)
|
for (i = 0; i < adev->sdma.num_instances; i++)
|
||||||
amdgpu_ring_fini(&adev->sdma.instance[i].ring);
|
amdgpu_ring_fini(&adev->sdma.instance[i].ring);
|
||||||
|
|
||||||
|
for (i = 0; i < adev->sdma.num_instances; i++) {
|
||||||
|
release_firmware(adev->sdma.instance[i].fw);
|
||||||
|
adev->sdma.instance[i].fw = NULL;
|
||||||
|
}
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1714,8 +1719,13 @@ static void sdma_v4_0_set_buffer_funcs(struct amdgpu_device *adev)
|
|||||||
}
|
}
|
||||||
|
|
||||||
static const struct amdgpu_vm_pte_funcs sdma_v4_0_vm_pte_funcs = {
|
static const struct amdgpu_vm_pte_funcs sdma_v4_0_vm_pte_funcs = {
|
||||||
|
.copy_pte_num_dw = 7,
|
||||||
.copy_pte = sdma_v4_0_vm_copy_pte,
|
.copy_pte = sdma_v4_0_vm_copy_pte,
|
||||||
|
|
||||||
.write_pte = sdma_v4_0_vm_write_pte,
|
.write_pte = sdma_v4_0_vm_write_pte,
|
||||||
|
|
||||||
|
.set_max_nums_pte_pde = 0x400000 >> 3,
|
||||||
|
.set_pte_pde_num_dw = 10,
|
||||||
.set_pte_pde = sdma_v4_0_vm_set_pte_pde,
|
.set_pte_pde = sdma_v4_0_vm_set_pte_pde,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@@ -887,8 +887,13 @@ static void si_dma_set_buffer_funcs(struct amdgpu_device *adev)
 }
 
 static const struct amdgpu_vm_pte_funcs si_dma_vm_pte_funcs = {
+	.copy_pte_num_dw = 5,
 	.copy_pte = si_dma_vm_copy_pte,
+
 	.write_pte = si_dma_vm_write_pte,
+
+	.set_max_nums_pte_pde = 0xffff8 >> 3,
+	.set_pte_pde_num_dw = 9,
 	.set_pte_pde = si_dma_vm_set_pte_pde,
 };
 
@@ -1847,7 +1847,6 @@ static int si_calculate_sclk_params(struct amdgpu_device *adev,
 
 static void si_thermal_start_smc_fan_control(struct amdgpu_device *adev);
 static void si_fan_ctrl_set_default_mode(struct amdgpu_device *adev);
-static void si_dpm_set_dpm_funcs(struct amdgpu_device *adev);
 static void si_dpm_set_irq_funcs(struct amdgpu_device *adev);
 
 static struct si_power_info *si_get_pi(struct amdgpu_device *adev)
@@ -3060,9 +3059,9 @@ static int si_get_vce_clock_voltage(struct amdgpu_device *adev,
 	return ret;
 }
 
-static bool si_dpm_vblank_too_short(struct amdgpu_device *adev)
+static bool si_dpm_vblank_too_short(void *handle)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
 	/* we never hit the non-gddr5 limit so disable it */
 	u32 switch_limit = adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 0;
@@ -3871,9 +3870,10 @@ static int si_restrict_performance_levels_before_switch(struct amdgpu_device *ad
 		0 : -EINVAL;
 }
 
-static int si_dpm_force_performance_level(struct amdgpu_device *adev,
+static int si_dpm_force_performance_level(void *handle,
 				   enum amd_dpm_forced_level level)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	struct amdgpu_ps *rps = adev->pm.dpm.current_ps;
 	struct si_ps *ps = si_get_ps(rps);
 	u32 levels = ps->performance_level_count;
@@ -6575,11 +6575,12 @@ static int si_fan_ctrl_stop_smc_fan_control(struct amdgpu_device *adev)
 	}
 }
 
-static int si_dpm_get_fan_speed_percent(struct amdgpu_device *adev,
+static int si_dpm_get_fan_speed_percent(void *handle,
 				      u32 *speed)
 {
 	u32 duty, duty100;
 	u64 tmp64;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	if (adev->pm.no_fan)
 		return -ENOENT;
@@ -6600,9 +6601,10 @@ static int si_dpm_get_fan_speed_percent(struct amdgpu_device *adev,
 	return 0;
 }
 
-static int si_dpm_set_fan_speed_percent(struct amdgpu_device *adev,
+static int si_dpm_set_fan_speed_percent(void *handle,
 				      u32 speed)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	struct si_power_info *si_pi = si_get_pi(adev);
 	u32 tmp;
 	u32 duty, duty100;
@@ -6633,8 +6635,10 @@ static int si_dpm_set_fan_speed_percent(struct amdgpu_device *adev,
 	return 0;
 }
 
-static void si_dpm_set_fan_control_mode(struct amdgpu_device *adev, u32 mode)
+static void si_dpm_set_fan_control_mode(void *handle, u32 mode)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
 	if (mode) {
 		/* stop auto-manage */
 		if (adev->pm.dpm.fan.ucode_fan_control)
@@ -6649,8 +6653,9 @@ static void si_dpm_set_fan_control_mode(struct amdgpu_device *adev, u32 mode)
 	}
 }
 
-static u32 si_dpm_get_fan_control_mode(struct amdgpu_device *adev)
+static u32 si_dpm_get_fan_control_mode(void *handle)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	struct si_power_info *si_pi = si_get_pi(adev);
 	u32 tmp;
 
@@ -6946,8 +6951,9 @@ static void si_dpm_disable(struct amdgpu_device *adev)
 	ni_update_current_ps(adev, boot_ps);
 }
 
-static int si_dpm_pre_set_power_state(struct amdgpu_device *adev)
+static int si_dpm_pre_set_power_state(void *handle)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
 	struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps;
 	struct amdgpu_ps *new_ps = &requested_ps;
@@ -6984,8 +6990,9 @@ static int si_power_control_set_level(struct amdgpu_device *adev)
 	return 0;
 }
 
-static int si_dpm_set_power_state(struct amdgpu_device *adev)
+static int si_dpm_set_power_state(void *handle)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
 	struct amdgpu_ps *new_ps = &eg_pi->requested_rps;
 	struct amdgpu_ps *old_ps = &eg_pi->current_rps;
@@ -7086,8 +7093,9 @@ static int si_dpm_set_power_state(struct amdgpu_device *adev)
 	return 0;
 }
 
-static void si_dpm_post_set_power_state(struct amdgpu_device *adev)
+static void si_dpm_post_set_power_state(void *handle)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
 	struct amdgpu_ps *new_ps = &eg_pi->requested_rps;
 
@@ -7103,8 +7111,10 @@ void si_dpm_reset_asic(struct amdgpu_device *adev)
 }
 #endif
 
-static void si_dpm_display_configuration_changed(struct amdgpu_device *adev)
+static void si_dpm_display_configuration_changed(void *handle)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
 	si_program_display_gap(adev);
 }
 
@@ -7486,9 +7496,10 @@ static void si_dpm_fini(struct amdgpu_device *adev)
 	amdgpu_free_extended_power_table(adev);
 }
 
-static void si_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
+static void si_dpm_debugfs_print_current_performance_level(void *handle,
 						       struct seq_file *m)
 {
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
 	struct amdgpu_ps *rps = &eg_pi->current_rps;
 	struct si_ps *ps = si_get_ps(rps);
@@ -7860,10 +7871,11 @@ static int si_dpm_set_powergating_state(void *handle,
 }
 
 /* get temperature in millidegrees */
-static int si_dpm_get_temp(struct amdgpu_device *adev)
+static int si_dpm_get_temp(void *handle)
 {
 	u32 temp;
 	int actual_temp = 0;
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	temp = (RREG32(CG_MULT_THERMAL_STATUS) & CTF_TEMP_MASK) >>
 		CTF_TEMP_SHIFT;
@@ -7878,8 +7890,9 @@ static int si_dpm_get_temp(struct amdgpu_device *adev)
 	return actual_temp;
 }
 
-static u32 si_dpm_get_sclk(struct amdgpu_device *adev, bool low)
+static u32 si_dpm_get_sclk(void *handle, bool low)
|
||||||
{
|
{
|
||||||
|
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||||
struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
|
struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
|
||||||
struct si_ps *requested_state = si_get_ps(&eg_pi->requested_rps);
|
struct si_ps *requested_state = si_get_ps(&eg_pi->requested_rps);
|
||||||
|
|
||||||
@ -7889,8 +7902,9 @@ static u32 si_dpm_get_sclk(struct amdgpu_device *adev, bool low)
|
|||||||
return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
|
return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
|
||||||
}
|
}
|
||||||
|
|
||||||
static u32 si_dpm_get_mclk(struct amdgpu_device *adev, bool low)
|
static u32 si_dpm_get_mclk(void *handle, bool low)
|
||||||
{
|
{
|
||||||
|
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||||
struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
|
struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
|
||||||
struct si_ps *requested_state = si_get_ps(&eg_pi->requested_rps);
|
struct si_ps *requested_state = si_get_ps(&eg_pi->requested_rps);
|
||||||
|
|
||||||
@ -7900,9 +7914,11 @@ static u32 si_dpm_get_mclk(struct amdgpu_device *adev, bool low)
|
|||||||
return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk;
|
return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void si_dpm_print_power_state(struct amdgpu_device *adev,
|
static void si_dpm_print_power_state(void *handle,
|
||||||
struct amdgpu_ps *rps)
|
void *current_ps)
|
||||||
{
|
{
|
||||||
|
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||||
|
struct amdgpu_ps *rps = (struct amdgpu_ps *)current_ps;
|
||||||
struct si_ps *ps = si_get_ps(rps);
|
struct si_ps *ps = si_get_ps(rps);
|
||||||
struct rv7xx_pl *pl;
|
struct rv7xx_pl *pl;
|
||||||
int i;
|
int i;
|
||||||
@ -7927,7 +7943,6 @@ static int si_dpm_early_init(void *handle)
|
|||||||
|
|
||||||
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||||
|
|
||||||
si_dpm_set_dpm_funcs(adev);
|
|
||||||
si_dpm_set_irq_funcs(adev);
|
si_dpm_set_irq_funcs(adev);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
@ -7942,20 +7957,23 @@ static inline bool si_are_power_levels_equal(const struct rv7xx_pl *si_cpl1,
|
|||||||
(si_cpl1->vddci == si_cpl2->vddci));
|
(si_cpl1->vddci == si_cpl2->vddci));
|
||||||
}
|
}
|
||||||
|
|
||||||
static int si_check_state_equal(struct amdgpu_device *adev,
|
static int si_check_state_equal(void *handle,
|
||||||
struct amdgpu_ps *cps,
|
void *current_ps,
|
||||||
struct amdgpu_ps *rps,
|
void *request_ps,
|
||||||
bool *equal)
|
bool *equal)
|
||||||
{
|
{
|
||||||
struct si_ps *si_cps;
|
struct si_ps *si_cps;
|
||||||
struct si_ps *si_rps;
|
struct si_ps *si_rps;
|
||||||
int i;
|
int i;
|
||||||
|
struct amdgpu_ps *cps = (struct amdgpu_ps *)current_ps;
|
||||||
|
struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps;
|
||||||
|
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||||
|
|
||||||
if (adev == NULL || cps == NULL || rps == NULL || equal == NULL)
|
if (adev == NULL || cps == NULL || rps == NULL || equal == NULL)
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
|
||||||
si_cps = si_get_ps(cps);
|
si_cps = si_get_ps((struct amdgpu_ps *)cps);
|
||||||
si_rps = si_get_ps(rps);
|
si_rps = si_get_ps((struct amdgpu_ps *)rps);
|
||||||
|
|
||||||
if (si_cps == NULL) {
|
if (si_cps == NULL) {
|
||||||
printk("si_cps is NULL\n");
|
printk("si_cps is NULL\n");
|
||||||
@ -7983,9 +8001,10 @@ static int si_check_state_equal(struct amdgpu_device *adev,
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int si_dpm_read_sensor(struct amdgpu_device *adev, int idx,
|
static int si_dpm_read_sensor(void *handle, int idx,
|
||||||
void *value, int *size)
|
void *value, int *size)
|
||||||
{
|
{
|
||||||
|
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
|
||||||
struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
|
struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
|
||||||
struct amdgpu_ps *rps = &eg_pi->current_rps;
|
struct amdgpu_ps *rps = &eg_pi->current_rps;
|
||||||
struct si_ps *ps = si_get_ps(rps);
|
struct si_ps *ps = si_get_ps(rps);
|
||||||
@ -8041,7 +8060,7 @@ const struct amd_ip_funcs si_dpm_ip_funcs = {
|
|||||||
.set_powergating_state = si_dpm_set_powergating_state,
|
.set_powergating_state = si_dpm_set_powergating_state,
|
||||||
};
|
};
|
||||||
|
|
||||||
static const struct amdgpu_dpm_funcs si_dpm_funcs = {
|
const struct amd_pm_funcs si_dpm_funcs = {
|
||||||
.get_temperature = &si_dpm_get_temp,
|
.get_temperature = &si_dpm_get_temp,
|
||||||
.pre_set_power_state = &si_dpm_pre_set_power_state,
|
.pre_set_power_state = &si_dpm_pre_set_power_state,
|
||||||
.set_power_state = &si_dpm_set_power_state,
|
.set_power_state = &si_dpm_set_power_state,
|
||||||
@ -8062,12 +8081,6 @@ static const struct amdgpu_dpm_funcs si_dpm_funcs = {
|
|||||||
.read_sensor = &si_dpm_read_sensor,
|
.read_sensor = &si_dpm_read_sensor,
|
||||||
};
|
};
|
||||||
|
|
||||||
static void si_dpm_set_dpm_funcs(struct amdgpu_device *adev)
|
|
||||||
{
|
|
||||||
if (adev->pm.funcs == NULL)
|
|
||||||
adev->pm.funcs = &si_dpm_funcs;
|
|
||||||
}
|
|
||||||
|
|
||||||
static const struct amdgpu_irq_src_funcs si_dpm_irq_funcs = {
|
static const struct amdgpu_irq_src_funcs si_dpm_irq_funcs = {
|
||||||
.set = si_dpm_set_interrupt_state,
|
.set = si_dpm_set_interrupt_state,
|
||||||
.process = si_dpm_process_interrupt,
|
.process = si_dpm_process_interrupt,
|
||||||
|
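Every si_dpm hunk above applies the same mechanical conversion: the powerplay entry points drop their typed struct amdgpu_device * argument for an opaque void *handle and recover the device pointer with a cast, so they can be stored in the common amd_pm_funcs table introduced later in this diff. A minimal sketch of the convention (the function name is illustrative, not driver code):

	/* Illustrative only: the opaque-handle pattern used by the
	 * converted si_dpm callbacks. */
	static int example_dpm_callback(void *handle)
	{
		struct amdgpu_device *adev = (struct amdgpu_device *)handle;

		/* callers guarantee the handle really is an amdgpu_device */
		return adev ? 0 : -EINVAL;
	}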
@@ -246,6 +246,7 @@ enum si_display_gap
 };
 
 extern const struct amd_ip_funcs si_dpm_ip_funcs;
+extern const struct amd_pm_funcs si_dpm_funcs;
 
 struct ni_leakage_coeffients
 {
@@ -118,6 +118,19 @@ static u32 si_ih_get_wptr(struct amdgpu_device *adev)
 	return (wptr & adev->irq.ih.ptr_mask);
 }
 
+/**
+ * si_ih_prescreen_iv - prescreen an interrupt vector
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Returns true if the interrupt vector should be further processed.
+ */
+static bool si_ih_prescreen_iv(struct amdgpu_device *adev)
+{
+	/* Process all interrupts */
+	return true;
+}
+
 static void si_ih_decode_iv(struct amdgpu_device *adev,
 			    struct amdgpu_iv_entry *entry)
 {
@@ -288,6 +301,7 @@ static const struct amd_ip_funcs si_ih_ip_funcs = {
 
 static const struct amdgpu_ih_funcs si_ih_funcs = {
 	.get_wptr = si_ih_get_wptr,
+	.prescreen_iv = si_ih_prescreen_iv,
 	.decode_iv = si_ih_decode_iv,
 	.set_rptr = si_ih_set_rptr
 };
@@ -407,18 +407,27 @@ static int soc15_read_register(struct amdgpu_device *adev, u32 se_num,
 	return -EINVAL;
 }
 
-static void soc15_gpu_pci_config_reset(struct amdgpu_device *adev)
+static int soc15_asic_reset(struct amdgpu_device *adev)
 {
 	u32 i;
 
-	dev_info(adev->dev, "GPU pci config reset\n");
+	amdgpu_atombios_scratch_regs_engine_hung(adev, true);
+
+	dev_info(adev->dev, "GPU reset\n");
 
 	/* disable BM */
 	pci_clear_master(adev->pdev);
-	/* reset */
-	amdgpu_pci_config_reset(adev);
 
-	udelay(100);
+	pci_save_state(adev->pdev);
+
+	for (i = 0; i < AMDGPU_MAX_IP_NUM; i++) {
+		if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP){
+			adev->ip_blocks[i].version->funcs->soft_reset((void *)adev);
+			break;
+		}
+	}
+
+	pci_restore_state(adev->pdev);
 
 	/* wait for asic to come out of reset */
 	for (i = 0; i < adev->usec_timeout; i++) {
@@ -430,14 +439,6 @@ static void soc15_gpu_pci_config_reset(struct amdgpu_device *adev)
 		udelay(1);
 	}
 
-}
-
-static int soc15_asic_reset(struct amdgpu_device *adev)
-{
-	amdgpu_atombios_scratch_regs_engine_hung(adev, true);
-
-	soc15_gpu_pci_config_reset(adev);
-
 	amdgpu_atombios_scratch_regs_engine_hung(adev, false);
 
 	return 0;
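The reworked soc15 reset above brackets a PSP soft reset with pci_save_state()/pci_restore_state(), because the reset wipes the device's PCI config space. The general shape, as a self-contained sketch (the helper and callback names are invented for illustration):

	#include <linux/pci.h>

	/* Illustrative pattern, not driver code: quiesce DMA, snapshot the
	 * PCI config space, reset, then restore the snapshot. */
	static int reset_preserving_pci_config(struct pci_dev *pdev,
					       int (*do_reset)(struct pci_dev *pdev))
	{
		int r;

		pci_clear_master(pdev);		/* stop bus-mastering DMA */
		pci_save_state(pdev);		/* snapshot config space */
		r = do_reset(pdev);		/* the reset clobbers config space */
		pci_restore_state(pdev);	/* bring BARs and friends back */
		return r;
	}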
@@ -218,6 +218,19 @@ static u32 tonga_ih_get_wptr(struct amdgpu_device *adev)
 	return (wptr & adev->irq.ih.ptr_mask);
 }
 
+/**
+ * tonga_ih_prescreen_iv - prescreen an interrupt vector
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Returns true if the interrupt vector should be further processed.
+ */
+static bool tonga_ih_prescreen_iv(struct amdgpu_device *adev)
+{
+	/* Process all interrupts */
+	return true;
+}
+
 /**
  * tonga_ih_decode_iv - decode an interrupt vector
  *
@@ -478,6 +491,7 @@ static const struct amd_ip_funcs tonga_ih_ip_funcs = {
 
 static const struct amdgpu_ih_funcs tonga_ih_funcs = {
 	.get_wptr = tonga_ih_get_wptr,
+	.prescreen_iv = tonga_ih_prescreen_iv,
 	.decode_iv = tonga_ih_decode_iv,
 	.set_rptr = tonga_ih_set_rptr
 };
@@ -1161,7 +1161,7 @@ static void uvd_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
  */
 static void uvd_v7_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
 {
-	amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(HDP, 0, mmHDP_DEBUG0), 0));
+	amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 0));
 	amdgpu_ring_write(ring, 1);
 }
 
@@ -1011,10 +1011,6 @@ static int vce_v4_0_process_interrupt(struct amdgpu_device *adev,
 {
 	DRM_DEBUG("IH: VCE\n");
 
-	WREG32_P(SOC15_REG_OFFSET(VCE, 0, mmVCE_SYS_INT_STATUS),
-		VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK,
-		~VCE_SYS_INT_STATUS__VCE_SYS_INT_TRAP_INTERRUPT_INT_MASK);
-
 	switch (entry->src_data[0]) {
 	case 0:
 	case 1:
@@ -812,7 +812,7 @@ static void vcn_v1_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64
  */
 static void vcn_v1_0_dec_ring_emit_hdp_invalidate(struct amdgpu_ring *ring)
 {
-	amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(HDP, 0, mmHDP_DEBUG0), 0));
+	amdgpu_ring_write(ring, PACKET0(SOC15_REG_OFFSET(HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 0));
 	amdgpu_ring_write(ring, 1);
 }
 
@@ -219,13 +219,91 @@ static u32 vega10_ih_get_wptr(struct amdgpu_device *adev)
 			wptr, adev->irq.ih.rptr, tmp);
 		adev->irq.ih.rptr = tmp;
 
-		tmp = RREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL));
+		tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL));
 		tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
-		WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL), tmp);
+		WREG32_NO_KIQ(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_CNTL), tmp);
 	}
 	return (wptr & adev->irq.ih.ptr_mask);
 }
 
+/**
+ * vega10_ih_prescreen_iv - prescreen an interrupt vector
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Returns true if the interrupt vector should be further processed.
+ */
+static bool vega10_ih_prescreen_iv(struct amdgpu_device *adev)
+{
+	u32 ring_index = adev->irq.ih.rptr >> 2;
+	u32 dw0, dw3, dw4, dw5;
+	u16 pasid;
+	u64 addr, key;
+	struct amdgpu_vm *vm;
+	int r;
+
+	dw0 = le32_to_cpu(adev->irq.ih.ring[ring_index + 0]);
+	dw3 = le32_to_cpu(adev->irq.ih.ring[ring_index + 3]);
+	dw4 = le32_to_cpu(adev->irq.ih.ring[ring_index + 4]);
+	dw5 = le32_to_cpu(adev->irq.ih.ring[ring_index + 5]);
+
+	/* Filter retry page faults, let only the first one pass. If
+	 * there are too many outstanding faults, ignore them until
+	 * some faults get cleared.
+	 */
+	switch (dw0 & 0xff) {
+	case AMDGPU_IH_CLIENTID_VMC:
+	case AMDGPU_IH_CLIENTID_UTCL2:
+		break;
+	default:
+		/* Not a VM fault */
+		return true;
+	}
+
+	/* Not a retry fault */
+	if (!(dw5 & 0x80))
+		return true;
+
+	pasid = dw3 & 0xffff;
+	/* No PASID, can't identify faulting process */
+	if (!pasid)
+		return true;
+
+	addr = ((u64)(dw5 & 0xf) << 44) | ((u64)dw4 << 12);
+	key = AMDGPU_VM_FAULT(pasid, addr);
+	r = amdgpu_ih_add_fault(adev, key);
+
+	/* Hash table is full or the fault is already being processed,
+	 * ignore further page faults
+	 */
+	if (r != 0)
+		goto ignore_iv;
+
+	/* Track retry faults in per-VM fault FIFO. */
+	spin_lock(&adev->vm_manager.pasid_lock);
+	vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
+	spin_unlock(&adev->vm_manager.pasid_lock);
+	if (WARN_ON_ONCE(!vm)) {
+		/* VM not found, process it normally */
+		amdgpu_ih_clear_fault(adev, key);
+		return true;
+	}
+	/* No locking required with single writer and single reader */
+	r = kfifo_put(&vm->faults, key);
+	if (!r) {
+		/* FIFO is full. Ignore it until there is space */
+		amdgpu_ih_clear_fault(adev, key);
+		goto ignore_iv;
+	}
+
+	/* It's the first fault for this address, process it normally */
+	return true;
+
+ignore_iv:
+	adev->irq.ih.rptr += 32;
+	return false;
+}
+
 /**
  * vega10_ih_decode_iv - decode an interrupt vector
  *
@@ -310,6 +388,14 @@ static int vega10_ih_sw_init(void *handle)
 	adev->irq.ih.use_doorbell = true;
 	adev->irq.ih.doorbell_index = AMDGPU_DOORBELL64_IH << 1;
 
+	adev->irq.ih.faults = kmalloc(sizeof(*adev->irq.ih.faults), GFP_KERNEL);
+	if (!adev->irq.ih.faults)
+		return -ENOMEM;
+	INIT_CHASH_TABLE(adev->irq.ih.faults->hash,
+			 AMDGPU_PAGEFAULT_HASH_BITS, 8, 0);
+	spin_lock_init(&adev->irq.ih.faults->lock);
+	adev->irq.ih.faults->count = 0;
+
 	r = amdgpu_irq_init(adev);
 
 	return r;
@@ -322,6 +408,9 @@ static int vega10_ih_sw_fini(void *handle)
 	amdgpu_irq_fini(adev);
 	amdgpu_ih_ring_fini(adev);
 
+	kfree(adev->irq.ih.faults);
+	adev->irq.ih.faults = NULL;
+
 	return 0;
 }
 
@@ -410,6 +499,7 @@ const struct amd_ip_funcs vega10_ih_ip_funcs = {
 
 static const struct amdgpu_ih_funcs vega10_ih_funcs = {
 	.get_wptr = vega10_ih_get_wptr,
+	.prescreen_iv = vega10_ih_prescreen_iv,
 	.decode_iv = vega10_ih_decode_iv,
 	.set_rptr = vega10_ih_set_rptr
 };
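vega10_ih_prescreen_iv() above deduplicates retry page faults at two levels: a device-global hash of in-flight fault keys (amdgpu_ih_add_fault()/amdgpu_ih_clear_fault()) and a per-VM kfifo that remembers which keys to clear later. Both are keyed by one u64 built from the PASID and the faulting address via AMDGPU_VM_FAULT(); the exact encoding lives in the driver headers, so the version below is only an assumed illustration:

	/* Assumed illustration of a (pasid, address) fault key: the 16-bit
	 * PASID above a 48-bit address. Not the authoritative
	 * AMDGPU_VM_FAULT() definition. */
	static inline u64 example_vm_fault_key(u16 pasid, u64 addr)
	{
		return ((u64)pasid << 48) | (addr & 0xffffffffffffULL);
	}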
@@ -168,13 +168,6 @@ static bool device_iommu_pasid_init(struct kfd_dev *kfd)
 	pasid_limit = min_t(unsigned int,
 			(unsigned int)(1 << kfd->device_info->max_pasid_bits),
 			iommu_info.max_pasids);
-	/*
-	 * last pasid is used for kernel queues doorbells
-	 * in the future the last pasid might be used for a kernel thread.
-	 */
-	pasid_limit = min_t(unsigned int,
-				pasid_limit,
-				kfd->doorbell_process_limit - 1);
 
 	err = amd_iommu_init_device(kfd->pdev, pasid_limit);
 	if (err < 0) {
@@ -24,16 +24,15 @@
 #include <linux/mman.h>
 #include <linux/slab.h>
 #include <linux/io.h>
+#include <linux/idr.h>
 
 /*
- * This extension supports a kernel level doorbells management for
- * the kernel queues.
- * Basically the last doorbells page is devoted to kernel queues
- * and that's assures that any user process won't get access to the
- * kernel doorbells page
+ * This extension supports a kernel level doorbells management for the
+ * kernel queues using the first doorbell page reserved for the kernel.
  */
 
-#define KERNEL_DOORBELL_PASID 1
+static DEFINE_IDA(doorbell_ida);
+static unsigned int max_doorbell_slices;
 #define KFD_SIZE_OF_DOORBELL_IN_BYTES 4
 
 /*
@@ -84,13 +83,16 @@ int kfd_doorbell_init(struct kfd_dev *kfd)
 		(doorbell_aperture_size - doorbell_start_offset) /
 			doorbell_process_allocation();
 	else
-		doorbell_process_limit = 0;
+		return -ENOSPC;
 
+	if (!max_doorbell_slices ||
+	    doorbell_process_limit < max_doorbell_slices)
+		max_doorbell_slices = doorbell_process_limit;
+
 	kfd->doorbell_base = kfd->shared_resources.doorbell_physical_address +
 				doorbell_start_offset;
 
 	kfd->doorbell_id_offset = doorbell_start_offset / sizeof(u32);
-	kfd->doorbell_process_limit = doorbell_process_limit - 1;
 
 	kfd->doorbell_kernel_ptr = ioremap(kfd->doorbell_base,
 					doorbell_process_allocation());
@@ -185,11 +187,10 @@ u32 __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
 		return NULL;
 
 	/*
-	 * Calculating the kernel doorbell offset using "faked" kernel
-	 * pasid that allocated for kernel queues only
+	 * Calculating the kernel doorbell offset using the first
+	 * doorbell page.
 	 */
-	*doorbell_off = KERNEL_DOORBELL_PASID * (doorbell_process_allocation() /
-							sizeof(u32)) + inx;
+	*doorbell_off = kfd->doorbell_id_offset + inx;
 
 	pr_debug("Get kernel queue doorbell\n"
 			"     doorbell offset   == 0x%08X\n"
@@ -228,11 +229,12 @@ unsigned int kfd_queue_id_to_doorbell(struct kfd_dev *kfd,
 {
 	/*
 	 * doorbell_id_offset accounts for doorbells taken by KGD.
-	 * pasid * doorbell_process_allocation/sizeof(u32) adjusts
-	 * to the process's doorbells
+	 * index * doorbell_process_allocation/sizeof(u32) adjusts to
+	 * the process's doorbells.
 	 */
 	return kfd->doorbell_id_offset +
-		process->pasid * (doorbell_process_allocation()/sizeof(u32)) +
+		process->doorbell_index
+		* doorbell_process_allocation() / sizeof(u32) +
 		queue_id;
 }
 
@@ -250,5 +252,21 @@ phys_addr_t kfd_get_process_doorbells(struct kfd_dev *dev,
 					struct kfd_process *process)
 {
 	return dev->doorbell_base +
-		process->pasid * doorbell_process_allocation();
+		process->doorbell_index * doorbell_process_allocation();
+}
+
+int kfd_alloc_process_doorbells(struct kfd_process *process)
+{
+	int r = ida_simple_get(&doorbell_ida, 1, max_doorbell_slices,
+				GFP_KERNEL);
+	if (r > 0)
+		process->doorbell_index = r;
+
+	return r;
+}
+
+void kfd_free_process_doorbells(struct kfd_process *process)
+{
+	if (process->doorbell_index)
+		ida_simple_remove(&doorbell_ida, process->doorbell_index);
 }
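With KERNEL_DOORBELL_PASID gone, a process's doorbell page is addressed by an IDA-allocated doorbell_index instead of its PASID; the IDA starts at 1 because page 0 is now the kernel's. The doorbell-id arithmetic from kfd_queue_id_to_doorbell() above, restated as a standalone sketch whose parameter names mirror the kfd fields they stand in for:

	static unsigned int example_queue_doorbell_id(
			unsigned int id_offset,   /* kfd->doorbell_id_offset */
			unsigned int dbl_index,   /* process->doorbell_index */
			unsigned int page_bytes,  /* doorbell_process_allocation() */
			unsigned int queue_id)
	{
		return id_offset + dbl_index * (page_bytes / sizeof(u32)) + queue_id;
	}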
@@ -103,10 +103,6 @@ static int __init kfd_module_init(void)
 		return -1;
 	}
 
-	err = kfd_pasid_init();
-	if (err < 0)
-		return err;
-
 	err = kfd_chardev_init();
 	if (err < 0)
 		goto err_ioctl;
@@ -126,7 +122,6 @@ static int __init kfd_module_init(void)
 err_topology:
 	kfd_chardev_exit();
 err_ioctl:
-	kfd_pasid_exit();
 	return err;
 }
 
@@ -137,7 +132,6 @@ static void __exit kfd_module_exit(void)
 	kfd_process_destroy_wq();
 	kfd_topology_shutdown();
 	kfd_chardev_exit();
-	kfd_pasid_exit();
 	dev_info(kfd_device, "Removed module\n");
 }
 
@@ -20,78 +20,64 @@
  * OTHER DEALINGS IN THE SOFTWARE.
  */
 
-#include <linux/slab.h>
 #include <linux/types.h>
 #include "kfd_priv.h"
 
-static unsigned long *pasid_bitmap;
-static unsigned int pasid_limit;
-static DEFINE_MUTEX(pasid_mutex);
-
-int kfd_pasid_init(void)
-{
-	pasid_limit = KFD_MAX_NUM_OF_PROCESSES;
-
-	pasid_bitmap = kcalloc(BITS_TO_LONGS(pasid_limit), sizeof(long),
-				GFP_KERNEL);
-	if (!pasid_bitmap)
-		return -ENOMEM;
-
-	set_bit(0, pasid_bitmap); /* PASID 0 is reserved. */
-
-	return 0;
-}
-
-void kfd_pasid_exit(void)
-{
-	kfree(pasid_bitmap);
-}
+static unsigned int pasid_bits = 16;
+static const struct kfd2kgd_calls *kfd2kgd;
 
 bool kfd_set_pasid_limit(unsigned int new_limit)
 {
-	if (new_limit < pasid_limit) {
-		bool ok;
-
-		mutex_lock(&pasid_mutex);
-
-		/* ensure that no pasids >= new_limit are in-use */
-		ok = (find_next_bit(pasid_bitmap, pasid_limit, new_limit) ==
-			pasid_limit);
-		if (ok)
-			pasid_limit = new_limit;
-
-		mutex_unlock(&pasid_mutex);
-
-		return ok;
+	if (new_limit < 2)
+		return false;
+
+	if (new_limit < (1U << pasid_bits)) {
+		if (kfd2kgd)
+			/* We've already allocated user PASIDs, too late to
+			 * change the limit
+			 */
+			return false;
+
+		while (new_limit < (1U << pasid_bits))
+			pasid_bits--;
 	}
 
 	return true;
 }
 
-inline unsigned int kfd_get_pasid_limit(void)
+unsigned int kfd_get_pasid_limit(void)
 {
-	return pasid_limit;
+	return 1U << pasid_bits;
 }
 
 unsigned int kfd_pasid_alloc(void)
 {
-	unsigned int found;
-
-	mutex_lock(&pasid_mutex);
-
-	found = find_first_zero_bit(pasid_bitmap, pasid_limit);
-	if (found == pasid_limit)
-		found = 0;
-	else
-		set_bit(found, pasid_bitmap);
-
-	mutex_unlock(&pasid_mutex);
-
-	return found;
+	int r;
+
+	/* Find the first best KFD device for calling KGD */
+	if (!kfd2kgd) {
+		struct kfd_dev *dev = NULL;
+		unsigned int i = 0;
+
+		while ((dev = kfd_topology_enum_kfd_devices(i)) != NULL) {
+			if (dev && dev->kfd2kgd) {
+				kfd2kgd = dev->kfd2kgd;
+				break;
+			}
+			i++;
+		}
+
+		if (!kfd2kgd)
+			return false;
+	}
+
+	r = kfd2kgd->alloc_pasid(pasid_bits);
+
+	return r > 0 ? r : 0;
 }
 
 void kfd_pasid_free(unsigned int pasid)
 {
-	if (!WARN_ON(pasid == 0 || pasid >= pasid_limit))
-		clear_bit(pasid, pasid_bitmap);
+	if (kfd2kgd)
+		kfd2kgd->free_pasid(pasid);
 }
@@ -157,9 +157,6 @@ struct kfd_dev {
 					   * to HW doorbell, GFX reserved some
 					   * at the start)
 					   */
-	size_t doorbell_process_limit;	/* Number of processes we have doorbell
-					 * space for.
-					 */
 	u32 __iomem *doorbell_kernel_ptr; /* This is a pointer for a doorbells
 					   * page used by kernel queue
 					   */
@@ -495,6 +492,7 @@ struct kfd_process {
 	struct rcu_head	rcu;
 
 	unsigned int pasid;
+	unsigned int doorbell_index;
 
 	/*
 	 * List of kfd_process_device structures,
@@ -583,6 +581,10 @@ void write_kernel_doorbell(u32 __iomem *db, u32 value);
 unsigned int kfd_queue_id_to_doorbell(struct kfd_dev *kfd,
 					struct kfd_process *process,
 					unsigned int queue_id);
+phys_addr_t kfd_get_process_doorbells(struct kfd_dev *dev,
+					struct kfd_process *process);
+int kfd_alloc_process_doorbells(struct kfd_process *process);
+void kfd_free_process_doorbells(struct kfd_process *process);
 
 /* GTT Sub-Allocator */
 
@@ -694,8 +696,6 @@ int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
 void pm_release_ib(struct packet_manager *pm);
 
 uint64_t kfd_get_number_elems(struct kfd_dev *kfd);
-phys_addr_t kfd_get_process_doorbells(struct kfd_dev *dev,
-					struct kfd_process *process);
 
 /* Events */
 extern const struct kfd_event_interrupt_class event_interrupt_class_cik;
@@ -183,6 +183,7 @@ static void kfd_process_wq_release(struct work_struct *work)
 	kfd_event_free_process(p);
 
 	kfd_pasid_free(p->pasid);
+	kfd_free_process_doorbells(p);
 
 	mutex_unlock(&p->mutex);
 
@@ -288,6 +289,9 @@ static struct kfd_process *create_process(const struct task_struct *thread)
 	if (process->pasid == 0)
 		goto err_alloc_pasid;
 
+	if (kfd_alloc_process_doorbells(process) < 0)
+		goto err_alloc_doorbells;
+
 	mutex_init(&process->mutex);
 
 	process->mm = thread->mm;
@@ -329,6 +333,8 @@ err_process_pqm_init:
 	mmu_notifier_unregister_no_release(&process->mmu_notifier, process->mm);
 err_mmu_notifier:
 	mutex_destroy(&process->mutex);
+	kfd_free_process_doorbells(process);
+err_alloc_doorbells:
 	kfd_pasid_free(process->pasid);
 err_alloc_pasid:
 	kfree(process->queues);
@@ -25,6 +25,8 @@
 
 #define AMD_MAX_USEC_TIMEOUT		200000  /* 200 ms */
 
+struct seq_file;
+
 /*
  * Supported ASIC types
  */
@@ -144,6 +146,12 @@ enum amd_fan_ctrl_mode {
 	AMD_FAN_CTRL_AUTO = 2,
 };
 
+enum pp_clock_type {
+	PP_SCLK,
+	PP_MCLK,
+	PP_PCIE,
+};
+
 /* CG flags */
 #define AMD_CG_SUPPORT_GFX_MGCG			(1 << 0)
 #define AMD_CG_SUPPORT_GFX_MGLS			(1 << 1)
@@ -249,4 +257,62 @@ struct amd_ip_funcs {
 	void (*get_clockgating_state)(void *handle, u32 *flags);
 };
 
+enum amd_pp_task;
+
+struct pp_states_info;
+
+struct amd_pm_funcs {
+	int (*get_temperature)(void *handle);
+	int (*pre_set_power_state)(void *handle);
+	int (*set_power_state)(void *handle);
+	void (*post_set_power_state)(void *handle);
+	void (*display_configuration_changed)(void *handle);
+	u32 (*get_sclk)(void *handle, bool low);
+	u32 (*get_mclk)(void *handle, bool low);
+	void (*print_power_state)(void *handle, void *ps);
+	void (*debugfs_print_current_performance_level)(void *handle, struct seq_file *m);
+	int (*force_performance_level)(void *handle, enum amd_dpm_forced_level level);
+	bool (*vblank_too_short)(void *handle);
+	void (*powergate_uvd)(void *handle, bool gate);
+	void (*powergate_vce)(void *handle, bool gate);
+	void (*enable_bapm)(void *handle, bool enable);
+	void (*set_fan_control_mode)(void *handle, u32 mode);
+	u32 (*get_fan_control_mode)(void *handle);
+	int (*set_fan_speed_percent)(void *handle, u32 speed);
+	int (*get_fan_speed_percent)(void *handle, u32 *speed);
+	int (*force_clock_level)(void *handle, enum pp_clock_type type, uint32_t mask);
+	int (*print_clock_levels)(void *handle, enum pp_clock_type type, char *buf);
+	int (*get_sclk_od)(void *handle);
+	int (*set_sclk_od)(void *handle, uint32_t value);
+	int (*get_mclk_od)(void *handle);
+	int (*set_mclk_od)(void *handle, uint32_t value);
+	int (*check_state_equal)(void *handle,
+				void *cps,
+				void *rps,
+				bool *equal);
+	int (*read_sensor)(void *handle, int idx, void *value,
+				int *size);
+
+	struct amd_vce_state* (*get_vce_clock_state)(void *handle, u32 idx);
+	int (*reset_power_profile_state)(void *handle,
+			struct amd_pp_profile *request);
+	int (*get_power_profile_state)(void *handle,
+			struct amd_pp_profile *query);
+	int (*set_power_profile_state)(void *handle,
+			struct amd_pp_profile *request);
+	int (*switch_power_profile)(void *handle,
+			enum amd_pp_profile_type type);
+	int (*load_firmware)(void *handle);
+	int (*wait_for_fw_loading_complete)(void *handle);
+	enum amd_dpm_forced_level (*get_performance_level)(void *handle);
+	enum amd_pm_state_type (*get_current_power_state)(void *handle);
+	int (*dispatch_tasks)(void *handle, enum amd_pp_task task_id,
+				void *input, void *output);
+	int (*get_fan_speed_rpm)(void *handle, uint32_t *rpm);
+	int (*get_pp_num_states)(void *handle, struct pp_states_info *data);
+	int (*get_pp_table)(void *handle, char **table);
+	int (*set_pp_table)(void *handle, const char *buf, size_t size);
+};
+
+
 #endif /* __AMD_SHARED_H__ */
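Every member of the new amd_pm_funcs table above takes the same opaque void *handle first argument, which is what allowed the si_dpm callbacks earlier in this diff to drop their typed signatures. A hedged sketch of dispatching through such a table (the wrapper is invented; the driver's real dispatch macros live elsewhere):

	/* Illustrative dispatch helper, not the driver's actual wrapper. */
	static int example_pp_get_temperature(const struct amd_pm_funcs *funcs,
					      void *handle)
	{
		if (!funcs || !funcs->get_temperature)
			return -EINVAL;

		return funcs->get_temperature(handle);
	}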
@@ -5454,5 +5454,7 @@
 #define ROM_SW_DATA_64__ROM_SW_DATA__SHIFT 0x0
 #define CURRENT_PG_STATUS__VCE_PG_STATUS_MASK 0x00000002
 #define CURRENT_PG_STATUS__UVD_PG_STATUS_MASK 0x00000004
+#define SMC_SYSCON_MISC_CNTL__pre_fetcher_en_MASK 0x1
+#define SMC_SYSCON_MISC_CNTL__pre_fetcher_en__SHIFT 0
 
 #endif /* SMU_7_0_1_SH_MASK_H */
@@ -1017,6 +1017,19 @@ struct atom_14nm_combphy_tmds_vs_set
 	uint8_t margin_deemph_lane0__deemph_sel_val;
 };
 
+struct atom_i2c_reg_info {
+	uint8_t ucI2cRegIndex;
+	uint8_t ucI2cRegVal;
+};
+
+struct atom_hdmi_retimer_redriver_set {
+	uint8_t HdmiSlvAddr;
+	uint8_t HdmiRegNum;
+	uint8_t Hdmi6GRegNum;
+	struct atom_i2c_reg_info HdmiRegSetting[9];        //For non 6G Hz use
+	struct atom_i2c_reg_info Hdmi6GhzRegSetting[3];    //For 6G Hz use.
+};
+
 struct atom_integrated_system_info_v1_11
 {
 	struct atom_common_table_header table_header;
@@ -1052,7 +1065,11 @@ struct atom_integrated_system_info_v1_11
 	struct atom_14nm_dpphy_dp_tuningset dp_tuningset;
 	struct atom_14nm_dpphy_dp_tuningset dp_hbr3_tuningset;
 	struct atom_camera_data camera_info;
-	uint32_t reserved[138];
+	struct atom_hdmi_retimer_redriver_set dp0_retimer_set;   //for DP0
+	struct atom_hdmi_retimer_redriver_set dp1_retimer_set;   //for DP1
+	struct atom_hdmi_retimer_redriver_set dp2_retimer_set;   //for DP2
+	struct atom_hdmi_retimer_redriver_set dp3_retimer_set;   //for DP3
+	uint32_t reserved[108];
 };
 
 
@@ -100,6 +100,7 @@ enum cgs_system_info_id {
 	CGS_SYSTEM_INFO_GFX_SE_INFO,
 	CGS_SYSTEM_INFO_PCIE_SUB_SYS_ID,
 	CGS_SYSTEM_INFO_PCIE_SUB_SYS_VENDOR_ID,
+	CGS_SYSTEM_INFO_PCIE_BUS_DEVFN,
 	CGS_SYSTEM_INFO_ID_MAXIMUM,
 };
 
@@ -193,8 +194,6 @@ struct cgs_acpi_method_info {
  * @type:	memory type
  * @size:	size in bytes
  * @align:	alignment in bytes
- * @min_offset: minimum offset from start of heap
- * @max_offset: maximum offset from start of heap
  * @handle:	memory handle (output)
  *
  * The memory types CGS_GPU_MEM_TYPE_*_CONTIG_FB force contiguous
@@ -216,7 +215,6 @@ struct cgs_acpi_method_info {
  */
 typedef int (*cgs_alloc_gpu_mem_t)(struct cgs_device *cgs_device, enum cgs_gpu_mem_type type,
 				   uint64_t size, uint64_t align,
-				   uint64_t min_offset, uint64_t max_offset,
 				   cgs_handle_t *handle);
 
 /**
@@ -310,6 +308,22 @@ typedef uint32_t (*cgs_read_ind_register_t)(struct cgs_device *cgs_device, enum
 typedef void (*cgs_write_ind_register_t)(struct cgs_device *cgs_device, enum cgs_ind_reg space,
 					 unsigned index, uint32_t value);
 
+#define CGS_REG_FIELD_SHIFT(reg, field) reg##__##field##__SHIFT
+#define CGS_REG_FIELD_MASK(reg, field) reg##__##field##_MASK
+
+#define CGS_REG_SET_FIELD(orig_val, reg, field, field_val)			\
+	(((orig_val) & ~CGS_REG_FIELD_MASK(reg, field)) |			\
+	 (CGS_REG_FIELD_MASK(reg, field) & ((field_val) << CGS_REG_FIELD_SHIFT(reg, field))))
+
+#define CGS_REG_GET_FIELD(value, reg, field)				\
+	(((value) & CGS_REG_FIELD_MASK(reg, field)) >> CGS_REG_FIELD_SHIFT(reg, field))
+
+#define CGS_WREG32_FIELD(device, reg, field, val)	\
+	cgs_write_register(device, mm##reg, (cgs_read_register(device, mm##reg) & ~CGS_REG_FIELD_MASK(reg, field)) | (val) << CGS_REG_FIELD_SHIFT(reg, field))
+
+#define CGS_WREG32_FIELD_IND(device, space, reg, field, val)	\
+	cgs_write_ind_register(device, space, ix##reg, (cgs_read_ind_register(device, space, ix##reg) & ~CGS_REG_FIELD_MASK(reg, field)) | (val) << CGS_REG_FIELD_SHIFT(reg, field))
+
 /**
  * cgs_get_pci_resource() - provide access to a device resource (PCI BAR)
  * @cgs_device:	opaque device handle
@@ -463,8 +477,8 @@ struct cgs_device
 #define CGS_OS_CALL(func,dev,...) \
 	(((struct cgs_device *)dev)->os_ops->func(dev, ##__VA_ARGS__))
 
-#define cgs_alloc_gpu_mem(dev,type,size,align,min_off,max_off,handle)	\
-	CGS_CALL(alloc_gpu_mem,dev,type,size,align,min_off,max_off,handle)
+#define cgs_alloc_gpu_mem(dev,type,size,align,handle)	\
+	CGS_CALL(alloc_gpu_mem,dev,type,size,align,handle)
 #define cgs_free_gpu_mem(dev,handle)		\
 	CGS_CALL(free_gpu_mem,dev,handle)
 #define cgs_gmap_gpu_mem(dev,handle,mcaddr)	\
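The new CGS_REG_*_FIELD macros above rely on the <REG>__<FIELD>_MASK / <REG>__<FIELD>__SHIFT naming convention of the generated ASIC register headers (see the SMC_SYSCON_MISC_CNTL__pre_fetcher_en pair added earlier in this diff). A small self-contained illustration with made-up register and field names:

	/* FOO/BAR are invented for illustration; real mask/shift pairs
	 * come from the asic_reg headers. */
	#define FOO__BAR_MASK    0x0000ff00
	#define FOO__BAR__SHIFT  8

	static uint32_t cgs_field_demo(void)
	{
		uint32_t v = 0;

		v = CGS_REG_SET_FIELD(v, FOO, BAR, 0x3a);  /* v == 0x3a00 */
		return CGS_REG_GET_FIELD(v, FOO, BAR);     /* returns 0x3a */
	}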
@@ -112,6 +112,9 @@ struct tile_config {
  *
  * @get_max_engine_clock_in_mhz: Retrieves maximum GPU clock in MHz
  *
+ * @alloc_pasid: Allocate a PASID
+ * @free_pasid: Free a PASID
+ *
  * @program_sh_mem_settings: A function that should initiate the memory
  * properties such as main aperture memory type (cache / non cached) and
  * secondary aperture base address, size and memory type.
@@ -160,6 +163,9 @@ struct kfd2kgd_calls {
 
 	uint32_t (*get_max_engine_clock_in_mhz)(struct kgd_dev *kgd);
 
+	int (*alloc_pasid)(unsigned int bits);
+	void (*free_pasid)(unsigned int pasid);
+
 	/* Register access functions */
 	void (*program_sh_mem_settings)(struct kgd_dev *kgd, uint32_t vmid,
 			uint32_t sh_mem_config,	uint32_t sh_mem_ape1_base,
drivers/gpu/drm/amd/include/linux/chash.h (new file, 366 lines)
@@ -0,0 +1,366 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _LINUX_CHASH_H
+#define _LINUX_CHASH_H
+
+#include <linux/types.h>
+#include <linux/hash.h>
+#include <linux/bug.h>
+#include <asm/bitsperlong.h>
+
+#if BITS_PER_LONG == 32
+# define _CHASH_LONG_SHIFT 5
+#elif BITS_PER_LONG == 64
+# define _CHASH_LONG_SHIFT 6
+#else
+# error "Unexpected BITS_PER_LONG"
+#endif
+
+struct __chash_table {
+	u8 bits;
+	u8 key_size;
+	unsigned int value_size;
+	u32 size_mask;
+	unsigned long *occup_bitmap, *valid_bitmap;
+	union {
+		u32 *keys32;
+		u64 *keys64;
+	};
+	u8 *values;
+
+#ifdef CONFIG_CHASH_STATS
+	u64 hits, hits_steps, hits_time_ns;
+	u64 miss, miss_steps, miss_time_ns;
+	u64 relocs, reloc_dist;
+#endif
+};
+
+#define __CHASH_BITMAP_SIZE(bits)				\
+	(((1 << (bits)) + BITS_PER_LONG - 1) / BITS_PER_LONG)
+#define __CHASH_ARRAY_SIZE(bits, size)				\
+	((((size) << (bits)) + sizeof(long) - 1) / sizeof(long))
+
+#define __CHASH_DATA_SIZE(bits, key_size, value_size)	\
+	(__CHASH_BITMAP_SIZE(bits) * 2 +		\
+	 __CHASH_ARRAY_SIZE(bits, key_size) +		\
+	 __CHASH_ARRAY_SIZE(bits, value_size))
+
+#define STRUCT_CHASH_TABLE(bits, key_size, value_size)			\
+	struct {							\
+		struct __chash_table table;				\
+		unsigned long data					\
+			[__CHASH_DATA_SIZE(bits, key_size, value_size)];\
+	}
+
+/**
+ * struct chash_table - Dynamically allocated closed hash table
+ *
+ * Use this struct for dynamically allocated hash tables (using
+ * chash_table_alloc and chash_table_free), where the size is
+ * determined at runtime.
+ */
+struct chash_table {
+	struct __chash_table table;
+	unsigned long *data;
+};
+
+/**
+ * DECLARE_CHASH_TABLE - macro to declare a closed hash table
+ * @table: name of the declared hash table
+ * @bts: Table size will be 2^bits entries
+ * @key_sz: Size of hash keys in bytes, 4 or 8
+ * @val_sz: Size of data values in bytes, can be 0
+ *
+ * This declares the hash table variable with a static size.
+ *
+ * The closed hash table stores key-value pairs with low memory and
+ * lookup overhead. In operation it performs no dynamic memory
+ * management. The data being stored does not require any
+ * list_heads. The hash table performs best with small @val_sz and as
+ * long as some space (about 50%) is left free in the table. But the
+ * table can still work reasonably efficiently even when filled up to
+ * about 90%. If bigger data items need to be stored and looked up,
+ * store the pointer to it as value in the hash table.
+ *
+ * @val_sz may be 0. This can be useful when all the stored
+ * information is contained in the key itself and the fact that it is
+ * in the hash table (or not).
+ */
+#define DECLARE_CHASH_TABLE(table, bts, key_sz, val_sz)		\
+	STRUCT_CHASH_TABLE(bts, key_sz, val_sz) table
+
+#ifdef CONFIG_CHASH_STATS
+#define __CHASH_STATS_INIT(prefix),		\
+		prefix.hits = 0,		\
+		prefix.hits_steps = 0,		\
+		prefix.hits_time_ns = 0,	\
+		prefix.miss = 0,		\
+		prefix.miss_steps = 0,		\
+		prefix.miss_time_ns = 0,	\
+		prefix.relocs = 0,		\
+		prefix.reloc_dist = 0
+#else
+#define __CHASH_STATS_INIT(prefix)
+#endif
+
+#define __CHASH_TABLE_INIT(prefix, data, bts, key_sz, val_sz)	\
+	prefix.bits = (bts),					\
+	prefix.key_size = (key_sz),				\
+	prefix.value_size = (val_sz),				\
+	prefix.size_mask = ((1 << bts) - 1),			\
+	prefix.occup_bitmap = &data[0],				\
+	prefix.valid_bitmap = &data				\
+		[__CHASH_BITMAP_SIZE(bts)],			\
+	prefix.keys64 = (u64 *)&data				\
+		[__CHASH_BITMAP_SIZE(bts) * 2],			\
+	prefix.values = (u8 *)&data				\
+		[__CHASH_BITMAP_SIZE(bts) * 2 +			\
+		 __CHASH_ARRAY_SIZE(bts, key_sz)]		\
+	__CHASH_STATS_INIT(prefix)
+
+/**
+ * DEFINE_CHASH_TABLE - macro to define and initialize a closed hash table
+ * @tbl: name of the declared hash table
+ * @bts: Table size will be 2^bits entries
+ * @key_sz: Size of hash keys in bytes, 4 or 8
+ * @val_sz: Size of data values in bytes, can be 0
+ *
+ * Note: the macro can be used for global and local hash table variables.
+ */
+#define DEFINE_CHASH_TABLE(tbl, bts, key_sz, val_sz)			\
+	DECLARE_CHASH_TABLE(tbl, bts, key_sz, val_sz) = {		\
+		.table = {						\
+			__CHASH_TABLE_INIT(, (tbl).data, bts, key_sz, val_sz) \
+		},							\
+		.data = {0}						\
+	}
+
+/**
+ * INIT_CHASH_TABLE - Initialize a hash table declared by DECLARE_CHASH_TABLE
+ * @tbl: name of the declared hash table
+ * @bts: Table size will be 2^bits entries
+ * @key_sz: Size of hash keys in bytes, 4 or 8
+ * @val_sz: Size of data values in bytes, can be 0
+ */
+#define INIT_CHASH_TABLE(tbl, bts, key_sz, val_sz)			\
+	__CHASH_TABLE_INIT(((tbl).table), (tbl).data, bts, key_sz, val_sz)
+
+int chash_table_alloc(struct chash_table *table, u8 bits, u8 key_size,
+		      unsigned int value_size, gfp_t gfp_mask);
+void chash_table_free(struct chash_table *table);
+
+/**
+ * chash_table_dump_stats - Dump statistics of a closed hash table
+ * @tbl: Pointer to the table structure
+ *
+ * Dumps some performance statistics of the table gathered in operation
+ * in the kernel log using pr_debug. If CONFIG_DYNAMIC_DEBUG is enabled,
+ * user must turn on messages for chash.c (file chash.c +p).
+ */
+#ifdef CONFIG_CHASH_STATS
+#define chash_table_dump_stats(tbl) __chash_table_dump_stats(&(*tbl).table)
+
+void __chash_table_dump_stats(struct __chash_table *table);
+#else
+#define chash_table_dump_stats(tbl)
+#endif
+
+/**
+ * chash_table_reset_stats - Reset statistics of a closed hash table
+ * @tbl: Pointer to the table structure
+ */
+#ifdef CONFIG_CHASH_STATS
+#define chash_table_reset_stats(tbl) __chash_table_reset_stats(&(*tbl).table)
+
+static inline void __chash_table_reset_stats(struct __chash_table *table)
+{
+	(void)table __CHASH_STATS_INIT((*table));
+}
+#else
+#define chash_table_reset_stats(tbl)
+#endif
+
+/**
+ * chash_table_copy_in - Copy a new value into the hash table
+ * @tbl: Pointer to the table structure
+ * @key: Key of the entry to add or update
+ * @value: Pointer to value to copy, may be NULL
+ *
+ * If @key already has an entry, its value is replaced. Otherwise a
+ * new entry is added. If @value is NULL, the value is left unchanged
+ * or uninitialized. Returns 1 if an entry already existed, 0 if a new
+ * entry was added or %-ENOMEM if there was no free space in the
+ * table.
+ */
+#define chash_table_copy_in(tbl, key, value)			\
+	__chash_table_copy_in(&(*tbl).table, key, value)
+
+int __chash_table_copy_in(struct __chash_table *table, u64 key,
+			  const void *value);
+
+/**
+ * chash_table_copy_out - Copy a value out of the hash table
+ * @tbl: Pointer to the table structure
+ * @key: Key of the entry to find
+ * @value: Pointer to value to copy, may be NULL
+ *
+ * If @value is not NULL and the table has a non-0 value_size, the
+ * value at @key is copied to @value. Returns the slot index of the
+ * entry or %-EINVAL if @key was not found.
+ */
+#define chash_table_copy_out(tbl, key, value)			\
+	__chash_table_copy_out(&(*tbl).table, key, value, false)
+
+int __chash_table_copy_out(struct __chash_table *table, u64 key,
+			   void *value, bool remove);
+
+/**
+ * chash_table_remove - Remove an entry from the hash table
+ * @tbl: Pointer to the table structure
+ * @key: Key of the entry to find
+ * @value: Pointer to value to copy, may be NULL
+ *
+ * If @value is not NULL and the table has a non-0 value_size, the
+ * value at @key is copied to @value. The entry is removed from the
+ * table. Returns the slot index of the removed entry or %-EINVAL if
+ * @key was not found.
+ */
+#define chash_table_remove(tbl, key, value)			\
+	__chash_table_copy_out(&(*tbl).table, key, value, true)
+
/*
|
||||||
|
* Low level iterator API used internally by the above functions.
|
||||||
|
*/
|
||||||
|
struct chash_iter {
|
||||||
|
struct __chash_table *table;
|
||||||
|
unsigned long mask;
|
||||||
|
int slot;
|
||||||
|
};
|
||||||
|
|
||||||
|
/**
|
||||||
|
* CHASH_ITER_INIT - Initialize a hash table iterator
|
||||||
|
* @tbl: Pointer to hash table to iterate over
|
||||||
|
* @s: Initial slot number
|
||||||
|
*/
|
||||||
|
#define CHASH_ITER_INIT(table, s) { \
|
||||||
|
table, \
|
||||||
|
1UL << ((s) & (BITS_PER_LONG - 1)), \
|
||||||
|
s \
|
||||||
|
}
|
||||||
|
/**
|
||||||
|
* CHASH_ITER_SET - Set hash table iterator to new slot
|
||||||
|
* @iter: Iterator
|
||||||
|
* @s: Slot number
|
||||||
|
*/
|
||||||
|
#define CHASH_ITER_SET(iter, s) \
|
||||||
|
(iter).mask = 1UL << ((s) & (BITS_PER_LONG - 1)), \
|
||||||
|
(iter).slot = (s)
|
||||||
|
/**
|
||||||
|
* CHASH_ITER_INC - Increment hash table iterator
|
||||||
|
* @table: Hash table to iterate over
|
||||||
|
*
|
||||||
|
* Wraps around at the end.
|
||||||
|
*/
|
||||||
|
#define CHASH_ITER_INC(iter) do { \
|
||||||
|
(iter).mask = (iter).mask << 1 | \
|
||||||
|
(iter).mask >> (BITS_PER_LONG - 1); \
|
||||||
|
(iter).slot = ((iter).slot + 1) & (iter).table->size_mask; \
|
||||||
|
} while (0)
|
||||||
|
|
||||||
|
static inline bool chash_iter_is_valid(const struct chash_iter iter)
|
||||||
|
{
|
||||||
|
BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits));
|
||||||
|
return !!(iter.table->valid_bitmap[iter.slot >> _CHASH_LONG_SHIFT] &
|
||||||
|
iter.mask);
|
||||||
|
}
|
||||||
|
static inline bool chash_iter_is_empty(const struct chash_iter iter)
|
||||||
|
{
|
||||||
|
BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits));
|
||||||
|
return !(iter.table->occup_bitmap[iter.slot >> _CHASH_LONG_SHIFT] &
|
||||||
|
iter.mask);
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline void chash_iter_set_valid(const struct chash_iter iter)
|
||||||
|
{
|
||||||
|
BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits));
|
||||||
|
iter.table->valid_bitmap[iter.slot >> _CHASH_LONG_SHIFT] |= iter.mask;
|
||||||
|
iter.table->occup_bitmap[iter.slot >> _CHASH_LONG_SHIFT] |= iter.mask;
|
||||||
|
}
|
||||||
|
static inline void chash_iter_set_invalid(const struct chash_iter iter)
|
||||||
|
{
|
||||||
|
BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits));
|
||||||
|
iter.table->valid_bitmap[iter.slot >> _CHASH_LONG_SHIFT] &= ~iter.mask;
|
||||||
|
}
|
||||||
|
static inline void chash_iter_set_empty(const struct chash_iter iter)
|
||||||
|
{
|
||||||
|
BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits));
|
||||||
|
iter.table->occup_bitmap[iter.slot >> _CHASH_LONG_SHIFT] &= ~iter.mask;
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline u32 chash_iter_key32(const struct chash_iter iter)
|
||||||
|
{
|
||||||
|
BUG_ON(iter.table->key_size != 4);
|
||||||
|
BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits));
|
||||||
|
return iter.table->keys32[iter.slot];
|
||||||
|
}
|
||||||
|
static inline u64 chash_iter_key64(const struct chash_iter iter)
|
||||||
|
{
|
||||||
|
BUG_ON(iter.table->key_size != 8);
|
||||||
|
BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits));
|
||||||
|
return iter.table->keys64[iter.slot];
|
||||||
|
}
|
||||||
|
static inline u64 chash_iter_key(const struct chash_iter iter)
|
||||||
|
{
|
||||||
|
BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits));
|
||||||
|
return (iter.table->key_size == 4) ?
|
||||||
|
iter.table->keys32[iter.slot] : iter.table->keys64[iter.slot];
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline u32 chash_iter_hash32(const struct chash_iter iter)
|
||||||
|
{
|
||||||
|
BUG_ON(iter.table->key_size != 4);
|
||||||
|
return hash_32(chash_iter_key32(iter), iter.table->bits);
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline u32 chash_iter_hash64(const struct chash_iter iter)
|
||||||
|
{
|
||||||
|
BUG_ON(iter.table->key_size != 8);
|
||||||
|
return hash_64(chash_iter_key64(iter), iter.table->bits);
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline u32 chash_iter_hash(const struct chash_iter iter)
|
||||||
|
{
|
||||||
|
return (iter.table->key_size == 4) ?
|
||||||
|
hash_32(chash_iter_key32(iter), iter.table->bits) :
|
||||||
|
hash_64(chash_iter_key64(iter), iter.table->bits);
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline void *chash_iter_value(const struct chash_iter iter)
|
||||||
|
{
|
||||||
|
BUG_ON((unsigned)iter.slot >= (1 << iter.table->bits));
|
||||||
|
return iter.table->values +
|
||||||
|
((unsigned long)iter.slot * iter.table->value_size);
|
||||||
|
}
|
||||||
|
|
||||||
|
#endif /* _LINUX_CHASH_H */
|
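For orientation, here is a minimal usage sketch of the table API declared
above. It is not part of the patch; the table size, key width, value type
and error handling are illustrative assumptions based on the kernel-doc
comments:

	struct chash_table tbl;
	u64 val = 42, out;

	/* 2^8 slots, 8-byte keys, 8-byte values */
	if (chash_table_alloc(&tbl, 8, 8, sizeof(u64), GFP_KERNEL))
		return;
	chash_table_copy_in(&tbl, 0x1234, &val);	/* returns 0: new entry */
	if (chash_table_copy_out(&tbl, 0x1234, &out) >= 0)
		pr_debug("chash: got %llu\n", out);	/* out == 42 */
	chash_table_remove(&tbl, 0x1234, NULL);		/* drop the entry */
	chash_table_free(&tbl);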
@@ -284,8 +284,8 @@ struct v9_mqd
 	uint32_t gds_save_mask_hi;
 	uint32_t ctx_save_base_addr_lo;
 	uint32_t ctx_save_base_addr_hi;
-	uint32_t reserved_126;
-	uint32_t reserved_127;
+	uint32_t dynamic_cu_mask_addr_lo;
+	uint32_t dynamic_cu_mask_addr_hi;
 	uint32_t cp_mqd_base_addr_lo;
 	uint32_t cp_mqd_base_addr_hi;
 	uint32_t cp_hqd_active;
@@ -672,6 +672,14 @@ struct v9_mqd
 	uint32_t reserved_511;
 };

+struct v9_mqd_allocation {
+	struct v9_mqd mqd;
+	uint32_t wptr_poll_mem;
+	uint32_t rptr_report_mem;
+	uint32_t dynamic_cu_mask;
+	uint32_t dynamic_rb_mask;
+};
+
 /* from vega10 all CSA format is shifted to chain ib compatible mode */
 struct v9_ce_ib_state {
 	/* section of non chained ib part */
@@ -423,265 +423,6 @@ struct vi_mqd_allocation
 	uint32_t dynamic_rb_mask;
 };

-struct cz_mqd {
-	uint32_t header;
-	uint32_t compute_dispatch_initiator;
-	uint32_t compute_dim_x;
-	uint32_t compute_dim_y;
-	uint32_t compute_dim_z;
-	uint32_t compute_start_x;
-	uint32_t compute_start_y;
-	uint32_t compute_start_z;
-	uint32_t compute_num_thread_x;
-	uint32_t compute_num_thread_y;
-	uint32_t compute_num_thread_z;
-	uint32_t compute_pipelinestat_enable;
-	uint32_t compute_perfcount_enable;
-	uint32_t compute_pgm_lo;
-	uint32_t compute_pgm_hi;
-	uint32_t compute_tba_lo;
-	uint32_t compute_tba_hi;
-	uint32_t compute_tma_lo;
-	uint32_t compute_tma_hi;
-	uint32_t compute_pgm_rsrc1;
-	uint32_t compute_pgm_rsrc2;
-	uint32_t compute_vmid;
-	uint32_t compute_resource_limits;
-	uint32_t compute_static_thread_mgmt_se0;
-	uint32_t compute_static_thread_mgmt_se1;
-	uint32_t compute_tmpring_size;
-	uint32_t compute_static_thread_mgmt_se2;
-	uint32_t compute_static_thread_mgmt_se3;
-	uint32_t compute_restart_x;
-	uint32_t compute_restart_y;
-	uint32_t compute_restart_z;
-	uint32_t compute_thread_trace_enable;
-	uint32_t compute_misc_reserved;
-	uint32_t compute_dispatch_id;
-	uint32_t compute_threadgroup_id;
-	uint32_t compute_relaunch;
-	uint32_t compute_wave_restore_addr_lo;
-	uint32_t compute_wave_restore_addr_hi;
-	uint32_t compute_wave_restore_control;
-	uint32_t reserved_39;
-	uint32_t reserved_40;
-	uint32_t reserved_41;
-	uint32_t reserved_42;
-	uint32_t reserved_43;
-	uint32_t reserved_44;
-	uint32_t reserved_45;
-	uint32_t reserved_46;
-	uint32_t reserved_47;
-	uint32_t reserved_48;
-	uint32_t reserved_49;
-	uint32_t reserved_50;
-	uint32_t reserved_51;
-	uint32_t reserved_52;
-	uint32_t reserved_53;
-	uint32_t reserved_54;
-	uint32_t reserved_55;
-	uint32_t reserved_56;
-	uint32_t reserved_57;
-	uint32_t reserved_58;
-	uint32_t reserved_59;
-	uint32_t reserved_60;
-	uint32_t reserved_61;
-	uint32_t reserved_62;
-	uint32_t reserved_63;
-	uint32_t reserved_64;
-	uint32_t compute_user_data_0;
-	uint32_t compute_user_data_1;
-	uint32_t compute_user_data_2;
-	uint32_t compute_user_data_3;
-	uint32_t compute_user_data_4;
-	uint32_t compute_user_data_5;
-	uint32_t compute_user_data_6;
-	uint32_t compute_user_data_7;
-	uint32_t compute_user_data_8;
-	uint32_t compute_user_data_9;
-	uint32_t compute_user_data_10;
-	uint32_t compute_user_data_11;
-	uint32_t compute_user_data_12;
-	uint32_t compute_user_data_13;
-	uint32_t compute_user_data_14;
-	uint32_t compute_user_data_15;
-	uint32_t cp_compute_csinvoc_count_lo;
-	uint32_t cp_compute_csinvoc_count_hi;
-	uint32_t reserved_83;
-	uint32_t reserved_84;
-	uint32_t reserved_85;
-	uint32_t cp_mqd_query_time_lo;
-	uint32_t cp_mqd_query_time_hi;
-	uint32_t cp_mqd_connect_start_time_lo;
-	uint32_t cp_mqd_connect_start_time_hi;
-	uint32_t cp_mqd_connect_end_time_lo;
-	uint32_t cp_mqd_connect_end_time_hi;
-	uint32_t cp_mqd_connect_end_wf_count;
-	uint32_t cp_mqd_connect_end_pq_rptr;
-	uint32_t cp_mqd_connect_end_pq_wptr;
-	uint32_t cp_mqd_connect_end_ib_rptr;
-	uint32_t reserved_96;
-	uint32_t reserved_97;
-	uint32_t cp_mqd_save_start_time_lo;
-	uint32_t cp_mqd_save_start_time_hi;
-	uint32_t cp_mqd_save_end_time_lo;
-	uint32_t cp_mqd_save_end_time_hi;
-	uint32_t cp_mqd_restore_start_time_lo;
-	uint32_t cp_mqd_restore_start_time_hi;
-	uint32_t cp_mqd_restore_end_time_lo;
-	uint32_t cp_mqd_restore_end_time_hi;
-	uint32_t reserved_106;
-	uint32_t reserved_107;
-	uint32_t gds_cs_ctxsw_cnt0;
-	uint32_t gds_cs_ctxsw_cnt1;
-	uint32_t gds_cs_ctxsw_cnt2;
-	uint32_t gds_cs_ctxsw_cnt3;
-	uint32_t reserved_112;
-	uint32_t reserved_113;
-	uint32_t cp_pq_exe_status_lo;
-	uint32_t cp_pq_exe_status_hi;
-	uint32_t cp_packet_id_lo;
-	uint32_t cp_packet_id_hi;
-	uint32_t cp_packet_exe_status_lo;
-	uint32_t cp_packet_exe_status_hi;
-	uint32_t gds_save_base_addr_lo;
-	uint32_t gds_save_base_addr_hi;
-	uint32_t gds_save_mask_lo;
-	uint32_t gds_save_mask_hi;
-	uint32_t ctx_save_base_addr_lo;
-	uint32_t ctx_save_base_addr_hi;
-	uint32_t reserved_126;
-	uint32_t reserved_127;
-	uint32_t cp_mqd_base_addr_lo;
-	uint32_t cp_mqd_base_addr_hi;
-	uint32_t cp_hqd_active;
-	uint32_t cp_hqd_vmid;
-	uint32_t cp_hqd_persistent_state;
-	uint32_t cp_hqd_pipe_priority;
-	uint32_t cp_hqd_queue_priority;
-	uint32_t cp_hqd_quantum;
-	uint32_t cp_hqd_pq_base_lo;
-	uint32_t cp_hqd_pq_base_hi;
-	uint32_t cp_hqd_pq_rptr;
-	uint32_t cp_hqd_pq_rptr_report_addr_lo;
-	uint32_t cp_hqd_pq_rptr_report_addr_hi;
-	uint32_t cp_hqd_pq_wptr_poll_addr_lo;
-	uint32_t cp_hqd_pq_wptr_poll_addr_hi;
-	uint32_t cp_hqd_pq_doorbell_control;
-	uint32_t cp_hqd_pq_wptr;
-	uint32_t cp_hqd_pq_control;
-	uint32_t cp_hqd_ib_base_addr_lo;
-	uint32_t cp_hqd_ib_base_addr_hi;
-	uint32_t cp_hqd_ib_rptr;
-	uint32_t cp_hqd_ib_control;
-	uint32_t cp_hqd_iq_timer;
-	uint32_t cp_hqd_iq_rptr;
-	uint32_t cp_hqd_dequeue_request;
-	uint32_t cp_hqd_dma_offload;
-	uint32_t cp_hqd_sema_cmd;
-	uint32_t cp_hqd_msg_type;
-	uint32_t cp_hqd_atomic0_preop_lo;
-	uint32_t cp_hqd_atomic0_preop_hi;
-	uint32_t cp_hqd_atomic1_preop_lo;
-	uint32_t cp_hqd_atomic1_preop_hi;
-	uint32_t cp_hqd_hq_status0;
-	uint32_t cp_hqd_hq_control0;
-	uint32_t cp_mqd_control;
-	uint32_t cp_hqd_hq_status1;
-	uint32_t cp_hqd_hq_control1;
-	uint32_t cp_hqd_eop_base_addr_lo;
-	uint32_t cp_hqd_eop_base_addr_hi;
-	uint32_t cp_hqd_eop_control;
-	uint32_t cp_hqd_eop_rptr;
-	uint32_t cp_hqd_eop_wptr;
-	uint32_t cp_hqd_eop_done_events;
-	uint32_t cp_hqd_ctx_save_base_addr_lo;
-	uint32_t cp_hqd_ctx_save_base_addr_hi;
-	uint32_t cp_hqd_ctx_save_control;
-	uint32_t cp_hqd_cntl_stack_offset;
-	uint32_t cp_hqd_cntl_stack_size;
-	uint32_t cp_hqd_wg_state_offset;
-	uint32_t cp_hqd_ctx_save_size;
-	uint32_t cp_hqd_gds_resource_state;
-	uint32_t cp_hqd_error;
-	uint32_t cp_hqd_eop_wptr_mem;
-	uint32_t cp_hqd_eop_dones;
-	uint32_t reserved_182;
-	uint32_t reserved_183;
-	uint32_t reserved_184;
-	uint32_t reserved_185;
-	uint32_t reserved_186;
-	uint32_t reserved_187;
-	uint32_t reserved_188;
-	uint32_t reserved_189;
-	uint32_t reserved_190;
-	uint32_t reserved_191;
-	uint32_t iqtimer_pkt_header;
-	uint32_t iqtimer_pkt_dw0;
-	uint32_t iqtimer_pkt_dw1;
-	uint32_t iqtimer_pkt_dw2;
-	uint32_t iqtimer_pkt_dw3;
-	uint32_t iqtimer_pkt_dw4;
-	uint32_t iqtimer_pkt_dw5;
-	uint32_t iqtimer_pkt_dw6;
-	uint32_t iqtimer_pkt_dw7;
-	uint32_t iqtimer_pkt_dw8;
-	uint32_t iqtimer_pkt_dw9;
-	uint32_t iqtimer_pkt_dw10;
-	uint32_t iqtimer_pkt_dw11;
-	uint32_t iqtimer_pkt_dw12;
-	uint32_t iqtimer_pkt_dw13;
-	uint32_t iqtimer_pkt_dw14;
-	uint32_t iqtimer_pkt_dw15;
-	uint32_t iqtimer_pkt_dw16;
-	uint32_t iqtimer_pkt_dw17;
-	uint32_t iqtimer_pkt_dw18;
-	uint32_t iqtimer_pkt_dw19;
-	uint32_t iqtimer_pkt_dw20;
-	uint32_t iqtimer_pkt_dw21;
-	uint32_t iqtimer_pkt_dw22;
-	uint32_t iqtimer_pkt_dw23;
-	uint32_t iqtimer_pkt_dw24;
-	uint32_t iqtimer_pkt_dw25;
-	uint32_t iqtimer_pkt_dw26;
-	uint32_t iqtimer_pkt_dw27;
-	uint32_t iqtimer_pkt_dw28;
-	uint32_t iqtimer_pkt_dw29;
-	uint32_t iqtimer_pkt_dw30;
-	uint32_t iqtimer_pkt_dw31;
-	uint32_t reserved_225;
-	uint32_t reserved_226;
-	uint32_t reserved_227;
-	uint32_t set_resources_header;
-	uint32_t set_resources_dw1;
-	uint32_t set_resources_dw2;
-	uint32_t set_resources_dw3;
-	uint32_t set_resources_dw4;
-	uint32_t set_resources_dw5;
-	uint32_t set_resources_dw6;
-	uint32_t set_resources_dw7;
-	uint32_t reserved_236;
-	uint32_t reserved_237;
-	uint32_t reserved_238;
-	uint32_t reserved_239;
-	uint32_t queue_doorbell_id0;
-	uint32_t queue_doorbell_id1;
-	uint32_t queue_doorbell_id2;
-	uint32_t queue_doorbell_id3;
-	uint32_t queue_doorbell_id4;
-	uint32_t queue_doorbell_id5;
-	uint32_t queue_doorbell_id6;
-	uint32_t queue_doorbell_id7;
-	uint32_t queue_doorbell_id8;
-	uint32_t queue_doorbell_id9;
-	uint32_t queue_doorbell_id10;
-	uint32_t queue_doorbell_id11;
-	uint32_t queue_doorbell_id12;
-	uint32_t queue_doorbell_id13;
-	uint32_t queue_doorbell_id14;
-	uint32_t queue_doorbell_id15;
-};
-
 struct vi_ce_ib_state {
 	uint32_t ce_ib_completion_status;
 	uint32_t ce_constegnine_count;
drivers/gpu/drm/amd/lib/Kconfig (new file)
@@ -0,0 +1,27 @@
menu "AMD Library routines"

#
# Closed hash table
#
config CHASH
	tristate "Closed hash table"
	help
	  Statically sized closed hash table implementation with low
	  memory and CPU overhead.

config CHASH_STATS
	bool "Closed hash table performance statistics"
	depends on CHASH
	default n
	help
	  Enable collection of performance statistics for closed hash tables.

config CHASH_SELFTEST
	bool "Closed hash table self test"
	depends on CHASH
	default n
	help
	  Runs a selftest during module load. Several module parameters
	  are available to modify the behaviour of the test.

endmenu
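As a quick reference (not part of the patch), a .config fragment enabling
the library together with both debug options would look like:

	CONFIG_CHASH=y
	CONFIG_CHASH_STATS=y
	CONFIG_CHASH_SELFTEST=y

CHASH itself is tristate, so CONFIG_CHASH=m with the selftest enabled runs
the test at module load time instead of during boot.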
drivers/gpu/drm/amd/lib/Makefile (new file)
@@ -0,0 +1,11 @@
#
# Makefile for AMD library routines, which are used by AMD driver
# components.
#
# This is for common library routines that can be shared between AMD
# driver components or later moved to kernel/lib for sharing with
# other drivers.

ccflags-y := -I$(src)/../include

obj-$(CONFIG_CHASH) += chash.o
drivers/gpu/drm/amd/lib/chash.c (new file)
@@ -0,0 +1,638 @@
/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/types.h>
#include <linux/hash.h>
#include <linux/bug.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/sched/clock.h>
#include <asm/div64.h>
#include <linux/chash.h>

/**
 * chash_table_alloc - Allocate closed hash table
 * @table: Pointer to the table structure
 * @bits: Table size will be 2^bits entries
 * @key_size: Size of hash keys in bytes, 4 or 8
 * @value_size: Size of data values in bytes, can be 0
 */
int chash_table_alloc(struct chash_table *table, u8 bits, u8 key_size,
		      unsigned int value_size, gfp_t gfp_mask)
{
	if (bits > 31)
		return -EINVAL;

	if (key_size != 4 && key_size != 8)
		return -EINVAL;

	table->data = kcalloc(__CHASH_DATA_SIZE(bits, key_size, value_size),
			      sizeof(long), gfp_mask);
	if (!table->data)
		return -ENOMEM;

	__CHASH_TABLE_INIT(table->table, table->data,
			   bits, key_size, value_size);

	return 0;
}
EXPORT_SYMBOL(chash_table_alloc);

/**
 * chash_table_free - Free closed hash table
 * @table: Pointer to the table structure
 */
void chash_table_free(struct chash_table *table)
{
	kfree(table->data);
}
EXPORT_SYMBOL(chash_table_free);

#ifdef CONFIG_CHASH_STATS

#define DIV_FRAC(nom, denom, quot, frac, frac_digits) do {	\
		u64 __nom = (nom);				\
		u64 __denom = (denom);				\
		u64 __quot, __frac;				\
		u32 __rem;					\
								\
		while (__denom >> 32) {				\
			__nom >>= 1;				\
			__denom >>= 1;				\
		}						\
		__quot = __nom;					\
		__rem = do_div(__quot, __denom);		\
		__frac = __rem * (frac_digits) + (__denom >> 1); \
		do_div(__frac, __denom);			\
		(quot) = __quot;				\
		(frac) = __frac;				\
	} while (0)

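/*
 * Editor's sketch, not part of the original patch: DIV_FRAC performs a
 * fixed-point division, splitting nom/denom into an integer quotient and
 * a rounded fraction scaled by frac_digits.  For example, with nom = 7,
 * denom = 4 and frac_digits = 1000:
 *
 *   __quot = 7 / 4 = 1, __rem = 3
 *   __frac = (3 * 1000 + (4 >> 1)) / 4 = 3002 / 4 = 750
 *
 * so the pair prints as "1.750" with a "%llu.%03u" format, as in the
 * statistics dump below.
 */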
void __chash_table_dump_stats(struct __chash_table *table)
{
	struct chash_iter iter = CHASH_ITER_INIT(table, 0);
	u32 filled = 0, empty = 0, tombstones = 0;
	u64 quot1, quot2;
	u32 frac1, frac2;

	do {
		if (chash_iter_is_valid(iter))
			filled++;
		else if (chash_iter_is_empty(iter))
			empty++;
		else
			tombstones++;
		CHASH_ITER_INC(iter);
	} while (iter.slot);

	pr_debug("chash: key size %u, value size %u\n",
		 table->key_size, table->value_size);
	pr_debug("  Slots total/filled/empty/tombstones: %u / %u / %u / %u\n",
		 1 << table->bits, filled, empty, tombstones);
	if (table->hits > 0) {
		DIV_FRAC(table->hits_steps, table->hits, quot1, frac1, 1000);
		DIV_FRAC(table->hits * 1000, table->hits_time_ns,
			 quot2, frac2, 1000);
	} else {
		quot1 = quot2 = 0;
		frac1 = frac2 = 0;
	}
	pr_debug("  Hits   (avg.cost, rate): %llu (%llu.%03u, %llu.%03u M/s)\n",
		 table->hits, quot1, frac1, quot2, frac2);
	if (table->miss > 0) {
		DIV_FRAC(table->miss_steps, table->miss, quot1, frac1, 1000);
		DIV_FRAC(table->miss * 1000, table->miss_time_ns,
			 quot2, frac2, 1000);
	} else {
		quot1 = quot2 = 0;
		frac1 = frac2 = 0;
	}
	pr_debug("  Misses (avg.cost, rate): %llu (%llu.%03u, %llu.%03u M/s)\n",
		 table->miss, quot1, frac1, quot2, frac2);
	if (table->hits + table->miss > 0) {
		DIV_FRAC(table->hits_steps + table->miss_steps,
			 table->hits + table->miss, quot1, frac1, 1000);
		DIV_FRAC((table->hits + table->miss) * 1000,
			 (table->hits_time_ns + table->miss_time_ns),
			 quot2, frac2, 1000);
	} else {
		quot1 = quot2 = 0;
		frac1 = frac2 = 0;
	}
	pr_debug("  Total  (avg.cost, rate): %llu (%llu.%03u, %llu.%03u M/s)\n",
		 table->hits + table->miss, quot1, frac1, quot2, frac2);
	if (table->relocs > 0) {
		DIV_FRAC(table->hits + table->miss, table->relocs,
			 quot1, frac1, 1000);
		DIV_FRAC(table->reloc_dist, table->relocs, quot2, frac2, 1000);
		pr_debug("  Relocations (freq, avg.dist): %llu (1:%llu.%03u, %llu.%03u)\n",
			 table->relocs, quot1, frac1, quot2, frac2);
	} else {
		pr_debug("  No relocations\n");
	}
}
EXPORT_SYMBOL(__chash_table_dump_stats);

#undef DIV_FRAC
#endif

#define CHASH_INC(table, a) ((a) = ((a) + 1) & (table)->size_mask)
#define CHASH_ADD(table, a, b) (((a) + (b)) & (table)->size_mask)
#define CHASH_SUB(table, a, b) (((a) - (b)) & (table)->size_mask)
#define CHASH_IN_RANGE(table, slot, first, last) \
	(CHASH_SUB(table, slot, first) <= CHASH_SUB(table, last, first))

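/*
 * Editor's sketch, not part of the original patch: the macros above do
 * modular arithmetic on slot indices, which lets CHASH_IN_RANGE handle
 * ranges that wrap around the end of the table.  E.g. with size_mask =
 * 0xff, CHASH_IN_RANGE(table, 0x02, 0xfe, 0x05) is true because
 * CHASH_SUB(table, 0x02, 0xfe) = 4 <= CHASH_SUB(table, 0x05, 0xfe) = 7,
 * i.e. slot 0x02 lies inside the wrapped range [0xfe..0x05].
 */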
/*#define CHASH_DEBUG   Uncomment this to enable verbose debug output*/
#ifdef CHASH_DEBUG
static void chash_table_dump(struct __chash_table *table)
{
	struct chash_iter iter = CHASH_ITER_INIT(table, 0);

	do {
		if ((iter.slot & 3) == 0)
			pr_debug("%04x: ", iter.slot);

		if (chash_iter_is_valid(iter))
			pr_debug("[%016llx] ", chash_iter_key(iter));
		else if (chash_iter_is_empty(iter))
			pr_debug("[    <empty>     ] ");
		else
			pr_debug("[  <tombstone>   ] ");

		if ((iter.slot & 3) == 3)
			pr_debug("\n");

		CHASH_ITER_INC(iter);
	} while (iter.slot);

	if ((iter.slot & 3) != 0)
		pr_debug("\n");
}

static int chash_table_check(struct __chash_table *table)
{
	u32 hash;
	struct chash_iter iter = CHASH_ITER_INIT(table, 0);
	struct chash_iter cur = CHASH_ITER_INIT(table, 0);

	do {
		if (!chash_iter_is_valid(iter)) {
			CHASH_ITER_INC(iter);
			continue;
		}

		hash = chash_iter_hash(iter);
		CHASH_ITER_SET(cur, hash);
		while (cur.slot != iter.slot) {
			if (chash_iter_is_empty(cur)) {
				pr_err("Path to element at %x with hash %x broken at slot %x\n",
				       iter.slot, hash, cur.slot);
				chash_table_dump(table);
				return -EINVAL;
			}
			CHASH_ITER_INC(cur);
		}

		CHASH_ITER_INC(iter);
	} while (iter.slot);

	return 0;
}
#endif

static void chash_iter_relocate(struct chash_iter dst, struct chash_iter src)
{
	BUG_ON(src.table == dst.table && src.slot == dst.slot);
	BUG_ON(src.table->key_size != dst.table->key_size);
	BUG_ON(src.table->value_size != dst.table->value_size);

	if (dst.table->key_size == 4)
		dst.table->keys32[dst.slot] = src.table->keys32[src.slot];
	else
		dst.table->keys64[dst.slot] = src.table->keys64[src.slot];

	if (dst.table->value_size)
		memcpy(chash_iter_value(dst), chash_iter_value(src),
		       dst.table->value_size);

	chash_iter_set_valid(dst);
	chash_iter_set_invalid(src);

#ifdef CONFIG_CHASH_STATS
	if (src.table == dst.table) {
		dst.table->relocs++;
		dst.table->reloc_dist +=
			CHASH_SUB(dst.table, src.slot, dst.slot);
	}
#endif
}

/**
 * __chash_table_find - Helper for looking up a hash table entry
 * @iter: Pointer to hash table iterator
 * @key: Key of the entry to find
 * @for_removal: set to true if the element will be removed soon
 *
 * Searches for an entry in the hash table with a given key. iter must
 * be initialized by the caller to point to the home position of the
 * hypothetical entry, i.e. it must be initialized with the hash table
 * and the key's hash as the initial slot for the search.
 *
 * This function also does some local clean-up to speed up future
 * look-ups by relocating entries to better slots and removing
 * tombstones that are no longer needed.
 *
 * If @for_removal is true, the function avoids relocating the entry
 * that is being returned.
 *
 * Returns 0 if the search is successful. In this case iter is updated
 * to point to the found entry. Otherwise %-EINVAL is returned and the
 * iter is updated to point to the first available slot for the given
 * key. If the table is full, the slot is set to -1.
 */
static int chash_table_find(struct chash_iter *iter, u64 key,
			    bool for_removal)
{
#ifdef CONFIG_CHASH_STATS
	u64 ts1 = local_clock();
#endif
	u32 hash = iter->slot;
	struct chash_iter first_redundant = CHASH_ITER_INIT(iter->table, -1);
	int first_avail = (for_removal ? -2 : -1);

	while (!chash_iter_is_valid(*iter) || chash_iter_key(*iter) != key) {
		if (chash_iter_is_empty(*iter)) {
			/* Found an empty slot, which ends the
			 * search. Clean up any preceding tombstones
			 * that are no longer needed because they lead
			 * to no-where
			 */
			if ((int)first_redundant.slot < 0)
				goto not_found;
			while (first_redundant.slot != iter->slot) {
				if (!chash_iter_is_valid(first_redundant))
					chash_iter_set_empty(first_redundant);
				CHASH_ITER_INC(first_redundant);
			}
#ifdef CHASH_DEBUG
			chash_table_check(iter->table);
#endif
			goto not_found;
		} else if (!chash_iter_is_valid(*iter)) {
			/* Found a tombstone. Remember it as candidate
			 * for relocating the entry we're looking for
			 * or for adding a new entry with the given key
			 */
			if (first_avail == -1)
				first_avail = iter->slot;
			/* Or mark it as the start of a series of
			 * potentially redundant tombstones
			 */
			else if (first_redundant.slot == -1)
				CHASH_ITER_SET(first_redundant, iter->slot);
		} else if (first_redundant.slot >= 0) {
			/* Found a valid, occupied slot with a
			 * preceding series of tombstones. Relocate it
			 * to a better position that no longer depends
			 * on those tombstones
			 */
			u32 cur_hash = chash_iter_hash(*iter);

			if (!CHASH_IN_RANGE(iter->table, cur_hash,
					    first_redundant.slot + 1,
					    iter->slot)) {
				/* This entry has a hash at or before
				 * the first tombstone we found. We
				 * can relocate it to that tombstone
				 * and advance to the next tombstone
				 */
				chash_iter_relocate(first_redundant, *iter);
				do {
					CHASH_ITER_INC(first_redundant);
				} while (chash_iter_is_valid(first_redundant));
			} else if (cur_hash != iter->slot) {
				/* Relocate entry to its home position
				 * or as close as possible so it no
				 * longer depends on any preceding
				 * tombstones
				 */
				struct chash_iter new_iter =
					CHASH_ITER_INIT(iter->table, cur_hash);

				while (new_iter.slot != iter->slot &&
				       chash_iter_is_valid(new_iter))
					CHASH_ITER_INC(new_iter);

				if (new_iter.slot != iter->slot)
					chash_iter_relocate(new_iter, *iter);
			}
		}

		CHASH_ITER_INC(*iter);
		if (iter->slot == hash) {
			iter->slot = -1;
			goto not_found;
		}
	}

#ifdef CONFIG_CHASH_STATS
	iter->table->hits++;
	iter->table->hits_steps += CHASH_SUB(iter->table, iter->slot, hash) + 1;
#endif

	if (first_avail >= 0) {
		CHASH_ITER_SET(first_redundant, first_avail);
		chash_iter_relocate(first_redundant, *iter);
		iter->slot = first_redundant.slot;
		iter->mask = first_redundant.mask;
	}

#ifdef CONFIG_CHASH_STATS
	iter->table->hits_time_ns += local_clock() - ts1;
#endif

	return 0;

not_found:
#ifdef CONFIG_CHASH_STATS
	iter->table->miss++;
	iter->table->miss_steps += (iter->slot < 0) ?
		(1 << iter->table->bits) :
		CHASH_SUB(iter->table, iter->slot, hash) + 1;
#endif

	if (first_avail >= 0)
		CHASH_ITER_SET(*iter, first_avail);

#ifdef CONFIG_CHASH_STATS
	iter->table->miss_time_ns += local_clock() - ts1;
#endif

	return -EINVAL;
}

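/*
 * Editor's sketch, not part of the original patch: a worked example of the
 * look-up above.  Assume a 4-slot table where key B hashes to slot 1 and
 * slot 1 holds a tombstone left by a previously removed entry:
 *
 *   slot:   0       1           2   3
 *   state:  empty   tombstone   B   empty
 *
 * Searching for B starts at its home slot 1, remembers the tombstone as
 * first_avail, finds B at slot 2 and, because first_avail >= 0, relocates
 * B into slot 1 on the way out.  The next look-up of B then succeeds in a
 * single probe step.
 */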
int __chash_table_copy_in(struct __chash_table *table, u64 key,
			  const void *value)
{
	u32 hash = (table->key_size == 4) ?
		hash_32(key, table->bits) : hash_64(key, table->bits);
	struct chash_iter iter = CHASH_ITER_INIT(table, hash);
	int r = chash_table_find(&iter, key, false);

	/* Found an existing entry */
	if (!r) {
		if (value && table->value_size)
			memcpy(chash_iter_value(iter), value,
			       table->value_size);
		return 1;
	}

	/* Is there a place to add a new entry? */
	if (iter.slot < 0) {
		pr_err("Hash table overflow\n");
		return -ENOMEM;
	}

	chash_iter_set_valid(iter);

	if (table->key_size == 4)
		table->keys32[iter.slot] = key;
	else
		table->keys64[iter.slot] = key;
	if (value && table->value_size)
		memcpy(chash_iter_value(iter), value, table->value_size);

	return 0;
}
EXPORT_SYMBOL(__chash_table_copy_in);

int __chash_table_copy_out(struct __chash_table *table, u64 key,
			   void *value, bool remove)
{
	u32 hash = (table->key_size == 4) ?
		hash_32(key, table->bits) : hash_64(key, table->bits);
	struct chash_iter iter = CHASH_ITER_INIT(table, hash);
	int r = chash_table_find(&iter, key, remove);

	if (r < 0)
		return r;

	if (value && table->value_size)
		memcpy(value, chash_iter_value(iter), table->value_size);

	if (remove)
		chash_iter_set_invalid(iter);

	return iter.slot;
}
EXPORT_SYMBOL(__chash_table_copy_out);

#ifdef CONFIG_CHASH_SELFTEST
/**
 * chash_self_test - Run a self-test of the hash table implementation
 * @bits: Table size will be 2^bits entries
 * @key_size: Size of hash keys in bytes, 4 or 8
 * @min_fill: Minimum fill level during the test
 * @max_fill: Maximum fill level during the test
 * @iterations: Number of test iterations
 *
 * The test adds and removes entries from a hash table, cycling the
 * fill level between min_fill and max_fill entries. Also tests lookup
 * and value retrieval.
 */
static int __init chash_self_test(u8 bits, u8 key_size,
				  int min_fill, int max_fill,
				  u64 iterations)
{
	struct chash_table table;
	int ret;
	u64 add_count, rmv_count;
	u64 value;

	if (key_size == 4 && iterations > 0xffffffff)
		return -EINVAL;
	if (min_fill >= max_fill)
		return -EINVAL;

	ret = chash_table_alloc(&table, bits, key_size, sizeof(u64),
				GFP_KERNEL);
	if (ret) {
		pr_err("chash_table_alloc failed: %d\n", ret);
		return ret;
	}

	for (add_count = 0, rmv_count = 0; add_count < iterations;
	     add_count++) {
		/* When we hit the max_fill level, remove entries down
		 * to min_fill
		 */
		if (add_count - rmv_count == max_fill) {
			u64 find_count = rmv_count;

			/* First try to find all entries that we're
			 * about to remove, confirm their value, test
			 * writing them back a second time.
			 */
			for (; add_count - find_count > min_fill;
			     find_count++) {
				ret = chash_table_copy_out(&table, find_count,
							   &value);
				if (ret < 0) {
					pr_err("chash_table_copy_out failed: %d\n",
					       ret);
					goto out;
				}
				if (value != ~find_count) {
					pr_err("Wrong value retrieved for key 0x%llx, expected 0x%llx got 0x%llx\n",
					       find_count, ~find_count, value);
#ifdef CHASH_DEBUG
					chash_table_dump(&table.table);
#endif
					ret = -EFAULT;
					goto out;
				}
				ret = chash_table_copy_in(&table, find_count,
							  &value);
				if (ret != 1) {
					pr_err("copy_in second time returned %d, expected 1\n",
					       ret);
					ret = -EFAULT;
					goto out;
				}
			}
			/* Remove them until we hit min_fill level */
			for (; add_count - rmv_count > min_fill; rmv_count++) {
				ret = chash_table_remove(&table, rmv_count,
							 NULL);
				if (ret < 0) {
					pr_err("chash_table_remove failed: %d\n",
					       ret);
					goto out;
				}
			}
		}

		/* Add a new value */
		value = ~add_count;
		ret = chash_table_copy_in(&table, add_count, &value);
		if (ret != 0) {
			pr_err("copy_in first time returned %d, expected 0\n",
			       ret);
			ret = -EFAULT;
			goto out;
		}
	}

	chash_table_dump_stats(&table);
	chash_table_reset_stats(&table);

out:
	chash_table_free(&table);
	return ret;
}

static unsigned int chash_test_bits = 10;
MODULE_PARM_DESC(test_bits,
		 "Selftest number of hash bits ([4..20], default=10)");
module_param_named(test_bits, chash_test_bits, uint, 0444);

static unsigned int chash_test_keysize = 8;
MODULE_PARM_DESC(test_keysize, "Selftest keysize (4 or 8, default=8)");
module_param_named(test_keysize, chash_test_keysize, uint, 0444);

static unsigned int chash_test_minfill;
MODULE_PARM_DESC(test_minfill, "Selftest minimum #entries (default=50%)");
module_param_named(test_minfill, chash_test_minfill, uint, 0444);

static unsigned int chash_test_maxfill;
MODULE_PARM_DESC(test_maxfill, "Selftest maximum #entries (default=80%)");
module_param_named(test_maxfill, chash_test_maxfill, uint, 0444);

static unsigned long chash_test_iters;
MODULE_PARM_DESC(test_iters, "Selftest iterations (default=1000 x #entries)");
module_param_named(test_iters, chash_test_iters, ulong, 0444);

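/*
 * Editor's note, not part of the original patch: with CONFIG_CHASH=m and
 * CONFIG_CHASH_SELFTEST=y, the parameters above can be set at load time,
 * e.g. (hypothetical invocation):
 *
 *   modprobe chash test_bits=12 test_keysize=4 test_iters=4096000
 *
 * which exercises a 2^12-slot table with 4-byte keys for 4096000
 * iterations.
 */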
static int __init chash_init(void)
{
	int ret;
	u64 ts1_ns;

	/* Skip self test on user errors */
	if (chash_test_bits < 4 || chash_test_bits > 20) {
		pr_err("chash: test_bits out of range [4..20].\n");
		return 0;
	}
	if (chash_test_keysize != 4 && chash_test_keysize != 8) {
		pr_err("chash: test_keysize invalid. Must be 4 or 8.\n");
		return 0;
	}

	if (!chash_test_minfill)
		chash_test_minfill = (1 << chash_test_bits) / 2;
	if (!chash_test_maxfill)
		chash_test_maxfill = (1 << chash_test_bits) * 4 / 5;
	if (!chash_test_iters)
		chash_test_iters = (1 << chash_test_bits) * 1000;

	if (chash_test_minfill >= (1 << chash_test_bits)) {
		pr_err("chash: test_minfill too big. Must be < table size.\n");
		return 0;
	}
	if (chash_test_maxfill >= (1 << chash_test_bits)) {
		pr_err("chash: test_maxfill too big. Must be < table size.\n");
		return 0;
	}
	if (chash_test_minfill >= chash_test_maxfill) {
		pr_err("chash: test_minfill must be < test_maxfill.\n");
		return 0;
	}
	if (chash_test_keysize == 4 && chash_test_iters > 0xffffffff) {
		pr_err("chash: test_iters must be < 4G for 4 byte keys.\n");
		return 0;
	}

	ts1_ns = local_clock();
	ret = chash_self_test(chash_test_bits, chash_test_keysize,
			      chash_test_minfill, chash_test_maxfill,
			      chash_test_iters);
	if (!ret) {
		u64 ts_delta_us = local_clock() - ts1_ns;
		u64 iters_per_second = (u64)chash_test_iters * 1000000;

		do_div(ts_delta_us, 1000);
		do_div(iters_per_second, ts_delta_us);
		pr_info("chash: self test took %llu us, %llu iterations/s\n",
			ts_delta_us, iters_per_second);
	} else {
		pr_err("chash: self test failed: %d\n", ret);
	}

	return ret;
}

module_init(chash_init);

#endif /* CONFIG_CHASH_SELFTEST */

MODULE_DESCRIPTION("Closed hash table");
MODULE_LICENSE("GPL and additional rights");
@@ -4,12 +4,11 @@ subdir-ccflags-y += \
 		-I$(FULL_AMD_PATH)/include/asic_reg \
 		-I$(FULL_AMD_PATH)/include \
 		-I$(FULL_AMD_PATH)/powerplay/smumgr\
-		-I$(FULL_AMD_PATH)/powerplay/hwmgr \
-		-I$(FULL_AMD_PATH)/powerplay/eventmgr
+		-I$(FULL_AMD_PATH)/powerplay/hwmgr

 AMD_PP_PATH = ../powerplay

-PP_LIBS = smumgr hwmgr eventmgr
+PP_LIBS = smumgr hwmgr

 AMD_POWERPLAY = $(addsuffix /Makefile,$(addprefix $(FULL_AMD_PATH)/powerplay/,$(PP_LIBS)))

@@ -29,22 +29,19 @@
 #include "amd_powerplay.h"
 #include "pp_instance.h"
 #include "power_state.h"
-#include "eventmanager.h"


 static inline int pp_check(struct pp_instance *handle)
 {
 	if (handle == NULL || handle->pp_valid != PP_VALID)
 		return -EINVAL;

-	if (handle->smu_mgr == NULL || handle->smu_mgr->smumgr_funcs == NULL)
+	if (handle->hwmgr == NULL || handle->hwmgr->smumgr_funcs == NULL)
 		return -EINVAL;

 	if (handle->pm_en == 0)
 		return PP_DPM_DISABLED;

-	if (handle->hwmgr == NULL || handle->hwmgr->hwmgr_func == NULL
-			|| handle->eventmgr == NULL)
+	if (handle->hwmgr->hwmgr_func == NULL)
 		return PP_DPM_DISABLED;

 	return 0;
@@ -55,46 +52,32 @@ static int pp_early_init(void *handle)
 	int ret;
 	struct pp_instance *pp_handle = (struct pp_instance *)handle;

-	ret = smum_early_init(pp_handle);
+	ret = hwmgr_early_init(pp_handle);
 	if (ret)
-		return ret;
+		return -EINVAL;

 	if ((pp_handle->pm_en == 0)
 		|| cgs_is_virtualization_enabled(pp_handle->device))
 		return PP_DPM_DISABLED;

-	ret = hwmgr_early_init(pp_handle);
-	if (ret) {
-		pp_handle->pm_en = 0;
-		return PP_DPM_DISABLED;
-	}
-
-	ret = eventmgr_early_init(pp_handle);
-	if (ret) {
-		kfree(pp_handle->hwmgr);
-		pp_handle->hwmgr = NULL;
-		pp_handle->pm_en = 0;
-		return PP_DPM_DISABLED;
-	}
-
 	return 0;
 }

 static int pp_sw_init(void *handle)
 {
-	struct pp_smumgr *smumgr;
+	struct pp_hwmgr *hwmgr;
 	int ret = 0;
 	struct pp_instance *pp_handle = (struct pp_instance *)handle;

 	ret = pp_check(pp_handle);

 	if (ret == 0 || ret == PP_DPM_DISABLED) {
-		smumgr = pp_handle->smu_mgr;
+		hwmgr = pp_handle->hwmgr;

-		if (smumgr->smumgr_funcs->smu_init == NULL)
+		if (hwmgr->smumgr_funcs->smu_init == NULL)
 			return -EINVAL;

-		ret = smumgr->smumgr_funcs->smu_init(smumgr);
+		ret = hwmgr->smumgr_funcs->smu_init(hwmgr);

 		pr_info("amdgpu: powerplay sw initialized\n");
 	}
@@ -103,40 +86,39 @@ static int pp_sw_init(void *handle)

 static int pp_sw_fini(void *handle)
 {
-	struct pp_smumgr *smumgr;
+	struct pp_hwmgr *hwmgr;
 	int ret = 0;
 	struct pp_instance *pp_handle = (struct pp_instance *)handle;

 	ret = pp_check(pp_handle);
 	if (ret == 0 || ret == PP_DPM_DISABLED) {
-		smumgr = pp_handle->smu_mgr;
+		hwmgr = pp_handle->hwmgr;

-		if (smumgr->smumgr_funcs->smu_fini == NULL)
+		if (hwmgr->smumgr_funcs->smu_fini == NULL)
 			return -EINVAL;

-		ret = smumgr->smumgr_funcs->smu_fini(smumgr);
+		ret = hwmgr->smumgr_funcs->smu_fini(pp_handle->hwmgr);
 	}
 	return ret;
 }

 static int pp_hw_init(void *handle)
 {
-	struct pp_smumgr *smumgr;
-	struct pp_eventmgr *eventmgr;
 	int ret = 0;
 	struct pp_instance *pp_handle = (struct pp_instance *)handle;
+	struct pp_hwmgr *hwmgr;

 	ret = pp_check(pp_handle);

 	if (ret == 0 || ret == PP_DPM_DISABLED) {
-		smumgr = pp_handle->smu_mgr;
+		hwmgr = pp_handle->hwmgr;

-		if (smumgr->smumgr_funcs->start_smu == NULL)
+		if (hwmgr->smumgr_funcs->start_smu == NULL)
 			return -EINVAL;

-		if(smumgr->smumgr_funcs->start_smu(smumgr)) {
+		if(hwmgr->smumgr_funcs->start_smu(pp_handle->hwmgr)) {
 			pr_err("smc start failed\n");
-			smumgr->smumgr_funcs->smu_fini(smumgr);
+			hwmgr->smumgr_funcs->smu_fini(pp_handle->hwmgr);
 			return -EINVAL;;
 		}
 		if (ret == PP_DPM_DISABLED)
@@ -146,38 +128,21 @@ static int pp_hw_init(void *handle)
 	ret = hwmgr_hw_init(pp_handle);
 	if (ret)
 		goto err;
-
-	eventmgr = pp_handle->eventmgr;
-	if (eventmgr->pp_eventmgr_init == NULL ||
-		eventmgr->pp_eventmgr_init(eventmgr))
-		goto err;
-
 	return 0;
 err:
 	pp_handle->pm_en = 0;
-	kfree(pp_handle->eventmgr);
-	kfree(pp_handle->hwmgr);
-	pp_handle->hwmgr = NULL;
-	pp_handle->eventmgr = NULL;
 	return PP_DPM_DISABLED;
 }

 static int pp_hw_fini(void *handle)
 {
-	struct pp_eventmgr *eventmgr;
 	struct pp_instance *pp_handle = (struct pp_instance *)handle;
 	int ret = 0;

 	ret = pp_check(pp_handle);

-	if (ret == 0) {
-		eventmgr = pp_handle->eventmgr;
-
-		if (eventmgr->pp_eventmgr_fini != NULL)
-			eventmgr->pp_eventmgr_fini(eventmgr);
-
+	if (ret == 0)
 		hwmgr_hw_fini(pp_handle);
-	}
+
 	return 0;
 }
|
|||||||
|
|
||||||
static int pp_suspend(void *handle)
|
static int pp_suspend(void *handle)
|
||||||
{
|
{
|
||||||
struct pp_eventmgr *eventmgr;
|
|
||||||
struct pem_event_data event_data = { {0} };
|
|
||||||
struct pp_instance *pp_handle = (struct pp_instance *)handle;
|
struct pp_instance *pp_handle = (struct pp_instance *)handle;
|
||||||
int ret = 0;
|
int ret = 0;
|
||||||
|
|
||||||
@ -256,17 +219,12 @@ static int pp_suspend(void *handle)
|
|||||||
else if (ret != 0)
|
else if (ret != 0)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
eventmgr = pp_handle->eventmgr;
|
return hwmgr_hw_suspend(pp_handle);
|
||||||
pem_handle_event(eventmgr, AMD_PP_EVENT_SUSPEND, &event_data);
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static int pp_resume(void *handle)
|
static int pp_resume(void *handle)
|
||||||
{
|
{
|
||||||
struct pp_eventmgr *eventmgr;
|
struct pp_hwmgr *hwmgr;
|
||||||
struct pem_event_data event_data = { {0} };
|
|
||||||
struct pp_smumgr *smumgr;
|
|
||||||
int ret, ret1;
|
int ret, ret1;
|
||||||
struct pp_instance *pp_handle = (struct pp_instance *)handle;
|
struct pp_instance *pp_handle = (struct pp_instance *)handle;
|
||||||
|
|
||||||
@ -275,26 +233,22 @@ static int pp_resume(void *handle)
|
|||||||
if (ret1 != 0 && ret1 != PP_DPM_DISABLED)
|
if (ret1 != 0 && ret1 != PP_DPM_DISABLED)
|
||||||
return ret1;
|
return ret1;
|
||||||
|
|
||||||
smumgr = pp_handle->smu_mgr;
|
hwmgr = pp_handle->hwmgr;
|
||||||
|
|
||||||
if (smumgr->smumgr_funcs->start_smu == NULL)
|
if (hwmgr->smumgr_funcs->start_smu == NULL)
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
|
||||||
ret = smumgr->smumgr_funcs->start_smu(smumgr);
|
ret = hwmgr->smumgr_funcs->start_smu(pp_handle->hwmgr);
|
||||||
if (ret) {
|
if (ret) {
|
||||||
pr_err("smc start failed\n");
|
pr_err("smc start failed\n");
|
||||||
smumgr->smumgr_funcs->smu_fini(smumgr);
|
hwmgr->smumgr_funcs->smu_fini(pp_handle->hwmgr);
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (ret1 == PP_DPM_DISABLED)
|
if (ret1 == PP_DPM_DISABLED)
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
eventmgr = pp_handle->eventmgr;
|
return hwmgr_hw_resume(pp_handle);
|
||||||
|
|
||||||
pem_handle_event(eventmgr, AMD_PP_EVENT_RESUME, &event_data);
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
const struct amd_ip_funcs pp_ip_funcs = {
|
const struct amd_ip_funcs pp_ip_funcs = {
|
||||||
@ -324,6 +278,42 @@ static int pp_dpm_fw_loading_complete(void *handle)
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static void pp_dpm_en_umd_pstate(struct pp_hwmgr *hwmgr,
|
||||||
|
enum amd_dpm_forced_level *level)
|
||||||
|
{
|
||||||
|
uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
|
||||||
|
AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
|
||||||
|
AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
|
||||||
|
AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
|
||||||
|
|
||||||
|
if (!(hwmgr->dpm_level & profile_mode_mask)) {
|
||||||
|
/* enter umd pstate, save current level, disable gfx cg*/
|
||||||
|
if (*level & profile_mode_mask) {
|
||||||
|
hwmgr->saved_dpm_level = hwmgr->dpm_level;
|
||||||
|
hwmgr->en_umd_pstate = true;
|
||||||
|
cgs_set_clockgating_state(hwmgr->device,
|
||||||
|
AMD_IP_BLOCK_TYPE_GFX,
|
||||||
|
AMD_CG_STATE_UNGATE);
|
||||||
|
cgs_set_powergating_state(hwmgr->device,
|
||||||
|
AMD_IP_BLOCK_TYPE_GFX,
|
||||||
|
AMD_PG_STATE_UNGATE);
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
/* exit umd pstate, restore level, enable gfx cg*/
|
||||||
|
if (!(*level & profile_mode_mask)) {
|
||||||
|
if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
|
||||||
|
*level = hwmgr->saved_dpm_level;
|
||||||
|
hwmgr->en_umd_pstate = false;
|
||||||
|
cgs_set_clockgating_state(hwmgr->device,
|
||||||
|
AMD_IP_BLOCK_TYPE_GFX,
|
||||||
|
AMD_CG_STATE_GATE);
|
||||||
|
cgs_set_powergating_state(hwmgr->device,
|
||||||
|
AMD_IP_BLOCK_TYPE_GFX,
|
||||||
|
AMD_PG_STATE_GATE);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
static int pp_dpm_force_performance_level(void *handle,
|
static int pp_dpm_force_performance_level(void *handle,
|
||||||
enum amd_dpm_forced_level level)
|
enum amd_dpm_forced_level level)
|
||||||
{
|
{
|
||||||
@ -338,13 +328,22 @@ static int pp_dpm_force_performance_level(void *handle,
|
|||||||
|
|
||||||
hwmgr = pp_handle->hwmgr;
|
hwmgr = pp_handle->hwmgr;
|
||||||
|
|
||||||
|
if (level == hwmgr->dpm_level)
|
||||||
|
return 0;
|
||||||
|
|
||||||
if (hwmgr->hwmgr_func->force_dpm_level == NULL) {
|
if (hwmgr->hwmgr_func->force_dpm_level == NULL) {
|
||||||
pr_info("%s was not implemented.\n", __func__);
|
pr_info("%s was not implemented.\n", __func__);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
mutex_lock(&pp_handle->pp_lock);
|
mutex_lock(&pp_handle->pp_lock);
|
||||||
hwmgr->hwmgr_func->force_dpm_level(hwmgr, level);
|
pp_dpm_en_umd_pstate(hwmgr, &level);
|
||||||
|
hwmgr->request_dpm_level = level;
|
||||||
|
hwmgr_handle_task(pp_handle, AMD_PP_TASK_READJUST_POWER_STATE, NULL, NULL);
|
||||||
|
ret = hwmgr->hwmgr_func->force_dpm_level(hwmgr, level);
|
||||||
|
if (!ret)
|
||||||
|
hwmgr->dpm_level = hwmgr->request_dpm_level;
|
||||||
|
|
||||||
mutex_unlock(&pp_handle->pp_lock);
|
mutex_unlock(&pp_handle->pp_lock);
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
@ -369,11 +368,12 @@ static enum amd_dpm_forced_level pp_dpm_get_performance_level(
|
|||||||
return level;
|
return level;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int pp_dpm_get_sclk(void *handle, bool low)
|
static uint32_t pp_dpm_get_sclk(void *handle, bool low)
|
||||||
{
|
{
|
||||||
struct pp_hwmgr *hwmgr;
|
struct pp_hwmgr *hwmgr;
|
||||||
struct pp_instance *pp_handle = (struct pp_instance *)handle;
|
struct pp_instance *pp_handle = (struct pp_instance *)handle;
|
||||||
int ret = 0;
|
int ret = 0;
|
||||||
|
uint32_t clk = 0;
|
||||||
|
|
||||||
ret = pp_check(pp_handle);
|
ret = pp_check(pp_handle);
|
||||||
|
|
||||||
@ -387,16 +387,17 @@ static int pp_dpm_get_sclk(void *handle, bool low)
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
mutex_lock(&pp_handle->pp_lock);
|
mutex_lock(&pp_handle->pp_lock);
|
||||||
ret = hwmgr->hwmgr_func->get_sclk(hwmgr, low);
|
clk = hwmgr->hwmgr_func->get_sclk(hwmgr, low);
|
||||||
mutex_unlock(&pp_handle->pp_lock);
|
mutex_unlock(&pp_handle->pp_lock);
|
||||||
return ret;
|
return clk;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int pp_dpm_get_mclk(void *handle, bool low)
|
static uint32_t pp_dpm_get_mclk(void *handle, bool low)
|
||||||
{
|
{
|
||||||
struct pp_hwmgr *hwmgr;
|
struct pp_hwmgr *hwmgr;
|
||||||
struct pp_instance *pp_handle = (struct pp_instance *)handle;
|
struct pp_instance *pp_handle = (struct pp_instance *)handle;
|
||||||
int ret = 0;
|
int ret = 0;
|
||||||
|
uint32_t clk = 0;
|
||||||
|
|
||||||
ret = pp_check(pp_handle);
|
ret = pp_check(pp_handle);
|
||||||
|
|
||||||
@@ -410,12 +411,12 @@ static int pp_dpm_get_mclk(void *handle, bool low)
 		return 0;
 	}
 	mutex_lock(&pp_handle->pp_lock);
-	ret = hwmgr->hwmgr_func->get_mclk(hwmgr, low);
+	clk = hwmgr->hwmgr_func->get_mclk(hwmgr, low);
 	mutex_unlock(&pp_handle->pp_lock);
-	return ret;
+	return clk;
 }
 
-static int pp_dpm_powergate_vce(void *handle, bool gate)
+static void pp_dpm_powergate_vce(void *handle, bool gate)
 {
 	struct pp_hwmgr *hwmgr;
 	struct pp_instance *pp_handle = (struct pp_instance *)handle;
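The get_sclk/get_mclk wrappers above switch from int to uint32_t. With the old signature the single `ret` variable carried both pp_check()'s error code and the clock reading, so the two meanings shared one channel; the new shape keeps a dedicated `clk` and lets every failure read as zero. A small sketch of the corrected shape, with stub functions standing in for pp_check() and the hwmgr callback:

    #include <stdint.h>
    #include <stdio.h>

    /* pp_check_stub() and hw_get_sclk() are hypothetical stand-ins. */
    static int pp_check_stub(void *handle) { return handle ? 0 : -22; }
    static uint32_t hw_get_sclk(int low) { return low ? 300000u : 1600000u; }

    static uint32_t dpm_get_sclk(void *handle, int low)
    {
        uint32_t clk = 0;

        if (pp_check_stub(handle) != 0)
            return 0;               /* failure reads as "no clock" */

        clk = hw_get_sclk(low);     /* never mixed with an errno value */
        return clk;
    }

    int main(void)
    {
        printf("low sclk: %u kHz\n", dpm_get_sclk((void *)1, 1));
        return 0;
    }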
@@ -424,21 +425,20 @@ static int pp_dpm_powergate_vce(void *handle, bool gate)
 	ret = pp_check(pp_handle);
 
 	if (ret != 0)
-		return ret;
+		return;
 
 	hwmgr = pp_handle->hwmgr;
 
 	if (hwmgr->hwmgr_func->powergate_vce == NULL) {
 		pr_info("%s was not implemented.\n", __func__);
-		return 0;
+		return;
 	}
 	mutex_lock(&pp_handle->pp_lock);
-	ret = hwmgr->hwmgr_func->powergate_vce(hwmgr, gate);
+	hwmgr->hwmgr_func->powergate_vce(hwmgr, gate);
 	mutex_unlock(&pp_handle->pp_lock);
-	return ret;
 }
 
-static int pp_dpm_powergate_uvd(void *handle, bool gate)
+static void pp_dpm_powergate_uvd(void *handle, bool gate)
 {
 	struct pp_hwmgr *hwmgr;
 	struct pp_instance *pp_handle = (struct pp_instance *)handle;
@@ -447,74 +447,34 @@ static int pp_dpm_powergate_uvd(void *handle, bool gate)
 	ret = pp_check(pp_handle);
 
 	if (ret != 0)
-		return ret;
+		return;
 
 	hwmgr = pp_handle->hwmgr;
 
 	if (hwmgr->hwmgr_func->powergate_uvd == NULL) {
 		pr_info("%s was not implemented.\n", __func__);
-		return 0;
+		return;
 	}
 	mutex_lock(&pp_handle->pp_lock);
-	ret = hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate);
+	hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate);
 	mutex_unlock(&pp_handle->pp_lock);
-	return ret;
 }
 
-static enum PP_StateUILabel power_state_convert(enum amd_pm_state_type state)
-{
-	switch (state) {
-	case POWER_STATE_TYPE_BATTERY:
-		return PP_StateUILabel_Battery;
-	case POWER_STATE_TYPE_BALANCED:
-		return PP_StateUILabel_Balanced;
-	case POWER_STATE_TYPE_PERFORMANCE:
-		return PP_StateUILabel_Performance;
-	default:
-		return PP_StateUILabel_None;
-	}
-}
-
-static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_event event_id,
+static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
 		void *input, void *output)
 {
 	int ret = 0;
-	struct pem_event_data data = { {0} };
 	struct pp_instance *pp_handle = (struct pp_instance *)handle;
 
 	ret = pp_check(pp_handle);
 
 	if (ret != 0)
 		return ret;
 
 	mutex_lock(&pp_handle->pp_lock);
-	switch (event_id) {
-	case AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE:
-		ret = pem_handle_event(pp_handle->eventmgr, event_id, &data);
-		break;
-	case AMD_PP_EVENT_ENABLE_USER_STATE:
-	{
-		enum amd_pm_state_type ps;
-
-		if (input == NULL) {
-			ret = -EINVAL;
-			break;
-		}
-		ps = *(unsigned long *)input;
-
-		data.requested_ui_label = power_state_convert(ps);
-		ret = pem_handle_event(pp_handle->eventmgr, event_id, &data);
-		break;
-	}
-	case AMD_PP_EVENT_COMPLETE_INIT:
-		ret = pem_handle_event(pp_handle->eventmgr, event_id, &data);
-		break;
-	case AMD_PP_EVENT_READJUST_POWER_STATE:
-		ret = pem_handle_event(pp_handle->eventmgr, event_id, &data);
-		break;
-	default:
-		break;
-	}
+	ret = hwmgr_handle_task(pp_handle, task_id, input, output);
 	mutex_unlock(&pp_handle->pp_lock);
 
 	return ret;
 }
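This is the core of the eventmgr removal: the per-event switch that forwarded everything to pem_handle_event() collapses into a single hwmgr_handle_task() call, so dispatch policy lives in one place. The sketch below shows the general table-driven shape such a dispatcher can take; the task table and function names are illustrative, not the actual hwmgr_handle_task() implementation.

    #include <stdio.h>

    enum pp_task { TASK_READJUST_POWER_STATE, TASK_COMPLETE_INIT, TASK_MAX };

    typedef int (*task_fn)(void *input, void *output);

    static int readjust_power_state(void *in, void *out) { (void)in; (void)out; return 0; }
    static int complete_init(void *in, void *out)        { (void)in; (void)out; return 0; }

    /* One table instead of a switch: adding a task means adding a row. */
    static const task_fn task_table[TASK_MAX] = {
        [TASK_READJUST_POWER_STATE] = readjust_power_state,
        [TASK_COMPLETE_INIT]        = complete_init,
    };

    static int handle_task(enum pp_task id, void *input, void *output)
    {
        if (id >= TASK_MAX || !task_table[id])
            return -1;
        return task_table[id](input, output);
    }

    int main(void)
    {
        printf("ret = %d\n", handle_task(TASK_COMPLETE_INIT, NULL, NULL));
        return 0;
    }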
@@ -562,7 +522,7 @@ static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
 	return pm_type;
 }
 
-static int pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
+static void pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
 {
 	struct pp_hwmgr *hwmgr;
 	struct pp_instance *pp_handle = (struct pp_instance *)handle;
@@ -571,25 +531,25 @@ static int pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
 	ret = pp_check(pp_handle);
 
 	if (ret != 0)
-		return ret;
+		return;
 
 	hwmgr = pp_handle->hwmgr;
 
 	if (hwmgr->hwmgr_func->set_fan_control_mode == NULL) {
 		pr_info("%s was not implemented.\n", __func__);
-		return 0;
+		return;
 	}
 	mutex_lock(&pp_handle->pp_lock);
-	ret = hwmgr->hwmgr_func->set_fan_control_mode(hwmgr, mode);
+	hwmgr->hwmgr_func->set_fan_control_mode(hwmgr, mode);
 	mutex_unlock(&pp_handle->pp_lock);
-	return ret;
 }
 
-static int pp_dpm_get_fan_control_mode(void *handle)
+static uint32_t pp_dpm_get_fan_control_mode(void *handle)
 {
 	struct pp_hwmgr *hwmgr;
 	struct pp_instance *pp_handle = (struct pp_instance *)handle;
 	int ret = 0;
+	uint32_t mode = 0;
 
 	ret = pp_check(pp_handle);
 
@@ -603,9 +563,9 @@ static int pp_dpm_get_fan_control_mode(void *handle)
 		return 0;
 	}
 	mutex_lock(&pp_handle->pp_lock);
-	ret = hwmgr->hwmgr_func->get_fan_control_mode(hwmgr);
+	mode = hwmgr->hwmgr_func->get_fan_control_mode(hwmgr);
 	mutex_unlock(&pp_handle->pp_lock);
-	return ret;
+	return mode;
 }
 
 static int pp_dpm_set_fan_speed_percent(void *handle, uint32_t percent)
@@ -1128,7 +1088,7 @@ static int pp_dpm_switch_power_profile(void *handle,
 	return 0;
 }
 
-const struct amd_powerplay_funcs pp_dpm_funcs = {
+const struct amd_pm_funcs pp_dpm_funcs = {
 	.get_temperature = pp_dpm_get_temperature,
 	.load_firmware = pp_dpm_load_fw,
 	.wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
@@ -1189,15 +1149,9 @@ int amd_powerplay_destroy(void *handle)
 {
 	struct pp_instance *instance = (struct pp_instance *)handle;
 
-	if (instance->pm_en) {
-		kfree(instance->eventmgr);
-		kfree(instance->hwmgr);
-		instance->hwmgr = NULL;
-		instance->eventmgr = NULL;
-	}
+	kfree(instance->hwmgr);
+	instance->hwmgr = NULL;
 
-	kfree(instance->smu_mgr);
-	instance->smu_mgr = NULL;
 	kfree(instance);
 	instance = NULL;
 	return 0;
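After this hunk a pp_instance owns only its hwmgr; the eventmgr and the separately freed smu_mgr are gone from teardown entirely. Reduced to plain C with stand-in types, the whole destroy path is now:

    #include <stdlib.h>

    struct hw_mgr { int placeholder; };
    struct pp_inst { struct hw_mgr *hwmgr; };

    static int powerplay_destroy(struct pp_inst *instance)
    {
        free(instance->hwmgr);   /* the hwmgr is the only owned object now */
        instance->hwmgr = NULL;
        free(instance);
        return 0;
    }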
@@ -1206,18 +1160,16 @@ int amd_powerplay_destroy(void *handle)
 int amd_powerplay_reset(void *handle)
 {
 	struct pp_instance *instance = (struct pp_instance *)handle;
-	struct pp_eventmgr *eventmgr;
-	struct pem_event_data event_data = { {0} };
 	int ret;
 
-	if (cgs_is_virtualization_enabled(instance->smu_mgr->device))
+	if (cgs_is_virtualization_enabled(instance->hwmgr->device))
 		return PP_DPM_DISABLED;
 
 	ret = pp_check(instance);
 	if (ret != 0)
 		return ret;
 
-	ret = pp_hw_fini(handle);
+	ret = pp_hw_fini(instance);
 	if (ret)
 		return ret;
 
@@ -1225,16 +1177,7 @@ int amd_powerplay_reset(void *handle)
 	if (ret)
 		return PP_DPM_DISABLED;
 
-	eventmgr = instance->eventmgr;
-
-	if (eventmgr->pp_eventmgr_init == NULL)
-		return PP_DPM_DISABLED;
-
-	ret = eventmgr->pp_eventmgr_init(eventmgr);
-	if (ret)
-		return ret;
-
-	return pem_handle_event(eventmgr, AMD_PP_EVENT_COMPLETE_INIT, &event_data);
+	return hwmgr_handle_task(instance, AMD_PP_TASK_COMPLETE_INIT, NULL, NULL);
 }
 
 /* export this function to DAL */
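amd_powerplay_reset() also stops special-casing re-initialization: instead of re-running the event manager's init hook and then firing a COMPLETE_INIT event, it now finishes through the same hwmgr task the first-time init path uses. A minimal sketch of that sequence with stub functions (the real pp_hw_fini()/pp_hw_init() bodies are not part of this diff):

    struct inst { int placeholder; };
    enum task_id { TASK_COMPLETE_INIT };

    /* Stand-ins for pp_hw_fini(), pp_hw_init() and hwmgr_handle_task(). */
    static int hw_fini(struct inst *p) { (void)p; return 0; }
    static int hw_init(struct inst *p) { (void)p; return 0; }
    static int handle_task(struct inst *p, enum task_id id, void *in, void *out)
    {
        (void)p; (void)id; (void)in; (void)out;
        return 0;
    }

    static int powerplay_reset(struct inst *p)
    {
        int ret;

        ret = hw_fini(p);       /* tear down */
        if (ret)
            return ret;
        ret = hw_init(p);       /* bring back up */
        if (ret)
            return ret;
        /* finish exactly the way first-time init finishes */
        return handle_task(p, TASK_COMPLETE_INIT, NULL, NULL);
    }

The remainder of the diff removes the eventmgr sub-component itself; the deleted files follow.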
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
-#
-# Makefile for the 'event manager' sub-component of powerplay.
-# It provides the event management services for the driver.
-
-EVENT_MGR = eventmgr.o eventinit.o eventmanagement.o \
-		eventactionchains.o eventsubchains.o eventtasks.o psm.o
-
-AMD_PP_EVENT = $(addprefix $(AMD_PP_PATH)/eventmgr/,$(EVENT_MGR))
-
-AMD_POWERPLAY_FILES += $(AMD_PP_EVENT)
-
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.c
+++ /dev/null
@@ -1,291 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#include "eventmgr.h"
-#include "eventactionchains.h"
-#include "eventsubchains.h"
-
-static const pem_event_action * const initialize_event[] = {
-	block_adjust_power_state_tasks,
-	power_budget_tasks,
-	system_config_tasks,
-	setup_asic_tasks,
-	enable_dynamic_state_management_tasks,
-	get_2d_performance_state_tasks,
-	set_performance_state_tasks,
-	initialize_thermal_controller_tasks,
-	conditionally_force_3d_performance_state_tasks,
-	process_vbios_eventinfo_tasks,
-	broadcast_power_policy_tasks,
-	NULL
-};
-
-const struct action_chain initialize_action_chain = {
-	"Initialize",
-	initialize_event
-};
-
-static const pem_event_action * const uninitialize_event[] = {
-	ungate_all_display_phys_tasks,
-	uninitialize_display_phy_access_tasks,
-	disable_gfx_voltage_island_power_gating_tasks,
-	disable_gfx_clock_gating_tasks,
-	uninitialize_thermal_controller_tasks,
-	set_boot_state_tasks,
-	adjust_power_state_tasks,
-	disable_dynamic_state_management_tasks,
-	disable_clock_power_gatings_tasks,
-	cleanup_asic_tasks,
-	prepare_for_pnp_stop_tasks,
-	NULL
-};
-
-const struct action_chain uninitialize_action_chain = {
-	"Uninitialize",
-	uninitialize_event
-};
-
-static const pem_event_action * const power_source_change_event_pp_enabled[] = {
-	set_power_source_tasks,
-	set_power_saving_state_tasks,
-	adjust_power_state_tasks,
-	enable_disable_fps_tasks,
-	set_nbmcu_state_tasks,
-	broadcast_power_policy_tasks,
-	NULL
-};
-
-const struct action_chain power_source_change_action_chain_pp_enabled = {
-	"Power source change - PowerPlay enabled",
-	power_source_change_event_pp_enabled
-};
-
-static const pem_event_action * const power_source_change_event_pp_disabled[] = {
-	set_power_source_tasks,
-	set_nbmcu_state_tasks,
-	NULL
-};
-
-const struct action_chain power_source_changes_action_chain_pp_disabled = {
-	"Power source change - PowerPlay disabled",
-	power_source_change_event_pp_disabled
-};
-
-static const pem_event_action * const power_source_change_event_hardware_dc[] = {
-	set_power_source_tasks,
-	set_power_saving_state_tasks,
-	adjust_power_state_tasks,
-	enable_disable_fps_tasks,
-	reset_hardware_dc_notification_tasks,
-	set_nbmcu_state_tasks,
-	broadcast_power_policy_tasks,
-	NULL
-};
-
-const struct action_chain power_source_change_action_chain_hardware_dc = {
-	"Power source change - with Hardware DC switching",
-	power_source_change_event_hardware_dc
-};
-
-static const pem_event_action * const suspend_event[] = {
-	reset_display_phy_access_tasks,
-	unregister_interrupt_tasks,
-	disable_gfx_voltage_island_power_gating_tasks,
-	disable_gfx_clock_gating_tasks,
-	notify_smu_suspend_tasks,
-	disable_smc_firmware_ctf_tasks,
-	set_boot_state_tasks,
-	adjust_power_state_tasks,
-	disable_fps_tasks,
-	vari_bright_suspend_tasks,
-	reset_fan_speed_to_default_tasks,
-	power_down_asic_tasks,
-	disable_stutter_mode_tasks,
-	set_connected_standby_tasks,
-	block_hw_access_tasks,
-	NULL
-};
-
-const struct action_chain suspend_action_chain = {
-	"Suspend",
-	suspend_event
-};
-
-static const pem_event_action * const resume_event[] = {
-	unblock_hw_access_tasks,
-	resume_connected_standby_tasks,
-	notify_smu_resume_tasks,
-	reset_display_configCounter_tasks,
-	update_dal_configuration_tasks,
-	vari_bright_resume_tasks,
-	setup_asic_tasks,
-	enable_stutter_mode_tasks, /*must do this in boot state and before SMC is started */
-	enable_dynamic_state_management_tasks,
-	enable_disable_bapm_tasks,
-	initialize_thermal_controller_tasks,
-	get_2d_performance_state_tasks,
-	set_performance_state_tasks,
-	adjust_power_state_tasks,
-	enable_disable_fps_tasks,
-	notify_hw_power_source_tasks,
-	process_vbios_event_info_tasks,
-	enable_gfx_clock_gating_tasks,
-	enable_gfx_voltage_island_power_gating_tasks,
-	reset_clock_gating_tasks,
-	notify_smu_vpu_recovery_end_tasks,
-	disable_vpu_cap_tasks,
-	execute_escape_sequence_tasks,
-	NULL
-};
-
-
-const struct action_chain resume_action_chain = {
-	"resume",
-	resume_event
-};
-
-static const pem_event_action * const complete_init_event[] = {
-	unblock_adjust_power_state_tasks,
-	adjust_power_state_tasks,
-	enable_gfx_clock_gating_tasks,
-	enable_gfx_voltage_island_power_gating_tasks,
-	notify_power_state_change_tasks,
-	NULL
-};
-
-const struct action_chain complete_init_action_chain = {
-	"complete init",
-	complete_init_event
-};
-
-static const pem_event_action * const enable_gfx_clock_gating_event[] = {
-	enable_gfx_clock_gating_tasks,
-	NULL
-};
-
-const struct action_chain enable_gfx_clock_gating_action_chain = {
-	"enable gfx clock gate",
-	enable_gfx_clock_gating_event
-};
-
-static const pem_event_action * const disable_gfx_clock_gating_event[] = {
-	disable_gfx_clock_gating_tasks,
-	NULL
-};
-
-const struct action_chain disable_gfx_clock_gating_action_chain = {
-	"disable gfx clock gate",
-	disable_gfx_clock_gating_event
-};
-
-static const pem_event_action * const enable_cgpg_event[] = {
-	enable_cgpg_tasks,
-	NULL
-};
-
-const struct action_chain enable_cgpg_action_chain = {
-	"eable cg pg",
-	enable_cgpg_event
-};
-
-static const pem_event_action * const disable_cgpg_event[] = {
-	disable_cgpg_tasks,
-	NULL
-};
-
-const struct action_chain disable_cgpg_action_chain = {
-	"disable cg pg",
-	disable_cgpg_event
-};
-
-
-/* Enable user _2d performance and activate */
-
-static const pem_event_action * const enable_user_state_event[] = {
-	create_new_user_performance_state_tasks,
-	adjust_power_state_tasks,
-	NULL
-};
-
-const struct action_chain enable_user_state_action_chain = {
-	"Enable user state",
-	enable_user_state_event
-};
-
-static const pem_event_action * const enable_user_2d_performance_event[] = {
-	enable_user_2d_performance_tasks,
-	add_user_2d_performance_state_tasks,
-	set_performance_state_tasks,
-	adjust_power_state_tasks,
-	delete_user_2d_performance_state_tasks,
-	NULL
-};
-
-const struct action_chain enable_user_2d_performance_action_chain = {
-	"enable_user_2d_performance_event_activate",
-	enable_user_2d_performance_event
-};
-
-
-static const pem_event_action * const disable_user_2d_performance_event[] = {
-	disable_user_2d_performance_tasks,
-	delete_user_2d_performance_state_tasks,
-	NULL
-};
-
-const struct action_chain disable_user_2d_performance_action_chain = {
-	"disable_user_2d_performance_event",
-	disable_user_2d_performance_event
-};
-
-
-static const pem_event_action * const display_config_change_event[] = {
-	/* countDisplayConfigurationChangeEventTasks, */
-	unblock_adjust_power_state_tasks,
-	set_cpu_power_state,
-	notify_hw_power_source_tasks,
-	get_2d_performance_state_tasks,
-	set_performance_state_tasks,
-	/* updateDALConfigurationTasks,
-	variBrightDisplayConfigurationChangeTasks, */
-	adjust_power_state_tasks,
-	/*enableDisableFPSTasks,
-	setNBMCUStateTasks,
-	notifyPCIEDeviceReadyTasks,*/
-	NULL
-};
-
-const struct action_chain display_config_change_action_chain = {
-	"Display configuration change",
-	display_config_change_event
-};
-
-static const pem_event_action * const readjust_power_state_event[] = {
-	adjust_power_state_tasks,
-	NULL
-};
-
-const struct action_chain readjust_power_state_action_chain = {
-	"re-adjust power state",
-	readjust_power_state_event
-};
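The file deleted above encoded every power event as an action chain: a named, NULL-terminated array of sub-chains, where each sub-chain is itself a NULL-terminated array of task function pointers. A compact, self-contained rendering of that data layout, with placeholder task bodies (the executor that walked these tables is in the deleted eventmanagement.c further down):

    #include <stdio.h>

    struct event_data { int unused; };
    typedef int (*event_action)(struct event_data *data);

    struct action_chain {
        const char *description;
        /* NULL-terminated list of NULL-terminated task lists */
        const event_action * const *action_chain;
    };

    static int set_boot_state(struct event_data *d)     { (void)d; puts("boot");   return 0; }
    static int adjust_power_state(struct event_data *d) { (void)d; puts("adjust"); return 0; }

    static const event_action boot_tasks[]   = { set_boot_state, NULL };
    static const event_action adjust_tasks[] = { adjust_power_state, NULL };

    static const event_action * const uninitialize_event[] = {
        boot_tasks,
        adjust_tasks,
        NULL
    };

    static const struct action_chain uninitialize_chain = {
        "Uninitialize",
        uninitialize_event
    };

    int main(void)
    {
        printf("chain: %s\n", uninitialize_chain.description);
        return 0;
    }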
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventactionchains.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#ifndef _EVENT_ACTION_CHAINS_H_
-#define _EVENT_ACTION_CHAINS_H_
-#include "eventmgr.h"
-
-extern const struct action_chain initialize_action_chain;
-
-extern const struct action_chain uninitialize_action_chain;
-
-extern const struct action_chain power_source_change_action_chain_pp_enabled;
-
-extern const struct action_chain power_source_changes_action_chain_pp_disabled;
-
-extern const struct action_chain power_source_change_action_chain_hardware_dc;
-
-extern const struct action_chain suspend_action_chain;
-
-extern const struct action_chain resume_action_chain;
-
-extern const struct action_chain complete_init_action_chain;
-
-extern const struct action_chain enable_gfx_clock_gating_action_chain;
-
-extern const struct action_chain disable_gfx_clock_gating_action_chain;
-
-extern const struct action_chain enable_cgpg_action_chain;
-
-extern const struct action_chain disable_cgpg_action_chain;
-
-extern const struct action_chain enable_user_2d_performance_action_chain;
-
-extern const struct action_chain disable_user_2d_performance_action_chain;
-
-extern const struct action_chain enable_user_state_action_chain;
-
-extern const struct action_chain readjust_power_state_action_chain;
-
-extern const struct action_chain display_config_change_action_chain;
-
-#endif /*_EVENT_ACTION_CHAINS_H_*/
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventinit.c
+++ /dev/null
@@ -1,195 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#include "eventmgr.h"
-#include "eventinit.h"
-#include "ppinterrupt.h"
-#include "hardwaremanager.h"
-
-void pem_init_feature_info(struct pp_eventmgr *eventmgr)
-{
-
-	/* PowerPlay info */
-	eventmgr->ui_state_info[PP_PowerSource_AC].default_ui_lable =
-			PP_StateUILabel_Performance;
-
-	eventmgr->ui_state_info[PP_PowerSource_AC].current_ui_label =
-			PP_StateUILabel_Performance;
-
-	eventmgr->ui_state_info[PP_PowerSource_DC].default_ui_lable =
-			PP_StateUILabel_Battery;
-
-	eventmgr->ui_state_info[PP_PowerSource_DC].current_ui_label =
-			PP_StateUILabel_Battery;
-
-	if (phm_cap_enabled(eventmgr->platform_descriptor->platformCaps, PHM_PlatformCaps_PowerPlaySupport)) {
-		eventmgr->features[PP_Feature_PowerPlay].supported = true;
-		eventmgr->features[PP_Feature_PowerPlay].version = PEM_CURRENT_POWERPLAY_FEATURE_VERSION;
-		eventmgr->features[PP_Feature_PowerPlay].enabled_default = true;
-		eventmgr->features[PP_Feature_PowerPlay].enabled = true;
-	} else {
-		eventmgr->features[PP_Feature_PowerPlay].supported = false;
-		eventmgr->features[PP_Feature_PowerPlay].enabled = false;
-		eventmgr->features[PP_Feature_PowerPlay].enabled_default = false;
-	}
-
-	eventmgr->features[PP_Feature_Force3DClock].supported = true;
-	eventmgr->features[PP_Feature_Force3DClock].enabled = false;
-	eventmgr->features[PP_Feature_Force3DClock].enabled_default = false;
-	eventmgr->features[PP_Feature_Force3DClock].version = 1;
-
-	/* over drive*/
-	eventmgr->features[PP_Feature_User2DPerformance].version = 4;
-	eventmgr->features[PP_Feature_User3DPerformance].version = 4;
-	eventmgr->features[PP_Feature_OverdriveTest].version = 4;
-
-	eventmgr->features[PP_Feature_OverDrive].version = 4;
-	eventmgr->features[PP_Feature_OverDrive].enabled = false;
-	eventmgr->features[PP_Feature_OverDrive].enabled_default = false;
-
-	eventmgr->features[PP_Feature_User2DPerformance].supported = false;
-	eventmgr->features[PP_Feature_User2DPerformance].enabled = false;
-	eventmgr->features[PP_Feature_User2DPerformance].enabled_default = false;
-
-	eventmgr->features[PP_Feature_User3DPerformance].supported = false;
-	eventmgr->features[PP_Feature_User3DPerformance].enabled = false;
-	eventmgr->features[PP_Feature_User3DPerformance].enabled_default = false;
-
-	eventmgr->features[PP_Feature_OverdriveTest].supported = false;
-	eventmgr->features[PP_Feature_OverdriveTest].enabled = false;
-	eventmgr->features[PP_Feature_OverdriveTest].enabled_default = false;
-
-	eventmgr->features[PP_Feature_OverDrive].supported = false;
-
-	eventmgr->features[PP_Feature_PowerBudgetWaiver].enabled_default = false;
-	eventmgr->features[PP_Feature_PowerBudgetWaiver].version = 1;
-	eventmgr->features[PP_Feature_PowerBudgetWaiver].supported = false;
-	eventmgr->features[PP_Feature_PowerBudgetWaiver].enabled = false;
-
-	/* Multi UVD States support */
-	eventmgr->features[PP_Feature_MultiUVDState].supported = false;
-	eventmgr->features[PP_Feature_MultiUVDState].enabled = false;
-	eventmgr->features[PP_Feature_MultiUVDState].enabled_default = false;
-
-	/* Dynamic UVD States support */
-	eventmgr->features[PP_Feature_DynamicUVDState].supported = false;
-	eventmgr->features[PP_Feature_DynamicUVDState].enabled = false;
-	eventmgr->features[PP_Feature_DynamicUVDState].enabled_default = false;
-
-	/* VCE DPM support */
-	eventmgr->features[PP_Feature_VCEDPM].supported = false;
-	eventmgr->features[PP_Feature_VCEDPM].enabled = false;
-	eventmgr->features[PP_Feature_VCEDPM].enabled_default = false;
-
-	/* ACP PowerGating support */
-	eventmgr->features[PP_Feature_ACP_POWERGATING].supported = false;
-	eventmgr->features[PP_Feature_ACP_POWERGATING].enabled = false;
-	eventmgr->features[PP_Feature_ACP_POWERGATING].enabled_default = false;
-
-	/* PPM support */
-	eventmgr->features[PP_Feature_PPM].version = 1;
-	eventmgr->features[PP_Feature_PPM].supported = false;
-	eventmgr->features[PP_Feature_PPM].enabled = false;
-
-	/* FFC support (enables fan and temp settings, Gemini needs temp settings) */
-	if (phm_cap_enabled(eventmgr->platform_descriptor->platformCaps, PHM_PlatformCaps_ODFuzzyFanControlSupport) ||
-	    phm_cap_enabled(eventmgr->platform_descriptor->platformCaps, PHM_PlatformCaps_GeminiRegulatorFanControlSupport)) {
-		eventmgr->features[PP_Feature_FFC].version = 1;
-		eventmgr->features[PP_Feature_FFC].supported = true;
-		eventmgr->features[PP_Feature_FFC].enabled = true;
-		eventmgr->features[PP_Feature_FFC].enabled_default = true;
-	} else {
-		eventmgr->features[PP_Feature_FFC].supported = false;
-		eventmgr->features[PP_Feature_FFC].enabled = false;
-		eventmgr->features[PP_Feature_FFC].enabled_default = false;
-	}
-
-	eventmgr->features[PP_Feature_VariBright].supported = false;
-	eventmgr->features[PP_Feature_VariBright].enabled = false;
-	eventmgr->features[PP_Feature_VariBright].enabled_default = false;
-
-	eventmgr->features[PP_Feature_BACO].supported = false;
-	eventmgr->features[PP_Feature_BACO].supported = false;
-	eventmgr->features[PP_Feature_BACO].enabled_default = false;
-
-	/* PowerDown feature support */
-	eventmgr->features[PP_Feature_PowerDown].supported = false;
-	eventmgr->features[PP_Feature_PowerDown].enabled = false;
-	eventmgr->features[PP_Feature_PowerDown].enabled_default = false;
-
-	eventmgr->features[PP_Feature_FPS].version = 1;
-	eventmgr->features[PP_Feature_FPS].supported = false;
-	eventmgr->features[PP_Feature_FPS].enabled_default = false;
-	eventmgr->features[PP_Feature_FPS].enabled = false;
-
-	eventmgr->features[PP_Feature_ViPG].version = 1;
-	eventmgr->features[PP_Feature_ViPG].supported = false;
-	eventmgr->features[PP_Feature_ViPG].enabled_default = false;
-	eventmgr->features[PP_Feature_ViPG].enabled = false;
-}
-
-static int thermal_interrupt_callback(void *private_data,
-		unsigned src_id, const uint32_t *iv_entry)
-{
-	/* TO DO hanle PEM_Event_ThermalNotification (struct pp_eventmgr *)private_data*/
-	pr_info("current thermal is out of range \n");
-	return 0;
-}
-
-int pem_register_interrupts(struct pp_eventmgr *eventmgr)
-{
-	int result = 0;
-	struct pp_interrupt_registration_info info;
-
-	info.call_back = thermal_interrupt_callback;
-	info.context = eventmgr;
-
-	result = phm_register_thermal_interrupt(eventmgr->hwmgr, &info);
-
-	/* TODO:
-	 * 2. Register CTF event interrupt
-	 * 3. Register for vbios events interrupt
-	 * 4. Register External Throttle Interrupt
-	 * 5. Register Smc To Host Interrupt
-	 * */
-	return result;
-}
-
-
-int pem_unregister_interrupts(struct pp_eventmgr *eventmgr)
-{
-	return 0;
-}
-
-
-void pem_uninit_featureInfo(struct pp_eventmgr *eventmgr)
-{
-	eventmgr->features[PP_Feature_MultiUVDState].supported = false;
-	eventmgr->features[PP_Feature_VariBright].supported = false;
-	eventmgr->features[PP_Feature_PowerBudgetWaiver].supported = false;
-	eventmgr->features[PP_Feature_OverDrive].supported = false;
-	eventmgr->features[PP_Feature_OverdriveTest].supported = false;
-	eventmgr->features[PP_Feature_User3DPerformance].supported = false;
-	eventmgr->features[PP_Feature_User2DPerformance].supported = false;
-	eventmgr->features[PP_Feature_PowerPlay].supported = false;
-	eventmgr->features[PP_Feature_Force3DClock].supported = false;
-}
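Worth noting in the deleted pem_init_feature_info(): the BACO entry assigns `.supported = false` twice and never sets `.enabled`, which looks like a copy-paste slip that the removal makes moot. The overall pattern is a feature table indexed by enum, with supported/enabled/default flags derived from platform capability bits. A small illustrative version of that pattern, with invented names (cap_enabled() stands in for phm_cap_enabled()):

    #include <stdbool.h>

    enum feature { FEATURE_POWERPLAY, FEATURE_FFC, FEATURE_MAX };

    struct feature_state {
        bool supported, enabled, enabled_default;
        int version;
    };

    /* Stand-in for phm_cap_enabled() on the platform capability bitmap. */
    static bool cap_enabled(unsigned long caps, int bit)
    {
        return caps & (1ul << bit);
    }

    static void init_feature_info(struct feature_state f[FEATURE_MAX],
                                  unsigned long caps)
    {
        bool pp  = cap_enabled(caps, 0);
        bool ffc = cap_enabled(caps, 1);

        f[FEATURE_POWERPLAY] = (struct feature_state){
            .supported = pp, .enabled = pp, .enabled_default = pp,
            .version = 1,
        };
        f[FEATURE_FFC] = (struct feature_state){
            .supported = ffc, .enabled = ffc, .enabled_default = ffc,
            .version = 1,
        };
    }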
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmanagement.c
+++ /dev/null
@@ -1,215 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#include "eventmanagement.h"
-#include "eventmgr.h"
-#include "eventactionchains.h"
-
-int pem_init_event_action_chains(struct pp_eventmgr *eventmgr)
-{
-	int i;
-
-	for (i = 0; i < AMD_PP_EVENT_MAX; i++)
-		eventmgr->event_chain[i] = NULL;
-
-	eventmgr->event_chain[AMD_PP_EVENT_SUSPEND] = pem_get_suspend_action_chain(eventmgr);
-	eventmgr->event_chain[AMD_PP_EVENT_INITIALIZE] = pem_get_initialize_action_chain(eventmgr);
-	eventmgr->event_chain[AMD_PP_EVENT_UNINITIALIZE] = pem_get_uninitialize_action_chain(eventmgr);
-	eventmgr->event_chain[AMD_PP_EVENT_POWER_SOURCE_CHANGE] = pem_get_power_source_change_action_chain(eventmgr);
-	eventmgr->event_chain[AMD_PP_EVENT_HIBERNATE] = pem_get_hibernate_action_chain(eventmgr);
-	eventmgr->event_chain[AMD_PP_EVENT_RESUME] = pem_get_resume_action_chain(eventmgr);
-	eventmgr->event_chain[AMD_PP_EVENT_THERMAL_NOTIFICATION] = pem_get_thermal_notification_action_chain(eventmgr);
-	eventmgr->event_chain[AMD_PP_EVENT_VBIOS_NOTIFICATION] = pem_get_vbios_notification_action_chain(eventmgr);
-	eventmgr->event_chain[AMD_PP_EVENT_ENTER_THERMAL_STATE] = pem_get_enter_thermal_state_action_chain(eventmgr);
-	eventmgr->event_chain[AMD_PP_EVENT_EXIT_THERMAL_STATE] = pem_get_exit_thermal_state_action_chain(eventmgr);
-	eventmgr->event_chain[AMD_PP_EVENT_ENABLE_POWER_PLAY] = pem_get_enable_powerplay_action_chain(eventmgr);
-	eventmgr->event_chain[AMD_PP_EVENT_DISABLE_POWER_PLAY] = pem_get_disable_powerplay_action_chain(eventmgr);
-	eventmgr->event_chain[AMD_PP_EVENT_ENABLE_OVER_DRIVE_TEST] = pem_get_enable_overdrive_test_action_chain(eventmgr);
-	eventmgr->event_chain[AMD_PP_EVENT_DISABLE_OVER_DRIVE_TEST] = pem_get_disable_overdrive_test_action_chain(eventmgr);
-	eventmgr->event_chain[AMD_PP_EVENT_ENABLE_GFX_CLOCK_GATING] = pem_get_enable_gfx_clock_gating_action_chain(eventmgr);
-	eventmgr->event_chain[AMD_PP_EVENT_DISABLE_GFX_CLOCK_GATING] = pem_get_disable_gfx_clock_gating_action_chain(eventmgr);
-	eventmgr->event_chain[AMD_PP_EVENT_ENABLE_CGPG] = pem_get_enable_cgpg_action_chain(eventmgr);
-	eventmgr->event_chain[AMD_PP_EVENT_DISABLE_CGPG] = pem_get_disable_cgpg_action_chain(eventmgr);
-	eventmgr->event_chain[AMD_PP_EVENT_COMPLETE_INIT] = pem_get_complete_init_action_chain(eventmgr);
-	eventmgr->event_chain[AMD_PP_EVENT_SCREEN_ON] = pem_get_screen_on_action_chain(eventmgr);
-	eventmgr->event_chain[AMD_PP_EVENT_SCREEN_OFF] = pem_get_screen_off_action_chain(eventmgr);
-	eventmgr->event_chain[AMD_PP_EVENT_PRE_SUSPEND] = pem_get_pre_suspend_action_chain(eventmgr);
-	eventmgr->event_chain[AMD_PP_EVENT_PRE_RESUME] = pem_get_pre_resume_action_chain(eventmgr);
-	eventmgr->event_chain[AMD_PP_EVENT_ENABLE_USER_STATE] = pem_enable_user_state_action_chain(eventmgr);
-	eventmgr->event_chain[AMD_PP_EVENT_READJUST_POWER_STATE] = pem_readjust_power_state_action_chain(eventmgr);
-	eventmgr->event_chain[AMD_PP_EVENT_DISPLAY_CONFIG_CHANGE] = pem_display_config_change_action_chain(eventmgr);
-	return 0;
-}
-
-int pem_excute_event_chain(struct pp_eventmgr *eventmgr, const struct action_chain *event_chain, struct pem_event_data *event_data)
-{
-	const pem_event_action * const *paction_chain;
-	const pem_event_action *psub_chain;
-	int tmp_result = 0;
-	int result = 0;
-
-	if (eventmgr == NULL || event_chain == NULL || event_data == NULL)
-		return -EINVAL;
-
-	for (paction_chain = event_chain->action_chain; NULL != *paction_chain; paction_chain++) {
-		if (0 != result)
-			return result;
-
-		for (psub_chain = *paction_chain; NULL != *psub_chain; psub_chain++) {
-			tmp_result = (*psub_chain)(eventmgr, event_data);
-			if (0 == result)
-				result = tmp_result;
-		}
-	}
-
-	return result;
-}
-
-const struct action_chain *pem_get_suspend_action_chain(struct pp_eventmgr *eventmgr)
-{
-	return &suspend_action_chain;
-}
-
-const struct action_chain *pem_get_initialize_action_chain(struct pp_eventmgr *eventmgr)
-{
-	return &initialize_action_chain;
-}
-
-const struct action_chain *pem_get_uninitialize_action_chain(struct pp_eventmgr *eventmgr)
-{
-	return &uninitialize_action_chain;
-}
-
-const struct action_chain *pem_get_power_source_change_action_chain(struct pp_eventmgr *eventmgr)
-{
-	return &power_source_change_action_chain_pp_enabled; /* other case base on feature info*/
-}
-
-const struct action_chain *pem_get_resume_action_chain(struct pp_eventmgr *eventmgr)
-{
-	return &resume_action_chain;
-}
-
-const struct action_chain *pem_get_hibernate_action_chain(struct pp_eventmgr *eventmgr)
-{
-	return NULL;
-}
-
-const struct action_chain *pem_get_thermal_notification_action_chain(struct pp_eventmgr *eventmgr)
-{
-	return NULL;
-}
-
-const struct action_chain *pem_get_vbios_notification_action_chain(struct pp_eventmgr *eventmgr)
-{
-	return NULL;
-}
-
-const struct action_chain *pem_get_enter_thermal_state_action_chain(struct pp_eventmgr *eventmgr)
-{
-	return NULL;
-}
-
-const struct action_chain *pem_get_exit_thermal_state_action_chain(struct pp_eventmgr *eventmgr)
-{
-	return NULL;
-}
-
-const struct action_chain *pem_get_enable_powerplay_action_chain(struct pp_eventmgr *eventmgr)
-{
-	return NULL;
-}
-
-const struct action_chain *pem_get_disable_powerplay_action_chain(struct pp_eventmgr *eventmgr)
-{
-	return NULL;
-}
-
-const struct action_chain *pem_get_enable_overdrive_test_action_chain(struct pp_eventmgr *eventmgr)
-{
-	return NULL;
-}
-
-const struct action_chain *pem_get_disable_overdrive_test_action_chain(struct pp_eventmgr *eventmgr)
-{
-	return NULL;
-}
-
-const struct action_chain *pem_get_enable_gfx_clock_gating_action_chain(struct pp_eventmgr *eventmgr)
-{
-	return &enable_gfx_clock_gating_action_chain;
-}
-
-const struct action_chain *pem_get_disable_gfx_clock_gating_action_chain(struct pp_eventmgr *eventmgr)
-{
-	return &disable_gfx_clock_gating_action_chain;
-}
-
-const struct action_chain *pem_get_enable_cgpg_action_chain(struct pp_eventmgr *eventmgr)
-{
-	return &enable_cgpg_action_chain;
-}
-
-const struct action_chain *pem_get_disable_cgpg_action_chain(struct pp_eventmgr *eventmgr)
-{
-	return &disable_cgpg_action_chain;
-}
-
-const struct action_chain *pem_get_complete_init_action_chain(struct pp_eventmgr *eventmgr)
-{
-	return &complete_init_action_chain;
-}
-
-const struct action_chain *pem_get_screen_on_action_chain(struct pp_eventmgr *eventmgr)
-{
-	return NULL;
-}
-
-const struct action_chain *pem_get_screen_off_action_chain(struct pp_eventmgr *eventmgr)
-{
-	return NULL;
-}
-
-const struct action_chain *pem_get_pre_suspend_action_chain(struct pp_eventmgr *eventmgr)
-{
-	return NULL;
-}
-
-const struct action_chain *pem_get_pre_resume_action_chain(struct pp_eventmgr *eventmgr)
-{
-	return NULL;
-}
-
-const struct action_chain *pem_enable_user_state_action_chain(struct pp_eventmgr *eventmgr)
-{
-	return &enable_user_state_action_chain;
-}
-
-const struct action_chain *pem_readjust_power_state_action_chain(struct pp_eventmgr *eventmgr)
-{
-	return &readjust_power_state_action_chain;
-}
-
-const struct action_chain *pem_display_config_change_action_chain(struct pp_eventmgr *eventmgr)
-{
-	return &display_config_change_action_chain;
-}
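The deleted pem_excute_event_chain() (note the long-standing "excute" spelling in the identifier) walked the outer list of sub-chains and, inside each, every task. A failure was latched in `result` but only checked at the top of the outer loop, so the rest of the failing sub-chain still ran before execution stopped. A standalone sketch of that exact control flow:

    #include <stdio.h>

    typedef int (*task_fn)(void);

    static int ok(void)   { puts("ok");   return 0; }
    static int fail(void) { puts("fail"); return -1; }

    /* Outer list of NULL-terminated task lists, itself NULL-terminated. */
    static const task_fn chain_a[] = { ok, fail, ok, NULL }; /* the ok after fail still runs */
    static const task_fn chain_b[] = { ok, NULL };           /* never reached */
    static const task_fn * const chains[] = { chain_a, chain_b, NULL };

    static int execute(const task_fn * const *outer)
    {
        const task_fn *sub;
        int result = 0, tmp;

        for (; *outer != NULL; outer++) {
            if (result != 0)          /* failure checked between sub-chains */
                return result;
            for (sub = *outer; *sub != NULL; sub++) {
                tmp = (*sub)();
                if (result == 0)      /* latch the first failure */
                    result = tmp;
            }
        }
        return result;
    }

    int main(void) { return execute(chains); }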
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmanagement.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#ifndef _EVENT_MANAGEMENT_H_
-#define _EVENT_MANAGEMENT_H_
-
-#include "eventmgr.h"
-
-int pem_init_event_action_chains(struct pp_eventmgr *eventmgr);
-int pem_excute_event_chain(struct pp_eventmgr *eventmgr, const struct action_chain *event_chain, struct pem_event_data *event_data);
-const struct action_chain *pem_get_suspend_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_initialize_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_uninitialize_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_power_source_change_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_resume_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_hibernate_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_thermal_notification_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_vbios_notification_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_enter_thermal_state_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_exit_thermal_state_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_enable_powerplay_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_disable_powerplay_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_enable_overdrive_test_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_disable_overdrive_test_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_enable_gfx_clock_gating_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_disable_gfx_clock_gating_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_enable_cgpg_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_disable_cgpg_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_complete_init_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_screen_on_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_screen_off_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_pre_suspend_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_get_pre_resume_action_chain(struct pp_eventmgr *eventmgr);
-
-extern const struct action_chain *pem_enable_user_state_action_chain(struct pp_eventmgr *eventmgr);
-extern const struct action_chain *pem_readjust_power_state_action_chain(struct pp_eventmgr *eventmgr);
-const struct action_chain *pem_display_config_change_action_chain(struct pp_eventmgr *eventmgr);
-
-
-#endif /* _EVENT_MANAGEMENT_H_ */
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Copyright 2015 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include "eventmgr.h"
-#include "hwmgr.h"
-#include "eventinit.h"
-#include "eventmanagement.h"
-
-static int pem_init(struct pp_eventmgr *eventmgr)
-{
-	int result = 0;
-	struct pem_event_data event_data = { {0} };
-
-	/* Initialize PowerPlay feature info */
-	pem_init_feature_info(eventmgr);
-
-	/* Initialize event action chains */
-	pem_init_event_action_chains(eventmgr);
-
-	/* Call initialization event */
-	result = pem_handle_event(eventmgr, AMD_PP_EVENT_INITIALIZE, &event_data);
-
-	/* if (0 != result)
-		return result; */
-
-	/* Register interrupt callback functions */
-	result = pem_register_interrupts(eventmgr);
-	return 0;
-}
-
-static void pem_fini(struct pp_eventmgr *eventmgr)
-{
-	struct pem_event_data event_data = { {0} };
-
-	pem_uninit_featureInfo(eventmgr);
-	pem_unregister_interrupts(eventmgr);
-
-	pem_handle_event(eventmgr, AMD_PP_EVENT_UNINITIALIZE, &event_data);
-}
-
-int eventmgr_early_init(struct pp_instance *handle)
-{
-	struct pp_eventmgr *eventmgr;
-
-	if (handle == NULL)
-		return -EINVAL;
-
-	eventmgr = kzalloc(sizeof(struct pp_eventmgr), GFP_KERNEL);
-	if (eventmgr == NULL)
-		return -ENOMEM;
-
-	eventmgr->hwmgr = handle->hwmgr;
-	handle->eventmgr = eventmgr;
-
-	eventmgr->platform_descriptor = &(eventmgr->hwmgr->platform_descriptor);
-	eventmgr->pp_eventmgr_init = pem_init;
-	eventmgr->pp_eventmgr_fini = pem_fini;
-
-	return 0;
-}
-
-static int pem_handle_event_unlocked(struct pp_eventmgr *eventmgr, enum amd_pp_event event, struct pem_event_data *data)
-{
-	if (eventmgr == NULL || event >= AMD_PP_EVENT_MAX || data == NULL)
-		return -EINVAL;
-
-	return pem_excute_event_chain(eventmgr, eventmgr->event_chain[event], data);
-}
-
-int pem_handle_event(struct pp_eventmgr *eventmgr, enum amd_pp_event event, struct pem_event_data *event_data)
-{
-	int r = 0;
-
-	r = pem_handle_event_unlocked(eventmgr, event, event_data);
-
-	return r;
-}
-
-bool pem_is_hw_access_blocked(struct pp_eventmgr *eventmgr)
-{
-	return (eventmgr->block_adjust_power_state || phm_is_hw_access_blocked(eventmgr->hwmgr));
-}
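The deleted eventmgr.c closed the loop: eventmgr_early_init() allocated the manager and wired pem_init/pem_fini in as function pointers, and every caller funneled through pem_handle_event() into the chain executor. The replacement hwmgr task API keeps the dispatch idea but drops the separate allocation and the init/fini indirection. A sketch of the retired wiring, with simplified stand-in types:

    #include <stdlib.h>

    struct eventmgr {
        int  (*init)(struct eventmgr *em);
        void (*fini)(struct eventmgr *em);
    };

    static int  pem_init_stub(struct eventmgr *em) { (void)em; return 0; }
    static void pem_fini_stub(struct eventmgr *em) { (void)em; }

    /* Mirrors eventmgr_early_init(): allocate, then install the ops. */
    static struct eventmgr *eventmgr_early_init_stub(void)
    {
        struct eventmgr *em = calloc(1, sizeof(*em));

        if (!em)
            return NULL;
        em->init = pem_init_stub;
        em->fini = pem_fini_stub;
        return em;
    }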
Some files were not shown because too many files have changed in this diff.