Merge tag 'drm-amdkfd-next-2016-09-19' of git://people.freedesktop.org/~gabbayo/linux into drm-next

This is amdkfd's pull request for kernel 4.9. It contains a fix for a possible
infinite loop bug and a couple of other minor cleanup patches; the timeout-countdown
pattern behind the infinite-loop fix is sketched after the patch list below.

* tag 'drm-amdkfd-next-2016-09-19' of git://people.freedesktop.org/~gabbayo/linux:
  drm/amdkfd: Pass 'struct queue_propertices' by reference
  drm/amdkfd: Unify multiple calls to pr_debug() into one
  drm/amdkfd: Fix possible infinite loop
  drm/amdkfd: Reuse function to find a process through pasid
  drm/amdkfd: Add some missing memset zero'ing in queue init func
  drm/amdkfd: Tidy up kfd_generate_gpu_id() uint64_t bitshift unpack
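
The infinite-loop fix in the hunks below boils down to copying the caller's unsigned timeout into a signed counter and bailing out on "timeout <= 0" rather than "timeout == 0", so a countdown that drops in 20 ms steps can no longer step over zero and spin forever. Here is a minimal standalone sketch of that pattern; the done_condition() poll and sleep_ms() helper are hypothetical stand-ins for the driver's register read and msleep(), and are not part of the amdkfd code:

#include <stdbool.h>
#include <errno.h>

/* Hypothetical helpers for this sketch only; they stand in for the
 * HQD/SDMA register poll and msleep() used by the real driver. */
extern bool done_condition(void);
extern void sleep_ms(unsigned int ms);

/*
 * Countdown pattern from the fix: take the caller's unsigned timeout,
 * copy it into a signed counter, and treat "<= 0" as expiry.  Because
 * the counter drops in steps of 20, an "== 0" test could be skipped
 * over, which is the possible infinite loop the patch removes.
 */
static int wait_with_timeout(unsigned int utimeout)
{
	int timeout = utimeout;		/* signed working copy, as in the patch */

	while (true) {
		if (done_condition())
			return 0;	/* finished before the deadline */
		if (timeout <= 0)	/* was "== 0" before the fix */
			return -ETIME;
		sleep_ms(20);
		timeout -= 20;
	}
}

In the hunks below the same countdown appears twice per file, once for the CP queue in kgd_hqd_destroy() and once for the SDMA queue in kgd_hqd_sdma_destroy(); both also gain the signed "int timeout = utimeout;" working copy.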
Merged by Dave Airlie on 2016-09-28 10:25:42 +10:00 in commit 196ebdcc1d.
9 changed files with 65 additions and 67 deletions.

View File

@@ -103,11 +103,11 @@ static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
uint32_t pipe_id, uint32_t queue_id);
static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
-unsigned int timeout, uint32_t pipe_id,
+unsigned int utimeout, uint32_t pipe_id,
uint32_t queue_id);
static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
-unsigned int timeout);
+unsigned int utimeout);
static int kgd_address_watch_disable(struct kgd_dev *kgd);
static int kgd_address_watch_execute(struct kgd_dev *kgd,
unsigned int watch_point_id,
@@ -437,11 +437,12 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
}
static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
-unsigned int timeout, uint32_t pipe_id,
+unsigned int utimeout, uint32_t pipe_id,
uint32_t queue_id)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t temp;
+int timeout = utimeout;
acquire_queue(kgd, pipe_id, queue_id);
WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, 0);
@@ -452,9 +453,8 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
temp = RREG32(mmCP_HQD_ACTIVE);
if (temp & CP_HQD_ACTIVE__ACTIVE_MASK)
break;
-if (timeout == 0) {
-pr_err("kfd: cp queue preemption time out (%dms)\n",
-temp);
+if (timeout <= 0) {
+pr_err("kfd: cp queue preemption time out.\n");
release_queue(kgd);
return -ETIME;
}
@@ -467,12 +467,13 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
}
static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
-unsigned int timeout)
+unsigned int utimeout)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct cik_sdma_rlc_registers *m;
uint32_t sdma_base_addr;
uint32_t temp;
+int timeout = utimeout;
m = get_sdma_mqd(mqd);
sdma_base_addr = get_sdma_base_addr(m);
@@ -485,7 +486,7 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
if (temp & SDMA0_STATUS_REG__RB_CMD_IDLE__SHIFT)
break;
-if (timeout == 0)
+if (timeout <= 0)
return -ETIME;
msleep(20);
timeout -= 20;

View File

@@ -62,10 +62,10 @@ static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
uint32_t pipe_id, uint32_t queue_id);
static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
-unsigned int timeout, uint32_t pipe_id,
+unsigned int utimeout, uint32_t pipe_id,
uint32_t queue_id);
static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
-unsigned int timeout);
+unsigned int utimeout);
static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid);
static int kgd_address_watch_disable(struct kgd_dev *kgd);
static int kgd_address_watch_execute(struct kgd_dev *kgd,
@@ -349,11 +349,12 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
}
static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
-unsigned int timeout, uint32_t pipe_id,
+unsigned int utimeout, uint32_t pipe_id,
uint32_t queue_id)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
uint32_t temp;
+int timeout = utimeout;
acquire_queue(kgd, pipe_id, queue_id);
@@ -363,9 +364,8 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
temp = RREG32(mmCP_HQD_ACTIVE);
if (temp & CP_HQD_ACTIVE__ACTIVE_MASK)
break;
-if (timeout == 0) {
-pr_err("kfd: cp queue preemption time out (%dms)\n",
-temp);
+if (timeout <= 0) {
+pr_err("kfd: cp queue preemption time out.\n");
release_queue(kgd);
return -ETIME;
}
@@ -378,12 +378,13 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
}
static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
-unsigned int timeout)
+unsigned int utimeout)
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct cik_sdma_rlc_registers *m;
uint32_t sdma_base_addr;
uint32_t temp;
+int timeout = utimeout;
m = get_sdma_mqd(mqd);
sdma_base_addr = get_sdma_base_addr(m);
@@ -396,7 +397,7 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
if (temp & SDMA0_STATUS_REG__RB_CMD_IDLE__SHIFT)
break;
-if (timeout == 0)
+if (timeout <= 0)
return -ETIME;
msleep(20);
timeout -= 20;

View File

@@ -142,14 +142,16 @@ int kfd_doorbell_mmap(struct kfd_process *process, struct vm_area_struct *vma)
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
pr_debug("mapping doorbell page:\n");
pr_debug(" target user address == 0x%08llX\n",
(unsigned long long) vma->vm_start);
pr_debug(" physical address == 0x%08llX\n", address);
pr_debug(" vm_flags == 0x%04lX\n", vma->vm_flags);
pr_debug(" size == 0x%04lX\n",
pr_debug("kfd: mapping doorbell page in %s\n"
" target user address == 0x%08llX\n"
" physical address == 0x%08llX\n"
" vm_flags == 0x%04lX\n"
" size == 0x%04lX\n",
__func__,
(unsigned long long) vma->vm_start, address, vma->vm_flags,
doorbell_process_allocation());
return io_remap_pfn_range(vma,
vma->vm_start,
address >> PAGE_SHIFT,

View File

@@ -47,6 +47,9 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
pr_debug("amdkfd: In func %s initializing queue type %d size %d\n",
__func__, KFD_QUEUE_TYPE_HIQ, queue_size);
+memset(&prop, 0, sizeof(prop));
+memset(&nop, 0, sizeof(nop));
nop.opcode = IT_NOP;
nop.type = PM4_TYPE_3;
nop.u32all |= PM4_COUNT_ZERO;
@@ -121,7 +124,7 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
prop.eop_ring_buffer_address = kq->eop_gpu_addr;
prop.eop_ring_buffer_size = PAGE_SIZE;
-if (init_queue(&kq->queue, prop) != 0)
+if (init_queue(&kq->queue, &prop) != 0)
goto err_init_queue;
kq->queue->device = dev;

View File

@@ -619,7 +619,7 @@ int kfd_init_apertures(struct kfd_process *process);
/* Queue Context Management */
struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd);
-int init_queue(struct queue **q, struct queue_properties properties);
+int init_queue(struct queue **q, const struct queue_properties *properties);
void uninit_queue(struct queue *q);
void print_queue_properties(struct queue_properties *q);
void print_queue(struct queue *q);

View File

@@ -404,26 +404,20 @@ void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid)
{
struct kfd_process *p;
struct kfd_process_device *pdd;
-int idx, i;
BUG_ON(dev == NULL);
-idx = srcu_read_lock(&kfd_processes_srcu);
/*
* Look for the process that matches the pasid. If there is no such
* process, we either released it in amdkfd's own notifier, or there
* is a bug. Unfortunately, there is no way to tell...
*/
-hash_for_each_rcu(kfd_processes_table, i, p, kfd_processes)
-if (p->pasid == pasid) {
-srcu_read_unlock(&kfd_processes_srcu, idx);
+p = kfd_lookup_process_by_pasid(pasid);
+if (!p)
+return;
pr_debug("Unbinding process %d from IOMMU\n", pasid);
mutex_lock(&p->mutex);
if ((dev->dbgmgr) && (dev->dbgmgr->pasid == p->pasid))
kfd_dbgmgr_destroy(dev->dbgmgr);
@@ -451,11 +445,6 @@ void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid)
pdd->bound = false;
mutex_unlock(&p->mutex);
-return;
-}
-srcu_read_unlock(&kfd_processes_srcu, idx);
}
struct kfd_process_device *kfd_get_first_process_device_data(struct kfd_process *p)

View File

@@ -129,7 +129,7 @@ static int create_cp_queue(struct process_queue_manager *pqm,
q_properties->vmid = 0;
q_properties->queue_id = qid;
-retval = init_queue(q, *q_properties);
+retval = init_queue(q, q_properties);
if (retval != 0)
goto err_init_queue;

View File

@@ -63,7 +63,7 @@ void print_queue(struct queue *q)
pr_debug("Queue Device Address: 0x%p\n", q->device);
}
-int init_queue(struct queue **q, struct queue_properties properties)
+int init_queue(struct queue **q, const struct queue_properties *properties)
{
struct queue *tmp;
@@ -73,7 +73,7 @@ int init_queue(struct queue **q, struct queue_properties properties)
if (!tmp)
return -ENOMEM;
-memcpy(&tmp->properties, &properties, sizeof(struct queue_properties));
+memcpy(&tmp->properties, properties, sizeof(struct queue_properties));
*q = tmp;
return 0;

View File

@@ -1090,19 +1090,21 @@ static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu)
{
uint32_t hashout;
uint32_t buf[7];
+uint64_t local_mem_size;
int i;
if (!gpu)
return 0;
+local_mem_size = gpu->kfd2kgd->get_vmem_size(gpu->kgd);
buf[0] = gpu->pdev->devfn;
buf[1] = gpu->pdev->subsystem_vendor;
buf[2] = gpu->pdev->subsystem_device;
buf[3] = gpu->pdev->device;
buf[4] = gpu->pdev->bus->number;
-buf[5] = (uint32_t)(gpu->kfd2kgd->get_vmem_size(gpu->kgd)
-& 0xffffffff);
-buf[6] = (uint32_t)(gpu->kfd2kgd->get_vmem_size(gpu->kgd) >> 32);
+buf[5] = lower_32_bits(local_mem_size);
+buf[6] = upper_32_bits(local_mem_size);
for (i = 0, hashout = 0; i < 7; i++)
hashout ^= hash_32(buf[i], KFD_GPU_ID_HASH_WIDTH);