Merge branch 'drm-next-4.4' of git://people.freedesktop.org/~agd5f/linux into drm-next

More amdgpu and radeon stuff for drm-next.  Stoney support is the big change.
The rest is just bug fixes and code cleanups.  The Stoney stuff is pretty
low impact with respect to existing chips.

* 'drm-next-4.4' of git://people.freedesktop.org/~agd5f/linux:
  drm/amdgpu: change VM size default to 64GB
  drm/amdgpu: add Stoney pci ids
  drm/amdgpu: update the core VI support for Stoney
  drm/amdgpu: add VCE support for Stoney (v2)
  drm/amdgpu: add UVD support for Stoney
  drm/amdgpu: add GFX support for Stoney (v2)
  drm/amdgpu: add SDMA support for Stoney (v2)
  drm/amdgpu: add DCE support for Stoney
  drm/amdgpu: Update SMC/DPM for Stoney
  drm/amdgpu: add GMC support for Stoney
  drm/amdgpu: add Stoney chip family
  drm/amdgpu: fix the broken vm->mutex V2
  drm/amdgpu: remove the unnecessary parameter adev for amdgpu_fence_wait_any()
  drm/amdgpu: remove the exclusive lock
  drm/amdgpu: remove old lockup detection infrastructure
  drm: fix trivial typos
  drm/amdgpu/dce: simplify suspend/resume
  drm/amdgpu/gfx8: set TC_WB_ACTION_EN in RELEASE_MEM packet
  drm/radeon: Use rdev->gem.mutex to protect hyperz/cmask owners
Merged by Dave Airlie on 2015-10-30 09:48:28 +10:00 (commit f1a04d8258).
37 changed files with 617 additions and 352 deletions.

drivers/gpu/drm/amd/amdgpu/amdgpu.h

@@ -345,7 +345,6 @@ struct amdgpu_ring_funcs {
     /* testing functions */
     int (*test_ring)(struct amdgpu_ring *ring);
     int (*test_ib)(struct amdgpu_ring *ring);
-    bool (*is_lockup)(struct amdgpu_ring *ring);
     /* insert NOP packets */
     void (*insert_nop)(struct amdgpu_ring *ring, uint32_t count);
 };
@@ -448,8 +447,7 @@ int amdgpu_fence_wait_next(struct amdgpu_ring *ring);
 int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
 unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
-signed long amdgpu_fence_wait_any(struct amdgpu_device *adev,
-                                  struct fence **array,
+signed long amdgpu_fence_wait_any(struct fence **array,
                                   uint32_t count,
                                   bool intr,
                                   signed long t);
@@ -907,8 +905,6 @@ struct amdgpu_ring {
     unsigned ring_size;
     unsigned ring_free_dw;
     int count_dw;
-    atomic_t last_rptr;
-    atomic64_t last_activity;
     uint64_t gpu_addr;
     uint32_t align_mask;
     uint32_t ptr_mask;
@@ -1230,8 +1226,6 @@ void amdgpu_ring_commit(struct amdgpu_ring *ring);
 void amdgpu_ring_unlock_commit(struct amdgpu_ring *ring);
 void amdgpu_ring_undo(struct amdgpu_ring *ring);
 void amdgpu_ring_unlock_undo(struct amdgpu_ring *ring);
-void amdgpu_ring_lockup_update(struct amdgpu_ring *ring);
-bool amdgpu_ring_test_lockup(struct amdgpu_ring *ring);
 unsigned amdgpu_ring_backup(struct amdgpu_ring *ring,
                             uint32_t **data);
 int amdgpu_ring_restore(struct amdgpu_ring *ring,
@@ -1960,7 +1954,6 @@ struct amdgpu_device {
     struct device *dev;
     struct drm_device *ddev;
     struct pci_dev *pdev;
-    struct rw_semaphore exclusive_lock;
 
     /* ASIC */
     enum amd_asic_type asic_type;
@@ -1974,7 +1967,6 @@ struct amdgpu_device {
     bool suspend;
     bool need_dma32;
     bool accel_working;
-    bool needs_reset;
     struct work_struct reset_work;
     struct notifier_block acpi_nb;
     struct amdgpu_i2c_chan *i2c_bus[AMDGPU_MAX_I2C_BUS];
@@ -2253,7 +2245,6 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
 #define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
 #define amdgpu_ring_test_ib(r) (r)->funcs->test_ib((r))
-#define amdgpu_ring_is_lockup(r) (r)->funcs->is_lockup((r))
 #define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
 #define amdgpu_ring_get_wptr(r) (r)->funcs->get_wptr((r))
 #define amdgpu_ring_set_wptr(r) (r)->funcs->set_wptr((r))

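The amdgpu_fence_wait_any() prototype change above drops the adev argument because the wait loop no longer polls adev->needs_reset (see the amdgpu_fence.c hunks below). A minimal caller-side sketch of the new four-argument form, modelled on the amdgpu_sa.c hunk later in this diff (f stands in for any struct fence pointer the caller holds):

    struct fence *array[1] = { f };  /* f: a struct fence * the caller holds */
    signed long t;
    int r;

    /* Returns remaining jiffies (> 0) on success, 0 on timeout, < 0 on error. */
    t = amdgpu_fence_wait_any(array, 1, false, MAX_SCHEDULE_TIMEOUT);
    r = (t > 0) ? 0 : t;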
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c

@@ -609,7 +609,6 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
         }
     }
 
-    mutex_lock(&vm->mutex);
     r = amdgpu_bo_vm_update_pte(parser, vm);
     if (r) {
         goto out;
@@ -620,7 +619,6 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
                parser->filp);
 
 out:
-    mutex_unlock(&vm->mutex);
     return r;
 }
 
@@ -828,15 +826,14 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 {
     struct amdgpu_device *adev = dev->dev_private;
     union drm_amdgpu_cs *cs = data;
+    struct amdgpu_fpriv *fpriv = filp->driver_priv;
+    struct amdgpu_vm *vm = &fpriv->vm;
     struct amdgpu_cs_parser *parser;
     bool reserved_buffers = false;
     int i, r;
 
-    down_read(&adev->exclusive_lock);
-    if (!adev->accel_working) {
-        up_read(&adev->exclusive_lock);
+    if (!adev->accel_working)
         return -EBUSY;
-    }
 
     parser = amdgpu_cs_parser_create(adev, filp, NULL, NULL, 0);
     if (!parser)
@@ -844,12 +841,11 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
     r = amdgpu_cs_parser_init(parser, data);
     if (r) {
         DRM_ERROR("Failed to initialize parser !\n");
-        kfree(parser);
-        up_read(&adev->exclusive_lock);
+        amdgpu_cs_parser_fini(parser, r, false);
         r = amdgpu_cs_handle_lockup(adev, r);
         return r;
     }
+    mutex_lock(&vm->mutex);
     r = amdgpu_cs_parser_relocs(parser);
     if (r == -ENOMEM)
         DRM_ERROR("Not enough memory for command submission!\n");
@@ -916,14 +912,14 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
         mutex_unlock(&job->job_lock);
         amdgpu_cs_parser_fini_late(parser);
-        up_read(&adev->exclusive_lock);
+        mutex_unlock(&vm->mutex);
         return 0;
     }
 
     cs->out.handle = parser->ibs[parser->num_ibs - 1].sequence;
 out:
     amdgpu_cs_parser_fini(parser, r, reserved_buffers);
-    up_read(&adev->exclusive_lock);
+    mutex_unlock(&vm->mutex);
     r = amdgpu_cs_handle_lockup(adev, r);
     return r;
 }

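Taken together, the amdgpu_cs.c hunks replace the device-wide exclusive_lock with the per-VM mutex, held from relocation through submission. A condensed sketch of the resulting amdgpu_cs_ioctl() flow (not a verbatim excerpt; error handling elided):

    if (!adev->accel_working)
        return -EBUSY;
    parser = amdgpu_cs_parser_create(adev, filp, NULL, NULL, 0);
    /* ... amdgpu_cs_parser_init() ... */
    mutex_lock(&vm->mutex);      /* per-VM scope, not device-wide */
    r = amdgpu_cs_parser_relocs(parser);
    /* ... validate and submit the IBs ... */
    mutex_unlock(&vm->mutex);    /* on both the success and error paths */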
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c

@@ -57,6 +57,7 @@ static const char *amdgpu_asic_name[] = {
     "TONGA",
     "FIJI",
     "CARRIZO",
+    "STONEY",
     "LAST",
 };
 
@@ -1165,7 +1166,8 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
     case CHIP_TONGA:
     case CHIP_FIJI:
     case CHIP_CARRIZO:
-        if (adev->asic_type == CHIP_CARRIZO)
+    case CHIP_STONEY:
+        if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
             adev->family = AMDGPU_FAMILY_CZ;
         else
             adev->family = AMDGPU_FAMILY_VI;
@@ -1418,7 +1420,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
     mutex_init(&adev->gfx.gpu_clock_mutex);
     mutex_init(&adev->srbm_mutex);
     mutex_init(&adev->grbm_idx_mutex);
-    init_rwsem(&adev->exclusive_lock);
     mutex_init(&adev->mn_lock);
     hash_init(adev->mn_hash);
 
@@ -1814,14 +1815,6 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
     int i, r;
     int resched;
 
-    down_write(&adev->exclusive_lock);
-
-    if (!adev->needs_reset) {
-        up_write(&adev->exclusive_lock);
-        return 0;
-    }
-
-    adev->needs_reset = false;
     atomic_inc(&adev->gpu_reset_counter);
 
     /* block TTM */
@@ -1885,7 +1878,6 @@ retry:
         dev_info(adev->dev, "GPU reset failed\n");
     }
 
-    up_write(&adev->exclusive_lock);
     return r;
 }

drivers/gpu/drm/amd/amdgpu/amdgpu_display.c

@@ -47,11 +47,8 @@ static void amdgpu_flip_wait_fence(struct amdgpu_device *adev,
     fence = to_amdgpu_fence(*f);
     if (fence) {
         r = fence_wait(&fence->base, false);
-        if (r == -EDEADLK) {
-            up_read(&adev->exclusive_lock);
+        if (r == -EDEADLK)
             r = amdgpu_gpu_reset(adev);
-            down_read(&adev->exclusive_lock);
-        }
     } else
         r = fence_wait(*f, false);
 
@@ -77,7 +74,6 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
     unsigned long flags;
     unsigned i;
 
-    down_read(&adev->exclusive_lock);
     amdgpu_flip_wait_fence(adev, &work->excl);
     for (i = 0; i < work->shared_count; ++i)
         amdgpu_flip_wait_fence(adev, &work->shared[i]);
@@ -93,7 +89,6 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
     amdgpuCrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
     spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
 
-    up_read(&adev->exclusive_lock);
 }
 
 /*

drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c

@@ -73,7 +73,7 @@ int amdgpu_hard_reset = 0;
 unsigned amdgpu_ip_block_mask = 0xffffffff;
 int amdgpu_bapm = -1;
 int amdgpu_deep_color = 0;
-int amdgpu_vm_size = 8;
+int amdgpu_vm_size = 64;
 int amdgpu_vm_block_size = -1;
 int amdgpu_vm_fault_stop = 0;
 int amdgpu_vm_debug = 0;
@@ -137,7 +137,7 @@ module_param_named(bapm, amdgpu_bapm, int, 0444);
 MODULE_PARM_DESC(deep_color, "Deep Color support (1 = enable, 0 = disable (default))");
 module_param_named(deep_color, amdgpu_deep_color, int, 0444);
 
-MODULE_PARM_DESC(vm_size, "VM address space size in gigabytes (default 8GB)");
+MODULE_PARM_DESC(vm_size, "VM address space size in gigabytes (default 64GB)");
 module_param_named(vm_size, amdgpu_vm_size, int, 0444);
 
 MODULE_PARM_DESC(vm_block_size, "VM page table size in bits (default depending on vm_size)");
@@ -273,6 +273,8 @@ static struct pci_device_id pciidlist[] = {
     {0x1002, 0x9875, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU},
     {0x1002, 0x9876, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU},
     {0x1002, 0x9877, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU},
+    /* stoney */
+    {0x1002, 0x98E4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_STONEY|AMD_IS_APU},
 
     {0, 0, 0}
 };

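Note on the default bump above: vm_size remains an ordinary read-only module parameter (mode 0444), so the old 8 GB address space can still be requested explicitly, e.g. with amdgpu.vm_size=8 on the kernel command line.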
drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c

@@ -260,27 +260,8 @@ static void amdgpu_fence_check_lockup(struct work_struct *work)
             lockup_work.work);
     ring = fence_drv->ring;
 
-    if (!down_read_trylock(&ring->adev->exclusive_lock)) {
-        /* just reschedule the check if a reset is going on */
-        amdgpu_fence_schedule_check(ring);
-        return;
-    }
-
-    if (amdgpu_fence_activity(ring)) {
+    if (amdgpu_fence_activity(ring))
         wake_up_all(&ring->fence_drv.fence_queue);
-    }
-    else if (amdgpu_ring_is_lockup(ring)) {
-        /* good news we believe it's a lockup */
-        dev_warn(ring->adev->dev, "GPU lockup (current fence id "
-             "0x%016llx last fence id 0x%016llx on ring %d)\n",
-             (uint64_t)atomic64_read(&fence_drv->last_seq),
-             fence_drv->sync_seq[ring->idx], ring->idx);
-        /* remember that we need an reset */
-        ring->adev->needs_reset = true;
-        wake_up_all(&ring->fence_drv.fence_queue);
-    }
-    up_read(&ring->adev->exclusive_lock);
 }
 
 /**
@@ -328,18 +309,15 @@ static bool amdgpu_fence_is_signaled(struct fence *f)
 {
     struct amdgpu_fence *fence = to_amdgpu_fence(f);
     struct amdgpu_ring *ring = fence->ring;
-    struct amdgpu_device *adev = ring->adev;
 
     if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
         return true;
 
-    if (down_read_trylock(&adev->exclusive_lock)) {
-        amdgpu_fence_process(ring);
-        up_read(&adev->exclusive_lock);
+    amdgpu_fence_process(ring);
 
-        if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
-            return true;
-    }
+    if (atomic64_read(&ring->fence_drv.last_seq) >= fence->seq)
+        return true;
 
     return false;
 }
@@ -380,7 +358,6 @@ static bool amdgpu_fence_enable_signaling(struct fence *f)
  */
 static int amdgpu_fence_ring_wait_seq(struct amdgpu_ring *ring, uint64_t seq)
 {
-    struct amdgpu_device *adev = ring->adev;
     bool signaled = false;
 
     BUG_ON(!ring);
@@ -391,8 +368,7 @@ static int amdgpu_fence_ring_wait_seq(struct amdgpu_ring *ring, uint64_t seq)
         return 0;
 
     wait_event(ring->fence_drv.fence_queue, (
-           (signaled = amdgpu_fence_seq_signaled(ring, seq))
-           || adev->needs_reset));
+           (signaled = amdgpu_fence_seq_signaled(ring, seq))));
 
     if (signaled)
         return 0;
@@ -881,16 +857,12 @@ static void amdgpu_fence_wait_cb(struct fence *fence, struct fence_cb *cb)
 static signed long amdgpu_fence_default_wait(struct fence *f, bool intr,
                                              signed long t)
 {
-    struct amdgpu_fence *fence = to_amdgpu_fence(f);
-    struct amdgpu_device *adev = fence->ring->adev;
-
-    return amdgpu_fence_wait_any(adev, &f, 1, intr, t);
+    return amdgpu_fence_wait_any(&f, 1, intr, t);
 }
 
 /**
  * Wait the fence array with timeout
  *
- * @adev: amdgpu device
  * @array: the fence array with amdgpu fence pointer
 * @count: the number of the fence array
 * @intr: when sleep, set the current task interruptable or not
@@ -898,8 +870,7 @@ static signed long amdgpu_fence_default_wait(struct fence *f, bool intr,
 *
 * It will return when any fence is signaled or timeout.
 */
-signed long amdgpu_fence_wait_any(struct amdgpu_device *adev,
-                                  struct fence **array, uint32_t count,
+signed long amdgpu_fence_wait_any(struct fence **array, uint32_t count,
                                   bool intr, signed long t)
 {
     struct amdgpu_wait_cb *cb;
@@ -939,11 +910,6 @@ signed long amdgpu_fence_wait_any(struct amdgpu_device *adev,
         if (amdgpu_test_signaled_any(array, count))
             break;
 
-        if (adev->needs_reset) {
-            t = -EDEADLK;
-            break;
-        }
-
         t = schedule_timeout(t);
 
         if (t > 0 && intr && signal_pending(current))

drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c

@@ -115,9 +115,10 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_pri
     struct amdgpu_vm *vm = &fpriv->vm;
     struct amdgpu_bo_va *bo_va;
     int r;
+    mutex_lock(&vm->mutex);
     r = amdgpu_bo_reserve(rbo, false);
     if (r) {
+        mutex_unlock(&vm->mutex);
         return r;
     }
 
@@ -128,7 +129,7 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_pri
         ++bo_va->ref_count;
     }
     amdgpu_bo_unreserve(rbo);
-
+    mutex_unlock(&vm->mutex);
     return 0;
 }
 
@@ -141,9 +142,10 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
     struct amdgpu_vm *vm = &fpriv->vm;
     struct amdgpu_bo_va *bo_va;
     int r;
+    mutex_lock(&vm->mutex);
     r = amdgpu_bo_reserve(rbo, true);
     if (r) {
+        mutex_unlock(&vm->mutex);
         dev_err(adev->dev, "leaking bo va because "
             "we fail to reserve bo (%d)\n", r);
         return;
@@ -155,6 +157,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
         }
     }
     amdgpu_bo_unreserve(rbo);
+    mutex_unlock(&vm->mutex);
 }
 
 static int amdgpu_gem_handle_lockup(struct amdgpu_device *adev, int r)
@@ -181,7 +184,6 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
     bool kernel = false;
     int r;
 
-    down_read(&adev->exclusive_lock);
     /* create a gem object to contain this object in */
     if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
         AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
@@ -214,11 +216,9 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
     memset(args, 0, sizeof(*args));
     args->out.handle = handle;
-    up_read(&adev->exclusive_lock);
     return 0;
 
 error_unlock:
-    up_read(&adev->exclusive_lock);
     r = amdgpu_gem_handle_lockup(adev, r);
     return r;
 }
@@ -250,8 +250,6 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
         return -EACCES;
     }
 
-    down_read(&adev->exclusive_lock);
-
     /* create a gem object to contain this object in */
     r = amdgpu_gem_object_create(adev, args->size, 0,
                                  AMDGPU_GEM_DOMAIN_CPU, 0,
@@ -293,14 +291,12 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
         goto handle_lockup;
 
     args->handle = handle;
-    up_read(&adev->exclusive_lock);
     return 0;
 
 release_object:
     drm_gem_object_unreference_unlocked(gobj);
 
 handle_lockup:
-    up_read(&adev->exclusive_lock);
     r = amdgpu_gem_handle_lockup(adev, r);
 
     return r;
@@ -488,18 +484,13 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
         goto error_unreserve;
     }
 
-    mutex_lock(&bo_va->vm->mutex);
     r = amdgpu_vm_clear_freed(adev, bo_va->vm);
     if (r)
-        goto error_unlock;
+        goto error_unreserve;
 
     if (operation == AMDGPU_VA_OP_MAP)
         r = amdgpu_vm_bo_update(adev, bo_va, &bo_va->bo->tbo.mem);
 
-error_unlock:
-    mutex_unlock(&bo_va->vm->mutex);
-
 error_unreserve:
     ttm_eu_backoff_reservation(&ticket, &list);
 
@@ -556,10 +547,11 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
     gobj = drm_gem_object_lookup(dev, filp, args->handle);
     if (gobj == NULL)
         return -ENOENT;
+    mutex_lock(&fpriv->vm.mutex);
     rbo = gem_to_amdgpu_bo(gobj);
     r = amdgpu_bo_reserve(rbo, false);
     if (r) {
+        mutex_unlock(&fpriv->vm.mutex);
         drm_gem_object_unreference_unlocked(gobj);
         return r;
     }
@@ -567,6 +559,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
     bo_va = amdgpu_vm_bo_find(&fpriv->vm, rbo);
     if (!bo_va) {
         amdgpu_bo_unreserve(rbo);
+        mutex_unlock(&fpriv->vm.mutex);
         return -ENOENT;
     }
 
@@ -591,7 +584,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
     if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE))
         amdgpu_gem_va_update_vm(adev, bo_va, args->operation);
-
+    mutex_unlock(&fpriv->vm.mutex);
     drm_gem_object_unreference_unlocked(gobj);
     return r;
 }

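The amdgpu_gem.c hunks above are the caller side of the same locking rework: vm->mutex is now taken around the reserve/lookup/update sequence, while the amdgpu_vm_* helpers shed their internal locking (see the amdgpu_vm.c hunks below). A sketch of the new convention, condensed from the hunks above:

    mutex_lock(&vm->mutex);
    r = amdgpu_bo_reserve(rbo, false);
    if (r) {
        mutex_unlock(&vm->mutex);
        return r;
    }
    bo_va = amdgpu_vm_bo_find(vm, rbo);
    /* ... map/unmap/update through the now lock-free amdgpu_vm_* helpers ... */
    amdgpu_bo_unreserve(rbo);
    mutex_unlock(&vm->mutex);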
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c

@@ -298,7 +298,6 @@ int amdgpu_ib_ring_tests(struct amdgpu_device *adev)
         r = amdgpu_ring_test_ib(ring);
         if (r) {
             ring->ready = false;
-            adev->needs_reset = false;
 
             if (ring == &adev->gfx.gfx_ring[0]) {
                 /* oh, oh, that's really bad */

drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c

@@ -67,8 +67,6 @@ void amdgpu_ring_free_size(struct amdgpu_ring *ring)
     if (!ring->ring_free_dw) {
         /* this is an empty ring */
         ring->ring_free_dw = ring->ring_size / 4;
-        /* update lockup info to avoid false positive */
-        amdgpu_ring_lockup_update(ring);
     }
 }
 
@@ -208,46 +206,6 @@ void amdgpu_ring_unlock_undo(struct amdgpu_ring *ring)
     mutex_unlock(ring->ring_lock);
 }
 
-/**
- * amdgpu_ring_lockup_update - update lockup variables
- *
- * @ring: amdgpu_ring structure holding ring information
- *
- * Update the last rptr value and timestamp (all asics).
- */
-void amdgpu_ring_lockup_update(struct amdgpu_ring *ring)
-{
-    atomic_set(&ring->last_rptr, amdgpu_ring_get_rptr(ring));
-    atomic64_set(&ring->last_activity, jiffies_64);
-}
-
-/**
- * amdgpu_ring_test_lockup() - check if ring is lockedup by recording information
- * @ring: amdgpu_ring structure holding ring information
- *
- */
-bool amdgpu_ring_test_lockup(struct amdgpu_ring *ring)
-{
-    uint32_t rptr = amdgpu_ring_get_rptr(ring);
-    uint64_t last = atomic64_read(&ring->last_activity);
-    uint64_t elapsed;
-
-    if (rptr != atomic_read(&ring->last_rptr)) {
-        /* ring is still working, no lockup */
-        amdgpu_ring_lockup_update(ring);
-        return false;
-    }
-
-    elapsed = jiffies_to_msecs(jiffies_64 - last);
-    if (amdgpu_lockup_timeout && elapsed >= amdgpu_lockup_timeout) {
-        dev_err(ring->adev->dev, "ring %d stalled for more than %llumsec\n",
-            ring->idx, elapsed);
-        return true;
-    }
-    /* give a chance to the GPU ... */
-    return false;
-}
-
 /**
  * amdgpu_ring_backup - Back up the content of a ring
  *
@@ -436,7 +394,6 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
     if (amdgpu_debugfs_ring_init(adev, ring)) {
         DRM_ERROR("Failed to register debugfs file for rings !\n");
     }
-    amdgpu_ring_lockup_update(ring);
 
     return 0;
 }

drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c

@@ -372,7 +372,7 @@ int amdgpu_sa_bo_new(struct amdgpu_device *adev,
         } while (amdgpu_sa_bo_next_hole(sa_manager, fences, tries));
 
         spin_unlock(&sa_manager->wq.lock);
-        t = amdgpu_fence_wait_any(adev, fences, AMDGPU_MAX_RINGS,
+        t = amdgpu_fence_wait_any(fences, AMDGPU_MAX_RINGS,
                                   false, MAX_SCHEDULE_TIMEOUT);
         r = (t > 0) ? 0 : t;
         spin_lock(&sa_manager->wq.lock);

drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c

@@ -53,6 +53,7 @@
 #define FIRMWARE_TONGA "amdgpu/tonga_uvd.bin"
 #define FIRMWARE_CARRIZO "amdgpu/carrizo_uvd.bin"
 #define FIRMWARE_FIJI "amdgpu/fiji_uvd.bin"
+#define FIRMWARE_STONEY "amdgpu/stoney_uvd.bin"
 
 /**
  * amdgpu_uvd_cs_ctx - Command submission parser context
@@ -83,6 +84,7 @@ MODULE_FIRMWARE(FIRMWARE_MULLINS);
 MODULE_FIRMWARE(FIRMWARE_TONGA);
 MODULE_FIRMWARE(FIRMWARE_CARRIZO);
 MODULE_FIRMWARE(FIRMWARE_FIJI);
+MODULE_FIRMWARE(FIRMWARE_STONEY);
 
 static void amdgpu_uvd_note_usage(struct amdgpu_device *adev);
 static void amdgpu_uvd_idle_work_handler(struct work_struct *work);
@@ -124,6 +126,9 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
     case CHIP_CARRIZO:
         fw_name = FIRMWARE_CARRIZO;
         break;
+    case CHIP_STONEY:
+        fw_name = FIRMWARE_STONEY;
+        break;
     default:
         return -EINVAL;
     }

drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c

@@ -49,6 +49,7 @@
 #define FIRMWARE_TONGA "amdgpu/tonga_vce.bin"
 #define FIRMWARE_CARRIZO "amdgpu/carrizo_vce.bin"
 #define FIRMWARE_FIJI "amdgpu/fiji_vce.bin"
+#define FIRMWARE_STONEY "amdgpu/stoney_vce.bin"
 
 #ifdef CONFIG_DRM_AMDGPU_CIK
 MODULE_FIRMWARE(FIRMWARE_BONAIRE);
@@ -60,6 +61,7 @@ MODULE_FIRMWARE(FIRMWARE_MULLINS);
 MODULE_FIRMWARE(FIRMWARE_TONGA);
 MODULE_FIRMWARE(FIRMWARE_CARRIZO);
 MODULE_FIRMWARE(FIRMWARE_FIJI);
+MODULE_FIRMWARE(FIRMWARE_STONEY);
 
 static void amdgpu_vce_idle_work_handler(struct work_struct *work);
 
@@ -106,6 +108,9 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
     case CHIP_FIJI:
         fw_name = FIRMWARE_FIJI;
         break;
+    case CHIP_STONEY:
+        fw_name = FIRMWARE_STONEY;
+        break;
     default:
         return -EINVAL;

drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c

@@ -90,11 +90,9 @@ struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
     struct amdgpu_bo_list_entry *list;
     unsigned i, idx;
 
-    mutex_lock(&vm->mutex);
     list = drm_malloc_ab(vm->max_pde_used + 2,
                          sizeof(struct amdgpu_bo_list_entry));
     if (!list) {
-        mutex_unlock(&vm->mutex);
         return NULL;
     }
 
@@ -119,7 +117,6 @@ struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
         list[idx].tv.shared = true;
         list_add(&list[idx++].tv.head, head);
     }
-    mutex_unlock(&vm->mutex);
 
     return list;
 }
 
@@ -972,9 +969,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
     INIT_LIST_HEAD(&bo_va->invalids);
     INIT_LIST_HEAD(&bo_va->vm_status);
 
-    mutex_lock(&vm->mutex);
     list_add_tail(&bo_va->bo_list, &bo->va);
-    mutex_unlock(&vm->mutex);
 
     return bo_va;
 }
 
@@ -1027,8 +1022,6 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
         return -EINVAL;
     }
 
-    mutex_lock(&vm->mutex);
-
     saddr /= AMDGPU_GPU_PAGE_SIZE;
     eaddr /= AMDGPU_GPU_PAGE_SIZE;
 
@@ -1042,14 +1035,14 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
             tmp->it.start, tmp->it.last + 1);
         amdgpu_bo_unreserve(bo_va->bo);
         r = -EINVAL;
-        goto error_unlock;
+        goto error;
     }
 
     mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
     if (!mapping) {
         amdgpu_bo_unreserve(bo_va->bo);
         r = -ENOMEM;
-        goto error_unlock;
+        goto error;
     }
 
     INIT_LIST_HEAD(&mapping->list);
@@ -1081,9 +1074,6 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
         if (vm->page_tables[pt_idx].bo)
             continue;
 
-        /* drop mutex to allocate and clear page table */
-        mutex_unlock(&vm->mutex);
-
         ww_mutex_lock(&resv->lock, NULL);
         r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
                              AMDGPU_GPU_PAGE_SIZE, true,
@@ -1100,32 +1090,19 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
             goto error_free;
         }
 
-        /* aquire mutex again */
-        mutex_lock(&vm->mutex);
-        if (vm->page_tables[pt_idx].bo) {
-            /* someone else allocated the pt in the meantime */
-            mutex_unlock(&vm->mutex);
-            amdgpu_bo_unref(&pt);
-            mutex_lock(&vm->mutex);
-            continue;
-        }
-
         vm->page_tables[pt_idx].addr = 0;
         vm->page_tables[pt_idx].bo = pt;
     }
 
-    mutex_unlock(&vm->mutex);
     return 0;
 
 error_free:
-    mutex_lock(&vm->mutex);
     list_del(&mapping->list);
     interval_tree_remove(&mapping->it, &vm->va);
     trace_amdgpu_vm_bo_unmap(bo_va, mapping);
     kfree(mapping);
 
-error_unlock:
-    mutex_unlock(&vm->mutex);
+error:
     return r;
 }
 
@@ -1170,7 +1147,6 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
         }
     }
 
-    mutex_lock(&vm->mutex);
     list_del(&mapping->list);
     interval_tree_remove(&mapping->it, &vm->va);
     trace_amdgpu_vm_bo_unmap(bo_va, mapping);
@@ -1179,7 +1155,6 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
         list_add(&mapping->list, &vm->freed);
     else
         kfree(mapping);
-    mutex_unlock(&vm->mutex);
     amdgpu_bo_unreserve(bo_va->bo);
 
     return 0;
@@ -1203,8 +1178,6 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
 
     list_del(&bo_va->bo_list);
 
-    mutex_lock(&vm->mutex);
-
     spin_lock(&vm->status_lock);
     list_del(&bo_va->vm_status);
     spin_unlock(&vm->status_lock);
@@ -1223,8 +1196,6 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
     fence_put(bo_va->last_pt_update);
     kfree(bo_va);
-
-    mutex_unlock(&vm->mutex);
 }
 
 /**

drivers/gpu/drm/amd/amdgpu/cik_sdma.c

@@ -1290,24 +1290,6 @@ const struct amd_ip_funcs cik_sdma_ip_funcs = {
     .set_powergating_state = cik_sdma_set_powergating_state,
 };
 
-/**
- * cik_sdma_ring_is_lockup - Check if the DMA engine is locked up
- *
- * @ring: amdgpu_ring structure holding ring information
- *
- * Check if the async DMA engine is locked up (CIK).
- * Returns true if the engine appears to be locked up, false if not.
- */
-static bool cik_sdma_ring_is_lockup(struct amdgpu_ring *ring)
-{
-    if (cik_sdma_is_idle(ring->adev)) {
-        amdgpu_ring_lockup_update(ring);
-        return false;
-    }
-    return amdgpu_ring_test_lockup(ring);
-}
-
 static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = {
     .get_rptr = cik_sdma_ring_get_rptr,
     .get_wptr = cik_sdma_ring_get_wptr,
@@ -1320,7 +1302,6 @@ static const struct amdgpu_ring_funcs cik_sdma_ring_funcs = {
     .emit_hdp_flush = cik_sdma_ring_emit_hdp_flush,
     .test_ring = cik_sdma_ring_test_ring,
     .test_ib = cik_sdma_ring_test_ib,
-    .is_lockup = cik_sdma_ring_is_lockup,
     .insert_nop = cik_sdma_ring_insert_nop,
 };

drivers/gpu/drm/amd/amdgpu/cz_dpm.c

@@ -1264,6 +1264,7 @@ static void cz_apply_state_adjust_rules(struct amdgpu_device *adev,
 
 static int cz_dpm_enable(struct amdgpu_device *adev)
 {
+    const char *chip_name;
     int ret = 0;
 
     /* renable will hang up SMU, so check first */
@@ -1272,21 +1273,33 @@ static int cz_dpm_enable(struct amdgpu_device *adev)
 
     cz_program_voting_clients(adev);
 
+    switch (adev->asic_type) {
+    case CHIP_CARRIZO:
+        chip_name = "carrizo";
+        break;
+    case CHIP_STONEY:
+        chip_name = "stoney";
+        break;
+    default:
+        BUG();
+    }
+
     ret = cz_start_dpm(adev);
     if (ret) {
-        DRM_ERROR("Carrizo DPM enable failed\n");
+        DRM_ERROR("%s DPM enable failed\n", chip_name);
         return -EINVAL;
     }
 
     ret = cz_program_bootup_state(adev);
     if (ret) {
-        DRM_ERROR("Carrizo bootup state program failed\n");
+        DRM_ERROR("%s bootup state program failed\n", chip_name);
         return -EINVAL;
     }
 
     ret = cz_enable_didt(adev, true);
     if (ret) {
-        DRM_ERROR("Carrizo enable di/dt failed\n");
+        DRM_ERROR("%s enable di/dt failed\n", chip_name);
         return -EINVAL;
     }
@@ -1353,7 +1366,7 @@ static int cz_dpm_disable(struct amdgpu_device *adev)
     ret = cz_enable_didt(adev, false);
     if (ret) {
-        DRM_ERROR("Carrizo disable di/dt failed\n");
+        DRM_ERROR("disable di/dt failed\n");
         return -EINVAL;
     }

drivers/gpu/drm/amd/amdgpu/cz_smc.c

@@ -312,13 +312,16 @@ int cz_smu_start(struct amdgpu_device *adev)
                 UCODE_ID_CP_MEC_JT1_MASK |
                 UCODE_ID_CP_MEC_JT2_MASK;
 
+    if (adev->asic_type == CHIP_STONEY)
+        fw_to_check &= ~(UCODE_ID_SDMA1_MASK | UCODE_ID_CP_MEC_JT2_MASK);
+
     cz_smu_request_load_fw(adev);
     ret = cz_smu_check_fw_load_finish(adev, fw_to_check);
     if (ret)
         return ret;
 
     /* manually load MEC firmware for CZ */
-    if (adev->asic_type == CHIP_CARRIZO) {
+    if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY) {
         ret = cz_load_mec_firmware(adev);
         if (ret) {
             dev_err(adev->dev, "(%d) Mec Firmware load failed\n", ret);
@@ -336,6 +339,9 @@ int cz_smu_start(struct amdgpu_device *adev)
                 AMDGPU_CPMEC2_UCODE_LOADED |
                 AMDGPU_CPRLC_UCODE_LOADED;
 
+    if (adev->asic_type == CHIP_STONEY)
+        adev->smu.fw_flags &= ~(AMDGPU_SDMA1_UCODE_LOADED | AMDGPU_CPMEC2_UCODE_LOADED);
+
     return ret;
 }
 
@@ -601,8 +607,13 @@ static int cz_smu_construct_toc_for_vddgfx_exit(struct amdgpu_device *adev)
             CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
         cz_smu_populate_single_ucode_load_task(adev,
             CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
-        cz_smu_populate_single_ucode_load_task(adev,
-            CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
+        if (adev->asic_type == CHIP_STONEY) {
+            cz_smu_populate_single_ucode_load_task(adev,
+                CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
+        } else {
+            cz_smu_populate_single_ucode_load_task(adev,
+                CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
+        }
         cz_smu_populate_single_ucode_load_task(adev,
             CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, false);
     }
@@ -642,8 +653,13 @@ static int cz_smu_construct_toc_for_bootup(struct amdgpu_device *adev)
     if (adev->firmware.smu_load) {
         cz_smu_populate_single_ucode_load_task(adev,
             CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false);
-        cz_smu_populate_single_ucode_load_task(adev,
-            CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1, false);
+        if (adev->asic_type == CHIP_STONEY) {
+            cz_smu_populate_single_ucode_load_task(adev,
+                CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false);
+        } else {
+            cz_smu_populate_single_ucode_load_task(adev,
+                CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1, false);
+        }
         cz_smu_populate_single_ucode_load_task(adev,
             CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
         cz_smu_populate_single_ucode_load_task(adev,
@@ -652,8 +668,13 @@ static int cz_smu_construct_toc_for_bootup(struct amdgpu_device *adev)
             CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
         cz_smu_populate_single_ucode_load_task(adev,
             CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
-        cz_smu_populate_single_ucode_load_task(adev,
-            CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
+        if (adev->asic_type == CHIP_STONEY) {
+            cz_smu_populate_single_ucode_load_task(adev,
+                CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
+        } else {
+            cz_smu_populate_single_ucode_load_task(adev,
+                CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
+        }
         cz_smu_populate_single_ucode_load_task(adev,
             CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, true);
     }
@@ -888,10 +909,18 @@ int cz_smu_init(struct amdgpu_device *adev)
                 CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0,
                 &priv->driver_buffer[priv->driver_buffer_length++]))
         goto smu_init_failed;
-    if (cz_smu_populate_single_firmware_entry(adev,
-                CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1,
-                &priv->driver_buffer[priv->driver_buffer_length++]))
-        goto smu_init_failed;
+
+    if (adev->asic_type == CHIP_STONEY) {
+        if (cz_smu_populate_single_firmware_entry(adev,
+                    CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0,
+                    &priv->driver_buffer[priv->driver_buffer_length++]))
+            goto smu_init_failed;
+    } else {
+        if (cz_smu_populate_single_firmware_entry(adev,
+                    CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1,
+                    &priv->driver_buffer[priv->driver_buffer_length++]))
+            goto smu_init_failed;
+    }
     if (cz_smu_populate_single_firmware_entry(adev,
                 CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE,
                 &priv->driver_buffer[priv->driver_buffer_length++]))
@@ -908,10 +937,17 @@ int cz_smu_init(struct amdgpu_device *adev)
                 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1,
                 &priv->driver_buffer[priv->driver_buffer_length++]))
         goto smu_init_failed;
-    if (cz_smu_populate_single_firmware_entry(adev,
-                CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2,
-                &priv->driver_buffer[priv->driver_buffer_length++]))
-        goto smu_init_failed;
+    if (adev->asic_type == CHIP_STONEY) {
+        if (cz_smu_populate_single_firmware_entry(adev,
+                    CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1,
+                    &priv->driver_buffer[priv->driver_buffer_length++]))
+            goto smu_init_failed;
+    } else {
+        if (cz_smu_populate_single_firmware_entry(adev,
+                    CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2,
+                    &priv->driver_buffer[priv->driver_buffer_length++]))
+            goto smu_init_failed;
+    }
     if (cz_smu_populate_single_firmware_entry(adev,
                 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G,
                 &priv->driver_buffer[priv->driver_buffer_length++]))

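The common thread in the cz_smc.c hunks above: for Stoney the SDMA1 and CP_MEC_JT2 firmware entries are either masked out or replaced with a second SDMA0/JT1 entry, which suggests Stoney is a cut-down part with a single SDMA instance and a single MEC.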
drivers/gpu/drm/amd/amdgpu/dce_v10_0.c

@@ -3086,22 +3086,18 @@ static int dce_v10_0_suspend(void *handle)
     amdgpu_atombios_scratch_regs_save(adev);
 
-    dce_v10_0_hpd_fini(adev);
-
-    return 0;
+    return dce_v10_0_hw_fini(handle);
 }
 
 static int dce_v10_0_resume(void *handle)
 {
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+    int ret;
 
-    dce_v10_0_init_golden_registers(adev);
+    ret = dce_v10_0_hw_init(handle);
 
     amdgpu_atombios_scratch_regs_restore(adev);
 
-    /* init dig PHYs, disp eng pll */
-    amdgpu_atombios_encoder_init_dig(adev);
-    amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
-
     /* turn on the BL */
     if (adev->mode_info.bl_encoder) {
         u8 bl_level = amdgpu_display_backlight_get_level(adev,
@@ -3110,10 +3106,7 @@ static int dce_v10_0_resume(void *handle)
                     bl_level);
     }
 
-    /* initialize hpd */
-    dce_v10_0_hpd_init(adev);
-
-    return 0;
+    return ret;
 }
 
 static bool dce_v10_0_is_idle(void *handle)

drivers/gpu/drm/amd/amdgpu/dce_v11_0.c

@@ -126,6 +126,13 @@ static const u32 cz_mgcg_cgcg_init[] =
     mmXDMA_MEM_POWER_CNTL, 0x00000101, 0x00000000,
 };
 
+static const u32 stoney_golden_settings_a11[] =
+{
+    mmCRTC_DOUBLE_BUFFER_CONTROL, 0x00010101, 0x00010000,
+    mmFBC_MISC, 0x1f311fff, 0x14302000,
+};
+
 static void dce_v11_0_init_golden_registers(struct amdgpu_device *adev)
 {
     switch (adev->asic_type) {
@@ -137,6 +144,11 @@ static void dce_v11_0_init_golden_registers(struct amdgpu_device *adev)
                          cz_golden_settings_a11,
                          (const u32)ARRAY_SIZE(cz_golden_settings_a11));
         break;
+    case CHIP_STONEY:
+        amdgpu_program_register_sequence(adev,
+                         stoney_golden_settings_a11,
+                         (const u32)ARRAY_SIZE(stoney_golden_settings_a11));
+        break;
     default:
         break;
     }
@@ -2425,7 +2437,7 @@ static u32 dce_v11_0_pick_pll(struct drm_crtc *crtc)
     /* XXX need to determine what plls are available on each DCE11 part */
     pll_in_use = amdgpu_pll_get_use_mask(crtc);
-    if (adev->asic_type == CHIP_CARRIZO) {
+    if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY) {
         if (!(pll_in_use & (1 << ATOM_PPLL1)))
             return ATOM_PPLL1;
         if (!(pll_in_use & (1 << ATOM_PPLL0)))
@@ -2930,6 +2942,11 @@ static int dce_v11_0_early_init(void *handle)
         adev->mode_info.num_hpd = 6;
         adev->mode_info.num_dig = 9;
         break;
+    case CHIP_STONEY:
+        adev->mode_info.num_crtc = 2;
+        adev->mode_info.num_hpd = 6;
+        adev->mode_info.num_dig = 9;
+        break;
     default:
         /* FIXME: not supported yet */
         return -EINVAL;
@@ -3028,6 +3045,7 @@ static int dce_v11_0_hw_init(void *handle)
     dce_v11_0_init_golden_registers(adev);
 
     /* init dig PHYs, disp eng pll */
+    amdgpu_atombios_crtc_powergate_init(adev);
     amdgpu_atombios_encoder_init_dig(adev);
     amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
 
@@ -3061,23 +3079,18 @@ static int dce_v11_0_suspend(void *handle)
     amdgpu_atombios_scratch_regs_save(adev);
 
-    dce_v11_0_hpd_fini(adev);
-
-    return 0;
+    return dce_v11_0_hw_fini(handle);
 }
 
 static int dce_v11_0_resume(void *handle)
 {
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+    int ret;
 
-    dce_v11_0_init_golden_registers(adev);
+    ret = dce_v11_0_hw_init(handle);
 
     amdgpu_atombios_scratch_regs_restore(adev);
 
-    /* init dig PHYs, disp eng pll */
-    amdgpu_atombios_crtc_powergate_init(adev);
-    amdgpu_atombios_encoder_init_dig(adev);
-    amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
-
     /* turn on the BL */
     if (adev->mode_info.bl_encoder) {
         u8 bl_level = amdgpu_display_backlight_get_level(adev,
@@ -3086,10 +3099,7 @@ static int dce_v11_0_resume(void *handle)
                     bl_level);
     }
 
-    /* initialize hpd */
-    dce_v11_0_hpd_init(adev);
-
-    return 0;
+    return ret;
 }
 
 static bool dce_v11_0_is_idle(void *handle)

drivers/gpu/drm/amd/amdgpu/dce_v8_0.c

@@ -2994,20 +2994,18 @@ static int dce_v8_0_suspend(void *handle)
     amdgpu_atombios_scratch_regs_save(adev);
 
-    dce_v8_0_hpd_fini(adev);
-
-    return 0;
+    return dce_v8_0_hw_fini(handle);
 }
 
 static int dce_v8_0_resume(void *handle)
 {
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+    int ret;
+
+    ret = dce_v8_0_hw_init(handle);
 
     amdgpu_atombios_scratch_regs_restore(adev);
 
-    /* init dig PHYs, disp eng pll */
-    amdgpu_atombios_encoder_init_dig(adev);
-    amdgpu_atombios_crtc_set_disp_eng_pll(adev, adev->clock.default_dispclk);
-
     /* turn on the BL */
     if (adev->mode_info.bl_encoder) {
         u8 bl_level = amdgpu_display_backlight_get_level(adev,
@@ -3016,10 +3014,7 @@ static int dce_v8_0_resume(void *handle)
                     bl_level);
     }
 
-    /* initialize hpd */
-    dce_v8_0_hpd_init(adev);
-
-    return 0;
+    return ret;
 }
 
 static bool dce_v8_0_is_idle(void *handle)

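The three dce_v*_0.c files get the same simplification: suspend and resume now delegate to the existing hw_fini/hw_init hooks, which already cover hpd teardown/setup and the dig PHY and display-engine PLL initialization, instead of duplicating those steps inline.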
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c

@@ -5542,24 +5542,6 @@ const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
     .set_powergating_state = gfx_v7_0_set_powergating_state,
 };
 
-/**
- * gfx_v7_0_ring_is_lockup - check if the 3D engine is locked up
- *
- * @adev: amdgpu_device pointer
- * @ring: amdgpu_ring structure holding ring information
- *
- * Check if the 3D engine is locked up (CIK).
- * Returns true if the engine is locked, false if not.
- */
-static bool gfx_v7_0_ring_is_lockup(struct amdgpu_ring *ring)
-{
-    if (gfx_v7_0_is_idle(ring->adev)) {
-        amdgpu_ring_lockup_update(ring);
-        return false;
-    }
-    return amdgpu_ring_test_lockup(ring);
-}
-
 static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
     .get_rptr = gfx_v7_0_ring_get_rptr_gfx,
     .get_wptr = gfx_v7_0_ring_get_wptr_gfx,
@@ -5573,7 +5555,6 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
     .emit_hdp_flush = gfx_v7_0_ring_emit_hdp_flush,
     .test_ring = gfx_v7_0_ring_test_ring,
    .test_ib = gfx_v7_0_ring_test_ib,
-    .is_lockup = gfx_v7_0_ring_is_lockup,
     .insert_nop = amdgpu_ring_insert_nop,
 };
 
@@ -5590,7 +5571,6 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
     .emit_hdp_flush = gfx_v7_0_ring_emit_hdp_flush,
     .test_ring = gfx_v7_0_ring_test_ring,
     .test_ib = gfx_v7_0_ring_test_ib,
-    .is_lockup = gfx_v7_0_ring_is_lockup,
     .insert_nop = amdgpu_ring_insert_nop,
 };

drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c

@@ -73,6 +73,12 @@ MODULE_FIRMWARE("amdgpu/carrizo_mec.bin");
 MODULE_FIRMWARE("amdgpu/carrizo_mec2.bin");
 MODULE_FIRMWARE("amdgpu/carrizo_rlc.bin");
 
+MODULE_FIRMWARE("amdgpu/stoney_ce.bin");
+MODULE_FIRMWARE("amdgpu/stoney_pfp.bin");
+MODULE_FIRMWARE("amdgpu/stoney_me.bin");
+MODULE_FIRMWARE("amdgpu/stoney_mec.bin");
+MODULE_FIRMWARE("amdgpu/stoney_rlc.bin");
+
 MODULE_FIRMWARE("amdgpu/tonga_ce.bin");
 MODULE_FIRMWARE("amdgpu/tonga_pfp.bin");
 MODULE_FIRMWARE("amdgpu/tonga_me.bin");
@@ -493,6 +499,42 @@ static const u32 cz_mgcg_cgcg_init[] =
     mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
 };
 
+static const u32 stoney_golden_settings_a11[] =
+{
+    mmDB_DEBUG2, 0xf00fffff, 0x00000400,
+    mmGB_GPU_ID, 0x0000000f, 0x00000000,
+    mmPA_SC_ENHANCE, 0xffffffff, 0x20000001,
+    mmPA_SC_LINE_STIPPLE_STATE, 0x0000ff0f, 0x00000000,
+    mmRLC_CGCG_CGLS_CTRL, 0x00000003, 0x0001003c,
+    mmTA_CNTL_AUX, 0x000f000f, 0x000b0000,
+    mmTCC_CTRL, 0x00100000, 0xf31fff7f,
+    mmTCC_EXE_DISABLE, 0x00000002, 0x00000002,
+    mmTCP_ADDR_CONFIG, 0x0000000f, 0x000000f1,
+    mmTCP_CHAN_STEER_LO, 0xffffffff, 0x10101010,
+};
+
+static const u32 stoney_golden_common_all[] =
+{
+    mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
+    mmPA_SC_RASTER_CONFIG, 0xffffffff, 0x00000000,
+    mmPA_SC_RASTER_CONFIG_1, 0xffffffff, 0x00000000,
+    mmGB_ADDR_CONFIG, 0xffffffff, 0x12010001,
+    mmSPI_RESOURCE_RESERVE_CU_0, 0xffffffff, 0x00000800,
+    mmSPI_RESOURCE_RESERVE_CU_1, 0xffffffff, 0x00000800,
+    mmSPI_RESOURCE_RESERVE_EN_CU_0, 0xffffffff, 0x00007FBF,
+    mmSPI_RESOURCE_RESERVE_EN_CU_1, 0xffffffff, 0x00007FAF,
+};
+
+static const u32 stoney_mgcg_cgcg_init[] =
+{
+    mmGRBM_GFX_INDEX, 0xffffffff, 0xe0000000,
+    mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003f,
+    mmCP_MEM_SLP_CNTL, 0xffffffff, 0x00020201,
+    mmRLC_MEM_SLP_CNTL, 0xffffffff, 0x00020201,
+    mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96940200,
+    mmATC_MISC_CG, 0xffffffff, 0x000c0200,
+};
+
 static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev);
 static void gfx_v8_0_set_irq_funcs(struct amdgpu_device *adev);
 static void gfx_v8_0_set_gds_init(struct amdgpu_device *adev);
@@ -545,6 +587,17 @@ static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev)
                          cz_golden_common_all,
                          (const u32)ARRAY_SIZE(cz_golden_common_all));
         break;
+    case CHIP_STONEY:
+        amdgpu_program_register_sequence(adev,
+                         stoney_mgcg_cgcg_init,
+                         (const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init));
+        amdgpu_program_register_sequence(adev,
+                         stoney_golden_settings_a11,
+                         (const u32)ARRAY_SIZE(stoney_golden_settings_a11));
+        amdgpu_program_register_sequence(adev,
+                         stoney_golden_common_all,
+                         (const u32)ARRAY_SIZE(stoney_golden_common_all));
+        break;
     default:
         break;
     }
@@ -691,6 +744,9 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
     case CHIP_FIJI:
         chip_name = "fiji";
         break;
+    case CHIP_STONEY:
+        chip_name = "stoney";
+        break;
     default:
         BUG();
     }
@@ -748,21 +804,23 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
     adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
     adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
 
-    snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
-    err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
-    if (!err) {
-        err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
-        if (err)
-            goto out;
-        cp_hdr = (const struct gfx_firmware_header_v1_0 *)
-            adev->gfx.mec2_fw->data;
-        adev->gfx.mec2_fw_version = le32_to_cpu(
-            cp_hdr->header.ucode_version);
-        adev->gfx.mec2_feature_version = le32_to_cpu(
-            cp_hdr->ucode_feature_version);
-    } else {
-        err = 0;
-        adev->gfx.mec2_fw = NULL;
+    if (adev->asic_type != CHIP_STONEY) {
+        snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
+        err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
+        if (!err) {
+            err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
+            if (err)
+                goto out;
+            cp_hdr = (const struct gfx_firmware_header_v1_0 *)
+                adev->gfx.mec2_fw->data;
+            adev->gfx.mec2_fw_version =
+                le32_to_cpu(cp_hdr->header.ucode_version);
+            adev->gfx.mec2_feature_version =
+                le32_to_cpu(cp_hdr->ucode_feature_version);
+        } else {
+            err = 0;
+            adev->gfx.mec2_fw = NULL;
+        }
     }
 
     if (adev->firmware.smu_load) {
@@ -1004,6 +1062,40 @@ static void gfx_v8_0_gpu_early_init(struct amdgpu_device *adev)
         adev->gfx.config.max_gs_threads = 32;
         adev->gfx.config.max_hw_contexts = 8;
 
+        adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
+        adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
+        adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
+        adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
+        gb_addr_config = CARRIZO_GB_ADDR_CONFIG_GOLDEN;
+        break;
+    case CHIP_STONEY:
+        adev->gfx.config.max_shader_engines = 1;
+        adev->gfx.config.max_tile_pipes = 2;
+        adev->gfx.config.max_sh_per_se = 1;
+        adev->gfx.config.max_backends_per_se = 1;
+
+        switch (adev->pdev->revision) {
+        case 0xc0:
+        case 0xc1:
+        case 0xc2:
+        case 0xc4:
+        case 0xc8:
+        case 0xc9:
+            adev->gfx.config.max_cu_per_sh = 3;
+            break;
+        case 0xd0:
+        case 0xd1:
+        case 0xd2:
+        default:
+            adev->gfx.config.max_cu_per_sh = 2;
+            break;
+        }
+
+        adev->gfx.config.max_texture_channel_caches = 2;
+        adev->gfx.config.max_gprs = 256;
+        adev->gfx.config.max_gs_threads = 16;
+        adev->gfx.config.max_hw_contexts = 8;
+
         adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
         adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
         adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
@@ -1797,6 +1889,273 @@ static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev)
             WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden);
         }
         break;
+    case CHIP_STONEY:
+        for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
+            switch (reg_offset) {
+            case 0:
+                gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                        PIPE_CONFIG(ADDR_SURF_P2) |
+                        TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+                        MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+                break;
+            case 1:
+                gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                        PIPE_CONFIG(ADDR_SURF_P2) |
+                        TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+                        MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+                break;
+            case 2:
+                gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                        PIPE_CONFIG(ADDR_SURF_P2) |
+                        TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+                        MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+                break;
+            case 3:
+                gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                        PIPE_CONFIG(ADDR_SURF_P2) |
+                        TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+                        MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+                break;
+            case 4:
+                gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                        PIPE_CONFIG(ADDR_SURF_P2) |
+                        TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
+                        MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+                break;
+            case 5:
+                gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+                        PIPE_CONFIG(ADDR_SURF_P2) |
+                        TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
+                        MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+                break;
+            case 6:
+                gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+                        PIPE_CONFIG(ADDR_SURF_P2) |
+                        TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
+                        MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+                break;
+            case 8:
+                gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
+                        PIPE_CONFIG(ADDR_SURF_P2));
+                break;
+            case 9:
+                gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+                        PIPE_CONFIG(ADDR_SURF_P2) |
+                        MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+                        SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+                break;
+            case 10:
+                gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                        PIPE_CONFIG(ADDR_SURF_P2) |
+                        MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+                        SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+                break;
+            case 11:
+                gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+                        PIPE_CONFIG(ADDR_SURF_P2) |
+                        MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+                        SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+                break;
+            case 13:
+                gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+                        PIPE_CONFIG(ADDR_SURF_P2) |
+                        MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+                        SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+                break;
+            case 14:
+                gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                        PIPE_CONFIG(ADDR_SURF_P2) |
+                        MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+                        SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+                break;
+            case 15:
+                gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
+                        PIPE_CONFIG(ADDR_SURF_P2) |
+                        MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+                        SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+                break;
+            case 16:
+                gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+                        PIPE_CONFIG(ADDR_SURF_P2) |
+                        MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+                        SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+                break;
+            case 18:
+                gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
+                        PIPE_CONFIG(ADDR_SURF_P2) |
+                        MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+                        SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+                break;
+            case 19:
+                gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
+                        PIPE_CONFIG(ADDR_SURF_P2) |
+                        MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+                        SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+                break;
+            case 20:
+                gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
+                        PIPE_CONFIG(ADDR_SURF_P2) |
+                        MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+                        SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+                break;
+            case 21:
+                gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
+                        PIPE_CONFIG(ADDR_SURF_P2) |
+                        MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+                        SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+                break;
+            case 22:
+                gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
+                        PIPE_CONFIG(ADDR_SURF_P2) |
+                        MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+                        SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+                break;
+            case 24:
+                gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
+                        PIPE_CONFIG(ADDR_SURF_P2) |
+                        MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+                        SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+                break;
+            case 25:
+                gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
+                        PIPE_CONFIG(ADDR_SURF_P2) |
+                        MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+                        SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+                break;
+            case 26:
+                gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
+                        PIPE_CONFIG(ADDR_SURF_P2) |
+                        MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+                        SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+                break;
+            case 27:
+                gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+                        PIPE_CONFIG(ADDR_SURF_P2) |
+                        MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+                        SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+                break;
+            case 28:
+                gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+                        PIPE_CONFIG(ADDR_SURF_P2) |
+                        MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+                        SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+                break;
+            case 29:
+                gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+                        PIPE_CONFIG(ADDR_SURF_P2) |
+                        MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+                        SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+                break;
+            case 7:
+            case 12:
+            case 17:
+            case 23:
+                /* unused idx */
+                continue;
+            default:
+                gb_tile_moden = 0;
+                break;
+            };
+            adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden;
+            WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden);
+        }
+        for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
+            switch (reg_offset) {
+            case 0:
+                gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+                        BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
NUM_BANKS(ADDR_SURF_8_BANK));
break;
case 1:
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
NUM_BANKS(ADDR_SURF_8_BANK));
break;
case 2:
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
NUM_BANKS(ADDR_SURF_8_BANK));
break;
case 3:
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
NUM_BANKS(ADDR_SURF_8_BANK));
break;
case 4:
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
NUM_BANKS(ADDR_SURF_8_BANK));
break;
case 5:
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
NUM_BANKS(ADDR_SURF_8_BANK));
break;
case 6:
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
NUM_BANKS(ADDR_SURF_8_BANK));
break;
case 8:
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
NUM_BANKS(ADDR_SURF_16_BANK));
break;
case 9:
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
NUM_BANKS(ADDR_SURF_16_BANK));
break;
case 10:
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
NUM_BANKS(ADDR_SURF_16_BANK));
break;
case 11:
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
NUM_BANKS(ADDR_SURF_16_BANK));
break;
case 12:
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
NUM_BANKS(ADDR_SURF_16_BANK));
break;
case 13:
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
NUM_BANKS(ADDR_SURF_16_BANK));
break;
case 14:
gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
NUM_BANKS(ADDR_SURF_8_BANK));
break;
case 7:
/* unused idx */
continue;
default:
gb_tile_moden = 0;
break;
};
adev->gfx.config.macrotile_mode_array[reg_offset] = gb_tile_moden;
WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden);
}
break;
	case CHIP_CARRIZO:
	default:
		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
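Each gb_tile_moden written by the CHIP_STONEY table above is a 32-bit register image: ARRAY_MODE(), PIPE_CONFIG(), TILE_SPLIT(), SAMPLE_SPLIT() and friends each shift a field value into its slot so the results can be OR-ed together without overlap. A minimal sketch of that encoding, with a placeholder shift standing in for the GB_TILE_MODE0__*__SHIFT constants in the real headers:

/* Placeholder field encoder: each helper above is of this form,
 * with the shift taken from the GB_TILE_MODE0 register definition. */
#define TILE_FIELD(val, shift)  ((u32)(val) << (shift))

/* e.g. ARRAY_MODE(x) expands to roughly TILE_FIELD(x, ARRAY_MODE_SHIFT),
 * so ARRAY_MODE(a) | PIPE_CONFIG(b) | TILE_SPLIT(c) composes one word. */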
@@ -2384,7 +2743,7 @@ static void gfx_v8_0_rlc_start(struct amdgpu_device *adev)
	WREG32(mmRLC_CNTL, tmp);
	/* carrizo do enable cp interrupt after cp inited */
-	if (adev->asic_type != CHIP_CARRIZO)
+	if (!(adev->flags & AMD_IS_APU))
		gfx_v8_0_enable_gui_idle_interrupt(adev, true);
	udelay(50);
@@ -2606,6 +2965,10 @@ static int gfx_v8_0_cp_gfx_start(struct amdgpu_device *adev)
		amdgpu_ring_write(ring, 0x00000002);
		amdgpu_ring_write(ring, 0x00000000);
		break;
case CHIP_STONEY:
amdgpu_ring_write(ring, 0x00000000);
amdgpu_ring_write(ring, 0x00000000);
break;
	default:
		BUG();
	}
@@ -3240,7 +3603,8 @@ static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev)
		/* enable the doorbell if requested */
		if (use_doorbell) {
			if ((adev->asic_type == CHIP_CARRIZO) ||
-			    (adev->asic_type == CHIP_FIJI)) {
+			    (adev->asic_type == CHIP_FIJI) ||
+			    (adev->asic_type == CHIP_STONEY)) {
				WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER,
				       AMDGPU_DOORBELL_KIQ << 2);
				WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER,
@@ -3312,7 +3676,7 @@ static int gfx_v8_0_cp_resume(struct amdgpu_device *adev)
{
	int r;
-	if (adev->asic_type != CHIP_CARRIZO)
+	if (!(adev->flags & AMD_IS_APU))
		gfx_v8_0_enable_gui_idle_interrupt(adev, false);
	if (!adev->firmware.smu_load) {
@@ -4075,15 +4439,6 @@ static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
	}
}
static bool gfx_v8_0_ring_is_lockup(struct amdgpu_ring *ring)
{
if (gfx_v8_0_is_idle(ring->adev)) {
amdgpu_ring_lockup_update(ring);
return false;
}
return amdgpu_ring_test_lockup(ring);
}
static u32 gfx_v8_0_ring_get_rptr_compute(struct amdgpu_ring *ring)
{
	return ring->adev->wb.wb[ring->rptr_offs];
@@ -4114,6 +4469,7 @@ static void gfx_v8_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
	amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 5));
	amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
				 EOP_TC_ACTION_EN |
				 EOP_TC_WB_ACTION_EN |
				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
				 EVENT_INDEX(5)));
	amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
@@ -4364,7 +4720,6 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
	.emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush,
	.test_ring = gfx_v8_0_ring_test_ring,
	.test_ib = gfx_v8_0_ring_test_ib,
	.is_lockup = gfx_v8_0_ring_is_lockup,
	.insert_nop = amdgpu_ring_insert_nop,
};
@@ -4381,7 +4736,6 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
	.emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush,
	.test_ring = gfx_v8_0_ring_test_ring,
	.test_ib = gfx_v8_0_ring_test_ib,
	.is_lockup = gfx_v8_0_ring_is_lockup,
	.insert_nop = amdgpu_ring_insert_nop,
};

@@ -93,6 +93,12 @@ static const u32 cz_mgcg_cgcg_init[] =
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};
static const u32 stoney_mgcg_cgcg_init[] =
{
mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};
static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
@@ -125,6 +131,11 @@ static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
						 cz_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init));
		break;
case CHIP_STONEY:
amdgpu_program_register_sequence(adev,
stoney_mgcg_cgcg_init,
(const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init));
break;
	default:
		break;
	}
@@ -228,6 +239,7 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
		chip_name = "fiji";
		break;
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		return 0;
	default: BUG();
	}

@@ -1295,24 +1295,6 @@ const struct amd_ip_funcs sdma_v2_4_ip_funcs = {
	.set_powergating_state = sdma_v2_4_set_powergating_state,
};
/**
* sdma_v2_4_ring_is_lockup - Check if the DMA engine is locked up
*
* @ring: amdgpu_ring structure holding ring information
*
* Check if the async DMA engine is locked up (VI).
* Returns true if the engine appears to be locked up, false if not.
*/
static bool sdma_v2_4_ring_is_lockup(struct amdgpu_ring *ring)
{
if (sdma_v2_4_is_idle(ring->adev)) {
amdgpu_ring_lockup_update(ring);
return false;
}
return amdgpu_ring_test_lockup(ring);
}
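The deleted hooks all funneled into the same heuristic: if the block reports busy, compare the ring's read pointer with a snapshot taken at the last known-good moment, and only declare a lockup when no progress has been made for a timeout. A rough sketch of that retired scheme, with field names and the timeout reconstructed for illustration rather than copied from the removed code:

/* Sketch of the retired lockup heuristic (illustrative names). */
static bool ring_is_lockup_sketch(struct amdgpu_ring *ring)
{
        u32 rptr = ring->funcs->get_rptr(ring);

        if (rptr != atomic_read(&ring->last_rptr)) {
                /* the ring consumed packets since the last check */
                amdgpu_ring_lockup_update(ring);        /* refresh snapshot */
                return false;
        }
        /* no rptr progress: locked up once quiet past the timeout */
        return time_after(jiffies,
                          (unsigned long)atomic64_read(&ring->last_activity) +
                          msecs_to_jiffies(10000));
}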
static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = {
	.get_rptr = sdma_v2_4_ring_get_rptr,
	.get_wptr = sdma_v2_4_ring_get_wptr,
@@ -1325,7 +1307,6 @@ static const struct amdgpu_ring_funcs sdma_v2_4_ring_funcs = {
	.emit_hdp_flush = sdma_v2_4_ring_emit_hdp_flush,
	.test_ring = sdma_v2_4_ring_test_ring,
	.test_ib = sdma_v2_4_ring_test_ib,
	.is_lockup = sdma_v2_4_ring_is_lockup,
	.insert_nop = sdma_v2_4_ring_insert_nop,
};

@@ -55,6 +55,7 @@ MODULE_FIRMWARE("amdgpu/carrizo_sdma.bin");
MODULE_FIRMWARE("amdgpu/carrizo_sdma1.bin");
MODULE_FIRMWARE("amdgpu/fiji_sdma.bin");
MODULE_FIRMWARE("amdgpu/fiji_sdma1.bin");
MODULE_FIRMWARE("amdgpu/stoney_sdma.bin");
static const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
{
@@ -122,6 +123,19 @@ static const u32 cz_mgcg_cgcg_init[] =
	mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100
};
static const u32 stoney_golden_settings_a11[] =
{
mmSDMA0_GFX_IB_CNTL, 0x00000100, 0x00000100,
mmSDMA0_POWER_CNTL, 0x00000800, 0x0003c800,
mmSDMA0_RLC0_IB_CNTL, 0x00000100, 0x00000100,
mmSDMA0_RLC1_IB_CNTL, 0x00000100, 0x00000100,
};
static const u32 stoney_mgcg_cgcg_init[] =
{
mmSDMA0_CLK_CTRL, 0xffffffff, 0x00000100,
};
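Both Stoney tables follow the {register, AND mask, OR value} triplet format that amdgpu_program_register_sequence() consumes: roughly, each record is applied as a read-modify-write, with an all-ones mask acting as a plain write. A simplified sketch of that helper, reconstructed for illustration rather than copied from the driver:

/* Apply {reg, and_mask, or_value} triplets, as in the tables above. */
static void program_register_sequence_sketch(struct amdgpu_device *adev,
                                             const u32 *regs, u32 array_size)
{
        u32 i, reg, and_mask, or_mask, tmp;

        for (i = 0; i + 2 < array_size; i += 3) {
                reg      = regs[i + 0];
                and_mask = regs[i + 1];
                or_mask  = regs[i + 2];

                if (and_mask == 0xffffffff) {
                        tmp = or_mask;          /* full-word write */
                } else {
                        tmp = RREG32(reg);      /* read-modify-write */
                        tmp &= ~and_mask;
                        tmp |= or_mask;
                }
                WREG32(reg, tmp);
        }
}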
/*
 * sDMA - System DMA
 * Starting with CIK, the GPU has new asynchronous
@@ -166,6 +180,14 @@ static void sdma_v3_0_init_golden_registers(struct amdgpu_device *adev)
						 cz_golden_settings_a11,
						 (const u32)ARRAY_SIZE(cz_golden_settings_a11));
		break;
case CHIP_STONEY:
amdgpu_program_register_sequence(adev,
stoney_mgcg_cgcg_init,
(const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init));
amdgpu_program_register_sequence(adev,
stoney_golden_settings_a11,
(const u32)ARRAY_SIZE(stoney_golden_settings_a11));
break;
	default:
		break;
	}
@@ -201,6 +223,9 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
	case CHIP_CARRIZO:
		chip_name = "carrizo";
		break;
case CHIP_STONEY:
chip_name = "stoney";
break;
	default: BUG();
	}
@@ -1071,6 +1096,9 @@ static int sdma_v3_0_early_init(void *handle)
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	switch (adev->asic_type) {
case CHIP_STONEY:
adev->sdma.num_instances = 1;
break;
	default:
		adev->sdma.num_instances = SDMA_MAX_INSTANCE;
		break;
@@ -1428,24 +1456,6 @@ const struct amd_ip_funcs sdma_v3_0_ip_funcs = {
	.set_powergating_state = sdma_v3_0_set_powergating_state,
};
/**
* sdma_v3_0_ring_is_lockup - Check if the DMA engine is locked up
*
* @ring: amdgpu_ring structure holding ring information
*
* Check if the async DMA engine is locked up (VI).
* Returns true if the engine appears to be locked up, false if not.
*/
static bool sdma_v3_0_ring_is_lockup(struct amdgpu_ring *ring)
{
if (sdma_v3_0_is_idle(ring->adev)) {
amdgpu_ring_lockup_update(ring);
return false;
}
return amdgpu_ring_test_lockup(ring);
}
static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = {
	.get_rptr = sdma_v3_0_ring_get_rptr,
	.get_wptr = sdma_v3_0_ring_get_wptr,
@@ -1458,7 +1468,6 @@ static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = {
	.emit_hdp_flush = sdma_v3_0_ring_emit_hdp_flush,
	.test_ring = sdma_v3_0_ring_test_ring,
	.test_ib = sdma_v3_0_ring_test_ib,
	.is_lockup = sdma_v3_0_ring_is_lockup,
	.insert_nop = sdma_v3_0_ring_insert_nop,
};

@@ -885,7 +885,6 @@ static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = {
	.emit_semaphore = uvd_v4_2_ring_emit_semaphore,
	.test_ring = uvd_v4_2_ring_test_ring,
	.test_ib = uvd_v4_2_ring_test_ib,
	.is_lockup = amdgpu_ring_test_lockup,
	.insert_nop = amdgpu_ring_insert_nop,
};

@@ -824,7 +824,6 @@ static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = {
	.emit_semaphore = uvd_v5_0_ring_emit_semaphore,
	.test_ring = uvd_v5_0_ring_test_ring,
	.test_ib = uvd_v5_0_ring_test_ib,
	.is_lockup = amdgpu_ring_test_lockup,
	.insert_nop = amdgpu_ring_insert_nop,
};

@@ -808,7 +808,6 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_funcs = {
	.emit_semaphore = uvd_v6_0_ring_emit_semaphore,
	.test_ring = uvd_v6_0_ring_test_ring,
	.test_ib = uvd_v6_0_ring_test_ib,
	.is_lockup = amdgpu_ring_test_lockup,
	.insert_nop = amdgpu_ring_insert_nop,
};

@@ -642,7 +642,6 @@ static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = {
	.emit_semaphore = amdgpu_vce_ring_emit_semaphore,
	.test_ring = amdgpu_vce_ring_test_ring,
	.test_ib = amdgpu_vce_ring_test_ib,
	.is_lockup = amdgpu_ring_test_lockup,
	.insert_nop = amdgpu_ring_insert_nop,
};

@@ -205,8 +205,9 @@ static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev)
	u32 tmp;
	unsigned ret;
-	/* Fiji is single pipe */
-	if (adev->asic_type == CHIP_FIJI) {
+	/* Fiji, Stoney are single pipe */
+	if ((adev->asic_type == CHIP_FIJI) ||
+	    (adev->asic_type == CHIP_STONEY)){
		ret = AMDGPU_VCE_HARVEST_VCE1;
		return ret;
	}
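Callers treat the returned value as a per-instance bitmask, so fused-off pipes are simply skipped when the VCE rings and registers are programmed. The consuming loop looks roughly like this, reconstructed for illustration and assuming the harvest flags are one bit per instance:

/* Skip harvested VCE pipes, e.g. VCE1 on Fiji and Stoney. */
for (idx = 0; idx < 2; ++idx) {
        if (adev->vce.harvest_config & (1 << idx))
                continue;       /* instance fused off */
        /* ... program VCE instance idx ... */
}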
@@ -643,7 +644,6 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_funcs = {
	.emit_semaphore = amdgpu_vce_ring_emit_semaphore,
	.test_ring = amdgpu_vce_ring_test_ring,
	.test_ib = amdgpu_vce_ring_test_ib,
	.is_lockup = amdgpu_ring_test_lockup,
	.insert_nop = amdgpu_ring_insert_nop,
};

@@ -232,6 +232,13 @@ static const u32 cz_mgcg_cgcg_init[] =
	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
};
static const u32 stoney_mgcg_cgcg_init[] =
{
mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00000100,
mmHDP_XDP_CGTT_BLK_CTRL, 0xffffffff, 0x00000104,
mmHDP_HOST_PATH_CNTL, 0xffffffff, 0x0f000027,
};
static void vi_init_golden_registers(struct amdgpu_device *adev)
{
	/* Some of the registers might be dependent on GRBM_GFX_INDEX */
@@ -258,6 +265,11 @@ static void vi_init_golden_registers(struct amdgpu_device *adev)
						 cz_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init));
		break;
case CHIP_STONEY:
amdgpu_program_register_sequence(adev,
stoney_mgcg_cgcg_init,
(const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init));
break;
	default:
		break;
	}
@@ -488,6 +500,7 @@ static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
	case CHIP_FIJI:
	case CHIP_TONGA:
	case CHIP_CARRIZO:
case CHIP_STONEY:
		asic_register_table = cz_allowed_read_registers;
		size = ARRAY_SIZE(cz_allowed_read_registers);
		break;
@@ -543,8 +556,10 @@ static void vi_print_gpu_status_regs(struct amdgpu_device *adev)
		 RREG32(mmSRBM_STATUS2));
	dev_info(adev->dev, " SDMA0_STATUS_REG = 0x%08X\n",
		 RREG32(mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET));
-	dev_info(adev->dev, " SDMA1_STATUS_REG = 0x%08X\n",
-		 RREG32(mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET));
+	if (adev->sdma.num_instances > 1) {
+		dev_info(adev->dev, " SDMA1_STATUS_REG = 0x%08X\n",
+			 RREG32(mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET));
+	}
	dev_info(adev->dev, " CP_STAT = 0x%08x\n", RREG32(mmCP_STAT));
	dev_info(adev->dev, " CP_STALLED_STAT1 = 0x%08x\n",
		 RREG32(mmCP_STALLED_STAT1));
@@ -639,9 +654,11 @@ u32 vi_gpu_check_soft_reset(struct amdgpu_device *adev)
		reset_mask |= AMDGPU_RESET_DMA;
	/* SDMA1_STATUS_REG */
-	tmp = RREG32(mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET);
-	if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
-		reset_mask |= AMDGPU_RESET_DMA1;
+	if (adev->sdma.num_instances > 1) {
+		tmp = RREG32(mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET);
+		if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
+			reset_mask |= AMDGPU_RESET_DMA1;
+	}
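Stoney is the first VI part with a single SDMA engine, so every unconditional SDMA1 access now needs a guard like the two above. The general pattern is to iterate over adev->sdma.num_instances instead of hard-coding both engines; a sketch, borrowing the per-instance offset table from the SDMA code earlier in this diff:

/* Check only the SDMA engines that exist on this part. */
for (i = 0; i < adev->sdma.num_instances; i++) {
        tmp = RREG32(mmSDMA0_STATUS_REG + sdma_offsets[i]);
        if (!(tmp & SDMA0_STATUS_REG__IDLE_MASK))
                reset_mask |= (i == 0) ? AMDGPU_RESET_DMA :
                                         AMDGPU_RESET_DMA1;
}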
#if 0
	/* VCE_STATUS */
	if (adev->asic_type != CHIP_TOPAZ) {
@@ -1319,6 +1336,7 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
		adev->num_ip_blocks = ARRAY_SIZE(tonga_ip_blocks);
		break;
	case CHIP_CARRIZO:
case CHIP_STONEY:
		adev->ip_blocks = cz_ip_blocks;
		adev->num_ip_blocks = ARRAY_SIZE(cz_ip_blocks);
		break;
@@ -1330,11 +1348,18 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
	return 0;
}
#define ATI_REV_ID_FUSE_MACRO__ADDRESS 0xC0014044
#define ATI_REV_ID_FUSE_MACRO__SHIFT 9
#define ATI_REV_ID_FUSE_MACRO__MASK 0x00001E00
static uint32_t vi_get_rev_id(struct amdgpu_device *adev)
{
	if (adev->asic_type == CHIP_TOPAZ)
		return (RREG32(mmPCIE_EFUSE4) & PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID_MASK)
			>> PCIE_EFUSE4__STRAP_BIF_ATI_REV_ID__SHIFT;
else if (adev->flags & AMD_IS_APU)
return (RREG32_SMC(ATI_REV_ID_FUSE_MACRO__ADDRESS) & ATI_REV_ID_FUSE_MACRO__MASK)
>> ATI_REV_ID_FUSE_MACRO__SHIFT;
	else
		return (RREG32(mmCC_DRM_ID_STRAPS) & CC_DRM_ID_STRAPS__ATI_REV_ID_MASK)
			>> CC_DRM_ID_STRAPS__ATI_REV_ID__SHIFT;
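The fuse macro added above selects a 4-bit field: mask 0x00001E00 covers bits 12:9, and the shift by 9 brings the revision down to a value in 0..15. Worked example with a hypothetical fuse word:

u32 fuse = 0x00000600;  /* hypothetical RREG32_SMC() result */
u32 rev  = (fuse & ATI_REV_ID_FUSE_MACRO__MASK)  /* 0x600 & 0x1E00 = 0x600 */
                >> ATI_REV_ID_FUSE_MACRO__SHIFT; /* 0x600 >> 9 = 3 */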
@@ -1401,6 +1426,7 @@ static int vi_common_early_init(void *handle)
		adev->firmware.smu_load = true;
		break;
	case CHIP_CARRIZO:
case CHIP_STONEY:
		adev->has_uvd = true;
		adev->cg_flags = 0;
		/* Disable UVD pg */

@@ -47,6 +47,7 @@ enum amd_asic_type {
	CHIP_TONGA,
	CHIP_FIJI,
	CHIP_CARRIZO,
CHIP_STONEY,
	CHIP_LAST,
};

@@ -6784,7 +6784,7 @@ typedef struct _ATOM_MC_INIT_PARAM_TABLE_V2_1
  ULONG ulMCUcodeRomStartAddr;
  ULONG ulMCUcodeLength;
  USHORT usMcRegInitTableOffset; // offset of ATOM_REG_INIT_SETTING array for MC core register settings.
- USHORT usReserved;             // offset of ATOM_INIT_REG_BLOCK for MC SEQ/PHY regsiter setting
+ USHORT usReserved;             // offset of ATOM_INIT_REG_BLOCK for MC SEQ/PHY register setting
}ATOM_MC_INIT_PARAM_TABLE_V2_1;

@@ -32,7 +32,7 @@
 * evergreen cards need to use the 3D engine to blit data which requires
 * quite a bit of hw state setup. Rather than pull the whole 3D driver
 * (which normally generates the 3D state) into the DRM, we opt to use
- * statically generated state tables. The regsiter state and shaders
+ * statically generated state tables. The register state and shaders
 * were hand generated to support blitting functionality. See the 3D
 * driver or documentation for descriptions of the registers and
 * shader instructions.

@@ -32,7 +32,7 @@
 * evergreen cards need to use the 3D engine to blit data which requires
 * quite a bit of hw state setup. Rather than pull the whole 3D driver
 * (which normally generates the 3D state) into the DRM, we opt to use
- * statically generated state tables. The regsiter state and shaders
+ * statically generated state tables. The register state and shaders
 * were hand generated to support blitting functionality. See the 3D
 * driver or documentation for descriptions of the registers and
 * shader instructions.

@@ -32,7 +32,7 @@
 * R6xx+ cards need to use the 3D engine to blit data which requires
 * quite a bit of hw state setup. Rather than pull the whole 3D driver
 * (which normally generates the 3D state) into the DRM, we opt to use
- * statically generated state tables. The regsiter state and shaders
+ * statically generated state tables. The register state and shaders
 * were hand generated to support blitting functionality. See the 3D
 * driver or documentation for descriptions of the registers and
 * shader instructions.

@@ -181,7 +181,9 @@ static void radeon_set_filp_rights(struct drm_device *dev,
				    struct drm_file *applier,
				    uint32_t *value)
{
-	mutex_lock(&dev->struct_mutex);
+	struct radeon_device *rdev = dev->dev_private;
+
+	mutex_lock(&rdev->gem.mutex);
	if (*value == 1) {
		/* wants rights */
		if (!*owner)
@@ -192,7 +194,7 @@ static void radeon_set_filp_rights(struct drm_device *dev,
			*owner = NULL;
	}
	*value = *owner == applier ? 1 : 0;
-	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&rdev->gem.mutex);
}
/*
@@ -727,10 +729,14 @@ void radeon_driver_preclose_kms(struct drm_device *dev,
				struct drm_file *file_priv)
{
	struct radeon_device *rdev = dev->dev_private;
mutex_lock(&rdev->gem.mutex);
	if (rdev->hyperz_filp == file_priv)
		rdev->hyperz_filp = NULL;
	if (rdev->cmask_filp == file_priv)
		rdev->cmask_filp = NULL;
mutex_unlock(&rdev->gem.mutex);
	radeon_uvd_free_handles(rdev, file_priv);
	radeon_vce_free_handles(rdev, file_priv);
}
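Taking rdev->gem.mutex here pairs with the same lock in radeon_set_filp_rights() above, so granting, dropping, and close-time revocation of the hyperz/cmask ownership all serialize on one lock. The invariant is a compare-and-clear that must stay atomic with respect to the grant path; a condensed sketch with a hypothetical helper:

/* Ownership handoff protected by rdev->gem.mutex (illustrative). */
static void drop_owner_on_close(struct mutex *lock,
				struct drm_file **owner,
				struct drm_file *closing)
{
	mutex_lock(lock);
	if (*owner == closing)	/* compare-and-clear must be atomic */
		*owner = NULL;
	mutex_unlock(lock);
}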

@@ -640,6 +640,6 @@ struct drm_amdgpu_info_hw_ip {
#define AMDGPU_FAMILY_CI	120 /* Bonaire, Hawaii */
#define AMDGPU_FAMILY_KV	125 /* Kaveri, Kabini, Mullins */
#define AMDGPU_FAMILY_VI	130 /* Iceland, Tonga */
-#define AMDGPU_FAMILY_CZ	135 /* Carrizo */
+#define AMDGPU_FAMILY_CZ	135 /* Carrizo, Stoney */
#endif