drm/msm: convert iova to 64b
For a5xx the GPU is 64-bit, so iova needs to become 64-bit everywhere. On the display side, iova is still 32-bit, so it can simply ignore the upper bits. (Although all the armv8 devices have an IOMMU that can map a 64-bit PA to a 32-bit iova.)

Signed-off-by: Rob Clark <robdclark@gmail.com>
commit 78babc1633
parent c83ea57601
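The change is mechanical throughout the diff: every variable, struct field, and function parameter carrying a GPU iova widens from 32 to 64 bits, and the printk-style format specifiers follow (%08x becomes %08llx). The display blocks keep programming 32-bit registers and simply use the low word of the address. A minimal sketch of that display-side point, assuming a hypothetical helper (lower_32_bits() is the stock kernel macro; nothing below is taken from the diff itself):

#include <linux/kernel.h>	/* lower_32_bits() */

/* Illustrative only: a 32-bit display register is programmed with the
 * low word of the 64-bit iova; the upper bits are ignored, as the
 * commit message notes. */
static u32 example_cursor_reg_value(u64 cursor_addr)
{
	return lower_32_bits(cursor_addr);
}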
@@ -153,7 +153,7 @@ struct adreno_gpu {
 	// different for z180..
 	struct adreno_rbmemptrs *memptrs;
 	struct drm_gem_object *memptrs_bo;
-	uint32_t memptrs_iova;
+	uint64_t memptrs_iova;

 	/*
 	 * Register offsets are different between some GPUs.
@@ -981,7 +981,7 @@ static int dsi_tx_buf_alloc(struct msm_dsi_host *msm_host, int size)
 	struct drm_device *dev = msm_host->dev;
 	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
 	int ret;
-	u32 iova;
+	uint64_t iova;

 	if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
 		mutex_lock(&dev->struct_mutex);
@@ -1146,7 +1146,7 @@ static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len)
 {
 	const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
 	int ret;
-	u32 dma_base;
+	uint64_t dma_base;
 	bool triggered;

 	if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
@@ -373,7 +373,7 @@ static void update_cursor(struct drm_crtc *crtc)
 	if (mdp4_crtc->cursor.stale) {
 		struct drm_gem_object *next_bo = mdp4_crtc->cursor.next_bo;
 		struct drm_gem_object *prev_bo = mdp4_crtc->cursor.scanout_bo;
-		uint32_t iova = mdp4_crtc->cursor.next_iova;
+		uint64_t iova = mdp4_crtc->cursor.next_iova;

 		if (next_bo) {
 			/* take a obj ref + iova ref when we start scanning out: */
@@ -418,7 +418,7 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
 	struct drm_device *dev = crtc->dev;
 	struct drm_gem_object *cursor_bo, *old_bo;
 	unsigned long flags;
-	uint32_t iova;
+	uint64_t iova;
 	int ret;

 	if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
@@ -51,7 +51,7 @@ struct mdp4_kms {

 	/* empty/blank cursor bo to use when cursor is "disabled" */
 	struct drm_gem_object *blank_cursor_bo;
-	uint32_t blank_cursor_iova;
+	uint64_t blank_cursor_iova;
 };
 #define to_mdp4_kms(x) container_of(x, struct mdp4_kms, base)

@@ -489,7 +489,8 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
 	struct drm_device *dev = crtc->dev;
 	struct mdp5_kms *mdp5_kms = get_kms(crtc);
 	struct drm_gem_object *cursor_bo, *old_bo = NULL;
-	uint32_t blendcfg, cursor_addr, stride;
+	uint32_t blendcfg, stride;
+	uint64_t cursor_addr;
 	int ret, lm;
 	enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
 	uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
@@ -209,9 +209,9 @@ int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
 int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
 int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
-		uint32_t *iova);
-int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova);
-uint32_t msm_gem_iova(struct drm_gem_object *obj, int id);
+		uint64_t *iova);
+int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint64_t *iova);
+uint64_t msm_gem_iova(struct drm_gem_object *obj, int id);
 struct page **msm_gem_get_pages(struct drm_gem_object *obj);
 void msm_gem_put_pages(struct drm_gem_object *obj);
 void msm_gem_put_iova(struct drm_gem_object *obj, int id);
@@ -88,11 +88,11 @@ int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id)
 {
 	struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
 	int ret, i, n = drm_format_num_planes(fb->pixel_format);
-	uint32_t iova;
+	uint64_t iova;

 	for (i = 0; i < n; i++) {
 		ret = msm_gem_get_iova(msm_fb->planes[i], id, &iova);
-		DBG("FB[%u]: iova[%d]: %08x (%d)", fb->base.id, i, iova, ret);
+		DBG("FB[%u]: iova[%d]: %08llx (%d)", fb->base.id, i, iova, ret);
 		if (ret)
 			return ret;
 	}
@@ -76,7 +76,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
 	struct drm_framebuffer *fb = NULL;
 	struct fb_info *fbi = NULL;
 	struct drm_mode_fb_cmd2 mode_cmd = {0};
-	uint32_t paddr;
+	uint64_t paddr;
 	int ret, size;

 	DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width,
@@ -309,7 +309,7 @@ put_iova(struct drm_gem_object *obj)
  * the refcnt counter needs to be atomic_t.
  */
 int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
-		uint32_t *iova)
+		uint64_t *iova)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	int ret = 0;
@@ -336,7 +336,7 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
 }

 /* get iova, taking a reference. Should have a matching put */
-int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
+int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint64_t *iova)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	int ret;
@@ -358,7 +358,7 @@ int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
 /* get iova without taking a reference, used in places where you have
  * already done a 'msm_gem_get_iova()'.
  */
-uint32_t msm_gem_iova(struct drm_gem_object *obj, int id)
+uint64_t msm_gem_iova(struct drm_gem_object *obj, int id)
 {
 	struct msm_gem_object *msm_obj = to_msm_bo(obj);
 	WARN_ON(!msm_obj->domain[id].iova);
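The comments above spell out the reference semantics that survive the type change: msm_gem_get_iova() takes an iova reference and must be balanced by msm_gem_put_iova(), while msm_gem_iova() only reads back a mapping that is already held. A hedged usage sketch with the widened type (the function name and the pr_debug() call are illustrative, not part of the driver):

static int example_pin_and_log(struct drm_gem_object *obj, int id)
{
	uint64_t iova;
	int ret;

	ret = msm_gem_get_iova(obj, id, &iova);	/* takes an iova reference */
	if (ret)
		return ret;

	pr_debug("pinned at %08llx\n", iova);	/* 64-bit value needs %llx */

	msm_gem_put_iova(obj, id);		/* matching put */
	return 0;
}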
@@ -123,13 +123,13 @@ struct msm_gem_submit {
 	struct {
 		uint32_t type;
 		uint32_t size;  /* in dwords */
-		uint32_t iova;
+		uint64_t iova;
 		uint32_t idx;   /* cmdstream buffer idx in bos[] */
 	} *cmd;  /* array of size nr_cmds */
 	struct {
 		uint32_t flags;
 		struct msm_gem_object *obj;
-		uint32_t iova;
+		uint64_t iova;
 	} bos[0];
 };

@@ -241,7 +241,7 @@ static int submit_pin_objects(struct msm_gem_submit *submit)

 	for (i = 0; i < submit->nr_bos; i++) {
 		struct msm_gem_object *msm_obj = submit->bos[i].obj;
-		uint32_t iova;
+		uint64_t iova;

 		/* if locking succeeded, pin bo: */
 		ret = msm_gem_get_iova_locked(&msm_obj->base,
@@ -266,7 +266,7 @@ static int submit_pin_objects(struct msm_gem_submit *submit)
 }

 static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
-		struct msm_gem_object **obj, uint32_t *iova, bool *valid)
+		struct msm_gem_object **obj, uint64_t *iova, bool *valid)
 {
 	if (idx >= submit->nr_bos) {
 		DRM_ERROR("invalid buffer index: %u (out of %u)\n",
@@ -312,7 +312,8 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
 		struct drm_msm_gem_submit_reloc submit_reloc;
 		void __user *userptr =
 			u64_to_user_ptr(relocs + (i * sizeof(submit_reloc)));
-		uint32_t iova, off;
+		uint32_t off;
+		uint64_t iova;
 		bool valid;

 		ret = copy_from_user(&submit_reloc, userptr, sizeof(submit_reloc));
@@ -461,7 +462,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 		void __user *userptr =
 			u64_to_user_ptr(args->cmds + (i * sizeof(submit_cmd)));
 		struct msm_gem_object *msm_obj;
-		uint32_t iova;
+		uint64_t iova;

 		ret = copy_from_user(&submit_cmd, userptr, sizeof(submit_cmd));
 		if (ret) {
@@ -528,7 +528,7 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,

 	for (i = 0; i < submit->nr_bos; i++) {
 		struct msm_gem_object *msm_obj = submit->bos[i].obj;
-		uint32_t iova;
+		uint64_t iova;

 		/* can't happen yet.. but when we add 2d support we'll have
 		 * to deal w/ cross-ring synchronization:
@@ -80,7 +80,7 @@ struct msm_gpu {

 	/* ringbuffer: */
 	struct msm_ringbuffer *rb;
-	uint32_t rb_iova;
+	uint64_t rb_iova;

 	/* list of GEM active objects: */
 	struct list_head active_list;
@@ -45,13 +45,13 @@ static void msm_iommu_detach(struct msm_mmu *mmu, const char * const *names,
 	iommu_detach_device(iommu->domain, mmu->dev);
 }

-static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova,
+static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
 		struct sg_table *sgt, unsigned len, int prot)
 {
 	struct msm_iommu *iommu = to_msm_iommu(mmu);
 	struct iommu_domain *domain = iommu->domain;
 	struct scatterlist *sg;
-	unsigned int da = iova;
+	unsigned long da = iova;
 	unsigned int i, j;
 	int ret;

@@ -62,7 +62,7 @@ static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova,
 		dma_addr_t pa = sg_phys(sg) - sg->offset;
 		size_t bytes = sg->length + sg->offset;

-		VERB("map[%d]: %08x %08lx(%zx)", i, da, (unsigned long)pa, bytes);
+		VERB("map[%d]: %08lx %08lx(%zx)", i, da, (unsigned long)pa, bytes);

 		ret = iommu_map(domain, da, pa, bytes, prot);
 		if (ret)
@@ -84,13 +84,13 @@ fail:
 	return ret;
 }

-static int msm_iommu_unmap(struct msm_mmu *mmu, uint32_t iova,
+static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova,
 		struct sg_table *sgt, unsigned len)
 {
 	struct msm_iommu *iommu = to_msm_iommu(mmu);
 	struct iommu_domain *domain = iommu->domain;
 	struct scatterlist *sg;
-	unsigned int da = iova;
+	unsigned long da = iova;
 	int i;

 	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
@@ -101,7 +101,7 @@ static int msm_iommu_unmap(struct msm_mmu *mmu, uint32_t iova,
 		if (unmapped < bytes)
 			return unmapped;

-		VERB("unmap[%d]: %08x(%zx)", i, da, bytes);
+		VERB("unmap[%d]: %08lx(%zx)", i, da, bytes);

 		BUG_ON(!PAGE_ALIGNED(bytes));

@@ -23,9 +23,9 @@
 struct msm_mmu_funcs {
 	int (*attach)(struct msm_mmu *mmu, const char * const *names, int cnt);
 	void (*detach)(struct msm_mmu *mmu, const char * const *names, int cnt);
-	int (*map)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt,
+	int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
 			unsigned len, int prot);
-	int (*unmap)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt,
+	int (*unmap)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
 			unsigned len);
 	void (*destroy)(struct msm_mmu *mmu);
 };
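The widened map/unmap signatures are what the iommu backend above now implements, and a backend plugs them into this function table. A sketch of that wiring, with the attach/detach/destroy callback names assumed from the surrounding driver rather than taken from this diff:

static const struct msm_mmu_funcs funcs = {
	.attach = msm_iommu_attach,	/* assumed name, not shown in this diff */
	.detach = msm_iommu_detach,
	.map = msm_iommu_map,		/* now takes a uint64_t iova */
	.unmap = msm_iommu_unmap,	/* likewise */
	.destroy = msm_iommu_destroy,	/* assumed name, not shown in this diff */
};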