drm/gem: rename struct drm_gem_dma_object.{paddr => dma_addr}
The field paddr of struct drm_gem_dma_object holds a DMA address, which might actually be a physical address. However, depending on the platform, it can also be a bus address or a virtual address managed by an IOMMU. Hence, rename the field to dma_addr, which is more applicable.

In order to do this renaming the following coccinelle script was used:

```
@@
struct drm_gem_dma_object *gem;
@@
- gem->paddr
+ gem->dma_addr

@@
struct drm_gem_dma_object gem;
@@
- gem.paddr
+ gem.dma_addr

@exists@
typedef dma_addr_t;
symbol paddr;
@@
dma_addr_t paddr;
<...
- paddr
+ dma_addr
...>

@@
symbol paddr;
@@
dma_addr_t
- paddr
+ dma_addr
;
```

This patch is compile-time tested with:

```
make ARCH={x86_64,arm,arm64} allyesconfig
make ARCH={x86_64,arm,arm64} drivers/gpu/drm
```

Acked-by: Sam Ravnborg <sam@ravnborg.org>
Suggested-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Signed-off-by: Danilo Krummrich <dakr@redhat.com>
Signed-off-by: Sam Ravnborg <sam@ravnborg.org>
Link: https://patchwork.freedesktop.org/patch/msgid/20220802000405.949236-5-dakr@redhat.com
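The rename is purely about naming: the value stored in the field is whatever the DMA mapping API hands back, which only coincides with a CPU physical address on some platforms. A minimal illustrative sketch of where such a DMA address comes from (not part of this patch; example_map() is a hypothetical helper):

```c
/*
 * Illustrative only: why "paddr" was a misleading name. The address a
 * device uses to reach a buffer is a dma_addr_t returned by the DMA
 * mapping API; behind an IOMMU it is an IOVA, not the CPU physical address.
 */
#include <linux/dma-mapping.h>

static dma_addr_t example_map(struct device *dev, void *cpu_addr, size_t size)
{
        /*
         * May be a physical address, a bus address, or an IOMMU-managed
         * address; real code must also check the result with
         * dma_mapping_error() before using it.
         */
        return dma_map_single(dev, cpu_addr, size, DMA_TO_DEVICE);
}
```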
This commit is contained in: parent 4a83c26a1d, commit 8c30eecc67
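For orientation before the diff, a typical call site after the rename looks roughly like the following sketch (hypothetical driver code, not taken from this patch; drm_fb_dma_get_gem_obj() and the dma_addr field are the helper and field touched below):

```c
/*
 * Hypothetical call site after the rename: fetch the DMA address of one
 * framebuffer plane via the GEM DMA helpers.
 */
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_dma_helper.h>

static dma_addr_t example_plane_dma_addr(struct drm_framebuffer *fb,
                                          unsigned int plane)
{
        struct drm_gem_dma_object *gem = drm_fb_dma_get_gem_obj(fb, plane);

        if (!gem)
                return 0;

        /* dma_addr is the renamed field (formerly paddr). */
        return gem->dma_addr + fb->offsets[plane];
}
```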
@@ -137,7 +137,7 @@ komeda_fb_none_afbc_size_check(struct komeda_dev *mdev, struct komeda_fb *kfb,
 }

 min_size = komeda_fb_get_pixel_addr(kfb, 0, fb->height, i)
- - to_drm_gem_dma_obj(obj)->paddr;
+ - to_drm_gem_dma_obj(obj)->dma_addr;
 if (obj->size < min_size) {
 DRM_DEBUG_KMS("The fb->obj[%d] size: 0x%zx lower than the minimum requirement: 0x%llx.\n",
 i, obj->size, min_size);
@@ -260,7 +260,7 @@ komeda_fb_get_pixel_addr(struct komeda_fb *kfb, int x, int y, int plane)
 + plane_y * fb->pitches[plane];
 }

- return obj->paddr + offset;
+ return obj->dma_addr + offset;
 }

 /* if the fb can be supported by a specific layer */
@@ -170,7 +170,7 @@ malidp_mw_encoder_atomic_check(struct drm_encoder *encoder,
 return -EINVAL;
 }
 mw_state->pitches[i] = fb->pitches[i];
- mw_state->addrs[i] = obj->paddr + fb->offsets[i];
+ mw_state->addrs[i] = obj->dma_addr + fb->offsets[i];
 }
 mw_state->n_planes = n_planes;

@@ -714,7 +714,7 @@ static void malidp_set_plane_base_addr(struct drm_framebuffer *fb,
 struct malidp_plane *mp,
 int plane_index)
 {
- dma_addr_t paddr;
+ dma_addr_t dma_addr;
 u16 ptr;
 struct drm_plane *plane = &mp->base;
 bool afbc = fb->modifier ? true : false;
@@ -729,8 +729,8 @@ static void malidp_set_plane_base_addr(struct drm_framebuffer *fb,
 * and _AD_CROP_V registers.
 */
 if (!afbc) {
- paddr = drm_fb_dma_get_gem_addr(fb, plane->state,
+ dma_addr = drm_fb_dma_get_gem_addr(fb, plane->state,
 plane_index);
 } else {
 struct drm_gem_dma_object *obj;

@@ -738,11 +738,11 @@ static void malidp_set_plane_base_addr(struct drm_framebuffer *fb,

 if (WARN_ON(!obj))
 return;
- paddr = obj->paddr;
+ dma_addr = obj->dma_addr;
 }

- malidp_hw_write(mp->hwdev, lower_32_bits(paddr), ptr);
- malidp_hw_write(mp->hwdev, upper_32_bits(paddr), ptr + 4);
+ malidp_hw_write(mp->hwdev, lower_32_bits(dma_addr), ptr);
+ malidp_hw_write(mp->hwdev, upper_32_bits(dma_addr), ptr + 4);
 }

 static void malidp_de_set_plane_afbc(struct drm_plane *plane)
@@ -188,7 +188,7 @@ static void aspeed_gfx_pipe_update(struct drm_simple_display_pipe *pipe,
 gem = drm_fb_dma_get_gem_obj(fb, 0);
 if (!gem)
 return;
- writel(gem->paddr, priv->base + CRT_ADDR);
+ writel(gem->dma_addr, priv->base + CRT_ADDR);
 }

 static int aspeed_gfx_enable_vblank(struct drm_simple_display_pipe *pipe)
@@ -450,7 +450,7 @@ static void atmel_hlcdc_plane_update_buffers(struct atmel_hlcdc_plane *plane,
 for (i = 0; i < state->nplanes; i++) {
 struct drm_gem_dma_object *gem = drm_fb_dma_get_gem_obj(fb, i);

- state->dscrs[i]->addr = gem->paddr + state->offsets[i];
+ state->dscrs[i]->addr = gem->dma_addr + state->offsets[i];

 atmel_hlcdc_layer_write_reg(&plane->layer,
 ATMEL_HLCDC_LAYER_PLANE_HEAD(i),
@@ -72,7 +72,7 @@ dma_addr_t drm_fb_dma_get_gem_addr(struct drm_framebuffer *fb,
 unsigned int plane)
 {
 struct drm_gem_dma_object *obj;
- dma_addr_t paddr;
+ dma_addr_t dma_addr;
 u8 h_div = 1, v_div = 1;
 u32 block_w = drm_format_info_block_width(fb->format, plane);
 u32 block_h = drm_format_info_block_height(fb->format, plane);
@@ -86,7 +86,7 @@ dma_addr_t drm_fb_dma_get_gem_addr(struct drm_framebuffer *fb,
 if (!obj)
 return 0;

- paddr = obj->paddr + fb->offsets[plane];
+ dma_addr = obj->dma_addr + fb->offsets[plane];

 if (plane > 0) {
 h_div = fb->format->hsub;
@@ -98,10 +98,10 @@ dma_addr_t drm_fb_dma_get_gem_addr(struct drm_framebuffer *fb,
 block_start_y = (sample_y / block_h) * block_h;
 num_hblocks = sample_x / block_w;

- paddr += fb->pitches[plane] * block_start_y;
- paddr += block_size * num_hblocks;
+ dma_addr += fb->pitches[plane] * block_start_y;
+ dma_addr += block_size * num_hblocks;

- return paddr;
+ return dma_addr;
 }
 EXPORT_SYMBOL_GPL(drm_fb_dma_get_gem_addr);

@@ -145,11 +145,12 @@ struct drm_gem_dma_object *drm_gem_dma_create(struct drm_device *drm,

 if (dma_obj->map_noncoherent) {
 dma_obj->vaddr = dma_alloc_noncoherent(drm->dev, size,
- &dma_obj->paddr,
+ &dma_obj->dma_addr,
 DMA_TO_DEVICE,
 GFP_KERNEL | __GFP_NOWARN);
 } else {
- dma_obj->vaddr = dma_alloc_wc(drm->dev, size, &dma_obj->paddr,
+ dma_obj->vaddr = dma_alloc_wc(drm->dev, size,
+ &dma_obj->dma_addr,
 GFP_KERNEL | __GFP_NOWARN);
 }
 if (!dma_obj->vaddr) {
@@ -234,11 +235,11 @@ void drm_gem_dma_free(struct drm_gem_dma_object *dma_obj)
 } else if (dma_obj->vaddr) {
 if (dma_obj->map_noncoherent)
 dma_free_noncoherent(gem_obj->dev->dev, dma_obj->base.size,
- dma_obj->vaddr, dma_obj->paddr,
+ dma_obj->vaddr, dma_obj->dma_addr,
 DMA_TO_DEVICE);
 else
 dma_free_wc(gem_obj->dev->dev, dma_obj->base.size,
- dma_obj->vaddr, dma_obj->paddr);
+ dma_obj->vaddr, dma_obj->dma_addr);
 }

 drm_gem_object_release(gem_obj);
@@ -396,12 +397,12 @@ EXPORT_SYMBOL_GPL(drm_gem_dma_get_unmapped_area);
 * @p: DRM printer
 * @indent: Tab indentation level
 *
- * This function prints paddr and vaddr for use in e.g. debugfs output.
+ * This function prints dma_addr and vaddr for use in e.g. debugfs output.
 */
 void drm_gem_dma_print_info(const struct drm_gem_dma_object *dma_obj,
 struct drm_printer *p, unsigned int indent)
 {
- drm_printf_indent(p, indent, "paddr=%pad\n", &dma_obj->paddr);
+ drm_printf_indent(p, indent, "dma_addr=%pad\n", &dma_obj->dma_addr);
 drm_printf_indent(p, indent, "vaddr=%p\n", dma_obj->vaddr);
 }
 EXPORT_SYMBOL(drm_gem_dma_print_info);
@@ -428,7 +429,7 @@ struct sg_table *drm_gem_dma_get_sg_table(struct drm_gem_dma_object *dma_obj)
 return ERR_PTR(-ENOMEM);

 ret = dma_get_sgtable(obj->dev->dev, sgt, dma_obj->vaddr,
- dma_obj->paddr, obj->size);
+ dma_obj->dma_addr, obj->size);
 if (ret < 0)
 goto out;

@@ -473,10 +474,11 @@ drm_gem_dma_prime_import_sg_table(struct drm_device *dev,
 if (IS_ERR(dma_obj))
 return ERR_CAST(dma_obj);

- dma_obj->paddr = sg_dma_address(sgt->sgl);
+ dma_obj->dma_addr = sg_dma_address(sgt->sgl);
 dma_obj->sgt = sgt;

- DRM_DEBUG_PRIME("dma_addr = %pad, size = %zu\n", &dma_obj->paddr, attach->dmabuf->size);
+ DRM_DEBUG_PRIME("dma_addr = %pad, size = %zu\n", &dma_obj->dma_addr,
+ attach->dmabuf->size);

 return &dma_obj->base;
 }
@@ -539,7 +541,8 @@ int drm_gem_dma_mmap(struct drm_gem_dma_object *dma_obj, struct vm_area_struct *
 virt_to_page(dma_obj->vaddr));
 } else {
 ret = dma_mmap_wc(dma_obj->base.dev->dev, vma, dma_obj->vaddr,
- dma_obj->paddr, vma->vm_end - vma->vm_start);
+ dma_obj->dma_addr,
+ vma->vm_end - vma->vm_start);
 }
 if (ret)
 drm_gem_vm_close(vma);
@@ -136,7 +136,7 @@ static void fsl_dcu_drm_plane_atomic_update(struct drm_plane *plane,
 DCU_LAYER_POSY(new_state->crtc_y) |
 DCU_LAYER_POSX(new_state->crtc_x));
 regmap_write(fsl_dev->regmap,
- DCU_CTRLDESCLN(index, 3), gem->paddr);
+ DCU_CTRLDESCLN(index, 3), gem->dma_addr);
 regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(index, 4),
 DCU_LAYER_EN |
 DCU_LAYER_TRANS(0xff) |
@@ -551,10 +551,10 @@ static void ade_rdma_set(void __iomem *base, struct drm_framebuffer *fb,
 struct drm_gem_dma_object *obj = drm_fb_dma_get_gem_obj(fb, 0);
 u32 reg_ctrl, reg_addr, reg_size, reg_stride, reg_space, reg_en;
 u32 stride = fb->pitches[0];
- u32 addr = (u32)obj->paddr + y * stride;
+ u32 addr = (u32)obj->dma_addr + y * stride;

 DRM_DEBUG_DRIVER("rdma%d: (y=%d, height=%d), stride=%d, paddr=0x%x\n",
- ch + 1, y, in_h, stride, (u32)obj->paddr);
+ ch + 1, y, in_h, stride, (u32)obj->dma_addr);
 DRM_DEBUG_DRIVER("addr=0x%x, fb:%dx%d, pixel_format=%d(%p4cc)\n",
 addr, fb->width, fb->height, fmt,
 &fb->format->format);
@@ -224,20 +224,20 @@ static void dcss_plane_atomic_set_base(struct dcss_plane *dcss_plane)
 if (!format->is_yuv ||
 format->format == DRM_FORMAT_NV12 ||
 format->format == DRM_FORMAT_NV21)
- p1_ba = dma_obj->paddr + fb->offsets[0] +
+ p1_ba = dma_obj->dma_addr + fb->offsets[0] +
 fb->pitches[0] * (state->src.y1 >> 16) +
 format->char_per_block[0] * (state->src.x1 >> 16);
 else if (format->format == DRM_FORMAT_UYVY ||
 format->format == DRM_FORMAT_VYUY ||
 format->format == DRM_FORMAT_YUYV ||
 format->format == DRM_FORMAT_YVYU)
- p1_ba = dma_obj->paddr + fb->offsets[0] +
+ p1_ba = dma_obj->dma_addr + fb->offsets[0] +
 fb->pitches[0] * (state->src.y1 >> 16) +
 2 * format->char_per_block[0] * (state->src.x1 >> 17);

 if (format->format == DRM_FORMAT_NV12 ||
 format->format == DRM_FORMAT_NV21)
- p2_ba = dma_obj->paddr + fb->offsets[1] +
+ p2_ba = dma_obj->dma_addr + fb->offsets[1] +
 (((fb->pitches[1] >> 1) * (state->src.y1 >> 17) +
 (state->src.x1 >> 17)) << 1);

@@ -132,7 +132,7 @@ drm_plane_state_to_eba(struct drm_plane_state *state, int plane)
 dma_obj = drm_fb_dma_get_gem_obj(fb, plane);
 BUG_ON(!dma_obj);

- return dma_obj->paddr + fb->offsets[plane] + fb->pitches[plane] * y +
+ return dma_obj->dma_addr + fb->offsets[plane] + fb->pitches[plane] * y +
 fb->format->cpp[plane] * x;
 }

@@ -151,7 +151,7 @@ drm_plane_state_to_ubo(struct drm_plane_state *state)
 x /= fb->format->hsub;
 y /= fb->format->vsub;

- return dma_obj->paddr + fb->offsets[1] + fb->pitches[1] * y +
+ return dma_obj->dma_addr + fb->offsets[1] + fb->pitches[1] * y +
 fb->format->cpp[1] * x - eba;
 }

@@ -170,7 +170,7 @@ drm_plane_state_to_vbo(struct drm_plane_state *state)
 x /= fb->format->hsub;
 y /= fb->format->vsub;

- return dma_obj->paddr + fb->offsets[2] + fb->pitches[2] * y +
+ return dma_obj->dma_addr + fb->offsets[2] + fb->pitches[2] * y +
 fb->format->cpp[2] * x - eba;
 }

@@ -651,7 +651,7 @@ static void meson_overlay_atomic_update(struct drm_plane *plane,
 switch (priv->viu.vd1_planes) {
 case 3:
 gem = drm_fb_dma_get_gem_obj(fb, 2);
- priv->viu.vd1_addr2 = gem->paddr + fb->offsets[2];
+ priv->viu.vd1_addr2 = gem->dma_addr + fb->offsets[2];
 priv->viu.vd1_stride2 = fb->pitches[2];
 priv->viu.vd1_height2 =
 drm_format_info_plane_height(fb->format,
@@ -663,7 +663,7 @@ static void meson_overlay_atomic_update(struct drm_plane *plane,
 fallthrough;
 case 2:
 gem = drm_fb_dma_get_gem_obj(fb, 1);
- priv->viu.vd1_addr1 = gem->paddr + fb->offsets[1];
+ priv->viu.vd1_addr1 = gem->dma_addr + fb->offsets[1];
 priv->viu.vd1_stride1 = fb->pitches[1];
 priv->viu.vd1_height1 =
 drm_format_info_plane_height(fb->format,
@@ -675,7 +675,7 @@ static void meson_overlay_atomic_update(struct drm_plane *plane,
 fallthrough;
 case 1:
 gem = drm_fb_dma_get_gem_obj(fb, 0);
- priv->viu.vd1_addr0 = gem->paddr + fb->offsets[0];
+ priv->viu.vd1_addr0 = gem->dma_addr + fb->offsets[0];
 priv->viu.vd1_stride0 = fb->pitches[0];
 priv->viu.vd1_height0 =
 drm_format_info_plane_height(fb->format,
@@ -367,7 +367,7 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
 /* Update Canvas with buffer address */
 gem = drm_fb_dma_get_gem_obj(fb, 0);

- priv->viu.osd1_addr = gem->paddr;
+ priv->viu.osd1_addr = gem->dma_addr;
 priv->viu.osd1_stride = fb->pitches[0];
 priv->viu.osd1_height = fb->height;
 priv->viu.osd1_width = fb->width;
@@ -352,7 +352,7 @@ static void mxsfb_crtc_atomic_enable(struct drm_crtc *crtc,
 struct drm_bridge_state *bridge_state = NULL;
 struct drm_device *drm = mxsfb->drm;
 u32 bus_format = 0;
- dma_addr_t paddr;
+ dma_addr_t dma_addr;

 pm_runtime_get_sync(drm->dev);
 mxsfb_enable_axi_clk(mxsfb);
@@ -388,10 +388,10 @@ static void mxsfb_crtc_atomic_enable(struct drm_crtc *crtc,
 mxsfb_crtc_mode_set_nofb(mxsfb, bridge_state, bus_format);

 /* Write cur_buf as well to avoid an initial corrupt frame */
- paddr = drm_fb_dma_get_gem_addr(new_pstate->fb, new_pstate, 0);
- if (paddr) {
- writel(paddr, mxsfb->base + mxsfb->devdata->cur_buf);
- writel(paddr, mxsfb->base + mxsfb->devdata->next_buf);
+ dma_addr = drm_fb_dma_get_gem_addr(new_pstate->fb, new_pstate, 0);
+ if (dma_addr) {
+ writel(dma_addr, mxsfb->base + mxsfb->devdata->cur_buf);
+ writel(dma_addr, mxsfb->base + mxsfb->devdata->next_buf);
 }

 mxsfb_enable_controller(mxsfb);
@@ -541,11 +541,11 @@ static void mxsfb_plane_primary_atomic_update(struct drm_plane *plane,
 struct mxsfb_drm_private *mxsfb = to_mxsfb_drm_private(plane->dev);
 struct drm_plane_state *new_pstate = drm_atomic_get_new_plane_state(state,
 plane);
- dma_addr_t paddr;
+ dma_addr_t dma_addr;

- paddr = drm_fb_dma_get_gem_addr(new_pstate->fb, new_pstate, 0);
- if (paddr)
- writel(paddr, mxsfb->base + mxsfb->devdata->next_buf);
+ dma_addr = drm_fb_dma_get_gem_addr(new_pstate->fb, new_pstate, 0);
+ if (dma_addr)
+ writel(dma_addr, mxsfb->base + mxsfb->devdata->next_buf);
 }

 static void mxsfb_plane_overlay_atomic_update(struct drm_plane *plane,
@@ -556,11 +556,11 @@ static void mxsfb_plane_overlay_atomic_update(struct drm_plane *plane,
 struct mxsfb_drm_private *mxsfb = to_mxsfb_drm_private(plane->dev);
 struct drm_plane_state *new_pstate = drm_atomic_get_new_plane_state(state,
 plane);
- dma_addr_t paddr;
+ dma_addr_t dma_addr;
 u32 ctrl;

- paddr = drm_fb_dma_get_gem_addr(new_pstate->fb, new_pstate, 0);
- if (!paddr) {
+ dma_addr = drm_fb_dma_get_gem_addr(new_pstate->fb, new_pstate, 0);
+ if (!dma_addr) {
 writel(0, mxsfb->base + LCDC_AS_CTRL);
 return;
 }
@@ -571,16 +571,16 @@ static void mxsfb_plane_overlay_atomic_update(struct drm_plane *plane,
 * is understood, live with the 16 initial invalid pixels on the first
 * line and start 64 bytes within the framebuffer.
 */
- paddr += 64;
+ dma_addr += 64;

- writel(paddr, mxsfb->base + LCDC_AS_NEXT_BUF);
+ writel(dma_addr, mxsfb->base + LCDC_AS_NEXT_BUF);

 /*
 * If the plane was previously disabled, write LCDC_AS_BUF as well to
 * provide the first buffer.
 */
 if (!old_pstate->fb)
- writel(paddr, mxsfb->base + LCDC_AS_BUF);
+ writel(dma_addr, mxsfb->base + LCDC_AS_BUF);

 ctrl = AS_CTRL_AS_ENABLE | AS_CTRL_ALPHA(255);

@@ -365,7 +365,7 @@ struct drm_gem_object *rcar_du_gem_prime_import_sg_table(struct drm_device *dev,
 return ERR_PTR(ret);
 }

- dma_obj->paddr = 0;
+ dma_obj->dma_addr = 0;
 dma_obj->sgt = sgt;

 return gem_obj;
@@ -351,7 +351,7 @@ static void rcar_du_plane_setup_scanout(struct rcar_du_group *rgrp,

 for (i = 0; i < state->format->planes; ++i) {
 gem = drm_fb_dma_get_gem_obj(fb, i);
- dma[i] = gem->paddr + fb->offsets[i];
+ dma[i] = gem->dma_addr + fb->offsets[i];
 }
 } else {
 pitch = drm_rect_width(&state->state.src) >> 16;
@@ -212,7 +212,7 @@ int rcar_du_vsp_map_fb(struct rcar_du_vsp *vsp, struct drm_framebuffer *fb,
 }
 } else {
 ret = dma_get_sgtable(rcdu->dev, sgt, gem->vaddr,
- gem->paddr, gem->base.size);
+ gem->dma_addr, gem->base.size);
 if (ret)
 goto fail;
 }
@@ -293,13 +293,13 @@ static void shmob_drm_crtc_compute_base(struct shmob_drm_crtc *scrtc,

 bpp = scrtc->format->yuv ? 8 : scrtc->format->bpp;
 gem = drm_fb_dma_get_gem_obj(fb, 0);
- scrtc->dma[0] = gem->paddr + fb->offsets[0]
+ scrtc->dma[0] = gem->dma_addr + fb->offsets[0]
 + y * fb->pitches[0] + x * bpp / 8;

 if (scrtc->format->yuv) {
 bpp = scrtc->format->bpp - 8;
 gem = drm_fb_dma_get_gem_obj(fb, 1);
- scrtc->dma[1] = gem->paddr + fb->offsets[1]
+ scrtc->dma[1] = gem->dma_addr + fb->offsets[1]
 + y / (bpp == 4 ? 2 : 1) * fb->pitches[1]
 + x * (bpp == 16 ? 2 : 1);
 }
@@ -46,13 +46,13 @@ static void shmob_drm_plane_compute_base(struct shmob_drm_plane *splane,

 bpp = splane->format->yuv ? 8 : splane->format->bpp;
 gem = drm_fb_dma_get_gem_obj(fb, 0);
- splane->dma[0] = gem->paddr + fb->offsets[0]
+ splane->dma[0] = gem->dma_addr + fb->offsets[0]
 + y * fb->pitches[0] + x * bpp / 8;

 if (splane->format->yuv) {
 bpp = splane->format->bpp - 8;
 gem = drm_fb_dma_get_gem_obj(fb, 1);
- splane->dma[1] = gem->paddr + fb->offsets[1]
+ splane->dma[1] = gem->dma_addr + fb->offsets[1]
 + y / (bpp == 4 ? 2 : 1) * fb->pitches[1]
 + x * (bpp == 16 ? 2 : 1);
 }
@@ -341,7 +341,7 @@ static void sprd_dpu_layer(struct sprd_dpu *dpu, struct drm_plane_state *state)

 for (i = 0; i < fb->format->num_planes; i++) {
 dma_obj = drm_fb_dma_get_gem_obj(fb, i);
- addr = dma_obj->paddr + fb->offsets[i];
+ addr = dma_obj->dma_addr + fb->offsets[i];

 if (i == 0)
 layer_reg_wr(ctx, REG_LAY_BASE_ADDR0, addr, index);
@@ -782,11 +782,11 @@ static void sti_gdp_atomic_update(struct drm_plane *drm_plane,

 DRM_DEBUG_DRIVER("drm FB:%d format:%.4s phys@:0x%lx\n", fb->base.id,
 (char *)&fb->format->format,
- (unsigned long)dma_obj->paddr);
+ (unsigned long)dma_obj->dma_addr);

 /* pixel memory location */
 bpp = fb->format->cpp[0];
- top_field->gam_gdp_pml = (u32)dma_obj->paddr + fb->offsets[0];
+ top_field->gam_gdp_pml = (u32)dma_obj->dma_addr + fb->offsets[0];
 top_field->gam_gdp_pml += src_x * bpp;
 top_field->gam_gdp_pml += src_y * fb->pitches[0];

@@ -831,7 +831,7 @@ static void sti_gdp_atomic_update(struct drm_plane *drm_plane,
 dev_dbg(gdp->dev, "Current NVN:0x%X\n",
 readl(gdp->regs + GAM_GDP_NVN_OFFSET));
 dev_dbg(gdp->dev, "Posted buff: %lx current buff: %x\n",
- (unsigned long)dma_obj->paddr,
+ (unsigned long)dma_obj->dma_addr,
 readl(gdp->regs + GAM_GDP_PML_OFFSET));

 if (!curr_list) {
@@ -1182,11 +1182,11 @@ static void sti_hqvdp_atomic_update(struct drm_plane *drm_plane,

 DRM_DEBUG_DRIVER("drm FB:%d format:%.4s phys@:0x%lx\n", fb->base.id,
 (char *)&fb->format->format,
- (unsigned long)dma_obj->paddr);
+ (unsigned long)dma_obj->dma_addr);

 /* Buffer planes address */
- cmd->top.current_luma = (u32)dma_obj->paddr + fb->offsets[0];
- cmd->top.current_chroma = (u32)dma_obj->paddr + fb->offsets[1];
+ cmd->top.current_luma = (u32)dma_obj->dma_addr + fb->offsets[0];
+ cmd->top.current_chroma = (u32)dma_obj->dma_addr + fb->offsets[1];

 /* Pitches */
 cmd->top.luma_processed_pitch = fb->pitches[0];
@@ -329,7 +329,7 @@ int sun4i_backend_update_layer_buffer(struct sun4i_backend *backend,
 struct drm_plane_state *state = plane->state;
 struct drm_framebuffer *fb = state->fb;
 u32 lo_paddr, hi_paddr;
- dma_addr_t paddr;
+ dma_addr_t dma_addr;

 /* Set the line width */
 DRM_DEBUG_DRIVER("Layer line width: %d bits\n", fb->pitches[0] * 8);
@@ -338,21 +338,21 @@ int sun4i_backend_update_layer_buffer(struct sun4i_backend *backend,
 fb->pitches[0] * 8);

 /* Get the start of the displayed memory */
- paddr = drm_fb_dma_get_gem_addr(fb, state, 0);
- DRM_DEBUG_DRIVER("Setting buffer address to %pad\n", &paddr);
+ dma_addr = drm_fb_dma_get_gem_addr(fb, state, 0);
+ DRM_DEBUG_DRIVER("Setting buffer address to %pad\n", &dma_addr);

 if (fb->format->is_yuv)
- return sun4i_backend_update_yuv_buffer(backend, fb, paddr);
+ return sun4i_backend_update_yuv_buffer(backend, fb, dma_addr);

 /* Write the 32 lower bits of the address (in bits) */
- lo_paddr = paddr << 3;
+ lo_paddr = dma_addr << 3;
 DRM_DEBUG_DRIVER("Setting address lower bits to 0x%x\n", lo_paddr);
 regmap_write(backend->engine.regs,
 SUN4I_BACKEND_LAYFB_L32ADD_REG(layer),
 lo_paddr);

 /* And the upper bits */
- hi_paddr = paddr >> 29;
+ hi_paddr = dma_addr >> 29;
 DRM_DEBUG_DRIVER("Setting address high bits to 0x%x\n", hi_paddr);
 regmap_update_bits(backend->engine.regs, SUN4I_BACKEND_LAYFB_H4ADD_REG,
 SUN4I_BACKEND_LAYFB_H4ADD_MSK(layer),
@@ -160,7 +160,7 @@ void sun4i_frontend_update_buffer(struct sun4i_frontend *frontend,
 struct drm_framebuffer *fb = state->fb;
 unsigned int strides[3] = {};

- dma_addr_t paddr;
+ dma_addr_t dma_addr;
 bool swap;

 if (fb->modifier == DRM_FORMAT_MOD_ALLWINNER_TILED) {
@@ -221,22 +221,24 @@ void sun4i_frontend_update_buffer(struct sun4i_frontend *frontend,
 swap = sun4i_frontend_format_chroma_requires_swap(fb->format->format);

 /* Set the physical address of the buffer in memory */
- paddr = drm_fb_dma_get_gem_addr(fb, state, 0);
- DRM_DEBUG_DRIVER("Setting buffer #0 address to %pad\n", &paddr);
- regmap_write(frontend->regs, SUN4I_FRONTEND_BUF_ADDR0_REG, paddr);
+ dma_addr = drm_fb_dma_get_gem_addr(fb, state, 0);
+ DRM_DEBUG_DRIVER("Setting buffer #0 address to %pad\n", &dma_addr);
+ regmap_write(frontend->regs, SUN4I_FRONTEND_BUF_ADDR0_REG, dma_addr);

 if (fb->format->num_planes > 1) {
- paddr = drm_fb_dma_get_gem_addr(fb, state, swap ? 2 : 1);
- DRM_DEBUG_DRIVER("Setting buffer #1 address to %pad\n", &paddr);
+ dma_addr = drm_fb_dma_get_gem_addr(fb, state, swap ? 2 : 1);
+ DRM_DEBUG_DRIVER("Setting buffer #1 address to %pad\n",
+ &dma_addr);
 regmap_write(frontend->regs, SUN4I_FRONTEND_BUF_ADDR1_REG,
- paddr);
+ dma_addr);
 }

 if (fb->format->num_planes > 2) {
- paddr = drm_fb_dma_get_gem_addr(fb, state, swap ? 1 : 2);
- DRM_DEBUG_DRIVER("Setting buffer #2 address to %pad\n", &paddr);
+ dma_addr = drm_fb_dma_get_gem_addr(fb, state, swap ? 1 : 2);
+ DRM_DEBUG_DRIVER("Setting buffer #2 address to %pad\n",
+ &dma_addr);
 regmap_write(frontend->regs, SUN4I_FRONTEND_BUF_ADDR2_REG,
- paddr);
+ dma_addr);
 }
 }
 EXPORT_SYMBOL(sun4i_frontend_update_buffer);
@@ -193,7 +193,7 @@ static int sun8i_ui_layer_update_buffer(struct sun8i_mixer *mixer, int channel,
 struct drm_plane_state *state = plane->state;
 struct drm_framebuffer *fb = state->fb;
 struct drm_gem_dma_object *gem;
- dma_addr_t paddr;
+ dma_addr_t dma_addr;
 u32 ch_base;
 int bpp;

@@ -202,15 +202,15 @@ static int sun8i_ui_layer_update_buffer(struct sun8i_mixer *mixer, int channel,
 /* Get the physical address of the buffer in memory */
 gem = drm_fb_dma_get_gem_obj(fb, 0);

- DRM_DEBUG_DRIVER("Using GEM @ %pad\n", &gem->paddr);
+ DRM_DEBUG_DRIVER("Using GEM @ %pad\n", &gem->dma_addr);

 /* Compute the start of the displayed memory */
 bpp = fb->format->cpp[0];
- paddr = gem->paddr + fb->offsets[0];
+ dma_addr = gem->dma_addr + fb->offsets[0];

 /* Fixup framebuffer address for src coordinates */
- paddr += (state->src.x1 >> 16) * bpp;
- paddr += (state->src.y1 >> 16) * fb->pitches[0];
+ dma_addr += (state->src.x1 >> 16) * bpp;
+ dma_addr += (state->src.y1 >> 16) * fb->pitches[0];

 /* Set the line width */
 DRM_DEBUG_DRIVER("Layer line width: %d bytes\n", fb->pitches[0]);
@@ -218,11 +218,11 @@ static int sun8i_ui_layer_update_buffer(struct sun8i_mixer *mixer, int channel,
 SUN8I_MIXER_CHAN_UI_LAYER_PITCH(ch_base, overlay),
 fb->pitches[0]);

- DRM_DEBUG_DRIVER("Setting buffer address to %pad\n", &paddr);
+ DRM_DEBUG_DRIVER("Setting buffer address to %pad\n", &dma_addr);

 regmap_write(mixer->engine.regs,
 SUN8I_MIXER_CHAN_UI_LAYER_TOP_LADDR(ch_base, overlay),
- lower_32_bits(paddr));
+ lower_32_bits(dma_addr));

 return 0;
 }
@@ -310,7 +310,7 @@ static int sun8i_vi_layer_update_buffer(struct sun8i_mixer *mixer, int channel,
 const struct drm_format_info *format = fb->format;
 struct drm_gem_dma_object *gem;
 u32 dx, dy, src_x, src_y;
- dma_addr_t paddr;
+ dma_addr_t dma_addr;
 u32 ch_base;
 int i;

@@ -324,10 +324,10 @@ static int sun8i_vi_layer_update_buffer(struct sun8i_mixer *mixer, int channel,
 /* Get the physical address of the buffer in memory */
 gem = drm_fb_dma_get_gem_obj(fb, i);

- DRM_DEBUG_DRIVER("Using GEM @ %pad\n", &gem->paddr);
+ DRM_DEBUG_DRIVER("Using GEM @ %pad\n", &gem->dma_addr);

 /* Compute the start of the displayed memory */
- paddr = gem->paddr + fb->offsets[i];
+ dma_addr = gem->dma_addr + fb->offsets[i];

 dx = src_x;
 dy = src_y;
@@ -338,8 +338,8 @@ static int sun8i_vi_layer_update_buffer(struct sun8i_mixer *mixer, int channel,
 }

 /* Fixup framebuffer address for src coordinates */
- paddr += dx * format->cpp[i];
- paddr += dy * fb->pitches[i];
+ dma_addr += dx * format->cpp[i];
+ dma_addr += dy * fb->pitches[i];

 /* Set the line width */
 DRM_DEBUG_DRIVER("Layer %d. line width: %d bytes\n",
@@ -350,12 +350,12 @@ static int sun8i_vi_layer_update_buffer(struct sun8i_mixer *mixer, int channel,
 fb->pitches[i]);

 DRM_DEBUG_DRIVER("Setting %d. buffer address to %pad\n",
- i + 1, &paddr);
+ i + 1, &dma_addr);

 regmap_write(mixer->engine.regs,
 SUN8I_MIXER_CHAN_VI_LAYER_TOP_LADDR(ch_base,
 overlay, i),
- lower_32_bits(paddr));
+ lower_32_bits(dma_addr));
 }

 return 0;
@@ -1954,7 +1954,7 @@ int dispc_plane_check(struct dispc_device *dispc, u32 hw_plane,
 }

 static
- dma_addr_t dispc_plane_state_paddr(const struct drm_plane_state *state)
+ dma_addr_t dispc_plane_state_dma_addr(const struct drm_plane_state *state)
 {
 struct drm_framebuffer *fb = state->fb;
 struct drm_gem_dma_object *gem;
@@ -1963,7 +1963,7 @@ dma_addr_t dispc_plane_state_paddr(const struct drm_plane_state *state)

 gem = drm_fb_dma_get_gem_obj(state->fb, 0);

- return gem->paddr + fb->offsets[0] + x * fb->format->cpp[0] +
+ return gem->dma_addr + fb->offsets[0] + x * fb->format->cpp[0] +
 y * fb->pitches[0];
 }

@@ -1980,7 +1980,7 @@ dma_addr_t dispc_plane_state_p_uv_addr(const struct drm_plane_state *state)

 gem = drm_fb_dma_get_gem_obj(fb, 1);

- return gem->paddr + fb->offsets[1] +
+ return gem->dma_addr + fb->offsets[1] +
 (x * fb->format->cpp[1] / fb->format->hsub) +
 (y * fb->pitches[1] / fb->format->vsub);
 }
@@ -1993,17 +1993,17 @@ int dispc_plane_setup(struct dispc_device *dispc, u32 hw_plane,
 u32 fourcc = state->fb->format->format;
 u16 cpp = state->fb->format->cpp[0];
 u32 fb_width = state->fb->pitches[0] / cpp;
- dma_addr_t paddr = dispc_plane_state_paddr(state);
+ dma_addr_t dma_addr = dispc_plane_state_dma_addr(state);
 struct dispc_scaling_params scale;

 dispc_vid_calc_scaling(dispc, state, &scale, lite);

 dispc_plane_set_pixel_format(dispc, hw_plane, fourcc);

- dispc_vid_write(dispc, hw_plane, DISPC_VID_BA_0, paddr & 0xffffffff);
- dispc_vid_write(dispc, hw_plane, DISPC_VID_BA_EXT_0, (u64)paddr >> 32);
- dispc_vid_write(dispc, hw_plane, DISPC_VID_BA_1, paddr & 0xffffffff);
- dispc_vid_write(dispc, hw_plane, DISPC_VID_BA_EXT_1, (u64)paddr >> 32);
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_BA_0, dma_addr & 0xffffffff);
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_BA_EXT_0, (u64)dma_addr >> 32);
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_BA_1, dma_addr & 0xffffffff);
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_BA_EXT_1, (u64)dma_addr >> 32);

 dispc_vid_write(dispc, hw_plane, DISPC_VID_PICTURE_SIZE,
 (scale.in_w - 1) | ((scale.in_h - 1) << 16));
@@ -70,7 +70,7 @@ static void set_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)

 gem = drm_fb_dma_get_gem_obj(fb, 0);

- start = gem->paddr + fb->offsets[0] +
+ start = gem->dma_addr + fb->offsets[0] +
 crtc->y * fb->pitches[0] +
 crtc->x * fb->format->cpp[0];

@@ -227,7 +227,7 @@ static void arc_pgu_update(struct drm_simple_display_pipe *pipe,

 arcpgu = pipe_to_arcpgu_priv(pipe);
 gem = drm_fb_dma_get_gem_obj(pipe->plane.state->fb, 0);
- arc_pgu_write(arcpgu, ARCPGU_REG_BUF0_ADDR, gem->paddr);
+ arc_pgu_write(arcpgu, ARCPGU_REG_BUF0_ADDR, gem->dma_addr);
 }

 static const struct drm_simple_display_pipe_funcs arc_pgu_pipe_funcs = {
@@ -303,7 +303,7 @@ static void vc4_bo_purge(struct drm_gem_object *obj)

 drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping);

- dma_free_wc(dev->dev, obj->size, bo->base.vaddr, bo->base.paddr);
+ dma_free_wc(dev->dev, obj->size, bo->base.vaddr, bo->base.dma_addr);
 bo->base.vaddr = NULL;
 bo->madv = __VC4_MADV_PURGED;
 }
@@ -126,7 +126,7 @@ vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
 goto err_delete_handle;
 }
 bo_state[i].handle = handle;
- bo_state[i].paddr = vc4_bo->base.paddr;
+ bo_state[i].paddr = vc4_bo->base.dma_addr;
 bo_state[i].size = vc4_bo->base.base.size;
 }

@@ -917,16 +917,16 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
 list_add_tail(&to_vc4_bo(&exec->exec_bo->base)->unref_head,
 &exec->unref_list);

- exec->ct0ca = exec->exec_bo->paddr + bin_offset;
+ exec->ct0ca = exec->exec_bo->dma_addr + bin_offset;

 exec->bin_u = bin;

 exec->shader_rec_v = exec->exec_bo->vaddr + shader_rec_offset;
- exec->shader_rec_p = exec->exec_bo->paddr + shader_rec_offset;
+ exec->shader_rec_p = exec->exec_bo->dma_addr + shader_rec_offset;
 exec->shader_rec_size = args->shader_rec_size;

 exec->uniforms_v = exec->exec_bo->vaddr + uniforms_offset;
- exec->uniforms_p = exec->exec_bo->paddr + uniforms_offset;
+ exec->uniforms_p = exec->exec_bo->dma_addr + uniforms_offset;
 exec->uniforms_size = args->uniforms_size;

 ret = vc4_validate_bin_cl(dev,
@@ -105,7 +105,7 @@ vc4_overflow_mem_work(struct work_struct *work)
 }
 vc4->bin_alloc_overflow = BIT(bin_bo_slot);

- V3D_WRITE(V3D_BPOA, bo->base.paddr + bin_bo_slot * vc4->bin_alloc_size);
+ V3D_WRITE(V3D_BPOA, bo->base.dma_addr + bin_bo_slot * vc4->bin_alloc_size);
 V3D_WRITE(V3D_BPOS, bo->base.base.size);
 V3D_WRITE(V3D_INTCTL, V3D_INT_OUTOMEM);
 V3D_WRITE(V3D_INTENA, V3D_INT_OUTOMEM);
@@ -359,7 +359,7 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
 return ret;

 for (i = 0; i < num_planes; i++)
- vc4_state->offsets[i] = bo->paddr + fb->offsets[i];
+ vc4_state->offsets[i] = bo->dma_addr + fb->offsets[i];

 /*
 * We don't support subpixel source positioning for scaling,
@@ -1250,7 +1250,7 @@ void vc4_plane_async_set_fb(struct drm_plane *plane, struct drm_framebuffer *fb)
 * because this is only called on the primary plane.
 */
 WARN_ON_ONCE(plane->state->crtc_x < 0 || plane->state->crtc_y < 0);
- addr = bo->paddr + fb->offsets[0];
+ addr = bo->dma_addr + fb->offsets[0];

 /* Write the new address into the hardware immediately. The
 * scanout will start from this address as soon as the FIFO
@@ -101,7 +101,7 @@ static uint32_t vc4_full_res_offset(struct vc4_exec_info *exec,
 struct drm_vc4_submit_rcl_surface *surf,
 uint8_t x, uint8_t y)
 {
- return bo->paddr + surf->offset + VC4_TILE_BUFFER_SIZE *
+ return bo->dma_addr + surf->offset + VC4_TILE_BUFFER_SIZE *
 (DIV_ROUND_UP(exec->args->width, 32) * y + x);
 }

@@ -142,7 +142,7 @@ static void emit_tile(struct vc4_exec_info *exec,
 } else {
 rcl_u8(setup, VC4_PACKET_LOAD_TILE_BUFFER_GENERAL);
 rcl_u16(setup, args->color_read.bits);
- rcl_u32(setup, setup->color_read->paddr +
+ rcl_u32(setup, setup->color_read->dma_addr +
 args->color_read.offset);
 }
 }
@@ -164,7 +164,7 @@ static void emit_tile(struct vc4_exec_info *exec,
 } else {
 rcl_u8(setup, VC4_PACKET_LOAD_TILE_BUFFER_GENERAL);
 rcl_u16(setup, args->zs_read.bits);
- rcl_u32(setup, setup->zs_read->paddr +
+ rcl_u32(setup, setup->zs_read->dma_addr +
 args->zs_read.offset);
 }
 }
@@ -232,7 +232,7 @@ static void emit_tile(struct vc4_exec_info *exec,
 (last_tile_write ?
 0 : VC4_STORE_TILE_BUFFER_DISABLE_COLOR_CLEAR));
 rcl_u32(setup,
- (setup->zs_write->paddr + args->zs_write.offset) |
+ (setup->zs_write->dma_addr + args->zs_write.offset) |
 ((last && last_tile_write) ?
 VC4_LOADSTORE_TILE_BUFFER_EOF : 0));
 }
@@ -355,7 +355,7 @@ static int vc4_create_rcl_bo(struct drm_device *dev, struct vc4_exec_info *exec,

 rcl_u8(setup, VC4_PACKET_TILE_RENDERING_MODE_CONFIG);
 rcl_u32(setup,
- (setup->color_write ? (setup->color_write->paddr +
+ (setup->color_write ? (setup->color_write->dma_addr +
 args->color_write.offset) :
 0));
 rcl_u16(setup, args->width);
@@ -374,8 +374,8 @@ static int vc4_create_rcl_bo(struct drm_device *dev, struct vc4_exec_info *exec,
 }

 BUG_ON(setup->next_offset != size);
- exec->ct1ca = setup->rcl->paddr;
- exec->ct1ea = setup->rcl->paddr + setup->next_offset;
+ exec->ct1ca = setup->rcl->dma_addr;
+ exec->ct1ea = setup->rcl->dma_addr + setup->next_offset;

 return 0;
 }
@@ -318,7 +318,7 @@ static void vc4_txp_connector_atomic_commit(struct drm_connector *conn,
 return;

 gem = drm_fb_dma_get_gem_obj(fb, 0);
- TXP_WRITE(TXP_DST_PTR, gem->paddr + fb->offsets[0]);
+ TXP_WRITE(TXP_DST_PTR, gem->dma_addr + fb->offsets[0]);
 TXP_WRITE(TXP_DST_PITCH, fb->pitches[0]);
 TXP_WRITE(TXP_DIM,
 VC4_SET_FIELD(mode->hdisplay, TXP_WIDTH) |
@@ -268,8 +268,8 @@ static int bin_bo_alloc(struct vc4_dev *vc4)
 }

 /* Check if this BO won't trigger the addressing bug. */
- if ((bo->base.paddr & 0xf0000000) ==
- ((bo->base.paddr + bo->base.base.size - 1) & 0xf0000000)) {
+ if ((bo->base.dma_addr & 0xf0000000) ==
+ ((bo->base.dma_addr + bo->base.base.size - 1) & 0xf0000000)) {
 vc4->bin_bo = bo;

 /* Set up for allocating 512KB chunks of
@@ -294,7 +294,7 @@ validate_indexed_prim_list(VALIDATE_ARGS)
 return -EINVAL;
 }

- *(uint32_t *)(validated + 5) = ib->paddr + offset;
+ *(uint32_t *)(validated + 5) = ib->dma_addr + offset;

 return 0;
 }
@@ -400,7 +400,7 @@ validate_tile_binning_config(VALIDATE_ARGS)
 * free when the job completes rendering.
 */
 exec->bin_slots |= BIT(bin_slot);
- bin_addr = vc4->bin_bo->base.paddr + bin_slot * vc4->bin_alloc_size;
+ bin_addr = vc4->bin_bo->base.dma_addr + bin_slot * vc4->bin_alloc_size;

 /* The tile state data array is 48 bytes per tile, and we put it at
 * the start of a BO containing both it and the tile alloc.
@@ -608,7 +608,7 @@ reloc_tex(struct vc4_exec_info *exec,
 "outside of UBO\n");
 goto fail;
 }
- *validated_p0 = tex->paddr + p0;
+ *validated_p0 = tex->dma_addr + p0;
 return true;
 }

@@ -736,7 +736,7 @@ reloc_tex(struct vc4_exec_info *exec,
 offset -= level_size;
 }

- *validated_p0 = tex->paddr + p0;
+ *validated_p0 = tex->dma_addr + p0;

 if (is_cs) {
 exec->bin_dep_seqno = max(exec->bin_dep_seqno,
@@ -840,7 +840,7 @@ validate_gl_shader_rec(struct drm_device *dev,
 void *uniform_data_u;
 uint32_t tex, uni;

- *(uint32_t *)(pkt_v + o) = bo[i]->paddr + src_offset;
+ *(uint32_t *)(pkt_v + o) = bo[i]->dma_addr + src_offset;

 if (src_offset != 0) {
 DRM_DEBUG("Shaders must be at offset 0 of "
@@ -928,7 +928,7 @@ validate_gl_shader_rec(struct drm_device *dev,
 }
 }

- *(uint32_t *)(pkt_v + o) = vbo->paddr + offset;
+ *(uint32_t *)(pkt_v + o) = vbo->dma_addr + offset;
 }

 return 0;
@@ -1098,14 +1098,14 @@ static int zynqmp_disp_layer_update(struct zynqmp_disp_layer *layer,
 unsigned int height = state->crtc_h / (i ? info->vsub : 1);
 struct zynqmp_disp_layer_dma *dma = &layer->dmas[i];
 struct dma_async_tx_descriptor *desc;
- dma_addr_t paddr;
+ dma_addr_t dma_addr;

- paddr = drm_fb_dma_get_gem_addr(state->fb, state, i);
+ dma_addr = drm_fb_dma_get_gem_addr(state->fb, state, i);

 dma->xt.numf = height;
 dma->sgl.size = width * info->cpp[i];
 dma->sgl.icg = state->fb->pitches[i] - dma->sgl.size;
- dma->xt.src_start = paddr;
+ dma->xt.src_start = dma_addr;
 dma->xt.frame_size = 1;
 dma->xt.dir = DMA_MEM_TO_DEV;
 dma->xt.src_sgl = true;
@@ -11,7 +11,7 @@ struct drm_mode_create_dumb;
 /**
 * struct drm_gem_dma_object - GEM object backed by DMA memory allocations
 * @base: base GEM object
- * @paddr: DMA address of the backing memory
+ * @dma_addr: DMA address of the backing memory
 * @sgt: scatter/gather table for imported PRIME buffers. The table can have
 * more than one entry but they are guaranteed to have contiguous
 * DMA addresses.
@@ -20,7 +20,7 @@ struct drm_mode_create_dumb;
 */
 struct drm_gem_dma_object {
 struct drm_gem_object base;
- dma_addr_t paddr;
+ dma_addr_t dma_addr;
 struct sg_table *sgt;

 /* For objects with DMA memory allocated by GEM DMA */