commit 4e291f2f58

Merge tag 'drm-misc-next-2022-11-10-1' of git://anongit.freedesktop.org/drm/drm-misc into drm-next

drm-misc-next for 6.2:

UAPI Changes:

Cross-subsystem Changes:

Core Changes:
  - atomic-helper: Add begin_fb_access and end_fb_access hooks
  - fb-helper: Rework to move fb emulation into helpers
  - scheduler: rework entity flush, kill and fini
  - ttm: Optimize pool allocations

Driver Changes:
  - amdgpu: scheduler rework
  - hdlcd: Switch to DRM-managed resources
  - ingenic: Fix registration error path
  - lcdif: FIFO threshold tuning
  - meson: Fix return type of cvbs' mode_valid
  - ofdrm: multiple fixes (kconfig, types, endianness)
  - sun4i: A100 and D1 support
  - panel:
    - New Panel: Jadard JD9365DA-H3

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Maxime Ripard <maxime@cerno.tech>
Link: https://patchwork.freedesktop.org/patch/msgid/20221110083612.g63eaocoaa554soh@houat
@@ -12,9 +12,14 @@ maintainers:
 properties:
   compatible:
-    enum:
-      - allwinner,sun6i-a31-mipi-dsi
-      - allwinner,sun50i-a64-mipi-dsi
+    oneOf:
+      - enum:
+          - allwinner,sun6i-a31-mipi-dsi
+          - allwinner,sun50i-a64-mipi-dsi
+          - allwinner,sun50i-a100-mipi-dsi
+      - items:
+          - const: allwinner,sun20i-d1-mipi-dsi
+          - const: allwinner,sun50i-a100-mipi-dsi
 
   reg:
     maxItems: 1
@@ -59,7 +64,6 @@ required:
   - phys
   - phy-names
   - resets
-  - vcc-dsi-supply
   - port
 
 allOf:
@@ -68,7 +72,9 @@ allOf:
       properties:
         compatible:
           contains:
-            const: allwinner,sun6i-a31-mipi-dsi
+            enum:
+              - allwinner,sun6i-a31-mipi-dsi
+              - allwinner,sun50i-a100-mipi-dsi
 
     then:
       properties:
@@ -78,16 +84,22 @@ allOf:
       required:
         - clock-names
 
     else:
       properties:
         clocks:
           maxItems: 1
 
   - if:
       properties:
         compatible:
           contains:
-            const: allwinner,sun50i-a64-mipi-dsi
+            enum:
+              - allwinner,sun6i-a31-mipi-dsi
+              - allwinner,sun50i-a64-mipi-dsi
 
     then:
       properties:
         clocks:
           minItems: 1
+
+      required:
+        - vcc-dsi-supply
 
 unevaluatedProperties: false
@@ -0,0 +1,70 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/jadard,jd9365da-h3.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Jadard JD9365DA-H3 WXGA DSI panel
+
+maintainers:
+  - Jagan Teki <jagan@edgeble.ai>
+
+allOf:
+  - $ref: panel-common.yaml#
+
+properties:
+  compatible:
+    items:
+      - enum:
+          - chongzhou,cz101b4001
+      - const: jadard,jd9365da-h3
+
+  reg: true
+
+  vdd-supply:
+    description: supply regulator for VDD, usually 3.3V
+
+  vccio-supply:
+    description: supply regulator for VCCIO, usually 1.8V
+
+  reset-gpios: true
+
+  backlight: true
+
+  port: true
+
+required:
+  - compatible
+  - reg
+  - vdd-supply
+  - vccio-supply
+  - reset-gpios
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/gpio/gpio.h>
+    #include <dt-bindings/pinctrl/rockchip.h>
+
+    dsi {
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        panel@0 {
+            compatible = "chongzhou,cz101b4001", "jadard,jd9365da-h3";
+            reg = <0>;
+            vdd-supply = <&lcd_3v3>;
+            vccio-supply = <&vcca_1v8>;
+            reset-gpios = <&gpio1 RK_PC2 GPIO_ACTIVE_HIGH>;
+            backlight = <&backlight>;
+
+            port {
+                mipi_in_panel: endpoint {
+                    remote-endpoint = <&mipi_out_panel>;
+                };
+            };
+        };
+    };
+
+...
@@ -246,6 +246,8 @@ patternProperties:
     description: ChipOne
   "^chipspark,.*":
     description: ChipSPARK
+  "^chongzhou,.*":
+    description: Shenzhen Chongzhou Electronic Technology Co., Ltd
   "^chrontel,.*":
     description: Chrontel, Inc.
   "^chrp,.*":
@@ -639,6 +641,8 @@ patternProperties:
     description: ITian Corporation
   "^iwave,.*":
     description: iWave Systems Technologies Pvt. Ltd.
+  "^jadard,.*":
+    description: Jadard Technology Inc.
   "^jdi,.*":
     description: Japan Display Inc.
   "^jedec,.*":
@@ -116,6 +116,9 @@ fbdev Helper Functions Reference
 .. kernel-doc:: drivers/gpu/drm/drm_fb_helper.c
    :export:
 
+.. kernel-doc:: drivers/gpu/drm/drm_fbdev_generic.c
+   :export:
+
 format Helper Functions Reference
 =================================
@@ -6503,6 +6503,12 @@ S:	Orphan / Obsolete
 F:	drivers/gpu/drm/i810/
 F:	include/uapi/drm/i810_drm.h
 
+DRM DRIVER FOR JADARD JD9365DA-H3 MIPI-DSI LCD PANELS
+M:	Jagan Teki <jagan@edgeble.ai>
+S:	Maintained
+F:	Documentation/devicetree/bindings/display/panel/jadard,jd9365da-h3.yaml
+F:	drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
+
 DRM DRIVER FOR LOGICVC DISPLAY CONTROLLER
 M:	Paul Kocialkowski <paul.kocialkowski@bootlin.com>
 S:	Supported
@@ -7113,7 +7119,7 @@ F:	drivers/gpu/drm/ttm/
 F:	include/drm/ttm/
 
 DRM GPU SCHEDULER
-M:	Andrey Grodzovsky <andrey.grodzovsky@amd.com>
+M:	Luben Tuikov <luben.tuikov@amd.com>
 L:	dri-devel@lists.freedesktop.org
 S:	Maintained
 T:	git git://anongit.freedesktop.org/drm/drm-misc
@@ -117,7 +117,9 @@ drm_kms_helper-y := \
 		drm_self_refresh_helper.o \
 		drm_simple_kms_helper.o
 drm_kms_helper-$(CONFIG_DRM_PANEL_BRIDGE) += bridge/panel.o
-drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += drm_fb_helper.o
+drm_kms_helper-$(CONFIG_DRM_FBDEV_EMULATION) += \
+	drm_fbdev_generic.o \
+	drm_fb_helper.o
 obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
 
 #
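Note on the fbdev split above: with the generic fbdev emulation now built as its own object, drivers reach it through <drm/drm_fbdev_generic.h> (see the include changes to amdgpu, komeda and hdlcd below). A rough sketch of the usual call site; example_register() is a made-up wrapper, not kernel code:

    #include <drm/drm_drv.h>
    #include <drm/drm_fbdev_generic.h>

    static int example_register(struct drm_device *drm)
    {
    	int ret;

    	ret = drm_dev_register(drm, 0);
    	if (ret)
    		return ret;

    	/* Spawn the emulated fbdev console; 32 is the preferred bpp. */
    	drm_fbdev_generic_setup(drm, 32);
    	return 0;
    }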
@@ -673,7 +673,7 @@ int amdgpu_amdkfd_submit_ib(struct amdgpu_device *adev,
 		goto err;
 	}
 
-	ret = amdgpu_job_alloc(adev, 1, &job, NULL);
+	ret = amdgpu_job_alloc(adev, NULL, NULL, NULL, 1, &job);
 	if (ret)
 		goto err;
 
@@ -26,7 +26,6 @@
 
 #include <drm/display/drm_dp_helper.h>
 #include <drm/drm_edid.h>
-#include <drm/drm_fb_helper.h>
 #include <drm/drm_probe_helper.h>
 #include <drm/amdgpu_drm.h>
 #include "amdgpu.h"
@@ -291,12 +291,8 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
 		return -EINVAL;
 
 	for (i = 0; i < p->gang_size; ++i) {
-		ret = amdgpu_job_alloc(p->adev, num_ibs[i], &p->jobs[i], vm);
-		if (ret)
-			goto free_all_kdata;
-
-		ret = drm_sched_job_init(&p->jobs[i]->base, p->entities[i],
-					 &fpriv->vm);
+		ret = amdgpu_job_alloc(p->adev, vm, p->entities[i], vm,
+				       num_ibs[i], &p->jobs[i]);
 		if (ret)
 			goto free_all_kdata;
 	}
@@ -430,7 +426,7 @@ static int amdgpu_cs_p2_dependencies(struct amdgpu_cs_parser *p,
 			dma_fence_put(old);
 		}
 
-		r = amdgpu_sync_fence(&p->gang_leader->sync, fence);
+		r = amdgpu_sync_fence(&p->sync, fence);
 		dma_fence_put(fence);
 		if (r)
 			return r;
@@ -452,9 +448,20 @@ static int amdgpu_syncobj_lookup_and_add(struct amdgpu_cs_parser *p,
 		return r;
 	}
 
-	r = amdgpu_sync_fence(&p->gang_leader->sync, fence);
-	dma_fence_put(fence);
+	r = amdgpu_sync_fence(&p->sync, fence);
+	if (r)
+		goto error;
+
+	/*
+	 * When we have an explicit dependency it might be necessary to insert a
+	 * pipeline sync to make sure that all caches etc are flushed and the
+	 * next job actually sees the results from the previous one.
+	 */
+	if (fence->context == p->gang_leader->base.entity->fence_context)
+		r = amdgpu_sync_fence(&p->gang_leader->explicit_sync, fence);
+
+error:
+	dma_fence_put(fence);
 	return r;
 }
 
@@ -1101,7 +1108,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
 	if (r)
 		return r;
 
-	r = amdgpu_sync_fence(&job->sync, fpriv->prt_va->last_pt_update);
+	r = amdgpu_sync_fence(&p->sync, fpriv->prt_va->last_pt_update);
 	if (r)
 		return r;
 
@@ -1112,7 +1119,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
 		if (r)
 			return r;
 
-		r = amdgpu_sync_fence(&job->sync, bo_va->last_pt_update);
+		r = amdgpu_sync_fence(&p->sync, bo_va->last_pt_update);
 		if (r)
 			return r;
 	}
@@ -1131,7 +1138,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
 		if (r)
 			return r;
 
-		r = amdgpu_sync_fence(&job->sync, bo_va->last_pt_update);
+		r = amdgpu_sync_fence(&p->sync, bo_va->last_pt_update);
 		if (r)
 			return r;
 	}
@@ -1144,7 +1151,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
 	if (r)
 		return r;
 
-	r = amdgpu_sync_fence(&job->sync, vm->last_update);
+	r = amdgpu_sync_fence(&p->sync, vm->last_update);
 	if (r)
 		return r;
 
@@ -1176,7 +1183,6 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
 static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
 {
 	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
-	struct amdgpu_job *leader = p->gang_leader;
 	struct amdgpu_bo_list_entry *e;
 	unsigned int i;
 	int r;
@@ -1188,14 +1194,14 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
 
 		sync_mode = amdgpu_bo_explicit_sync(bo) ?
 			AMDGPU_SYNC_EXPLICIT : AMDGPU_SYNC_NE_OWNER;
-		r = amdgpu_sync_resv(p->adev, &leader->sync, resv, sync_mode,
+		r = amdgpu_sync_resv(p->adev, &p->sync, resv, sync_mode,
 				     &fpriv->vm);
 		if (r)
 			return r;
 	}
 
-	for (i = 0; i < p->gang_size - 1; ++i) {
-		r = amdgpu_sync_clone(&leader->sync, &p->jobs[i]->sync);
+	for (i = 0; i < p->gang_size; ++i) {
+		r = amdgpu_sync_push_to_job(&p->sync, p->jobs[i]);
 		if (r)
 			return r;
 	}
@@ -1241,7 +1247,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 		struct dma_fence *fence;
 
 		fence = &p->jobs[i]->base.s_fence->scheduled;
-		r = amdgpu_sync_fence(&leader->sync, fence);
+		r = drm_sched_job_add_dependency(&leader->base, fence);
 		if (r)
 			goto error_cleanup;
 	}
@@ -75,6 +75,8 @@ struct amdgpu_cs_parser {
 
 	unsigned num_post_deps;
 	struct amdgpu_cs_post_dep *post_deps;
+
+	struct amdgpu_sync sync;
 };
 
 int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
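The parser-level amdgpu_sync added above replaces the per-job sync containers: dependencies are collected once and then fanned out to every gang member. A simplified sketch of the pattern (example_sync_gang() is a hypothetical name; the real code is amdgpu_cs_sync_rings() above):

    static int example_sync_gang(struct amdgpu_cs_parser *p,
    			     struct dma_resv *resv, void *owner)
    {
    	unsigned int i;
    	int r;

    	/* Collect once into the parser-level container... */
    	r = amdgpu_sync_resv(p->adev, &p->sync, resv,
    			     AMDGPU_SYNC_NE_OWNER, owner);
    	if (r)
    		return r;

    	/* ...then hand the unsignaled fences to each gang member. */
    	for (i = 0; i < p->gang_size; ++i) {
    		r = amdgpu_sync_push_to_job(&p->sync, p->jobs[i]);
    		if (r)
    			return r;
    	}
    	return 0;
    }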
@@ -37,6 +37,7 @@
 #include <linux/pci-p2pdma.h>
 
 #include <drm/drm_atomic_helper.h>
+#include <drm/drm_fb_helper.h>
 #include <drm/drm_probe_helper.h>
 #include <drm/amdgpu_drm.h>
 #include <linux/vgaarb.h>
@@ -39,8 +39,8 @@
 #include <linux/pm_runtime.h>
 #include <drm/drm_crtc_helper.h>
 #include <drm/drm_edid.h>
-#include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_fourcc.h>
 #include <drm/drm_vblank.h>
 
@@ -1214,7 +1214,6 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev,
 
 const struct drm_mode_config_funcs amdgpu_mode_funcs = {
 	.fb_create = amdgpu_display_user_framebuffer_create,
-	.output_poll_changed = drm_fb_helper_output_poll_changed,
 };
 
 static const struct drm_prop_enum_list amdgpu_underscan_enum_list[] =
@@ -25,6 +25,7 @@
 #include <drm/amdgpu_drm.h>
 #include <drm/drm_aperture.h>
 #include <drm/drm_drv.h>
+#include <drm/drm_fbdev_generic.h>
 #include <drm/drm_gem.h>
 #include <drm/drm_vblank.h>
 #include <drm/drm_managed.h>
@@ -182,7 +182,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 
 	need_ctx_switch = ring->current_ctx != fence_ctx;
 	if (ring->funcs->emit_pipeline_sync && job &&
-	    ((tmp = amdgpu_sync_get_fence(&job->sched_sync)) ||
+	    ((tmp = amdgpu_sync_get_fence(&job->explicit_sync)) ||
 	     (amdgpu_sriov_vf(adev) && need_ctx_switch) ||
 	     amdgpu_vm_need_pipeline_sync(ring, job))) {
 		need_pipe_sync = true;
@@ -170,26 +170,27 @@ bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
  *
  * @vm: vm to allocate id for
  * @ring: ring we want to submit job to
- * @sync: sync object where we add dependencies
  * @idle: resulting idle VMID
+ * @fence: fence to wait for if no id could be grabbed
  *
  * Try to find an idle VMID, if none is idle add a fence to wait to the sync
  * object. Returns -ENOMEM when we are out of memory.
  */
 static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
 				 struct amdgpu_ring *ring,
-				 struct amdgpu_sync *sync,
-				 struct amdgpu_vmid **idle)
+				 struct amdgpu_vmid **idle,
+				 struct dma_fence **fence)
 {
 	struct amdgpu_device *adev = ring->adev;
 	unsigned vmhub = ring->funcs->vmhub;
 	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
 	struct dma_fence **fences;
 	unsigned i;
-	int r;
 
-	if (!dma_fence_is_signaled(ring->vmid_wait))
-		return amdgpu_sync_fence(sync, ring->vmid_wait);
+	if (!dma_fence_is_signaled(ring->vmid_wait)) {
+		*fence = dma_fence_get(ring->vmid_wait);
+		return 0;
+	}
 
 	fences = kmalloc_array(id_mgr->num_ids, sizeof(void *), GFP_KERNEL);
 	if (!fences)
@@ -228,10 +229,10 @@ static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
 			return -ENOMEM;
 		}
 
-		r = amdgpu_sync_fence(sync, &array->base);
+		*fence = dma_fence_get(&array->base);
 		dma_fence_put(ring->vmid_wait);
 		ring->vmid_wait = &array->base;
-		return r;
+		return 0;
 	}
 	kfree(fences);
 
@@ -243,19 +244,17 @@ static int amdgpu_vmid_grab_idle(struct amdgpu_vm *vm,
  *
  * @vm: vm to allocate id for
  * @ring: ring we want to submit job to
- * @sync: sync object where we add dependencies
- * @fence: fence protecting ID from reuse
  * @job: job who wants to use the VMID
  * @id: resulting VMID
+ * @fence: fence to wait for if no id could be grabbed
  *
  * Try to assign a reserved VMID.
 */
 static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
 				     struct amdgpu_ring *ring,
-				     struct amdgpu_sync *sync,
-				     struct dma_fence *fence,
 				     struct amdgpu_job *job,
-				     struct amdgpu_vmid **id)
+				     struct amdgpu_vmid **id,
+				     struct dma_fence **fence)
 {
 	struct amdgpu_device *adev = ring->adev;
 	unsigned vmhub = ring->funcs->vmhub;
@@ -282,7 +281,8 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
 		tmp = amdgpu_sync_peek_fence(&(*id)->active, ring);
 		if (tmp) {
 			*id = NULL;
-			return amdgpu_sync_fence(sync, tmp);
+			*fence = dma_fence_get(tmp);
+			return 0;
 		}
 		needs_flush = true;
 	}
@@ -290,7 +290,7 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
 	/* Good we can use this VMID. Remember this submission as
 	 * user of the VMID.
 	 */
-	r = amdgpu_sync_fence(&(*id)->active, fence);
+	r = amdgpu_sync_fence(&(*id)->active, &job->base.s_fence->finished);
 	if (r)
 		return r;
 
@@ -304,19 +304,17 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
  *
  * @vm: vm to allocate id for
  * @ring: ring we want to submit job to
- * @sync: sync object where we add dependencies
- * @fence: fence protecting ID from reuse
  * @job: job who wants to use the VMID
 * @id: resulting VMID
+ * @fence: fence to wait for if no id could be grabbed
 *
 * Try to reuse a VMID for this submission.
 */
 static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
 				 struct amdgpu_ring *ring,
-				 struct amdgpu_sync *sync,
-				 struct dma_fence *fence,
 				 struct amdgpu_job *job,
-				 struct amdgpu_vmid **id)
+				 struct amdgpu_vmid **id,
+				 struct dma_fence **fence)
 {
 	struct amdgpu_device *adev = ring->adev;
 	unsigned vmhub = ring->funcs->vmhub;
@@ -352,7 +350,8 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
 		/* Good, we can use this VMID. Remember this submission as
 		 * user of the VMID.
 		 */
-		r = amdgpu_sync_fence(&(*id)->active, fence);
+		r = amdgpu_sync_fence(&(*id)->active,
+				      &job->base.s_fence->finished);
 		if (r)
 			return r;
 
@@ -370,15 +369,13 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
  *
  * @vm: vm to allocate id for
  * @ring: ring we want to submit job to
- * @sync: sync object where we add dependencies
- * @fence: fence protecting ID from reuse
  * @job: job who wants to use the VMID
+ * @fence: fence to wait for if no id could be grabbed
  *
  * Allocate an id for the vm, adding fences to the sync obj as necessary.
  */
 int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
-		     struct amdgpu_sync *sync, struct dma_fence *fence,
-		     struct amdgpu_job *job)
+		     struct amdgpu_job *job, struct dma_fence **fence)
 {
 	struct amdgpu_device *adev = ring->adev;
 	unsigned vmhub = ring->funcs->vmhub;
@@ -388,16 +385,16 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 	int r = 0;
 
 	mutex_lock(&id_mgr->lock);
-	r = amdgpu_vmid_grab_idle(vm, ring, sync, &idle);
+	r = amdgpu_vmid_grab_idle(vm, ring, &idle, fence);
 	if (r || !idle)
 		goto error;
 
 	if (vm->reserved_vmid[vmhub]) {
-		r = amdgpu_vmid_grab_reserved(vm, ring, sync, fence, job, &id);
+		r = amdgpu_vmid_grab_reserved(vm, ring, job, &id, fence);
 		if (r || !id)
 			goto error;
 	} else {
-		r = amdgpu_vmid_grab_used(vm, ring, sync, fence, job, &id);
+		r = amdgpu_vmid_grab_used(vm, ring, job, &id, fence);
 		if (r)
 			goto error;
 
@@ -406,7 +403,8 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 		id = idle;
 
 		/* Remember this submission as user of the VMID */
-		r = amdgpu_sync_fence(&id->active, fence);
+		r = amdgpu_sync_fence(&id->active,
+				      &job->base.s_fence->finished);
 		if (r)
 			goto error;
 
|
@ -84,8 +84,7 @@ void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
|
||||
struct amdgpu_vm *vm,
|
||||
unsigned vmhub);
|
||||
int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
|
||||
struct amdgpu_sync *sync, struct dma_fence *fence,
|
||||
struct amdgpu_job *job);
|
||||
struct amdgpu_job *job, struct dma_fence **fence);
|
||||
void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub,
|
||||
unsigned vmid);
|
||||
void amdgpu_vmid_reset_all(struct amdgpu_device *adev);
|
||||
|
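With the reworked prototype above, amdgpu_vmid_grab() no longer fills a sync object; it hands back at most one fence through the new out parameter, and the caller retries until the VMID sticks. A sketch of that contract (hypothetical helper; the in-tree caller is amdgpu_job_prepare_job() further down):

    static struct dma_fence *example_grab_vmid(struct amdgpu_vm *vm,
    					   struct amdgpu_ring *ring,
    					   struct amdgpu_job *job)
    {
    	struct dma_fence *fence = NULL;
    	int r;

    	while (!fence && !job->vmid) {
    		r = amdgpu_vmid_grab(vm, ring, job, &fence);
    		if (r)
    			return ERR_PTR(r);
    	}
    	/* NULL once the VMID belongs to this job */
    	return fence;
    }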
@@ -88,8 +88,9 @@ exit:
 	return DRM_GPU_SCHED_STAT_NOMINAL;
 }
 
-int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
-		     struct amdgpu_job **job, struct amdgpu_vm *vm)
+int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+		     struct drm_sched_entity *entity, void *owner,
+		     unsigned int num_ibs, struct amdgpu_job **job)
 {
 	if (num_ibs == 0)
 		return -EINVAL;
@@ -105,28 +106,34 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
 	(*job)->base.sched = &adev->rings[0]->sched;
 	(*job)->vm = vm;
 
-	amdgpu_sync_create(&(*job)->sync);
-	amdgpu_sync_create(&(*job)->sched_sync);
+	amdgpu_sync_create(&(*job)->explicit_sync);
 	(*job)->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
 	(*job)->vm_pd_addr = AMDGPU_BO_INVALID_OFFSET;
 
+	if (!entity)
+		return 0;
+
+	return drm_sched_job_init(&(*job)->base, entity, owner);
 }
 
-int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
-			     enum amdgpu_ib_pool_type pool_type,
+int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev,
+			     struct drm_sched_entity *entity, void *owner,
+			     size_t size, enum amdgpu_ib_pool_type pool_type,
 			     struct amdgpu_job **job)
 {
 	int r;
 
-	r = amdgpu_job_alloc(adev, 1, job, NULL);
+	r = amdgpu_job_alloc(adev, NULL, entity, owner, 1, job);
 	if (r)
 		return r;
 
 	(*job)->num_ibs = 1;
 	r = amdgpu_ib_get(adev, NULL, size, pool_type, &(*job)->ibs[0]);
-	if (r)
+	if (r) {
+		if (entity)
+			drm_sched_job_cleanup(&(*job)->base);
 		kfree(*job);
+	}
 
 	return r;
 }
@@ -166,9 +173,7 @@ static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
 
 	drm_sched_job_cleanup(s_job);
 
-	amdgpu_sync_free(&job->sync);
-	amdgpu_sync_free(&job->sched_sync);
-
+	amdgpu_sync_free(&job->explicit_sync);
 	dma_fence_put(&job->hw_fence);
 }
 
@@ -190,9 +195,11 @@ void amdgpu_job_set_gang_leader(struct amdgpu_job *job,
 
 void amdgpu_job_free(struct amdgpu_job *job)
 {
+	if (job->base.entity)
+		drm_sched_job_cleanup(&job->base);
+
 	amdgpu_job_free_resources(job);
-	amdgpu_sync_free(&job->sync);
-	amdgpu_sync_free(&job->sched_sync);
+	amdgpu_sync_free(&job->explicit_sync);
 	if (job->gang_submit != &job->base.s_fence->scheduled)
 		dma_fence_put(job->gang_submit);
 
@@ -202,25 +209,16 @@ void amdgpu_job_free(struct amdgpu_job *job)
 	dma_fence_put(&job->hw_fence);
 }
 
-int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
-		      void *owner, struct dma_fence **f)
+struct dma_fence *amdgpu_job_submit(struct amdgpu_job *job)
 {
-	int r;
-
-	if (!f)
-		return -EINVAL;
-
-	r = drm_sched_job_init(&job->base, entity, owner);
-	if (r)
-		return r;
+	struct dma_fence *f;
 
 	drm_sched_job_arm(&job->base);
-
-	*f = dma_fence_get(&job->base.s_fence->finished);
+	f = dma_fence_get(&job->base.s_fence->finished);
 	amdgpu_job_free_resources(job);
 	drm_sched_entity_push_job(&job->base);
 
-	return 0;
+	return f;
 }
 
 int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
@@ -238,30 +236,19 @@ int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
 	return 0;
 }
 
-static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
-					       struct drm_sched_entity *s_entity)
+static struct dma_fence *
+amdgpu_job_prepare_job(struct drm_sched_job *sched_job,
+		       struct drm_sched_entity *s_entity)
 {
 	struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->rq->sched);
 	struct amdgpu_job *job = to_amdgpu_job(sched_job);
-	struct amdgpu_vm *vm = job->vm;
-	struct dma_fence *fence;
+	struct dma_fence *fence = NULL;
 	int r;
 
-	fence = amdgpu_sync_get_fence(&job->sync);
-	if (fence && drm_sched_dependency_optimized(fence, s_entity)) {
-		r = amdgpu_sync_fence(&job->sched_sync, fence);
-		if (r)
-			DRM_ERROR("Error adding fence (%d)\n", r);
-	}
-
-	while (fence == NULL && vm && !job->vmid) {
-		r = amdgpu_vmid_grab(vm, ring, &job->sync,
-				     &job->base.s_fence->finished,
-				     job);
+	while (!fence && job->vm && !job->vmid) {
+		r = amdgpu_vmid_grab(job->vm, ring, job, &fence);
 		if (r)
 			DRM_ERROR("Error getting VM ID (%d)\n", r);
-
-		fence = amdgpu_sync_get_fence(&job->sync);
 	}
 
 	if (!fence && job->gang_submit)
@@ -281,8 +268,6 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
 	job = to_amdgpu_job(sched_job);
 	finished = &job->base.s_fence->finished;
 
-	BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));
-
 	trace_amdgpu_sched_run_job(job);
 
 	/* Skip job if VRAM is lost and never resubmit gangs */
@@ -341,7 +326,7 @@ void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched)
 }
 
 const struct drm_sched_backend_ops amdgpu_sched_ops = {
-	.dependency = amdgpu_job_dependency,
+	.prepare_job = amdgpu_job_prepare_job,
 	.run_job = amdgpu_job_run,
 	.timedout_job = amdgpu_job_timedout,
 	.free_job = amdgpu_job_free_cb
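The scheduler hook was renamed from .dependency to .prepare_job as part of the entity flush/kill/fini rework. The semantics stay fence-driven: each call may return one more fence the scheduler must wait on, and NULL signals the job is ready. A minimal sketch for a driver backend (placeholder names, not amdgpu code):

    static struct dma_fence *
    example_prepare_job(struct drm_sched_job *sched_job,
    		    struct drm_sched_entity *s_entity)
    {
    	/* Return a fence to wait on before run_job(), or NULL
    	 * when the job can be handed to the hardware. */
    	return NULL;
    }

The ops table then points .prepare_job at it, exactly as amdgpu_sched_ops does above.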
@@ -47,8 +47,7 @@ enum amdgpu_ib_pool_type;
 struct amdgpu_job {
 	struct drm_sched_job    base;
 	struct amdgpu_vm	*vm;
-	struct amdgpu_sync	sync;
-	struct amdgpu_sync	sched_sync;
+	struct amdgpu_sync	explicit_sync;
 	struct dma_fence	hw_fence;
 	struct dma_fence	*gang_submit;
 	uint32_t		preamble_status;
@@ -78,18 +77,20 @@ static inline struct amdgpu_ring *amdgpu_job_ring(struct amdgpu_job *job)
 	return to_amdgpu_ring(job->base.entity->rq->sched);
 }
 
-int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
-		     struct amdgpu_job **job, struct amdgpu_vm *vm);
-int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
-			     enum amdgpu_ib_pool_type pool, struct amdgpu_job **job);
+int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+		     struct drm_sched_entity *entity, void *owner,
+		     unsigned int num_ibs, struct amdgpu_job **job);
+int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev,
+			     struct drm_sched_entity *entity, void *owner,
+			     size_t size, enum amdgpu_ib_pool_type pool_type,
+			     struct amdgpu_job **job);
 void amdgpu_job_set_resources(struct amdgpu_job *job, struct amdgpu_bo *gds,
 			      struct amdgpu_bo *gws, struct amdgpu_bo *oa);
 void amdgpu_job_free_resources(struct amdgpu_job *job);
 void amdgpu_job_set_gang_leader(struct amdgpu_job *job,
 				struct amdgpu_job *leader);
 void amdgpu_job_free(struct amdgpu_job *job);
-int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
-		      void *owner, struct dma_fence **f);
+struct dma_fence *amdgpu_job_submit(struct amdgpu_job *job);
 int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
 			     struct dma_fence **fence);
 
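Taken together, the new prototypes shift the drm_sched_entity and owner from submit time to allocation time, and delegated submission itself can no longer fail. A sketch of the resulting flow (example_submit() is a hypothetical wrapper; compare the TTM and UVD call sites below):

    static int example_submit(struct amdgpu_device *adev,
    			  struct drm_sched_entity *entity,
    			  unsigned int num_dw)
    {
    	struct amdgpu_job *job;
    	struct dma_fence *fence;
    	int r;

    	r = amdgpu_job_alloc_with_ib(adev, entity,
    				     AMDGPU_FENCE_OWNER_UNDEFINED,
    				     num_dw * 4, AMDGPU_IB_POOL_DELAYED,
    				     &job);
    	if (r)
    		return r;

    	/* ...fill job->ibs[0] with commands here... */

    	fence = amdgpu_job_submit(job);	/* cannot fail anymore */
    	dma_fence_put(fence);
    	return 0;
    }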
@@ -150,14 +150,15 @@ static int amdgpu_jpeg_dec_set_reg(struct amdgpu_ring *ring, uint32_t handle,
 	const unsigned ib_size_dw = 16;
 	int i, r;
 
-	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
 				     AMDGPU_IB_POOL_DIRECT, &job);
 	if (r)
 		return r;
 
 	ib = &job->ibs[0];
 
-	ib->ptr[0] = PACKETJ(adev->jpeg.internal.jpeg_pitch, 0, 0, PACKETJ_TYPE0);
+	ib->ptr[0] = PACKETJ(adev->jpeg.internal.jpeg_pitch, 0, 0,
+			     PACKETJ_TYPE0);
 	ib->ptr[1] = 0xDEADBEEF;
 	for (i = 2; i < 16; i += 2) {
 		ib->ptr[i] = PACKETJ(0, 0, 0, PACKETJ_TYPE6);
@@ -29,6 +29,7 @@
 #include "amdgpu.h"
 #include <drm/amdgpu_drm.h>
 #include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
 #include "amdgpu_uvd.h"
 #include "amdgpu_vce.h"
 #include "atom.h"

@@ -36,7 +36,6 @@
 #include <drm/drm_encoder.h>
 #include <drm/drm_fixed.h>
 #include <drm/drm_crtc_helper.h>
-#include <drm/drm_fb_helper.h>
 #include <drm/drm_framebuffer.h>
 #include <drm/drm_probe_helper.h>
 #include <linux/i2c.h>
@@ -259,6 +259,14 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
 	return 0;
 }
 
+/* Free the entry back to the slab */
+static void amdgpu_sync_entry_free(struct amdgpu_sync_entry *e)
+{
+	hash_del(&e->node);
+	dma_fence_put(e->fence);
+	kmem_cache_free(amdgpu_sync_slab, e);
+}
+
 /**
  * amdgpu_sync_peek_fence - get the next fence not signaled yet
  *
@@ -280,9 +288,7 @@ struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
 		struct drm_sched_fence *s_fence = to_drm_sched_fence(f);
 
 		if (dma_fence_is_signaled(f)) {
-			hash_del(&e->node);
-			dma_fence_put(f);
-			kmem_cache_free(amdgpu_sync_slab, e);
+			amdgpu_sync_entry_free(e);
 			continue;
 		}
 		if (ring && s_fence) {
@@ -355,15 +361,42 @@ int amdgpu_sync_clone(struct amdgpu_sync *source, struct amdgpu_sync *clone)
 			if (r)
 				return r;
 		} else {
-			hash_del(&e->node);
-			dma_fence_put(f);
-			kmem_cache_free(amdgpu_sync_slab, e);
+			amdgpu_sync_entry_free(e);
 		}
 	}
 
 	return 0;
 }
 
+/**
+ * amdgpu_sync_push_to_job - push fences into job
+ * @sync: sync object to get the fences from
+ * @job: job to push the fences into
+ *
+ * Add all unsignaled fences from sync to job.
+ */
+int amdgpu_sync_push_to_job(struct amdgpu_sync *sync, struct amdgpu_job *job)
+{
+	struct amdgpu_sync_entry *e;
+	struct hlist_node *tmp;
+	struct dma_fence *f;
+	int i, r;
+
+	hash_for_each_safe(sync->fences, i, tmp, e, node) {
+		f = e->fence;
+		if (dma_fence_is_signaled(f)) {
+			amdgpu_sync_entry_free(e);
+			continue;
+		}
+
+		dma_fence_get(f);
+		r = drm_sched_job_add_dependency(&job->base, f);
+		if (r)
+			return r;
+	}
+	return 0;
+}
+
 int amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr)
 {
 	struct amdgpu_sync_entry *e;
@@ -375,9 +408,7 @@ int amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr)
 		if (r)
 			return r;
 
-		hash_del(&e->node);
-		dma_fence_put(e->fence);
-		kmem_cache_free(amdgpu_sync_slab, e);
+		amdgpu_sync_entry_free(e);
 	}
 
 	return 0;
@@ -396,11 +427,8 @@ void amdgpu_sync_free(struct amdgpu_sync *sync)
 	struct hlist_node *tmp;
 	unsigned int i;
 
-	hash_for_each_safe(sync->fences, i, tmp, e, node) {
-		hash_del(&e->node);
-		dma_fence_put(e->fence);
-		kmem_cache_free(amdgpu_sync_slab, e);
-	}
+	hash_for_each_safe(sync->fences, i, tmp, e, node)
+		amdgpu_sync_entry_free(e);
 }
 
 /**
@@ -30,6 +30,7 @@ struct dma_fence;
 struct dma_resv;
 struct amdgpu_device;
 struct amdgpu_ring;
+struct amdgpu_job;
 
 enum amdgpu_sync_mode {
 	AMDGPU_SYNC_ALWAYS,
@@ -54,6 +55,7 @@ struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
 					 struct amdgpu_ring *ring);
 struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
 int amdgpu_sync_clone(struct amdgpu_sync *source, struct amdgpu_sync *clone);
+int amdgpu_sync_push_to_job(struct amdgpu_sync *sync, struct amdgpu_job *job);
 int amdgpu_sync_wait(struct amdgpu_sync *sync, bool intr);
 void amdgpu_sync_free(struct amdgpu_sync *sync);
 int amdgpu_sync_init(void);
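amdgpu_sync_push_to_job() turns collected container entries into scheduler dependencies of a job. A sketch of the collect-then-push idiom (hypothetical helper; amdgpu_vm_sdma_prepare() below adopts the same shape):

    static int example_add_resv_deps(struct amdgpu_device *adev,
    				 struct amdgpu_job *job,
    				 struct dma_resv *resv, void *owner)
    {
    	struct amdgpu_sync sync;
    	int r;

    	amdgpu_sync_create(&sync);
    	r = amdgpu_sync_resv(adev, &sync, resv, AMDGPU_SYNC_ALWAYS, owner);
    	if (!r)
    		r = amdgpu_sync_push_to_job(&sync, job);
    	amdgpu_sync_free(&sync);
    	return r;
    }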
@@ -189,7 +189,6 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
 	struct amdgpu_device *adev = ring->adev;
 	unsigned offset, num_pages, num_dw, num_bytes;
 	uint64_t src_addr, dst_addr;
-	struct dma_fence *fence;
 	struct amdgpu_job *job;
 	void *cpu_addr;
 	uint64_t flags;
@@ -229,7 +228,9 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
 	num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
 	num_bytes = num_pages * 8 * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
 
-	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes,
+	r = amdgpu_job_alloc_with_ib(adev, &adev->mman.entity,
+				     AMDGPU_FENCE_OWNER_UNDEFINED,
+				     num_dw * 4 + num_bytes,
 				     AMDGPU_IB_POOL_DELAYED, &job);
 	if (r)
 		return r;
@@ -269,18 +270,8 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
 		}
 	}
 
-	r = amdgpu_job_submit(job, &adev->mman.entity,
-			      AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
-	if (r)
-		goto error_free;
-
-	dma_fence_put(fence);
-
-	return r;
-
-error_free:
-	amdgpu_job_free(job);
-	return r;
+	dma_fence_put(amdgpu_job_submit(job));
+	return 0;
 }
 
 /**
@@ -1417,7 +1408,8 @@ static void amdgpu_ttm_vram_mm_access(struct amdgpu_device *adev, loff_t pos,
 }
 
 static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo,
-					unsigned long offset, void *buf, int len, int write)
+					 unsigned long offset, void *buf,
+					 int len, int write)
 {
 	struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
 	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
@@ -1441,26 +1433,27 @@ static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo,
 		memcpy(adev->mman.sdma_access_ptr, buf, len);
 
 	num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
-	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, AMDGPU_IB_POOL_DELAYED, &job);
+	r = amdgpu_job_alloc_with_ib(adev, &adev->mman.entity,
+				     AMDGPU_FENCE_OWNER_UNDEFINED,
+				     num_dw * 4, AMDGPU_IB_POOL_DELAYED,
+				     &job);
 	if (r)
 		goto out;
 
 	amdgpu_res_first(abo->tbo.resource, offset, len, &src_mm);
-	src_addr = amdgpu_ttm_domain_start(adev, bo->resource->mem_type) + src_mm.start;
+	src_addr = amdgpu_ttm_domain_start(adev, bo->resource->mem_type) +
+		src_mm.start;
 	dst_addr = amdgpu_bo_gpu_offset(adev->mman.sdma_access_bo);
 	if (write)
 		swap(src_addr, dst_addr);
 
-	amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr, dst_addr, PAGE_SIZE, false);
+	amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr, dst_addr,
+				PAGE_SIZE, false);
 
 	amdgpu_ring_pad_ib(adev->mman.buffer_funcs_ring, &job->ibs[0]);
 	WARN_ON(job->ibs[0].length_dw > num_dw);
 
-	r = amdgpu_job_submit(job, &adev->mman.entity, AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
-	if (r) {
-		amdgpu_job_free(job);
-		goto out;
-	}
+	fence = amdgpu_job_submit(job);
 
 	if (!dma_fence_wait_timeout(fence, false, adev->sdma_timeout))
 		r = -ETIMEDOUT;
@@ -1959,7 +1952,9 @@ static int amdgpu_ttm_prepare_job(struct amdgpu_device *adev,
 		AMDGPU_IB_POOL_DELAYED;
 	int r;
 
-	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, pool, job);
+	r = amdgpu_job_alloc_with_ib(adev, &adev->mman.entity,
+				     AMDGPU_FENCE_OWNER_UNDEFINED,
+				     num_dw * 4, pool, job);
 	if (r)
 		return r;
 
@@ -1969,17 +1964,11 @@ static int amdgpu_ttm_prepare_job(struct amdgpu_device *adev,
 						adev->gart.bo);
 		(*job)->vm_needs_flush = true;
 	}
-	if (resv) {
-		r = amdgpu_sync_resv(adev, &(*job)->sync, resv,
-				     AMDGPU_SYNC_ALWAYS,
-				     AMDGPU_FENCE_OWNER_UNDEFINED);
-		if (r) {
-			DRM_ERROR("sync failed (%d).\n", r);
-			amdgpu_job_free(*job);
-			return r;
-		}
-	}
+	if (!resv)
+		return 0;
+
+	return drm_sched_job_add_resv_dependencies(&(*job)->base, resv,
+						   DMA_RESV_USAGE_BOOKKEEP);
 }
 
 int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
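Where no filtering by owner is needed, the hand-rolled amdgpu_sync_resv() round-trip gives way to the common helper, which walks a dma_resv and records every matching fence as a job dependency. A one-call sketch (assuming a TTM BO with its embedded GEM object):

    static int example_depend_on_bo(struct drm_sched_job *job,
    				struct ttm_buffer_object *bo)
    {
    	/* BOOKKEEP waits on everything, bookkeeping fences included. */
    	return drm_sched_job_add_resv_dependencies(job, bo->base.resv,
    						   DMA_RESV_USAGE_BOOKKEEP);
    }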
int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
|
||||
@ -2024,8 +2013,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
|
||||
if (direct_submit)
|
||||
r = amdgpu_job_submit_direct(job, ring, fence);
|
||||
else
|
||||
r = amdgpu_job_submit(job, &adev->mman.entity,
|
||||
AMDGPU_FENCE_OWNER_UNDEFINED, fence);
|
||||
*fence = amdgpu_job_submit(job);
|
||||
if (r)
|
||||
goto error_free;
|
||||
|
||||
@ -2070,16 +2058,8 @@ static int amdgpu_ttm_fill_mem(struct amdgpu_ring *ring, uint32_t src_data,
|
||||
|
||||
amdgpu_ring_pad_ib(ring, &job->ibs[0]);
|
||||
WARN_ON(job->ibs[0].length_dw > num_dw);
|
||||
r = amdgpu_job_submit(job, &adev->mman.entity,
|
||||
AMDGPU_FENCE_OWNER_UNDEFINED, fence);
|
||||
if (r)
|
||||
goto error_free;
|
||||
|
||||
*fence = amdgpu_job_submit(job);
|
||||
return 0;
|
||||
|
||||
error_free:
|
||||
amdgpu_job_free(job);
|
||||
return r;
|
||||
}
|
||||
|
||||
int amdgpu_fill_buffer(struct amdgpu_bo *bo,
|
||||
|
@ -1132,7 +1132,9 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
|
||||
unsigned offset_idx = 0;
|
||||
unsigned offset[3] = { UVD_BASE_SI, 0, 0 };
|
||||
|
||||
r = amdgpu_job_alloc_with_ib(adev, 64, direct ? AMDGPU_IB_POOL_DIRECT :
|
||||
r = amdgpu_job_alloc_with_ib(ring->adev, &adev->uvd.entity,
|
||||
AMDGPU_FENCE_OWNER_UNDEFINED,
|
||||
64, direct ? AMDGPU_IB_POOL_DIRECT :
|
||||
AMDGPU_IB_POOL_DELAYED, &job);
|
||||
if (r)
|
||||
return r;
|
||||
@ -1175,16 +1177,13 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
|
||||
if (r)
|
||||
goto err_free;
|
||||
} else {
|
||||
r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.base.resv,
|
||||
AMDGPU_SYNC_ALWAYS,
|
||||
AMDGPU_FENCE_OWNER_UNDEFINED);
|
||||
r = drm_sched_job_add_resv_dependencies(&job->base,
|
||||
bo->tbo.base.resv,
|
||||
DMA_RESV_USAGE_KERNEL);
|
||||
if (r)
|
||||
goto err_free;
|
||||
|
||||
r = amdgpu_job_submit(job, &adev->uvd.entity,
|
||||
AMDGPU_FENCE_OWNER_UNDEFINED, &f);
|
||||
if (r)
|
||||
goto err_free;
|
||||
f = amdgpu_job_submit(job);
|
||||
}
|
||||
|
||||
amdgpu_bo_reserve(bo, true);
|
||||
|
@@ -450,8 +450,10 @@ static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
 	uint64_t addr;
 	int i, r;
 
-	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
-				     AMDGPU_IB_POOL_DIRECT, &job);
+	r = amdgpu_job_alloc_with_ib(ring->adev, &ring->adev->vce.entity,
+				     AMDGPU_FENCE_OWNER_UNDEFINED,
+				     ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
+				     &job);
 	if (r)
 		return r;
 
@@ -538,7 +540,9 @@ static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 	struct dma_fence *f = NULL;
 	int i, r;
 
-	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+	r = amdgpu_job_alloc_with_ib(ring->adev, &ring->adev->vce.entity,
+				     AMDGPU_FENCE_OWNER_UNDEFINED,
+				     ib_size_dw * 4,
 				     direct ? AMDGPU_IB_POOL_DIRECT :
 				     AMDGPU_IB_POOL_DELAYED, &job);
 	if (r)
@@ -570,8 +574,7 @@ static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 	if (direct)
 		r = amdgpu_job_submit_direct(job, ring, &f);
 	else
-		r = amdgpu_job_submit(job, &ring->adev->vce.entity,
-				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
+		f = amdgpu_job_submit(job);
 	if (r)
 		goto err;
 
|
@ -600,15 +600,16 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
|
||||
struct amdgpu_ib *ib_msg,
|
||||
struct dma_fence **fence)
|
||||
{
|
||||
u64 addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
|
||||
struct amdgpu_device *adev = ring->adev;
|
||||
struct dma_fence *f = NULL;
|
||||
struct amdgpu_job *job;
|
||||
struct amdgpu_ib *ib;
|
||||
uint64_t addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
|
||||
int i, r;
|
||||
|
||||
r = amdgpu_job_alloc_with_ib(adev, 64,
|
||||
AMDGPU_IB_POOL_DIRECT, &job);
|
||||
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
|
||||
64, AMDGPU_IB_POOL_DIRECT,
|
||||
&job);
|
||||
if (r)
|
||||
goto err;
|
||||
|
||||
@ -787,8 +788,9 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
|
||||
if (sq)
|
||||
ib_size_dw += 8;
|
||||
|
||||
r = amdgpu_job_alloc_with_ib(adev, ib_size_dw * 4,
|
||||
AMDGPU_IB_POOL_DIRECT, &job);
|
||||
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
|
||||
ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
|
||||
&job);
|
||||
if (r)
|
||||
goto err;
|
||||
|
||||
@ -916,8 +918,9 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
|
||||
if (sq)
|
||||
ib_size_dw += 8;
|
||||
|
||||
r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
|
||||
AMDGPU_IB_POOL_DIRECT, &job);
|
||||
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
|
||||
ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
|
||||
&job);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
@ -982,8 +985,9 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
|
||||
if (sq)
|
||||
ib_size_dw += 8;
|
||||
|
||||
r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
|
||||
AMDGPU_IB_POOL_DIRECT, &job);
|
||||
r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL,
|
||||
ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT,
|
||||
&job);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
|
@ -47,6 +47,32 @@ static int amdgpu_vm_sdma_map_table(struct amdgpu_bo_vm *table)
|
||||
return r;
|
||||
}
|
||||
|
||||
/* Allocate a new job for @count PTE updates */
|
||||
static int amdgpu_vm_sdma_alloc_job(struct amdgpu_vm_update_params *p,
|
||||
unsigned int count)
|
||||
{
|
||||
enum amdgpu_ib_pool_type pool = p->immediate ? AMDGPU_IB_POOL_IMMEDIATE
|
||||
: AMDGPU_IB_POOL_DELAYED;
|
||||
struct drm_sched_entity *entity = p->immediate ? &p->vm->immediate
|
||||
: &p->vm->delayed;
|
||||
unsigned int ndw;
|
||||
int r;
|
||||
|
||||
/* estimate how many dw we need */
|
||||
ndw = AMDGPU_VM_SDMA_MIN_NUM_DW;
|
||||
if (p->pages_addr)
|
||||
ndw += count * 2;
|
||||
ndw = min(ndw, AMDGPU_VM_SDMA_MAX_NUM_DW);
|
||||
|
||||
r = amdgpu_job_alloc_with_ib(p->adev, entity, AMDGPU_FENCE_OWNER_VM,
|
||||
ndw * 4, pool, &p->job);
|
||||
if (r)
|
||||
return r;
|
||||
|
||||
p->num_dw_left = ndw;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* amdgpu_vm_sdma_prepare - prepare SDMA command submission
|
||||
*
|
||||
@@ -61,21 +87,22 @@ static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
 				  struct dma_resv *resv,
 				  enum amdgpu_sync_mode sync_mode)
 {
-	enum amdgpu_ib_pool_type pool = p->immediate ? AMDGPU_IB_POOL_IMMEDIATE
-		: AMDGPU_IB_POOL_DELAYED;
-	unsigned int ndw = AMDGPU_VM_SDMA_MIN_NUM_DW;
+	struct amdgpu_sync sync;
 	int r;
 
-	r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, pool, &p->job);
+	r = amdgpu_vm_sdma_alloc_job(p, 0);
 	if (r)
 		return r;
 
-	p->num_dw_left = ndw;
-
 	if (!resv)
 		return 0;
 
-	return amdgpu_sync_resv(p->adev, &p->job->sync, resv, sync_mode, p->vm);
+	amdgpu_sync_create(&sync);
+	r = amdgpu_sync_resv(p->adev, &sync, resv, sync_mode, p->vm);
+	if (!r)
+		r = amdgpu_sync_push_to_job(&sync, p->job);
+	amdgpu_sync_free(&sync);
+	return r;
 }
 
 /**
@@ -91,20 +118,16 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
 				 struct dma_fence **fence)
 {
 	struct amdgpu_ib *ib = p->job->ibs;
-	struct drm_sched_entity *entity;
 	struct amdgpu_ring *ring;
 	struct dma_fence *f;
-	int r;
 
-	entity = p->immediate ? &p->vm->immediate : &p->vm->delayed;
-	ring = container_of(entity->rq->sched, struct amdgpu_ring, sched);
+	ring = container_of(p->vm->delayed.rq->sched, struct amdgpu_ring,
+			    sched);
 
 	WARN_ON(ib->length_dw == 0);
 	amdgpu_ring_pad_ib(ring, ib);
 	WARN_ON(ib->length_dw > p->num_dw_left);
-	r = amdgpu_job_submit(p->job, entity, AMDGPU_FENCE_OWNER_VM, &f);
-	if (r)
-		goto error;
+	f = amdgpu_job_submit(p->job);
 
 	if (p->unlocked) {
 		struct dma_fence *tmp = dma_fence_get(f);
@@ -127,10 +150,6 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
 	}
 	dma_fence_put(f);
 	return 0;
-
-error:
-	amdgpu_job_free(p->job);
-	return r;
 }
 
 /**
@@ -210,8 +229,6 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
 			       uint64_t flags)
 {
 	struct amdgpu_bo *bo = &vmbo->bo;
-	enum amdgpu_ib_pool_type pool = p->immediate ? AMDGPU_IB_POOL_IMMEDIATE
-		: AMDGPU_IB_POOL_DELAYED;
 	struct dma_resv_iter cursor;
 	unsigned int i, ndw, nptes;
 	struct dma_fence *fence;
@@ -221,7 +238,7 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
 	/* Wait for PD/PT moves to be completed */
 	dma_resv_iter_begin(&cursor, bo->tbo.base.resv, DMA_RESV_USAGE_KERNEL);
 	dma_resv_for_each_fence_unlocked(&cursor, fence) {
-		r = amdgpu_sync_fence(&p->job->sync, fence);
+		r = drm_sched_job_add_dependency(&p->job->base, fence);
 		if (r) {
 			dma_resv_iter_end(&cursor);
 			return r;
@@ -238,19 +255,9 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
 			if (r)
 				return r;
 
-			/* estimate how many dw we need */
-			ndw = 32;
-			if (p->pages_addr)
-				ndw += count * 2;
-			ndw = max(ndw, AMDGPU_VM_SDMA_MIN_NUM_DW);
-			ndw = min(ndw, AMDGPU_VM_SDMA_MAX_NUM_DW);
-
-			r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, pool,
-						     &p->job);
+			r = amdgpu_vm_sdma_alloc_job(p, count);
 			if (r)
 				return r;
-
-			p->num_dw_left = ndw;
 		}
 
 		if (!p->pages_addr) {
@@ -21,6 +21,7 @@
  *
  */
 
+#include <drm/drm_fb_helper.h>
 #include <drm/drm_fourcc.h>
 #include <drm/drm_vblank.h>
 

@@ -21,6 +21,7 @@
 *
 */
 
+#include <drm/drm_fb_helper.h>
 #include <drm/drm_fourcc.h>
 #include <drm/drm_vblank.h>
 

@@ -23,6 +23,7 @@
 
 #include <linux/pci.h>
 
+#include <drm/drm_fb_helper.h>
 #include <drm/drm_fourcc.h>
 #include <drm/drm_vblank.h>
 

@@ -21,6 +21,7 @@
 *
 */
 
+#include <drm/drm_fb_helper.h>
 #include <drm/drm_fourcc.h>
 #include <drm/drm_vblank.h>
 
@@ -371,7 +371,9 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
 	 * translation. Avoid this by doing the invalidation from the SDMA
 	 * itself.
 	 */
-	r = amdgpu_job_alloc_with_ib(adev, 16 * 4, AMDGPU_IB_POOL_IMMEDIATE,
+	r = amdgpu_job_alloc_with_ib(ring->adev, &adev->mman.entity,
+				     AMDGPU_FENCE_OWNER_UNDEFINED,
+				     16 * 4, AMDGPU_IB_POOL_IMMEDIATE,
 				     &job);
 	if (r)
 		goto error_alloc;
@@ -380,10 +382,7 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
 	job->vm_needs_flush = true;
 	job->ibs->ptr[job->ibs->length_dw++] = ring->funcs->nop;
 	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
-	r = amdgpu_job_submit(job, &adev->mman.entity,
-			      AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
-	if (r)
-		goto error_submit;
+	fence = amdgpu_job_submit(job);
 
 	mutex_unlock(&adev->mman.gtt_window_lock);
 
@@ -392,9 +391,6 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
 
 	return;
 
-error_submit:
-	amdgpu_job_free(job);
-
 error_alloc:
 	mutex_unlock(&adev->mman.gtt_window_lock);
 	DRM_ERROR("Error flushing GPU TLB using the SDMA (%d)!\n", r);
@@ -216,7 +216,7 @@ static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle
 	uint64_t addr;
 	int i, r;
 
-	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
 				     AMDGPU_IB_POOL_DIRECT, &job);
 	if (r)
 		return r;
@@ -280,7 +280,7 @@ static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring,
 	uint64_t addr;
 	int i, r;
 
-	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
 				     AMDGPU_IB_POOL_DIRECT, &job);
 	if (r)
 		return r;
@@ -213,7 +213,7 @@ static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
 *
 * Open up a stream for HW test
 */
-static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
+static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, u32 handle,
 				       struct amdgpu_bo *bo,
 				       struct dma_fence **fence)
 {
@@ -224,7 +224,7 @@ static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle
 	uint64_t addr;
 	int i, r;
 
-	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
 				     AMDGPU_IB_POOL_DIRECT, &job);
 	if (r)
 		return r;
@@ -276,7 +276,7 @@ err:
 *
 * Close up a stream for HW test or if userspace failed to do so
 */
-static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
+static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, u32 handle,
 					struct amdgpu_bo *bo,
 					struct dma_fence **fence)
 {
@@ -287,7 +287,7 @@ static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handl
 	uint64_t addr;
 	int i, r;
 
-	r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4,
+	r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4,
 				     AMDGPU_IB_POOL_DIRECT, &job);
 	if (r)
 		return r;
@@ -65,8 +65,11 @@ svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages,
 	num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
 	num_bytes = npages * 8;
 
-	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes,
-				     AMDGPU_IB_POOL_DELAYED, &job);
+	r = amdgpu_job_alloc_with_ib(adev, &adev->mman.entity,
+				     AMDGPU_FENCE_OWNER_UNDEFINED,
+				     num_dw * 4 + num_bytes,
+				     AMDGPU_IB_POOL_DELAYED,
+				     &job);
 	if (r)
 		return r;
 
@@ -89,18 +92,10 @@ svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages,
 	cpu_addr = &job->ibs[0].ptr[num_dw];
 
 	amdgpu_gart_map(adev, 0, npages, addr, pte_flags, cpu_addr);
-	r = amdgpu_job_submit(job, &adev->mman.entity,
-			      AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
-	if (r)
-		goto error_free;
-
+	fence = amdgpu_job_submit(job);
 	dma_fence_put(fence);
 
 	return r;
-
-error_free:
-	amdgpu_job_free(job);
-	return r;
 }
 
 /**
@@ -82,7 +82,6 @@
 #include <drm/drm_atomic_uapi.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_blend.h>
-#include <drm/drm_fb_helper.h>
 #include <drm/drm_fourcc.h>
 #include <drm/drm_edid.h>
 #include <drm/drm_vblank.h>
@@ -2812,7 +2811,6 @@ const struct amdgpu_ip_block_version dm_ip_block =
 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
 	.fb_create = amdgpu_display_user_framebuffer_create,
 	.get_format_info = amd_get_format_info,
-	.output_poll_changed = drm_fb_helper_output_poll_changed,
 	.atomic_check = amdgpu_dm_atomic_check,
 	.atomic_commit = drm_atomic_helper_commit,
 };
@@ -9,7 +9,7 @@
 #include <linux/platform_device.h>
 #include <linux/component.h>
 #include <linux/pm_runtime.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_fbdev_generic.h>
 #include <drm/drm_module.h>
 #include <drm/drm_of.h>
 #include "komeda_dev.h"

@@ -10,7 +10,6 @@
 #include <drm/drm_atomic.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_drv.h>
-#include <drm/drm_fb_helper.h>
 #include <drm/drm_gem_dma_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
 #include <drm/drm_managed.h>
@@ -59,7 +58,6 @@ static irqreturn_t komeda_kms_irq_handler(int irq, void *data)
 
 static const struct drm_driver komeda_kms_driver = {
 	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
-	.lastclose = drm_fb_helper_lastclose,
 	DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(komeda_gem_dma_dumb_create),
 	.fops = &komeda_cma_fops,
 	.name = "komeda",
@@ -19,7 +19,6 @@
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_fb_dma_helper.h>
-#include <drm/drm_fb_helper.h>
 #include <drm/drm_framebuffer.h>
 #include <drm/drm_gem_dma_helper.h>
 #include <drm/drm_of.h>
@@ -275,7 +274,7 @@ static void hdlcd_plane_atomic_update(struct drm_plane *plane,
 	dest_h = drm_rect_height(&new_plane_state->dst);
 	scanout_start = drm_fb_dma_get_gem_addr(fb, new_plane_state, 0);
 
-	hdlcd = plane->dev->dev_private;
+	hdlcd = drm_to_hdlcd_priv(plane->dev);
 	hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_LENGTH, fb->pitches[0]);
 	hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_PITCH, fb->pitches[0]);
 	hdlcd_write(hdlcd, HDLCD_REG_FB_LINE_COUNT, dest_h - 1);
@@ -290,7 +289,6 @@ static const struct drm_plane_helper_funcs hdlcd_plane_helper_funcs = {
 static const struct drm_plane_funcs hdlcd_plane_funcs = {
 	.update_plane		= drm_atomic_helper_update_plane,
 	.disable_plane		= drm_atomic_helper_disable_plane,
-	.destroy		= drm_plane_cleanup,
 	.reset			= drm_atomic_helper_plane_reset,
 	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
 	.atomic_destroy_state	= drm_atomic_helper_plane_destroy_state,
@@ -298,24 +296,19 @@ static const struct drm_plane_funcs hdlcd_plane_funcs = {
 
 static struct drm_plane *hdlcd_plane_init(struct drm_device *drm)
 {
-	struct hdlcd_drm_private *hdlcd = drm->dev_private;
+	struct hdlcd_drm_private *hdlcd = drm_to_hdlcd_priv(drm);
 	struct drm_plane *plane = NULL;
 	u32 formats[ARRAY_SIZE(supported_formats)], i;
-	int ret;
-
-	plane = devm_kzalloc(drm->dev, sizeof(*plane), GFP_KERNEL);
-	if (!plane)
-		return ERR_PTR(-ENOMEM);
 
 	for (i = 0; i < ARRAY_SIZE(supported_formats); i++)
 		formats[i] = supported_formats[i].fourcc;
 
-	ret = drm_universal_plane_init(drm, plane, 0xff, &hdlcd_plane_funcs,
-				       formats, ARRAY_SIZE(formats),
-				       NULL,
-				       DRM_PLANE_TYPE_PRIMARY, NULL);
-	if (ret)
-		return ERR_PTR(ret);
+	plane = drmm_universal_plane_alloc(drm, struct drm_plane, dev, 0xff,
+					   &hdlcd_plane_funcs,
+					   formats, ARRAY_SIZE(formats),
+					   NULL, DRM_PLANE_TYPE_PRIMARY, NULL);
+	if (IS_ERR(plane))
+		return plane;
 
 	drm_plane_helper_add(plane, &hdlcd_plane_helper_funcs);
 	hdlcd->plane = plane;
@@ -325,7 +318,7 @@ static struct drm_plane *hdlcd_plane_init(struct drm_device *drm)
 
 int hdlcd_setup_crtc(struct drm_device *drm)
 {
-	struct hdlcd_drm_private *hdlcd = drm->dev_private;
+	struct hdlcd_drm_private *hdlcd = drm_to_hdlcd_priv(drm);
 	struct drm_plane *primary;
 	int ret;
 
|
@ -26,7 +26,7 @@
|
||||
#include <drm/drm_crtc.h>
|
||||
#include <drm/drm_debugfs.h>
|
||||
#include <drm/drm_drv.h>
|
||||
#include <drm/drm_fb_helper.h>
|
||||
#include <drm/drm_fbdev_generic.h>
|
||||
#include <drm/drm_gem_dma_helper.h>
|
||||
#include <drm/drm_gem_framebuffer_helper.h>
|
||||
#include <drm/drm_modeset_helper.h>
|
||||
@ -98,7 +98,7 @@ static void hdlcd_irq_uninstall(struct hdlcd_drm_private *hdlcd)
|
||||
|
||||
static int hdlcd_load(struct drm_device *drm, unsigned long flags)
|
||||
{
|
||||
struct hdlcd_drm_private *hdlcd = drm->dev_private;
|
||||
struct hdlcd_drm_private *hdlcd = drm_to_hdlcd_priv(drm);
|
||||
struct platform_device *pdev = to_platform_device(drm->dev);
|
||||
struct resource *res;
|
||||
u32 version;
|
||||
@ -175,14 +175,21 @@ static const struct drm_mode_config_funcs hdlcd_mode_config_funcs = {
|
||||
.atomic_commit = drm_atomic_helper_commit,
|
||||
};
|
||||
|
||||
static void hdlcd_setup_mode_config(struct drm_device *drm)
|
||||
static int hdlcd_setup_mode_config(struct drm_device *drm)
|
||||
{
|
||||
drm_mode_config_init(drm);
|
||||
int ret;
|
||||
|
||||
ret = drmm_mode_config_init(drm);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
drm->mode_config.min_width = 0;
|
||||
drm->mode_config.min_height = 0;
|
||||
drm->mode_config.max_width = HDLCD_MAX_XRES;
|
||||
drm->mode_config.max_height = HDLCD_MAX_YRES;
|
||||
drm->mode_config.funcs = &hdlcd_mode_config_funcs;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_DEBUG_FS
|
||||
@ -190,7 +197,7 @@ static int hdlcd_show_underrun_count(struct seq_file *m, void *arg)
|
||||
{
|
||||
struct drm_info_node *node = (struct drm_info_node *)m->private;
|
||||
struct drm_device *drm = node->minor->dev;
|
||||
struct hdlcd_drm_private *hdlcd = drm->dev_private;
|
||||
struct hdlcd_drm_private *hdlcd = drm_to_hdlcd_priv(drm);
|
||||
|
||||
seq_printf(m, "underrun : %d\n", atomic_read(&hdlcd->buffer_underrun_count));
|
||||
seq_printf(m, "dma_end : %d\n", atomic_read(&hdlcd->dma_end_count));
|
||||
@ -203,7 +210,7 @@ static int hdlcd_show_pxlclock(struct seq_file *m, void *arg)
|
||||
{
|
||||
struct drm_info_node *node = (struct drm_info_node *)m->private;
|
||||
struct drm_device *drm = node->minor->dev;
|
||||
struct hdlcd_drm_private *hdlcd = drm->dev_private;
|
||||
struct hdlcd_drm_private *hdlcd = drm_to_hdlcd_priv(drm);
|
||||
unsigned long clkrate = clk_get_rate(hdlcd->clk);
|
||||
unsigned long mode_clock = hdlcd->crtc.mode.crtc_clock * 1000;
|
||||
|
||||
@ -247,18 +254,18 @@ static int hdlcd_drm_bind(struct device *dev)
|
||||
struct hdlcd_drm_private *hdlcd;
|
||||
int ret;
|
||||
|
||||
hdlcd = devm_kzalloc(dev, sizeof(*hdlcd), GFP_KERNEL);
|
||||
if (!hdlcd)
|
||||
return -ENOMEM;
|
||||
hdlcd = devm_drm_dev_alloc(dev, &hdlcd_driver, typeof(*hdlcd), base);
|
||||
if (IS_ERR(hdlcd))
|
||||
return PTR_ERR(hdlcd);
|
||||
|
||||
drm = drm_dev_alloc(&hdlcd_driver, dev);
|
||||
if (IS_ERR(drm))
|
||||
return PTR_ERR(drm);
|
||||
drm = &hdlcd->base;
|
||||
|
||||
drm->dev_private = hdlcd;
|
||||
dev_set_drvdata(dev, drm);
|
||||
|
||||
hdlcd_setup_mode_config(drm);
|
||||
ret = hdlcd_setup_mode_config(drm);
|
||||
if (ret)
|
||||
goto err_free;
|
||||
|
||||
ret = hdlcd_load(drm, 0);
|
||||
if (ret)
|
||||
goto err_free;
|
||||
@ -317,17 +324,14 @@ err_unload:
|
||||
hdlcd_irq_uninstall(hdlcd);
|
||||
of_reserved_mem_device_release(drm->dev);
|
||||
err_free:
|
||||
drm_mode_config_cleanup(drm);
|
||||
dev_set_drvdata(dev, NULL);
|
||||
drm_dev_put(drm);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void hdlcd_drm_unbind(struct device *dev)
|
||||
{
|
||||
struct drm_device *drm = dev_get_drvdata(dev);
|
||||
struct hdlcd_drm_private *hdlcd = drm->dev_private;
|
||||
struct hdlcd_drm_private *hdlcd = drm_to_hdlcd_priv(drm);
|
||||
|
||||
drm_dev_unregister(drm);
|
||||
drm_kms_helper_poll_fini(drm);
|
||||
@ -341,10 +345,7 @@ static void hdlcd_drm_unbind(struct device *dev)
|
||||
if (pm_runtime_enabled(dev))
|
||||
pm_runtime_disable(dev);
|
||||
of_reserved_mem_device_release(dev);
|
||||
drm_mode_config_cleanup(drm);
|
||||
drm->dev_private = NULL;
|
||||
dev_set_drvdata(dev, NULL);
|
||||
drm_dev_put(drm);
|
||||
}
|
||||
|
||||
static const struct component_master_ops hdlcd_master_ops = {
|
||||
|
@@ -7,6 +7,7 @@
#define __HDLCD_DRV_H__

struct hdlcd_drm_private {
    struct drm_device base;
    void __iomem *mmio;
    struct clk *clk;
    struct drm_crtc crtc;

@@ -20,6 +21,7 @@ struct hdlcd_drm_private {
#endif
};

#define drm_to_hdlcd_priv(x) container_of(x, struct hdlcd_drm_private, base)
#define crtc_to_hdlcd_priv(x) container_of(x, struct hdlcd_drm_private, crtc)

static inline void hdlcd_write(struct hdlcd_drm_private *hdlcd,
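The hdlcd conversion above follows the DRM-managed allocation pattern: embed struct drm_device in the driver's private structure, allocate both at once with devm_drm_dev_alloc(), and recover the private structure through container_of() instead of drm->dev_private. A minimal sketch, with illustrative foo_* names that are not part of this patch:

struct foo_priv {
    struct drm_device base;
    void __iomem *mmio;
};

#define drm_to_foo(x) container_of(x, struct foo_priv, base)

static int foo_bind(struct device *dev)
{
    struct foo_priv *priv;

    /* allocates foo_priv and initializes priv->base; cleanup is DRM-managed */
    priv = devm_drm_dev_alloc(dev, &foo_driver, struct foo_priv, base);
    if (IS_ERR(priv))
        return PTR_ERR(priv);

    /* no drm_dev_put()/kfree() needed on later error paths */
    return drm_dev_register(&priv->base, 0);
}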
@@ -19,7 +19,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>

@@ -19,6 +19,8 @@
static const struct fb_ops armada_fb_ops = {
    .owner = THIS_MODULE,
    DRM_FB_HELPER_DEFAULT_OPS,
    .fb_read = drm_fb_helper_cfb_read,
    .fb_write = drm_fb_helper_cfb_write,
    .fb_fillrect = drm_fb_helper_cfb_fillrect,
    .fb_copyarea = drm_fb_helper_cfb_copyarea,
    .fb_imageblit = drm_fb_helper_cfb_imageblit,

@@ -72,7 +74,7 @@ static int armada_fbdev_create(struct drm_fb_helper *fbh,
    if (IS_ERR(dfb))
        return PTR_ERR(dfb);

    info = drm_fb_helper_alloc_fbi(fbh);
    info = drm_fb_helper_alloc_info(fbh);
    if (IS_ERR(info)) {
        ret = PTR_ERR(info);
        goto err_fballoc;

@@ -155,7 +157,7 @@ void armada_fbdev_fini(struct drm_device *dev)
    struct drm_fb_helper *fbh = priv->fbdev;

    if (fbh) {
        drm_fb_helper_unregister_fbi(fbh);
        drm_fb_helper_unregister_info(fbh);

        drm_fb_helper_fini(fbh);

@@ -16,7 +16,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_device.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_module.h>

@@ -33,6 +33,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_module.h>
#include <drm/drm_probe_helper.h>

@@ -38,7 +38,6 @@
#include <drm/drm_encoder.h>
#include <drm/drm_mode.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_fb_helper.h>

#define DRIVER_AUTHOR "Dave Airlie"

@@ -19,7 +19,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_module.h>

@@ -11,6 +11,7 @@
 */

#include <linux/delay.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of_graph.h>
#include <linux/regulator/consumer.h>

@@ -19,7 +20,6 @@

#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
@@ -2536,7 +2536,7 @@ int drm_atomic_helper_prepare_planes(struct drm_device *dev,
        if (funcs->prepare_fb) {
            ret = funcs->prepare_fb(plane, new_plane_state);
            if (ret)
                goto fail;
                goto fail_prepare_fb;
        } else {
            WARN_ON_ONCE(funcs->cleanup_fb);

@@ -2545,13 +2545,34 @@ int drm_atomic_helper_prepare_planes(struct drm_device *dev,

            ret = drm_gem_plane_helper_prepare_fb(plane, new_plane_state);
            if (ret)
                goto fail;
                goto fail_prepare_fb;
        }
    }

    for_each_new_plane_in_state(state, plane, new_plane_state, i) {
        const struct drm_plane_helper_funcs *funcs = plane->helper_private;

        if (funcs->begin_fb_access) {
            ret = funcs->begin_fb_access(plane, new_plane_state);
            if (ret)
                goto fail_begin_fb_access;
        }
    }

    return 0;

fail:
fail_begin_fb_access:
    for_each_new_plane_in_state(state, plane, new_plane_state, j) {
        const struct drm_plane_helper_funcs *funcs = plane->helper_private;

        if (j >= i)
            continue;

        if (funcs->end_fb_access)
            funcs->end_fb_access(plane, new_plane_state);
    }
    i = j; /* set i to upper limit to cleanup all planes */
fail_prepare_fb:
    for_each_new_plane_in_state(state, plane, new_plane_state, j) {
        const struct drm_plane_helper_funcs *funcs;

@@ -2827,6 +2848,13 @@ void drm_atomic_helper_cleanup_planes(struct drm_device *dev,
    struct drm_plane_state *old_plane_state, *new_plane_state;
    int i;

    for_each_oldnew_plane_in_state(old_state, plane, old_plane_state, new_plane_state, i) {
        const struct drm_plane_helper_funcs *funcs = plane->helper_private;

        if (funcs->end_fb_access)
            funcs->end_fb_access(plane, new_plane_state);
    }

    for_each_oldnew_plane_in_state(old_state, plane, old_plane_state, new_plane_state, i) {
        const struct drm_plane_helper_funcs *funcs;
        struct drm_plane_state *plane_state;
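For driver authors, the new hooks pair up around each commit: begin_fb_access() runs at the end of drm_atomic_helper_prepare_planes() after prepare_fb(), and end_fb_access() runs from drm_atomic_helper_cleanup_planes(), so anything acquired in the former is only valid for the duration of the commit. A hedged sketch of the wiring (foo_* names are illustrative, not from this series):

static int foo_plane_begin_fb_access(struct drm_plane *plane,
                                     struct drm_plane_state *new_state)
{
    /* e.g. map the framebuffer BO for CPU access during this commit */
    return 0;
}

static void foo_plane_end_fb_access(struct drm_plane *plane,
                                    struct drm_plane_state *new_state)
{
    /* undo whatever begin_fb_access() did */
}

static const struct drm_plane_helper_funcs foo_plane_helper_funcs = {
    .prepare_fb = drm_gem_plane_helper_prepare_fb,
    .begin_fb_access = foo_plane_begin_fb_access,
    .end_fb_access = foo_plane_end_fb_access,
    .atomic_check = foo_plane_atomic_check,
    .atomic_update = foo_plane_atomic_update,
};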
@@ -43,7 +43,6 @@
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_encoder.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_print.h>
(File diff suppressed because it is too large.)

drivers/gpu/drm/drm_fbdev_generic.c (new file, 494 lines)
@@ -0,0 +1,494 @@
// SPDX-License-Identifier: MIT

#include <linux/moduleparam.h>
#include <linux/vmalloc.h>

#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_print.h>

#include <drm/drm_fbdev_generic.h>

static bool drm_fbdev_use_shadow_fb(struct drm_fb_helper *fb_helper)
{
    struct drm_device *dev = fb_helper->dev;
    struct drm_framebuffer *fb = fb_helper->fb;

    return dev->mode_config.prefer_shadow_fbdev ||
           dev->mode_config.prefer_shadow ||
           fb->funcs->dirty;
}

/* @user: 1=userspace, 0=fbcon */
static int drm_fbdev_fb_open(struct fb_info *info, int user)
{
    struct drm_fb_helper *fb_helper = info->par;

    /* No need to take a ref for fbcon because it unbinds on unregister */
    if (user && !try_module_get(fb_helper->dev->driver->fops->owner))
        return -ENODEV;

    return 0;
}

static int drm_fbdev_fb_release(struct fb_info *info, int user)
{
    struct drm_fb_helper *fb_helper = info->par;

    if (user)
        module_put(fb_helper->dev->driver->fops->owner);

    return 0;
}

static void drm_fbdev_cleanup(struct drm_fb_helper *fb_helper)
{
    struct fb_info *fbi = fb_helper->info;
    void *shadow = NULL;

    if (!fb_helper->dev)
        return;

    if (fbi) {
        if (fbi->fbdefio)
            fb_deferred_io_cleanup(fbi);
        if (drm_fbdev_use_shadow_fb(fb_helper))
            shadow = fbi->screen_buffer;
    }

    drm_fb_helper_fini(fb_helper);

    if (shadow)
        vfree(shadow);
    else if (fb_helper->buffer)
        drm_client_buffer_vunmap(fb_helper->buffer);

    drm_client_framebuffer_delete(fb_helper->buffer);
}

static void drm_fbdev_release(struct drm_fb_helper *fb_helper)
{
    drm_fbdev_cleanup(fb_helper);
    drm_client_release(&fb_helper->client);
    kfree(fb_helper);
}

/*
 * fb_ops.fb_destroy is called by the last put_fb_info() call at the end of
 * unregister_framebuffer() or fb_release().
 */
static void drm_fbdev_fb_destroy(struct fb_info *info)
{
    drm_fbdev_release(info->par);
}

static int drm_fbdev_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
    struct drm_fb_helper *fb_helper = info->par;

    if (drm_fbdev_use_shadow_fb(fb_helper))
        return fb_deferred_io_mmap(info, vma);
    else if (fb_helper->dev->driver->gem_prime_mmap)
        return fb_helper->dev->driver->gem_prime_mmap(fb_helper->buffer->gem, vma);
    else
        return -ENODEV;
}

static bool drm_fbdev_use_iomem(struct fb_info *info)
{
    struct drm_fb_helper *fb_helper = info->par;
    struct drm_client_buffer *buffer = fb_helper->buffer;

    return !drm_fbdev_use_shadow_fb(fb_helper) && buffer->map.is_iomem;
}

static ssize_t drm_fbdev_fb_read(struct fb_info *info, char __user *buf,
                                 size_t count, loff_t *ppos)
{
    ssize_t ret;

    if (drm_fbdev_use_iomem(info))
        ret = drm_fb_helper_cfb_read(info, buf, count, ppos);
    else
        ret = drm_fb_helper_sys_read(info, buf, count, ppos);

    return ret;
}

static ssize_t drm_fbdev_fb_write(struct fb_info *info, const char __user *buf,
                                  size_t count, loff_t *ppos)
{
    ssize_t ret;

    if (drm_fbdev_use_iomem(info))
        ret = drm_fb_helper_cfb_write(info, buf, count, ppos);
    else
        ret = drm_fb_helper_sys_write(info, buf, count, ppos);

    return ret;
}

static void drm_fbdev_fb_fillrect(struct fb_info *info,
                                  const struct fb_fillrect *rect)
{
    if (drm_fbdev_use_iomem(info))
        drm_fb_helper_cfb_fillrect(info, rect);
    else
        drm_fb_helper_sys_fillrect(info, rect);
}

static void drm_fbdev_fb_copyarea(struct fb_info *info,
                                  const struct fb_copyarea *area)
{
    if (drm_fbdev_use_iomem(info))
        drm_fb_helper_cfb_copyarea(info, area);
    else
        drm_fb_helper_sys_copyarea(info, area);
}

static void drm_fbdev_fb_imageblit(struct fb_info *info,
                                   const struct fb_image *image)
{
    if (drm_fbdev_use_iomem(info))
        drm_fb_helper_cfb_imageblit(info, image);
    else
        drm_fb_helper_sys_imageblit(info, image);
}

static const struct fb_ops drm_fbdev_fb_ops = {
    .owner = THIS_MODULE,
    DRM_FB_HELPER_DEFAULT_OPS,
    .fb_open = drm_fbdev_fb_open,
    .fb_release = drm_fbdev_fb_release,
    .fb_destroy = drm_fbdev_fb_destroy,
    .fb_mmap = drm_fbdev_fb_mmap,
    .fb_read = drm_fbdev_fb_read,
    .fb_write = drm_fbdev_fb_write,
    .fb_fillrect = drm_fbdev_fb_fillrect,
    .fb_copyarea = drm_fbdev_fb_copyarea,
    .fb_imageblit = drm_fbdev_fb_imageblit,
};

static struct fb_deferred_io drm_fbdev_defio = {
    .delay = HZ / 20,
    .deferred_io = drm_fb_helper_deferred_io,
};

/*
 * This function uses the client API to create a framebuffer backed by a dumb buffer.
 */
static int drm_fbdev_fb_probe(struct drm_fb_helper *fb_helper,
                              struct drm_fb_helper_surface_size *sizes)
{
    struct drm_client_dev *client = &fb_helper->client;
    struct drm_device *dev = fb_helper->dev;
    struct drm_client_buffer *buffer;
    struct drm_framebuffer *fb;
    struct fb_info *fbi;
    u32 format;
    struct iosys_map map;
    int ret;

    drm_dbg_kms(dev, "surface width(%d), height(%d) and bpp(%d)\n",
                sizes->surface_width, sizes->surface_height,
                sizes->surface_bpp);

    format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth);
    buffer = drm_client_framebuffer_create(client, sizes->surface_width,
                                           sizes->surface_height, format);
    if (IS_ERR(buffer))
        return PTR_ERR(buffer);

    fb_helper->buffer = buffer;
    fb_helper->fb = buffer->fb;
    fb = buffer->fb;

    fbi = drm_fb_helper_alloc_info(fb_helper);
    if (IS_ERR(fbi))
        return PTR_ERR(fbi);

    fbi->fbops = &drm_fbdev_fb_ops;
    fbi->screen_size = sizes->surface_height * fb->pitches[0];
    fbi->fix.smem_len = fbi->screen_size;
    fbi->flags = FBINFO_DEFAULT;

    drm_fb_helper_fill_info(fbi, fb_helper, sizes);

    if (drm_fbdev_use_shadow_fb(fb_helper)) {
        fbi->screen_buffer = vzalloc(fbi->screen_size);
        if (!fbi->screen_buffer)
            return -ENOMEM;
        fbi->flags |= FBINFO_VIRTFB | FBINFO_READS_FAST;

        fbi->fbdefio = &drm_fbdev_defio;
        fb_deferred_io_init(fbi);
    } else {
        /* buffer is mapped for HW framebuffer */
        ret = drm_client_buffer_vmap(fb_helper->buffer, &map);
        if (ret)
            return ret;
        if (map.is_iomem) {
            fbi->screen_base = map.vaddr_iomem;
        } else {
            fbi->screen_buffer = map.vaddr;
            fbi->flags |= FBINFO_VIRTFB;
        }

        /*
         * Shamelessly leak the physical address to user-space. As
         * page_to_phys() is undefined for I/O memory, warn in this
         * case.
         */
#if IS_ENABLED(CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM)
        if (fb_helper->hint_leak_smem_start && fbi->fix.smem_start == 0 &&
            !drm_WARN_ON_ONCE(dev, map.is_iomem))
            fbi->fix.smem_start =
                page_to_phys(virt_to_page(fbi->screen_buffer));
#endif
    }

    return 0;
}

static void drm_fbdev_damage_blit_real(struct drm_fb_helper *fb_helper,
                                       struct drm_clip_rect *clip,
                                       struct iosys_map *dst)
{
    struct drm_framebuffer *fb = fb_helper->fb;
    size_t offset = clip->y1 * fb->pitches[0];
    size_t len = clip->x2 - clip->x1;
    unsigned int y;
    void *src;

    switch (drm_format_info_bpp(fb->format, 0)) {
    case 1:
        offset += clip->x1 / 8;
        len = DIV_ROUND_UP(len + clip->x1 % 8, 8);
        break;
    case 2:
        offset += clip->x1 / 4;
        len = DIV_ROUND_UP(len + clip->x1 % 4, 4);
        break;
    case 4:
        offset += clip->x1 / 2;
        len = DIV_ROUND_UP(len + clip->x1 % 2, 2);
        break;
    default:
        offset += clip->x1 * fb->format->cpp[0];
        len *= fb->format->cpp[0];
        break;
    }

    src = fb_helper->info->screen_buffer + offset;
    iosys_map_incr(dst, offset); /* go to first pixel within clip rect */

    for (y = clip->y1; y < clip->y2; y++) {
        iosys_map_memcpy_to(dst, 0, src, len);
        iosys_map_incr(dst, fb->pitches[0]);
        src += fb->pitches[0];
    }
}

static int drm_fbdev_damage_blit(struct drm_fb_helper *fb_helper,
                                 struct drm_clip_rect *clip)
{
    struct drm_client_buffer *buffer = fb_helper->buffer;
    struct iosys_map map, dst;
    int ret;

    /*
     * We have to pin the client buffer to its current location while
     * flushing the shadow buffer. In the general case, concurrent
     * modesetting operations could try to move the buffer and would
     * fail. The modeset has to be serialized by acquiring the reservation
     * object of the underlying BO here.
     *
     * For fbdev emulation, we only have to protect against fbdev modeset
     * operations. Nothing else will involve the client buffer's BO. So it
     * is sufficient to acquire struct drm_fb_helper.lock here.
     */
    mutex_lock(&fb_helper->lock);

    ret = drm_client_buffer_vmap(buffer, &map);
    if (ret)
        goto out;

    dst = map;
    drm_fbdev_damage_blit_real(fb_helper, clip, &dst);

    drm_client_buffer_vunmap(buffer);

out:
    mutex_unlock(&fb_helper->lock);

    return ret;
}

static int drm_fbdev_fb_dirty(struct drm_fb_helper *helper, struct drm_clip_rect *clip)
{
    struct drm_device *dev = helper->dev;
    int ret;

    if (!drm_fbdev_use_shadow_fb(helper))
        return 0;

    /* Call damage handlers only if necessary */
    if (!(clip->x1 < clip->x2 && clip->y1 < clip->y2))
        return 0;

    if (helper->buffer) {
        ret = drm_fbdev_damage_blit(helper, clip);
        if (drm_WARN_ONCE(dev, ret, "Damage blitter failed: ret=%d\n", ret))
            return ret;
    }

    if (helper->fb->funcs->dirty) {
        ret = helper->fb->funcs->dirty(helper->fb, NULL, 0, 0, clip, 1);
        if (drm_WARN_ONCE(dev, ret, "Dirty helper failed: ret=%d\n", ret))
            return ret;
    }

    return 0;
}

static const struct drm_fb_helper_funcs drm_fb_helper_generic_funcs = {
    .fb_probe = drm_fbdev_fb_probe,
    .fb_dirty = drm_fbdev_fb_dirty,
};

static void drm_fbdev_client_unregister(struct drm_client_dev *client)
{
    struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);

    if (fb_helper->info)
        /* drm_fbdev_fb_destroy() takes care of cleanup */
        drm_fb_helper_unregister_info(fb_helper);
    else
        drm_fbdev_release(fb_helper);
}

static int drm_fbdev_client_restore(struct drm_client_dev *client)
{
    drm_fb_helper_lastclose(client->dev);

    return 0;
}

static int drm_fbdev_client_hotplug(struct drm_client_dev *client)
{
    struct drm_fb_helper *fb_helper = drm_fb_helper_from_client(client);
    struct drm_device *dev = client->dev;
    int ret;

    /* Setup is not retried if it has failed */
    if (!fb_helper->dev && fb_helper->funcs)
        return 0;

    if (dev->fb_helper)
        return drm_fb_helper_hotplug_event(dev->fb_helper);

    if (!dev->mode_config.num_connector) {
        drm_dbg_kms(dev, "No connectors found, will not create framebuffer!\n");
        return 0;
    }

    drm_fb_helper_prepare(dev, fb_helper, &drm_fb_helper_generic_funcs);

    ret = drm_fb_helper_init(dev, fb_helper);
    if (ret)
        goto err;

    if (!drm_drv_uses_atomic_modeset(dev))
        drm_helper_disable_unused_functions(dev);

    ret = drm_fb_helper_initial_config(fb_helper, fb_helper->preferred_bpp);
    if (ret)
        goto err_cleanup;

    return 0;

err_cleanup:
    drm_fbdev_cleanup(fb_helper);
err:
    fb_helper->dev = NULL;
    fb_helper->info = NULL;

    drm_err(dev, "fbdev: Failed to setup generic emulation (ret=%d)\n", ret);

    return ret;
}

static const struct drm_client_funcs drm_fbdev_client_funcs = {
    .owner = THIS_MODULE,
    .unregister = drm_fbdev_client_unregister,
    .restore = drm_fbdev_client_restore,
    .hotplug = drm_fbdev_client_hotplug,
};

/**
 * drm_fbdev_generic_setup() - Setup generic fbdev emulation
 * @dev: DRM device
 * @preferred_bpp: Preferred bits per pixel for the device.
 *                 @dev->mode_config.preferred_depth is used if this is zero.
 *
 * This function sets up generic fbdev emulation for drivers that support
 * dumb buffers with a virtual address and that can be mmap'ed.
 * drm_fbdev_generic_setup() shall be called after the DRM driver registered
 * the new DRM device with drm_dev_register().
 *
 * Restore, hotplug events and teardown are all taken care of. Drivers that do
 * suspend/resume need to call drm_fb_helper_set_suspend_unlocked() themselves.
 * Simple drivers might use drm_mode_config_helper_suspend().
 *
 * Drivers that set the dirty callback on their framebuffer will get a shadow
 * fbdev buffer that is blitted onto the real buffer. This is done in order to
 * make deferred I/O work with all kinds of buffers. A shadow buffer can be
 * requested explicitly by setting struct drm_mode_config.prefer_shadow or
 * struct drm_mode_config.prefer_shadow_fbdev to true beforehand. This is
 * required to use generic fbdev emulation with SHMEM helpers.
 *
 * This function is safe to call even when there are no connectors present.
 * Setup will be retried on the next hotplug event.
 *
 * The fbdev is destroyed by drm_dev_unregister().
 */
void drm_fbdev_generic_setup(struct drm_device *dev,
                             unsigned int preferred_bpp)
{
    struct drm_fb_helper *fb_helper;
    int ret;

    drm_WARN(dev, !dev->registered, "Device has not been registered.\n");
    drm_WARN(dev, dev->fb_helper, "fb_helper is already set!\n");

    fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL);
    if (!fb_helper)
        return;

    ret = drm_client_init(dev, &fb_helper->client, "fbdev", &drm_fbdev_client_funcs);
    if (ret) {
        kfree(fb_helper);
        drm_err(dev, "Failed to register client: %d\n", ret);
        return;
    }

    /*
     * FIXME: This mixes up depth with bpp, which results in a glorious
     * mess, resulting in some drivers picking wrong fbdev defaults and
     * others wrong preferred_depth defaults.
     */
    if (!preferred_bpp)
        preferred_bpp = dev->mode_config.preferred_depth;
    if (!preferred_bpp)
        preferred_bpp = 32;
    fb_helper->preferred_bpp = preferred_bpp;

    ret = drm_fbdev_client_hotplug(&fb_helper->client);
    if (ret)
        drm_dbg_kms(dev, "client hotplug ret=%d\n", ret);

    drm_client_register(&fb_helper->client);
}
EXPORT_SYMBOL(drm_fbdev_generic_setup);
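A typical call site for drm_fbdev_generic_setup(), following the call order its kernel-doc requires (the foo_* name is illustrative):

static int foo_load(struct drm_device *drm)
{
    int ret;

    ret = drm_dev_register(drm, 0);
    if (ret)
        return ret;

    /* 32 bpp preferred; passing 0 falls back to the device default */
    drm_fbdev_generic_setup(drm, 32);

    return 0;
}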
@@ -360,48 +360,43 @@ void drm_gem_reset_shadow_plane(struct drm_plane *plane)
EXPORT_SYMBOL(drm_gem_reset_shadow_plane);

/**
 * drm_gem_prepare_shadow_fb - prepares shadow framebuffers
 * drm_gem_begin_shadow_fb_access - prepares shadow framebuffers for CPU access
 * @plane: the plane
 * @plane_state: the plane state of type struct drm_shadow_plane_state
 *
 * This function implements struct &drm_plane_helper_funcs.prepare_fb. It
 * This function implements struct &drm_plane_helper_funcs.begin_fb_access. It
 * maps all buffer objects of the plane's framebuffer into kernel address
 * space and stores them in &struct drm_shadow_plane_state.map. The
 * framebuffer will be synchronized as part of the atomic commit.
 * space and stores them in struct &drm_shadow_plane_state.map. The first data
 * bytes are available in struct &drm_shadow_plane_state.data.
 *
 * See drm_gem_cleanup_shadow_fb() for cleanup.
 * See drm_gem_end_shadow_fb_access() for cleanup.
 *
 * Returns:
 * 0 on success, or a negative errno code otherwise.
 */
int drm_gem_prepare_shadow_fb(struct drm_plane *plane, struct drm_plane_state *plane_state)
int drm_gem_begin_shadow_fb_access(struct drm_plane *plane, struct drm_plane_state *plane_state)
{
    struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
    struct drm_framebuffer *fb = plane_state->fb;
    int ret;

    if (!fb)
        return 0;

    ret = drm_gem_plane_helper_prepare_fb(plane, plane_state);
    if (ret)
        return ret;

    return drm_gem_fb_vmap(fb, shadow_plane_state->map, shadow_plane_state->data);
}
EXPORT_SYMBOL(drm_gem_prepare_shadow_fb);
EXPORT_SYMBOL(drm_gem_begin_shadow_fb_access);

/**
 * drm_gem_cleanup_shadow_fb - releases shadow framebuffers
 * drm_gem_end_shadow_fb_access - releases shadow framebuffers from CPU access
 * @plane: the plane
 * @plane_state: the plane state of type struct drm_shadow_plane_state
 *
 * This function implements struct &drm_plane_helper_funcs.cleanup_fb.
 * This function unmaps all buffer objects of the plane's framebuffer.
 * This function implements struct &drm_plane_helper_funcs.end_fb_access. It
 * undoes all effects of drm_gem_begin_shadow_fb_access() in reverse order.
 *
 * See drm_gem_prepare_shadow_fb() for more information.
 * See drm_gem_begin_shadow_fb_access() for more information.
 */
void drm_gem_cleanup_shadow_fb(struct drm_plane *plane, struct drm_plane_state *plane_state)
void drm_gem_end_shadow_fb_access(struct drm_plane *plane, struct drm_plane_state *plane_state)
{
    struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
    struct drm_framebuffer *fb = plane_state->fb;

@@ -411,46 +406,45 @@ void drm_gem_cleanup_shadow_fb(struct drm_plane *plane, struct drm_plane_state *

    drm_gem_fb_vunmap(fb, shadow_plane_state->map);
}
EXPORT_SYMBOL(drm_gem_cleanup_shadow_fb);
EXPORT_SYMBOL(drm_gem_end_shadow_fb_access);

/**
 * drm_gem_simple_kms_prepare_shadow_fb - prepares shadow framebuffers
 * drm_gem_simple_kms_begin_shadow_fb_access - prepares shadow framebuffers for CPU access
 * @pipe: the simple display pipe
 * @plane_state: the plane state of type struct drm_shadow_plane_state
 *
 * This function implements struct drm_simple_display_funcs.prepare_fb. It
 * maps all buffer objects of the plane's framebuffer into kernel address
 * space and stores them in struct drm_shadow_plane_state.map. The
 * framebuffer will be synchronized as part of the atomic commit.
 * This function implements struct drm_simple_display_funcs.begin_fb_access.
 *
 * See drm_gem_simple_kms_cleanup_shadow_fb() for cleanup.
 * See drm_gem_begin_shadow_fb_access() for details and
 * drm_gem_simple_kms_cleanup_shadow_fb() for cleanup.
 *
 * Returns:
 * 0 on success, or a negative errno code otherwise.
 */
int drm_gem_simple_kms_prepare_shadow_fb(struct drm_simple_display_pipe *pipe,
int drm_gem_simple_kms_begin_shadow_fb_access(struct drm_simple_display_pipe *pipe,
                                         struct drm_plane_state *plane_state)
{
    return drm_gem_prepare_shadow_fb(&pipe->plane, plane_state);
    return drm_gem_begin_shadow_fb_access(&pipe->plane, plane_state);
}
EXPORT_SYMBOL(drm_gem_simple_kms_prepare_shadow_fb);
EXPORT_SYMBOL(drm_gem_simple_kms_begin_shadow_fb_access);

/**
 * drm_gem_simple_kms_cleanup_shadow_fb - releases shadow framebuffers
 * drm_gem_simple_kms_end_shadow_fb_access - releases shadow framebuffers from CPU access
 * @pipe: the simple display pipe
 * @plane_state: the plane state of type struct drm_shadow_plane_state
 *
 * This function implements struct drm_simple_display_funcs.cleanup_fb.
 * This function unmaps all buffer objects of the plane's framebuffer.
 * This function implements struct drm_simple_display_funcs.end_fb_access.
 * It undoes all effects of drm_gem_simple_kms_begin_shadow_fb_access() in
 * reverse order.
 *
 * See drm_gem_simple_kms_prepare_shadow_fb().
 * See drm_gem_simple_kms_begin_shadow_fb_access().
 */
void drm_gem_simple_kms_cleanup_shadow_fb(struct drm_simple_display_pipe *pipe,
void drm_gem_simple_kms_end_shadow_fb_access(struct drm_simple_display_pipe *pipe,
                                          struct drm_plane_state *plane_state)
{
    drm_gem_cleanup_shadow_fb(&pipe->plane, plane_state);
    drm_gem_end_shadow_fb_access(&pipe->plane, plane_state);
}
EXPORT_SYMBOL(drm_gem_simple_kms_cleanup_shadow_fb);
EXPORT_SYMBOL(drm_gem_simple_kms_end_shadow_fb_access);

/**
 * drm_gem_simple_kms_reset_shadow_plane - resets a shadow-buffered plane
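With the rename, drivers with shadow-buffered planes move their vmap from prepare_fb to begin_fb_access; fence synchronization stays with prepare_fb. A hedged sketch of the wiring (foo_* names are illustrative; the shadow-plane helper macros in <drm/drm_gem_atomic_helper.h> provide the canonical version):

static const struct drm_plane_helper_funcs foo_plane_helper_funcs = {
    .prepare_fb = drm_gem_plane_helper_prepare_fb,      /* fence sync */
    .begin_fb_access = drm_gem_begin_shadow_fb_access,  /* vmap */
    .end_fb_access = drm_gem_end_shadow_fb_access,      /* vunmap */
    .atomic_check = foo_plane_atomic_check,
    .atomic_update = foo_plane_atomic_update,
};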
@@ -9,7 +9,6 @@
#include <linux/module.h>

#include <drm/drm_damage_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem.h>

@@ -36,7 +36,6 @@
#include <drm/drm_client.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_print.h>
@@ -285,6 +285,30 @@ static void drm_simple_kms_plane_cleanup_fb(struct drm_plane *plane,
    pipe->funcs->cleanup_fb(pipe, state);
}

static int drm_simple_kms_plane_begin_fb_access(struct drm_plane *plane,
                                                struct drm_plane_state *new_plane_state)
{
    struct drm_simple_display_pipe *pipe;

    pipe = container_of(plane, struct drm_simple_display_pipe, plane);
    if (!pipe->funcs || !pipe->funcs->begin_fb_access)
        return 0;

    return pipe->funcs->begin_fb_access(pipe, new_plane_state);
}

static void drm_simple_kms_plane_end_fb_access(struct drm_plane *plane,
                                               struct drm_plane_state *new_plane_state)
{
    struct drm_simple_display_pipe *pipe;

    pipe = container_of(plane, struct drm_simple_display_pipe, plane);
    if (!pipe->funcs || !pipe->funcs->end_fb_access)
        return;

    pipe->funcs->end_fb_access(pipe, new_plane_state);
}

static bool drm_simple_kms_format_mod_supported(struct drm_plane *plane,
                                                uint32_t format,
                                                uint64_t modifier)
@@ -295,6 +319,8 @@ static bool drm_simple_kms_format_mod_supported(struct drm_plane *plane,
static const struct drm_plane_helper_funcs drm_simple_kms_plane_helper_funcs = {
    .prepare_fb = drm_simple_kms_plane_prepare_fb,
    .cleanup_fb = drm_simple_kms_plane_cleanup_fb,
    .begin_fb_access = drm_simple_kms_plane_begin_fb_access,
    .end_fb_access = drm_simple_kms_plane_end_fb_access,
    .atomic_check = drm_simple_kms_plane_atomic_check,
    .atomic_update = drm_simple_kms_plane_atomic_update,
};
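The same hooks are mirrored at the simple-display-pipe level by the forwarding functions above, so a simple-KMS driver opts in through its pipe funcs. Sketch (foo_* names illustrative):

static const struct drm_simple_display_pipe_funcs foo_pipe_funcs = {
    .enable = foo_pipe_enable,
    .disable = foo_pipe_disable,
    .update = foo_pipe_update,
    .begin_fb_access = drm_gem_simple_kms_begin_shadow_fb_access,
    .end_fb_access = drm_gem_simple_kms_end_shadow_fb_access,
};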
@@ -6,13 +6,14 @@
#ifndef __ETNAVIV_DRV_H__
#define __ETNAVIV_DRV_H__

#include <linux/io.h>
#include <linux/list.h>
#include <linux/mm_types.h>
#include <linux/sizes.h>
#include <linux/time64.h>
#include <linux/types.h>

#include <drm/drm_fb_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem.h>
#include <drm/etnaviv_drm.h>
#include <drm/gpu_scheduler.h>
@@ -49,6 +49,8 @@ static const struct fb_ops exynos_drm_fb_ops = {
    .owner = THIS_MODULE,
    DRM_FB_HELPER_DEFAULT_OPS,
    .fb_mmap = exynos_drm_fb_mmap,
    .fb_read = drm_fb_helper_cfb_read,
    .fb_write = drm_fb_helper_cfb_write,
    .fb_fillrect = drm_fb_helper_cfb_fillrect,
    .fb_copyarea = drm_fb_helper_cfb_copyarea,
    .fb_imageblit = drm_fb_helper_cfb_imageblit,

@@ -63,7 +65,7 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
    unsigned int size = fb->width * fb->height * fb->format->cpp[0];
    unsigned long offset;

    fbi = drm_fb_helper_alloc_fbi(helper);
    fbi = drm_fb_helper_alloc_info(helper);
    if (IS_ERR(fbi)) {
        DRM_DEV_ERROR(to_dma_dev(helper->dev),
                      "failed to allocate fb info.\n");

@@ -201,7 +203,7 @@ static void exynos_drm_fbdev_destroy(struct drm_device *dev,
        drm_framebuffer_remove(fb);
    }

    drm_fb_helper_unregister_fbi(fb_helper);
    drm_fb_helper_unregister_info(fb_helper);

    drm_fb_helper_fini(fb_helper);
}
@@ -20,7 +20,7 @@

#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_modeset_helper.h>
#include <drm/drm_module.h>

@@ -147,6 +147,8 @@ static const struct fb_ops psbfb_unaccel_ops = {
    .owner = THIS_MODULE,
    DRM_FB_HELPER_DEFAULT_OPS,
    .fb_setcolreg = psbfb_setcolreg,
    .fb_read = drm_fb_helper_cfb_read,
    .fb_write = drm_fb_helper_cfb_write,
    .fb_fillrect = drm_fb_helper_cfb_fillrect,
    .fb_copyarea = drm_fb_helper_cfb_copyarea,
    .fb_imageblit = drm_fb_helper_cfb_imageblit,

@@ -268,7 +270,7 @@ static int psbfb_create(struct drm_fb_helper *fb_helper,

    memset(dev_priv->vram_addr + backing->offset, 0, size);

    info = drm_fb_helper_alloc_fbi(fb_helper);
    info = drm_fb_helper_alloc_info(fb_helper);
    if (IS_ERR(info)) {
        ret = PTR_ERR(info);
        goto err_drm_gem_object_put;

@@ -383,7 +385,7 @@ static int psb_fbdev_destroy(struct drm_device *dev,
{
    struct drm_framebuffer *fb = fb_helper->fb;

    drm_fb_helper_unregister_fbi(fb_helper);
    drm_fb_helper_unregister_info(fb_helper);

    drm_fb_helper_fini(fb_helper);
    drm_framebuffer_unregister_private(fb);

@@ -18,7 +18,7 @@
#include <drm/drm_damage_helper.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>

@@ -17,6 +17,7 @@
#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_managed.h>

@@ -19,7 +19,6 @@
#include <linux/i2c.h>

#include <drm/drm_edid.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_framebuffer.h>

struct hibmc_connector {

@@ -11,6 +11,7 @@
 * Jianhua Li <lijianhua@huawei.com>
 */

#include <linux/io.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_print.h>

@@ -19,7 +19,7 @@

#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_print.h>

@@ -11,7 +11,7 @@
#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_simple_kms_helper.h>

@@ -8,7 +8,6 @@
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_edid.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_format_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
@@ -124,6 +124,8 @@ static const struct fb_ops intelfb_ops = {
    .owner = THIS_MODULE,
    DRM_FB_HELPER_DEFAULT_OPS,
    .fb_set_par = intel_fbdev_set_par,
    .fb_read = drm_fb_helper_cfb_read,
    .fb_write = drm_fb_helper_cfb_write,
    .fb_fillrect = drm_fb_helper_cfb_fillrect,
    .fb_copyarea = drm_fb_helper_cfb_copyarea,
    .fb_imageblit = drm_fb_helper_cfb_imageblit,

@@ -254,7 +256,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
        goto out_unlock;
    }

    info = drm_fb_helper_alloc_fbi(helper);
    info = drm_fb_helper_alloc_info(helper);
    if (IS_ERR(info)) {
        drm_err(&dev_priv->drm, "Failed to allocate fb_info (%pe)\n", info);
        ret = PTR_ERR(info);

@@ -584,7 +586,7 @@ void intel_fbdev_unregister(struct drm_i915_private *dev_priv)
    if (!current_is_async())
        intel_fbdev_sync(ifbdev);

    drm_fb_helper_unregister_fbi(&ifbdev->helper);
    drm_fb_helper_unregister_info(&ifbdev->helper);
}

void intel_fbdev_fini(struct drm_i915_private *dev_priv)

@@ -627,7 +629,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
    if (!ifbdev || !ifbdev->vma)
        goto set_suspend;

    info = ifbdev->helper.fbdev;
    info = ifbdev->helper.info;

    if (synchronous) {
        /* Flush any pending work to turn the console on, and then
@@ -7,7 +7,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge_connector.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_of.h>

@@ -21,7 +21,6 @@ DEFINE_DRM_GEM_DMA_FOPS(dcss_cma_fops);

static const struct drm_mode_config_funcs dcss_drm_mode_config_funcs = {
    .fb_create = drm_gem_fb_create,
    .output_poll_changed = drm_fb_helper_output_poll_changed,
    .atomic_check = drm_atomic_helper_check,
    .atomic_commit = drm_atomic_helper_commit,
};

@@ -16,7 +16,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_managed.h>

@@ -7,6 +7,7 @@

#include <linux/clk.h>
#include <linux/component.h>
#include <linux/i2c.h>
#include <linux/media-bus-format.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>

@@ -23,7 +24,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_edid.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>

@@ -19,7 +19,6 @@

#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>

@@ -8,6 +8,7 @@
#include <linux/component.h>
#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/videodev2.h>

@@ -16,7 +17,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_edid.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
@@ -32,7 +32,7 @@
#include <drm/drm_encoder.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_atomic_helper.h>

@@ -1018,7 +1018,6 @@ static const struct drm_bridge_funcs ingenic_drm_bridge_funcs = {

static const struct drm_mode_config_funcs ingenic_drm_mode_config_funcs = {
    .fb_create = ingenic_drm_gem_fb_create,
    .output_poll_changed = drm_fb_helper_output_poll_changed,
    .atomic_check = drm_atomic_helper_check,
    .atomic_commit = drm_atomic_helper_commit,
};

@@ -1629,7 +1628,11 @@ static int ingenic_drm_init(void)
        return err;
    }

    return platform_driver_register(&ingenic_drm_driver);
    err = platform_driver_register(&ingenic_drm_driver);
    if (IS_ENABLED(CONFIG_DRM_INGENIC_IPU) && err)
        platform_driver_unregister(ingenic_ipu_driver_ptr);

    return err;
}
module_init(ingenic_drm_init);
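The ingenic fix above is the usual unwind pattern for a module that registers two platform drivers: if the second registration fails, the first must be unregistered before returning. Generically (foo_* names illustrative):

static int __init foo_init(void)
{
    int err;

    err = platform_driver_register(&foo_a_driver);
    if (err)
        return err;

    err = platform_driver_register(&foo_b_driver);
    if (err)
        platform_driver_unregister(&foo_a_driver); /* undo the first */

    return err;
}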
@@ -15,7 +15,7 @@

#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_module.h>

@@ -9,7 +9,6 @@
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem_dma_helper.h>

@@ -17,7 +17,7 @@

#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_print.h>

@@ -10,7 +10,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_mode_config.h>

@@ -26,7 +25,6 @@

static const struct drm_mode_config_funcs logicvc_mode_config_funcs = {
    .fb_create = drm_gem_fb_create,
    .output_poll_changed = drm_fb_helper_output_poll_changed,
    .atomic_check = drm_atomic_helper_check,
    .atomic_commit = drm_atomic_helper_commit,
};

@@ -69,7 +69,7 @@
#include <drm/drm_bridge.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_dma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>

@@ -203,7 +203,6 @@ DEFINE_DRM_GEM_DMA_FOPS(drm_fops);
static const struct drm_driver mcde_drm_driver = {
    .driver_features =
        DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC,
    .lastclose = drm_fb_helper_lastclose,
    .ioctls = NULL,
    .fops = &drm_fops,
    .name = "mcde",

@@ -17,7 +17,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_dma_helper.h>

@@ -18,7 +18,7 @@
#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_modeset_helper_vtables.h>
@@ -116,7 +116,8 @@ static int meson_encoder_cvbs_get_modes(struct drm_bridge *bridge,
    return i;
}

static int meson_encoder_cvbs_mode_valid(struct drm_bridge *bridge,
static enum drm_mode_status
meson_encoder_cvbs_mode_valid(struct drm_bridge *bridge,
                              const struct drm_display_info *display_info,
                              const struct drm_display_mode *mode)
{
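The meson change corrects the callback's return type to match struct drm_bridge_funcs::mode_valid, which returns enum drm_mode_status rather than int. A generic example with the corrected signature (the pixel-clock limit is hypothetical):

static enum drm_mode_status
foo_bridge_mode_valid(struct drm_bridge *bridge,
                      const struct drm_display_info *info,
                      const struct drm_display_mode *mode)
{
    if (mode->clock > 150000) /* kHz; hypothetical hardware limit */
        return MODE_CLOCK_HIGH;

    return MODE_OK;
}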
@@ -11,6 +11,7 @@

#include <drm/drm_aperture.h>
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_managed.h>

@@ -18,7 +18,6 @@
#include <drm/drm_connector.h>
#include <drm/drm_crtc.h>
#include <drm/drm_encoder.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_plane.h>

@@ -93,7 +93,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
        goto fail;
    }

    fbi = drm_fb_helper_alloc_fbi(helper);
    fbi = drm_fb_helper_alloc_info(helper);
    if (IS_ERR(fbi)) {
        DRM_DEV_ERROR(dev->dev, "failed to allocate fb info\n");
        ret = PTR_ERR(fbi);

@@ -182,7 +182,7 @@ void msm_fbdev_free(struct drm_device *dev)

    DBG();

    drm_fb_helper_unregister_fbi(helper);
    drm_fb_helper_unregister_info(helper);

    drm_fb_helper_fini(helper);

@@ -16,7 +16,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_mode_config.h>

@@ -5,6 +5,7 @@
 * This code is based on drivers/gpu/drm/mxsfb/mxsfb*
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/iopoll.h>

@@ -332,6 +333,18 @@ static void lcdif_enable_controller(struct lcdif_drm_private *lcdif)
{
    u32 reg;

    /* Set FIFO Panic watermarks, low 1/3, high 2/3. */
    writel(FIELD_PREP(PANIC0_THRES_LOW_MASK, 1 * PANIC0_THRES_MAX / 3) |
           FIELD_PREP(PANIC0_THRES_HIGH_MASK, 2 * PANIC0_THRES_MAX / 3),
           lcdif->base + LCDC_V8_PANIC0_THRES);

    /*
     * Enable FIFO Panic, this does not generate interrupt, but
     * boosts NoC priority based on FIFO Panic watermarks.
     */
    writel(INT_ENABLE_D1_PLANE_PANIC_EN,
           lcdif->base + LCDC_V8_INT_ENABLE_D1);

    reg = readl(lcdif->base + LCDC_V8_DISP_PARA);
    reg |= DISP_PARA_DISP_ON;
    writel(reg, lcdif->base + LCDC_V8_DISP_PARA);

@@ -359,6 +372,9 @@ static void lcdif_disable_controller(struct lcdif_drm_private *lcdif)
    reg = readl(lcdif->base + LCDC_V8_DISP_PARA);
    reg &= ~DISP_PARA_DISP_ON;
    writel(reg, lcdif->base + LCDC_V8_DISP_PARA);

    /* Disable FIFO Panic NoC priority booster. */
    writel(0, lcdif->base + LCDC_V8_INT_ENABLE_D1);
}

static void lcdif_reset_block(struct lcdif_drm_private *lcdif)
@@ -255,6 +255,7 @@

#define PANIC0_THRES_LOW_MASK GENMASK(24, 16)
#define PANIC0_THRES_HIGH_MASK GENMASK(8, 0)
#define PANIC0_THRES_MAX 511

#define LCDIF_MIN_XRES 120
#define LCDIF_MIN_YRES 120
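For reference, the watermark writes in lcdif_enable_controller() work out as follows with these constants (integer division):

/*
 *   low  = 1 * PANIC0_THRES_MAX / 3 =  511 / 3 = 170
 *   high = 2 * PANIC0_THRES_MAX / 3 = 1022 / 3 = 340
 * so the FIFO panic window spans roughly the middle third of the FIFO.
 */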
@@ -20,7 +20,7 @@
#include <drm/drm_bridge.h>
#include <drm/drm_connector.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_dma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>

@@ -34,6 +34,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fbdev_generic.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_vblank.h>
drivers/gpu/drm/nouveau/nouveau_fbcon.c (new file, 613 lines)

@@ -0,0 +1,613 @@
|
||||
/*
|
||||
* Copyright © 2007 David Airlie
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors:
|
||||
* David Airlie
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/tty.h>
|
||||
#include <linux/sysrq.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/screen_info.h>
|
||||
#include <linux/vga_switcheroo.h>
|
||||
#include <linux/console.h>
|
||||
|
||||
#include <drm/drm_crtc.h>
|
||||
#include <drm/drm_crtc_helper.h>
|
||||
#include <drm/drm_probe_helper.h>
|
||||
#include <drm/drm_fb_helper.h>
|
||||
#include <drm/drm_fourcc.h>
|
||||
#include <drm/drm_atomic.h>
|
||||
|
||||
#include "nouveau_drv.h"
|
||||
#include "nouveau_gem.h"
|
||||
#include "nouveau_bo.h"
|
||||
#include "nouveau_fbcon.h"
|
||||
#include "nouveau_chan.h"
|
||||
#include "nouveau_vmm.h"
|
||||
|
||||
#include "nouveau_crtc.h"
|
||||
|
||||
MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration");
|
||||
int nouveau_nofbaccel = 0;
|
||||
module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400);
|
||||
|
||||
MODULE_PARM_DESC(fbcon_bpp, "fbcon bits-per-pixel (default: auto)");
|
||||
static int nouveau_fbcon_bpp;
|
||||
module_param_named(fbcon_bpp, nouveau_fbcon_bpp, int, 0400);
|
||||
|
||||
static void
|
||||
nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
|
||||
{
|
||||
struct nouveau_fbdev *fbcon = info->par;
|
||||
struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
|
||||
struct nvif_device *device = &drm->client.device;
|
||||
int ret;
|
||||
|
||||
if (info->state != FBINFO_STATE_RUNNING)
|
||||
return;
|
||||
|
||||
ret = -ENODEV;
|
||||
if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
|
||||
mutex_trylock(&drm->client.mutex)) {
|
||||
if (device->info.family < NV_DEVICE_INFO_V0_TESLA)
|
||||
ret = nv04_fbcon_fillrect(info, rect);
|
||||
else
|
||||
if (device->info.family < NV_DEVICE_INFO_V0_FERMI)
|
||||
ret = nv50_fbcon_fillrect(info, rect);
|
||||
else
|
||||
ret = nvc0_fbcon_fillrect(info, rect);
|
||||
mutex_unlock(&drm->client.mutex);
|
||||
}
|
||||
|
||||
if (ret == 0)
|
||||
return;
|
||||
|
||||
if (ret != -ENODEV)
|
||||
nouveau_fbcon_gpu_lockup(info);
|
||||
drm_fb_helper_cfb_fillrect(info, rect);
|
||||
}
|
||||
|
||||
static void
|
||||
nouveau_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *image)
|
||||
{
|
||||
struct nouveau_fbdev *fbcon = info->par;
|
||||
struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
|
||||
struct nvif_device *device = &drm->client.device;
|
||||
int ret;
|
||||
|
||||
if (info->state != FBINFO_STATE_RUNNING)
|
||||
return;
|
||||
|
||||
ret = -ENODEV;
|
||||
if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
|
||||
mutex_trylock(&drm->client.mutex)) {
|
||||
if (device->info.family < NV_DEVICE_INFO_V0_TESLA)
|
||||
ret = nv04_fbcon_copyarea(info, image);
|
||||
else
|
||||
if (device->info.family < NV_DEVICE_INFO_V0_FERMI)
|
||||
ret = nv50_fbcon_copyarea(info, image);
|
||||
else
|
||||
ret = nvc0_fbcon_copyarea(info, image);
|
||||
mutex_unlock(&drm->client.mutex);
|
||||
}
|
||||
|
||||
if (ret == 0)
|
||||
return;
|
||||
|
||||
if (ret != -ENODEV)
|
||||
nouveau_fbcon_gpu_lockup(info);
|
||||
drm_fb_helper_cfb_copyarea(info, image);
|
||||
}

static void
nouveau_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
{
	struct nouveau_fbdev *fbcon = info->par;
	struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
	struct nvif_device *device = &drm->client.device;
	int ret;

	if (info->state != FBINFO_STATE_RUNNING)
		return;

	ret = -ENODEV;
	if (!in_interrupt() && !(info->flags & FBINFO_HWACCEL_DISABLED) &&
	    mutex_trylock(&drm->client.mutex)) {
		if (device->info.family < NV_DEVICE_INFO_V0_TESLA)
			ret = nv04_fbcon_imageblit(info, image);
		else
		if (device->info.family < NV_DEVICE_INFO_V0_FERMI)
			ret = nv50_fbcon_imageblit(info, image);
		else
			ret = nvc0_fbcon_imageblit(info, image);
		mutex_unlock(&drm->client.mutex);
	}

	if (ret == 0)
		return;

	if (ret != -ENODEV)
		nouveau_fbcon_gpu_lockup(info);
	drm_fb_helper_cfb_imageblit(info, image);
}

static int
nouveau_fbcon_sync(struct fb_info *info)
{
	struct nouveau_fbdev *fbcon = info->par;
	struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
	struct nouveau_channel *chan = drm->channel;
	int ret;

	if (!chan || !chan->accel_done || in_interrupt() ||
	    info->state != FBINFO_STATE_RUNNING ||
	    info->flags & FBINFO_HWACCEL_DISABLED)
		return 0;

	if (!mutex_trylock(&drm->client.mutex))
		return 0;

	ret = nouveau_channel_idle(chan);
	mutex_unlock(&drm->client.mutex);
	if (ret) {
		nouveau_fbcon_gpu_lockup(info);
		return 0;
	}

	chan->accel_done = false;
	return 0;
}
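
/* fb_sync is not allowed to report failure to fbcon: if waiting for the
 * channel to go idle fails, the console is switched to unaccelerated
 * drawing via nouveau_fbcon_gpu_lockup() and 0 is returned anyway.
 */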

static int
nouveau_fbcon_open(struct fb_info *info, int user)
{
	struct nouveau_fbdev *fbcon = info->par;
	struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
	int ret = pm_runtime_get_sync(drm->dev->dev);
	if (ret < 0 && ret != -EACCES) {
		pm_runtime_put(drm->dev->dev);
		return ret;
	}
	return 0;
}

static int
nouveau_fbcon_release(struct fb_info *info, int user)
{
	struct nouveau_fbdev *fbcon = info->par;
	struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
	pm_runtime_put(drm->dev->dev);
	return 0;
}

static const struct fb_ops nouveau_fbcon_ops = {
	.owner = THIS_MODULE,
	DRM_FB_HELPER_DEFAULT_OPS,
	.fb_open = nouveau_fbcon_open,
	.fb_release = nouveau_fbcon_release,
	.fb_fillrect = nouveau_fbcon_fillrect,
	.fb_copyarea = nouveau_fbcon_copyarea,
	.fb_imageblit = nouveau_fbcon_imageblit,
	.fb_sync = nouveau_fbcon_sync,
};

static const struct fb_ops nouveau_fbcon_sw_ops = {
	.owner = THIS_MODULE,
	DRM_FB_HELPER_DEFAULT_OPS,
	.fb_open = nouveau_fbcon_open,
	.fb_release = nouveau_fbcon_release,
	.fb_fillrect = drm_fb_helper_cfb_fillrect,
	.fb_copyarea = drm_fb_helper_cfb_copyarea,
	.fb_imageblit = drm_fb_helper_cfb_imageblit,
};
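
/* Two fb_ops tables exist on purpose: nouveau_fbcon_ops routes drawing
 * through the hardware-accelerated hooks above, while nouveau_fbcon_sw_ops
 * goes straight to the drm_fb_helper_cfb_*() software helpers. The sw
 * table is installed first and only replaced once acceleration is known
 * to work (see nouveau_fbcon_accel_init() below).
 */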

void
nouveau_fbcon_accel_save_disable(struct drm_device *dev)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	if (drm->fbcon && drm->fbcon->helper.info) {
		drm->fbcon->saved_flags = drm->fbcon->helper.info->flags;
		drm->fbcon->helper.info->flags |= FBINFO_HWACCEL_DISABLED;
	}
}

void
nouveau_fbcon_accel_restore(struct drm_device *dev)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	if (drm->fbcon && drm->fbcon->helper.info)
		drm->fbcon->helper.info->flags = drm->fbcon->saved_flags;
}

static void
nouveau_fbcon_accel_fini(struct drm_device *dev)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_fbdev *fbcon = drm->fbcon;
	if (fbcon && drm->channel) {
		console_lock();
		if (fbcon->helper.info)
			fbcon->helper.info->flags |= FBINFO_HWACCEL_DISABLED;
		console_unlock();
		nouveau_channel_idle(drm->channel);
		nvif_object_dtor(&fbcon->twod);
		nvif_object_dtor(&fbcon->blit);
		nvif_object_dtor(&fbcon->gdi);
		nvif_object_dtor(&fbcon->patt);
		nvif_object_dtor(&fbcon->rop);
		nvif_object_dtor(&fbcon->clip);
		nvif_object_dtor(&fbcon->surf2d);
	}
}

static void
nouveau_fbcon_accel_init(struct drm_device *dev)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_fbdev *fbcon = drm->fbcon;
	struct fb_info *info = fbcon->helper.info;
	int ret;

	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA)
		ret = nv04_fbcon_accel_init(info);
	else
	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_FERMI)
		ret = nv50_fbcon_accel_init(info);
	else
		ret = nvc0_fbcon_accel_init(info);

	if (ret == 0)
		info->fbops = &nouveau_fbcon_ops;
}

static void
nouveau_fbcon_zfill(struct drm_device *dev, struct nouveau_fbdev *fbcon)
{
	struct fb_info *info = fbcon->helper.info;
	struct fb_fillrect rect;

	/* Clear the entire fbcon. The drm will program every connector
	 * with its preferred mode. If the sizes differ, one display will
	 * quite likely have garbage around the console.
	 */
	rect.dx = rect.dy = 0;
	rect.width = info->var.xres_virtual;
	rect.height = info->var.yres_virtual;
	rect.color = 0;
	rect.rop = ROP_COPY;
	info->fbops->fb_fillrect(info, &rect);
}

static int
nouveau_fbcon_create(struct drm_fb_helper *helper,
		     struct drm_fb_helper_surface_size *sizes)
{
	struct nouveau_fbdev *fbcon =
		container_of(helper, struct nouveau_fbdev, helper);
	struct drm_device *dev = fbcon->helper.dev;
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nvif_device *device = &drm->client.device;
	struct fb_info *info;
	struct drm_framebuffer *fb;
	struct nouveau_channel *chan;
	struct nouveau_bo *nvbo;
	struct drm_mode_fb_cmd2 mode_cmd = {};
	int ret;

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;

	mode_cmd.pitches[0] = mode_cmd.width * (sizes->surface_bpp >> 3);
	mode_cmd.pitches[0] = roundup(mode_cmd.pitches[0], 256);

	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
							  sizes->surface_depth);

	ret = nouveau_gem_new(&drm->client, mode_cmd.pitches[0] *
			      mode_cmd.height, 0, NOUVEAU_GEM_DOMAIN_VRAM,
			      0, 0x0000, &nvbo);
	if (ret) {
		NV_ERROR(drm, "failed to allocate framebuffer\n");
		goto out;
	}

	ret = nouveau_framebuffer_new(dev, &mode_cmd, &nvbo->bo.base, &fb);
	if (ret)
		goto out_unref;

	ret = nouveau_bo_pin(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, false);
	if (ret) {
		NV_ERROR(drm, "failed to pin fb: %d\n", ret);
		goto out_unref;
	}

	ret = nouveau_bo_map(nvbo);
	if (ret) {
		NV_ERROR(drm, "failed to map fb: %d\n", ret);
		goto out_unpin;
	}

	chan = nouveau_nofbaccel ? NULL : drm->channel;
	if (chan && device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
		ret = nouveau_vma_new(nvbo, chan->vmm, &fbcon->vma);
		if (ret) {
			NV_ERROR(drm, "failed to map fb into chan: %d\n", ret);
			chan = NULL;
		}
	}

	info = drm_fb_helper_alloc_info(helper);
	if (IS_ERR(info)) {
		ret = PTR_ERR(info);
		goto out_unlock;
	}

	/* setup helper */
	fbcon->helper.fb = fb;

	if (!chan)
		info->flags = FBINFO_HWACCEL_DISABLED;
	else
		info->flags = FBINFO_HWACCEL_COPYAREA |
			      FBINFO_HWACCEL_FILLRECT |
			      FBINFO_HWACCEL_IMAGEBLIT;
	info->fbops = &nouveau_fbcon_sw_ops;
	info->fix.smem_start = nvbo->bo.resource->bus.offset;
	info->fix.smem_len = nvbo->bo.base.size;

	info->screen_base = nvbo_kmap_obj_iovirtual(nvbo);
	info->screen_size = nvbo->bo.base.size;

	drm_fb_helper_fill_info(info, &fbcon->helper, sizes);

	/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */

	if (chan)
		nouveau_fbcon_accel_init(dev);
	nouveau_fbcon_zfill(dev, fbcon);

	/* To allow resizing without swapping buffers */
	NV_INFO(drm, "allocated %dx%d fb: 0x%llx, bo %p\n",
		fb->width, fb->height, nvbo->offset, nvbo);

	if (dev_is_pci(dev->dev))
		vga_switcheroo_client_fb_set(to_pci_dev(dev->dev), info);

	return 0;

out_unlock:
	if (chan)
		nouveau_vma_del(&fbcon->vma);
	nouveau_bo_unmap(nvbo);
out_unpin:
	nouveau_bo_unpin(nvbo);
out_unref:
	nouveau_bo_ref(NULL, &nvbo);
out:
	return ret;
}
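
/* The error unwind above mirrors the setup order in reverse: drop the
 * channel VMA mapping first, then unmap, unpin and finally unreference
 * the buffer object.
 */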

static int
nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *fbcon)
{
	struct drm_framebuffer *fb = fbcon->helper.fb;
	struct nouveau_bo *nvbo;

	drm_fb_helper_unregister_info(&fbcon->helper);
	drm_fb_helper_fini(&fbcon->helper);

	if (fb && fb->obj[0]) {
		nvbo = nouveau_gem_object(fb->obj[0]);
		nouveau_vma_del(&fbcon->vma);
		nouveau_bo_unmap(nvbo);
		nouveau_bo_unpin(nvbo);
		drm_framebuffer_put(fb);
	}

	return 0;
}

void nouveau_fbcon_gpu_lockup(struct fb_info *info)
{
	struct nouveau_fbdev *fbcon = info->par;
	struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);

	NV_ERROR(drm, "GPU lockup - switching to software fbcon\n");
	info->flags |= FBINFO_HWACCEL_DISABLED;
}

static const struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = {
	.fb_probe = nouveau_fbcon_create,
};

static void
nouveau_fbcon_set_suspend_work(struct work_struct *work)
{
	struct nouveau_drm *drm = container_of(work, typeof(*drm), fbcon_work);
	int state = READ_ONCE(drm->fbcon_new_state);

	if (state == FBINFO_STATE_RUNNING)
		pm_runtime_get_sync(drm->dev->dev);

	console_lock();
	if (state == FBINFO_STATE_RUNNING)
		nouveau_fbcon_accel_restore(drm->dev);
	drm_fb_helper_set_suspend(&drm->fbcon->helper, state);
	if (state != FBINFO_STATE_RUNNING)
		nouveau_fbcon_accel_save_disable(drm->dev);
	console_unlock();

	if (state == FBINFO_STATE_RUNNING) {
		nouveau_fbcon_hotplug_resume(drm->fbcon);
		pm_runtime_mark_last_busy(drm->dev->dev);
		pm_runtime_put_autosuspend(drm->dev->dev);
	}
}
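
/* The runtime-PM reference is taken before the device is touched on
 * resume and released with autosuspend once fbcon is back up; the
 * acceleration flags are saved and restored strictly under console_lock.
 */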

void
nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
{
	struct nouveau_drm *drm = nouveau_drm(dev);

	if (!drm->fbcon)
		return;

	drm->fbcon_new_state = state;
	/* Since runtime resume can happen as a result of a sysfs operation,
	 * it's possible we already have the console locked. So handle fbcon
	 * init/deinit from a separate work thread
	 */
	schedule_work(&drm->fbcon_work);
}

void
nouveau_fbcon_output_poll_changed(struct drm_device *dev)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_fbdev *fbcon = drm->fbcon;
	int ret;

	if (!fbcon)
		return;

	mutex_lock(&fbcon->hotplug_lock);

	ret = pm_runtime_get(dev->dev);
	if (ret == 1 || ret == -EACCES) {
		drm_fb_helper_hotplug_event(&fbcon->helper);

		pm_runtime_mark_last_busy(dev->dev);
		pm_runtime_put_autosuspend(dev->dev);
	} else if (ret == 0) {
		/* If the GPU was already in the process of suspending before
		 * this event happened, then we can't block here as we'll
		 * deadlock the runtime pmops since they wait for us to
		 * finish. So, just defer this event for when we runtime
		 * resume again. It will be handled by fbcon_work.
		 */
		NV_DEBUG(drm, "fbcon HPD event deferred until runtime resume\n");
		fbcon->hotplug_waiting = true;
		pm_runtime_put_noidle(drm->dev->dev);
	} else {
		DRM_WARN("fbcon HPD event lost due to RPM failure: %d\n",
			 ret);
	}

	mutex_unlock(&fbcon->hotplug_lock);
}

void
nouveau_fbcon_hotplug_resume(struct nouveau_fbdev *fbcon)
{
	struct nouveau_drm *drm;

	if (!fbcon)
		return;
	drm = nouveau_drm(fbcon->helper.dev);

	mutex_lock(&fbcon->hotplug_lock);
	if (fbcon->hotplug_waiting) {
		fbcon->hotplug_waiting = false;

		NV_DEBUG(drm, "Handling deferred fbcon HPD events\n");
		drm_fb_helper_hotplug_event(&fbcon->helper);
	}
	mutex_unlock(&fbcon->hotplug_lock);
}

int
nouveau_fbcon_init(struct drm_device *dev)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_fbdev *fbcon;
	int preferred_bpp = nouveau_fbcon_bpp;
	int ret;

	if (!dev->mode_config.num_crtc ||
	    (to_pci_dev(dev->dev)->class >> 8) != PCI_CLASS_DISPLAY_VGA)
		return 0;

	fbcon = kzalloc(sizeof(struct nouveau_fbdev), GFP_KERNEL);
	if (!fbcon)
		return -ENOMEM;

	drm->fbcon = fbcon;
	INIT_WORK(&drm->fbcon_work, nouveau_fbcon_set_suspend_work);
	mutex_init(&fbcon->hotplug_lock);

	drm_fb_helper_prepare(dev, &fbcon->helper, &nouveau_fbcon_helper_funcs);

	ret = drm_fb_helper_init(dev, &fbcon->helper);
	if (ret)
		goto free;

	if (preferred_bpp != 8 && preferred_bpp != 16 && preferred_bpp != 32) {
		if (drm->client.device.info.ram_size <= 32 * 1024 * 1024)
			preferred_bpp = 8;
		else
		if (drm->client.device.info.ram_size <= 64 * 1024 * 1024)
			preferred_bpp = 16;
		else
			preferred_bpp = 32;
	}

	/* disable all the possible outputs/crtcs before entering KMS mode */
	if (!drm_drv_uses_atomic_modeset(dev))
		drm_helper_disable_unused_functions(dev);

	ret = drm_fb_helper_initial_config(&fbcon->helper, preferred_bpp);
	if (ret)
		goto fini;

	if (fbcon->helper.info)
		fbcon->helper.info->pixmap.buf_align = 4;
	return 0;

fini:
	drm_fb_helper_fini(&fbcon->helper);
free:
	kfree(fbcon);
	drm->fbcon = NULL;
	return ret;
}

void
nouveau_fbcon_fini(struct drm_device *dev)
{
	struct nouveau_drm *drm = nouveau_drm(dev);

	if (!drm->fbcon)
		return;

	drm_kms_helper_poll_fini(dev);
	nouveau_fbcon_accel_fini(dev);
	nouveau_fbcon_destroy(dev, drm->fbcon);
	kfree(drm->fbcon);
	drm->fbcon = NULL;
}

@@ -38,7 +38,7 @@ static struct drm_fb_helper *get_fb(struct fb_info *fbi);
 static void pan_worker(struct work_struct *work)
 {
 	struct omap_fbdev *fbdev = container_of(work, struct omap_fbdev, work);
-	struct fb_info *fbi = fbdev->base.fbdev;
+	struct fb_info *fbi = fbdev->base.info;
 	int npages;
 
 	/* DMM roll shifts in 4K pages: */
@@ -161,7 +161,7 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
 		goto fail;
 	}
 
-	fbi = drm_fb_helper_alloc_fbi(helper);
+	fbi = drm_fb_helper_alloc_info(helper);
 	if (IS_ERR(fbi)) {
 		dev_err(dev->dev, "failed to allocate fb info\n");
 		ret = PTR_ERR(fbi);
@@ -272,7 +272,7 @@ void omap_fbdev_fini(struct drm_device *dev)
 	if (!helper)
 		return;
 
-	drm_fb_helper_unregister_fbi(helper);
+	drm_fb_helper_unregister_info(helper);
 
 	drm_fb_helper_fini(helper);
 
@@ -203,6 +203,16 @@ config DRM_PANEL_INNOLUX_P079ZCA
 	  24 bit RGB per pixel. It provides a MIPI DSI interface to
 	  the host and has a built-in LED backlight.
 
+config DRM_PANEL_JADARD_JD9365DA_H3
+	tristate "Jadard JD9365DA-H3 WXGA DSI panel"
+	depends on OF
+	depends on DRM_MIPI_DSI
+	depends on BACKLIGHT_CLASS_DEVICE
+	help
+	  Say Y here if you want to enable support for the Jadard JD9365DA-H3
+	  WXGA MIPI DSI panel. The panel supports TFT dot matrix LCD with
+	  800RGBx1280 dots at maximum.
+
 config DRM_PANEL_JDI_LT070ME05000
 	tristate "JDI LT070ME05000 WUXGA DSI panel"
 	depends on OF
 
@@ -18,6 +18,7 @@ obj-$(CONFIG_DRM_PANEL_ILITEK_ILI9341) += panel-ilitek-ili9341.o
 obj-$(CONFIG_DRM_PANEL_ILITEK_ILI9881C) += panel-ilitek-ili9881c.o
 obj-$(CONFIG_DRM_PANEL_INNOLUX_EJ030NA) += panel-innolux-ej030na.o
 obj-$(CONFIG_DRM_PANEL_INNOLUX_P079ZCA) += panel-innolux-p079zca.o
+obj-$(CONFIG_DRM_PANEL_JADARD_JD9365DA_H3) += panel-jadard-jd9365da-h3.o
 obj-$(CONFIG_DRM_PANEL_JDI_LT070ME05000) += panel-jdi-lt070me05000.o
 obj-$(CONFIG_DRM_PANEL_JDI_R63452) += panel-jdi-fhd-r63452.o
 obj-$(CONFIG_DRM_PANEL_KHADAS_TS050) += panel-khadas-ts050.o
 
@@ -18,6 +18,7 @@
  * Copyright 2018 David Lechner <david@lechnology.com>
  */
 
+#include <linux/backlight.h>
 #include <linux/bitops.h>
 #include <linux/delay.h>
 #include <linux/gpio/consumer.h>
@@ -30,7 +31,7 @@
 
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_drv.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_fbdev_generic.h>
 #include <drm/drm_gem_atomic_helper.h>
 #include <drm/drm_gem_dma_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
 
drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c (new file, 473 lines)
@@ -0,0 +1,473 @@
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (c) 2019 Radxa Limited
 * Copyright (c) 2022 Edgeble AI Technologies Pvt. Ltd.
 *
 * Author:
 * - Jagan Teki <jagan@amarulasolutions.com>
 * - Stephen Chen <stephen@radxa.com>
 */

#include <drm/drm_mipi_dsi.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
#include <drm/drm_print.h>

#include <linux/gpio/consumer.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/regulator/consumer.h>

#define JD9365DA_INIT_CMD_LEN		2

struct jadard_init_cmd {
	u8 data[JD9365DA_INIT_CMD_LEN];
};

struct jadard_panel_desc {
	const struct drm_display_mode mode;
	unsigned int lanes;
	enum mipi_dsi_pixel_format format;
	const struct jadard_init_cmd *init_cmds;
	u32 num_init_cmds;
};

struct jadard {
	struct drm_panel panel;
	struct mipi_dsi_device *dsi;
	const struct jadard_panel_desc *desc;

	struct regulator *vdd;
	struct regulator *vccio;
	struct gpio_desc *reset;
};

static inline struct jadard *panel_to_jadard(struct drm_panel *panel)
{
	return container_of(panel, struct jadard, panel);
}

static int jadard_enable(struct drm_panel *panel)
{
	struct device *dev = panel->dev;
	struct jadard *jadard = panel_to_jadard(panel);
	const struct jadard_panel_desc *desc = jadard->desc;
	struct mipi_dsi_device *dsi = jadard->dsi;
	unsigned int i;
	int err;

	msleep(10);

	for (i = 0; i < desc->num_init_cmds; i++) {
		const struct jadard_init_cmd *cmd = &desc->init_cmds[i];

		err = mipi_dsi_dcs_write_buffer(dsi, cmd->data, JD9365DA_INIT_CMD_LEN);
		if (err < 0)
			return err;
	}

	msleep(120);

	err = mipi_dsi_dcs_exit_sleep_mode(dsi);
	if (err < 0)
		DRM_DEV_ERROR(dev, "failed to exit sleep mode ret = %d\n", err);

	err = mipi_dsi_dcs_set_display_on(dsi);
	if (err < 0)
		DRM_DEV_ERROR(dev, "failed to set display on ret = %d\n", err);

	return 0;
}
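
/* The init sequence is a flat table of two-byte (register, value) DCS
 * writes; the 0xE0 entries select a register page before the writes that
 * follow. A 120 ms settle delay separates the table from the
 * exit-sleep/display-on calls above.
 */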

static int jadard_disable(struct drm_panel *panel)
{
	struct device *dev = panel->dev;
	struct jadard *jadard = panel_to_jadard(panel);
	int ret;

	ret = mipi_dsi_dcs_set_display_off(jadard->dsi);
	if (ret < 0)
		DRM_DEV_ERROR(dev, "failed to set display off: %d\n", ret);

	ret = mipi_dsi_dcs_enter_sleep_mode(jadard->dsi);
	if (ret < 0)
		DRM_DEV_ERROR(dev, "failed to enter sleep mode: %d\n", ret);

	return 0;
}

static int jadard_prepare(struct drm_panel *panel)
{
	struct jadard *jadard = panel_to_jadard(panel);
	int ret;

	ret = regulator_enable(jadard->vccio);
	if (ret)
		return ret;

	ret = regulator_enable(jadard->vdd);
	if (ret)
		return ret;

	gpiod_set_value(jadard->reset, 1);
	msleep(5);

	gpiod_set_value(jadard->reset, 0);
	msleep(10);

	gpiod_set_value(jadard->reset, 1);
	msleep(120);

	return 0;
}

static int jadard_unprepare(struct drm_panel *panel)
{
	struct jadard *jadard = panel_to_jadard(panel);

	gpiod_set_value(jadard->reset, 1);
	msleep(120);

	regulator_disable(jadard->vdd);
	regulator_disable(jadard->vccio);

	return 0;
}
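
/* prepare/unprepare bracket the power state: both supplies come up before
 * the reset line is pulsed (1/0/1 with 5/10/120 ms waits) so the
 * controller latches its configuration, and unprepare re-asserts the
 * reset GPIO before the supplies are cut.
 */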

static int jadard_get_modes(struct drm_panel *panel,
			    struct drm_connector *connector)
{
	struct jadard *jadard = panel_to_jadard(panel);
	const struct drm_display_mode *desc_mode = &jadard->desc->mode;
	struct drm_display_mode *mode;

	mode = drm_mode_duplicate(connector->dev, desc_mode);
	if (!mode) {
		DRM_DEV_ERROR(&jadard->dsi->dev, "failed to add mode %ux%u@%u\n",
			      desc_mode->hdisplay, desc_mode->vdisplay,
			      drm_mode_vrefresh(desc_mode));
		return -ENOMEM;
	}

	drm_mode_set_name(mode);
	drm_mode_probed_add(connector, mode);

	connector->display_info.width_mm = mode->width_mm;
	connector->display_info.height_mm = mode->height_mm;

	return 1;
}

static const struct drm_panel_funcs jadard_funcs = {
	.disable = jadard_disable,
	.unprepare = jadard_unprepare,
	.prepare = jadard_prepare,
	.enable = jadard_enable,
	.get_modes = jadard_get_modes,
};

static const struct jadard_init_cmd cz101b4001_init_cmds[] = {
	{ .data = { 0xE0, 0x00 } },
	{ .data = { 0xE1, 0x93 } },
	{ .data = { 0xE2, 0x65 } },
	{ .data = { 0xE3, 0xF8 } },
	{ .data = { 0x80, 0x03 } },
	{ .data = { 0xE0, 0x01 } },
	{ .data = { 0x00, 0x00 } },
	{ .data = { 0x01, 0x3B } },
	{ .data = { 0x0C, 0x74 } },
	{ .data = { 0x17, 0x00 } },
	{ .data = { 0x18, 0xAF } },
	{ .data = { 0x19, 0x00 } },
	{ .data = { 0x1A, 0x00 } },
	{ .data = { 0x1B, 0xAF } },
	{ .data = { 0x1C, 0x00 } },
	{ .data = { 0x35, 0x26 } },
	{ .data = { 0x37, 0x09 } },
	{ .data = { 0x38, 0x04 } },
	{ .data = { 0x39, 0x00 } },
	{ .data = { 0x3A, 0x01 } },
	{ .data = { 0x3C, 0x78 } },
	{ .data = { 0x3D, 0xFF } },
	{ .data = { 0x3E, 0xFF } },
	{ .data = { 0x3F, 0x7F } },
	{ .data = { 0x40, 0x06 } },
	{ .data = { 0x41, 0xA0 } },
	{ .data = { 0x42, 0x81 } },
	{ .data = { 0x43, 0x14 } },
	{ .data = { 0x44, 0x23 } },
	{ .data = { 0x45, 0x28 } },
	{ .data = { 0x55, 0x02 } },
	{ .data = { 0x57, 0x69 } },
	{ .data = { 0x59, 0x0A } },
	{ .data = { 0x5A, 0x2A } },
	{ .data = { 0x5B, 0x17 } },
	{ .data = { 0x5D, 0x7F } },
	{ .data = { 0x5E, 0x6B } },
	{ .data = { 0x5F, 0x5C } },
	{ .data = { 0x60, 0x4F } },
	{ .data = { 0x61, 0x4D } },
	{ .data = { 0x62, 0x3F } },
	{ .data = { 0x63, 0x42 } },
	{ .data = { 0x64, 0x2B } },
	{ .data = { 0x65, 0x44 } },
	{ .data = { 0x66, 0x43 } },
	{ .data = { 0x67, 0x43 } },
	{ .data = { 0x68, 0x63 } },
	{ .data = { 0x69, 0x52 } },
	{ .data = { 0x6A, 0x5A } },
	{ .data = { 0x6B, 0x4F } },
	{ .data = { 0x6C, 0x4E } },
	{ .data = { 0x6D, 0x20 } },
	{ .data = { 0x6E, 0x0F } },
	{ .data = { 0x6F, 0x00 } },
	{ .data = { 0x70, 0x7F } },
	{ .data = { 0x71, 0x6B } },
	{ .data = { 0x72, 0x5C } },
	{ .data = { 0x73, 0x4F } },
	{ .data = { 0x74, 0x4D } },
	{ .data = { 0x75, 0x3F } },
	{ .data = { 0x76, 0x42 } },
	{ .data = { 0x77, 0x2B } },
	{ .data = { 0x78, 0x44 } },
	{ .data = { 0x79, 0x43 } },
	{ .data = { 0x7A, 0x43 } },
	{ .data = { 0x7B, 0x63 } },
	{ .data = { 0x7C, 0x52 } },
	{ .data = { 0x7D, 0x5A } },
	{ .data = { 0x7E, 0x4F } },
	{ .data = { 0x7F, 0x4E } },
	{ .data = { 0x80, 0x20 } },
	{ .data = { 0x81, 0x0F } },
	{ .data = { 0x82, 0x00 } },
	{ .data = { 0xE0, 0x02 } },
	{ .data = { 0x00, 0x02 } },
	{ .data = { 0x01, 0x02 } },
	{ .data = { 0x02, 0x00 } },
	{ .data = { 0x03, 0x00 } },
	{ .data = { 0x04, 0x1E } },
	{ .data = { 0x05, 0x1E } },
	{ .data = { 0x06, 0x1F } },
	{ .data = { 0x07, 0x1F } },
	{ .data = { 0x08, 0x1F } },
	{ .data = { 0x09, 0x17 } },
	{ .data = { 0x0A, 0x17 } },
	{ .data = { 0x0B, 0x37 } },
	{ .data = { 0x0C, 0x37 } },
	{ .data = { 0x0D, 0x47 } },
	{ .data = { 0x0E, 0x47 } },
	{ .data = { 0x0F, 0x45 } },
	{ .data = { 0x10, 0x45 } },
	{ .data = { 0x11, 0x4B } },
	{ .data = { 0x12, 0x4B } },
	{ .data = { 0x13, 0x49 } },
	{ .data = { 0x14, 0x49 } },
	{ .data = { 0x15, 0x1F } },
	{ .data = { 0x16, 0x01 } },
	{ .data = { 0x17, 0x01 } },
	{ .data = { 0x18, 0x00 } },
	{ .data = { 0x19, 0x00 } },
	{ .data = { 0x1A, 0x1E } },
	{ .data = { 0x1B, 0x1E } },
	{ .data = { 0x1C, 0x1F } },
	{ .data = { 0x1D, 0x1F } },
	{ .data = { 0x1E, 0x1F } },
	{ .data = { 0x1F, 0x17 } },
	{ .data = { 0x20, 0x17 } },
	{ .data = { 0x21, 0x37 } },
	{ .data = { 0x22, 0x37 } },
	{ .data = { 0x23, 0x46 } },
	{ .data = { 0x24, 0x46 } },
	{ .data = { 0x25, 0x44 } },
	{ .data = { 0x26, 0x44 } },
	{ .data = { 0x27, 0x4A } },
	{ .data = { 0x28, 0x4A } },
	{ .data = { 0x29, 0x48 } },
	{ .data = { 0x2A, 0x48 } },
	{ .data = { 0x2B, 0x1F } },
	{ .data = { 0x2C, 0x01 } },
	{ .data = { 0x2D, 0x01 } },
	{ .data = { 0x2E, 0x00 } },
	{ .data = { 0x2F, 0x00 } },
	{ .data = { 0x30, 0x1F } },
	{ .data = { 0x31, 0x1F } },
	{ .data = { 0x32, 0x1E } },
	{ .data = { 0x33, 0x1E } },
	{ .data = { 0x34, 0x1F } },
	{ .data = { 0x35, 0x17 } },
	{ .data = { 0x36, 0x17 } },
	{ .data = { 0x37, 0x37 } },
	{ .data = { 0x38, 0x37 } },
	{ .data = { 0x39, 0x08 } },
	{ .data = { 0x3A, 0x08 } },
	{ .data = { 0x3B, 0x0A } },
	{ .data = { 0x3C, 0x0A } },
	{ .data = { 0x3D, 0x04 } },
	{ .data = { 0x3E, 0x04 } },
	{ .data = { 0x3F, 0x06 } },
	{ .data = { 0x40, 0x06 } },
	{ .data = { 0x41, 0x1F } },
	{ .data = { 0x42, 0x02 } },
	{ .data = { 0x43, 0x02 } },
	{ .data = { 0x44, 0x00 } },
	{ .data = { 0x45, 0x00 } },
	{ .data = { 0x46, 0x1F } },
	{ .data = { 0x47, 0x1F } },
	{ .data = { 0x48, 0x1E } },
	{ .data = { 0x49, 0x1E } },
	{ .data = { 0x4A, 0x1F } },
	{ .data = { 0x4B, 0x17 } },
	{ .data = { 0x4C, 0x17 } },
	{ .data = { 0x4D, 0x37 } },
	{ .data = { 0x4E, 0x37 } },
	{ .data = { 0x4F, 0x09 } },
	{ .data = { 0x50, 0x09 } },
	{ .data = { 0x51, 0x0B } },
	{ .data = { 0x52, 0x0B } },
	{ .data = { 0x53, 0x05 } },
	{ .data = { 0x54, 0x05 } },
	{ .data = { 0x55, 0x07 } },
	{ .data = { 0x56, 0x07 } },
	{ .data = { 0x57, 0x1F } },
	{ .data = { 0x58, 0x40 } },
	{ .data = { 0x5B, 0x30 } },
	{ .data = { 0x5C, 0x16 } },
	{ .data = { 0x5D, 0x34 } },
	{ .data = { 0x5E, 0x05 } },
	{ .data = { 0x5F, 0x02 } },
	{ .data = { 0x63, 0x00 } },
	{ .data = { 0x64, 0x6A } },
	{ .data = { 0x67, 0x73 } },
	{ .data = { 0x68, 0x1D } },
	{ .data = { 0x69, 0x08 } },
	{ .data = { 0x6A, 0x6A } },
	{ .data = { 0x6B, 0x08 } },
	{ .data = { 0x6C, 0x00 } },
	{ .data = { 0x6D, 0x00 } },
	{ .data = { 0x6E, 0x00 } },
	{ .data = { 0x6F, 0x88 } },
	{ .data = { 0x75, 0xFF } },
	{ .data = { 0x77, 0xDD } },
	{ .data = { 0x78, 0x3F } },
	{ .data = { 0x79, 0x15 } },
	{ .data = { 0x7A, 0x17 } },
	{ .data = { 0x7D, 0x14 } },
	{ .data = { 0x7E, 0x82 } },
	{ .data = { 0xE0, 0x04 } },
	{ .data = { 0x00, 0x0E } },
	{ .data = { 0x02, 0xB3 } },
	{ .data = { 0x09, 0x61 } },
	{ .data = { 0x0E, 0x48 } },
	{ .data = { 0xE0, 0x00 } },
	{ .data = { 0xE6, 0x02 } },
	{ .data = { 0xE7, 0x0C } },
};

static const struct jadard_panel_desc cz101b4001_desc = {
	.mode = {
		.clock		= 70000,

		.hdisplay	= 800,
		.hsync_start	= 800 + 40,
		.hsync_end	= 800 + 40 + 18,
		.htotal		= 800 + 40 + 18 + 20,

		.vdisplay	= 1280,
		.vsync_start	= 1280 + 20,
		.vsync_end	= 1280 + 20 + 4,
		.vtotal		= 1280 + 20 + 4 + 20,

		.width_mm	= 62,
		.height_mm	= 110,
		.type		= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
	},
	.lanes = 4,
	.format = MIPI_DSI_FMT_RGB888,
	.init_cmds = cz101b4001_init_cmds,
	.num_init_cmds = ARRAY_SIZE(cz101b4001_init_cmds),
};
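
/* With these timings the refresh rate works out to roughly 60 Hz:
 * 70 MHz pixel clock / (htotal 878 * vtotal 1324) = ~60.2 frames/s.
 */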

static int jadard_dsi_probe(struct mipi_dsi_device *dsi)
{
	struct device *dev = &dsi->dev;
	const struct jadard_panel_desc *desc;
	struct jadard *jadard;
	int ret;

	jadard = devm_kzalloc(&dsi->dev, sizeof(*jadard), GFP_KERNEL);
	if (!jadard)
		return -ENOMEM;

	desc = of_device_get_match_data(dev);
	dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
			  MIPI_DSI_MODE_NO_EOT_PACKET;
	dsi->format = desc->format;
	dsi->lanes = desc->lanes;

	jadard->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(jadard->reset)) {
		DRM_DEV_ERROR(&dsi->dev, "failed to get our reset GPIO\n");
		return PTR_ERR(jadard->reset);
	}

	jadard->vdd = devm_regulator_get(dev, "vdd");
	if (IS_ERR(jadard->vdd)) {
		DRM_DEV_ERROR(&dsi->dev, "failed to get vdd regulator\n");
		return PTR_ERR(jadard->vdd);
	}

	jadard->vccio = devm_regulator_get(dev, "vccio");
	if (IS_ERR(jadard->vccio)) {
		DRM_DEV_ERROR(&dsi->dev, "failed to get vccio regulator\n");
		return PTR_ERR(jadard->vccio);
	}

	drm_panel_init(&jadard->panel, dev, &jadard_funcs,
		       DRM_MODE_CONNECTOR_DSI);

	ret = drm_panel_of_backlight(&jadard->panel);
	if (ret)
		return ret;

	drm_panel_add(&jadard->panel);

	mipi_dsi_set_drvdata(dsi, jadard);
	jadard->dsi = dsi;
	jadard->desc = desc;

	ret = mipi_dsi_attach(dsi);
	if (ret < 0)
		drm_panel_remove(&jadard->panel);

	return ret;
}
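
/* Probe registers the panel with DRM before attaching to the DSI host so
 * the host can find it during mipi_dsi_attach(); the error path removes
 * the panel again if the attach fails.
 */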

static void jadard_dsi_remove(struct mipi_dsi_device *dsi)
{
	struct jadard *jadard = mipi_dsi_get_drvdata(dsi);

	mipi_dsi_detach(dsi);
	drm_panel_remove(&jadard->panel);
}

static const struct of_device_id jadard_of_match[] = {
	{ .compatible = "chongzhou,cz101b4001", .data = &cz101b4001_desc },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, jadard_of_match);

static struct mipi_dsi_driver jadard_driver = {
	.probe = jadard_dsi_probe,
	.remove = jadard_dsi_remove,
	.driver = {
		.name = "jadard-jd9365da",
		.of_match_table = jadard_of_match,
	},
};
module_mipi_dsi_driver(jadard_driver);

MODULE_AUTHOR("Jagan Teki <jagan@edgeble.ai>");
MODULE_AUTHOR("Stephen Chen <stephen@radxa.com>");
MODULE_DESCRIPTION("Jadard JD9365DA-H3 WXGA DSI panel");
MODULE_LICENSE("GPL");
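
/*
 * Example device-tree node (illustrative only; phandles, GPIO polarity
 * and regulator names are placeholders). The property names follow what
 * the driver requests above: a "reset" GPIO, "vdd"/"vccio" supplies and
 * an optional backlight.
 *
 *	panel@0 {
 *		compatible = "chongzhou,cz101b4001";
 *		reg = <0>;
 *		reset-gpios = <&gpio1 5 GPIO_ACTIVE_LOW>;
 *		vdd-supply = <&panel_vdd>;
 *		vccio-supply = <&panel_vccio>;
 *		backlight = <&backlight>;
 *	};
 */
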
@@ -48,7 +48,7 @@
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_bridge.h>
 #include <drm/drm_drv.h>
-#include <drm/drm_fb_helper.h>
+#include <drm/drm_fbdev_generic.h>
 #include <drm/drm_fourcc.h>
 #include <drm/drm_gem_dma_helper.h>
 #include <drm/drm_gem_framebuffer_helper.h>
 
@@ -37,6 +37,7 @@
 #include <drm/drm_aperture.h>
 #include <drm/drm_atomic_helper.h>
 #include <drm/drm_drv.h>
+#include <drm/drm_fbdev_generic.h>
 #include <drm/drm_file.h>
 #include <drm/drm_gem_ttm_helper.h>
 #include <drm/drm_module.h>
 
@@ -38,7 +38,6 @@
 
 #include <drm/drm_crtc.h>
 #include <drm/drm_encoder.h>
-#include <drm/drm_fb_helper.h>
 #include <drm/drm_gem_ttm_helper.h>
 #include <drm/drm_ioctl.h>
 #include <drm/drm_gem.h>
 
Some files were not shown because too many files have changed in this diff.