mirror of
https://github.com/torvalds/linux.git
synced 2024-11-26 06:02:05 +00:00
Merge tag 'drm-misc-next-2023-09-27' of git://anongit.freedesktop.org/drm/drm-misc into drm-next
drm-misc-next for v6.7-rc1: UAPI Changes: - drm_file owner is now updated during use, in the case of a drm fd opened by the display server for a client, the correct owner is displayed. - Qaic gains support for the QAIC_DETACH_SLICE_BO ioctl to allow bo recycling. Cross-subsystem Changes: - Disable boot logo for au1200fb, mmpfb and unexport logo helpers. Only fbcon should manage display of logo. - Update freescale in MAINTAINERS. - Add some bridge files to bridge in MAINTAINERS. - Update gma500 driver repo in MAINTAINERS to point to drm-misc. Core Changes: - Move size computations to drm buddy allocator. - Make drm_atomic_helper_shutdown(NULL) a nop. - Assorted small fixes in drm_debugfs, DP-MST payload addition error handling. - Fix DRM_BRIDGE_ATTACH_NO_CONNECTOR handling. - Handle bad (h/v)sync_end in EDID by clipping to htotal. - Build GPUVM as a module. Driver Changes: - Simple drivers don't need to cache prepared result. - Call drm_atomic_helper_shutdown() in shutdown/unbind for a whole lot more drm drivers. - Assorted small fixes in amdgpu, ssd130x, bridge/it6621, accel/qaic, nouveau, tc358768. - Add NV12 for komeda writeback. - Add arbitration lost event to synopsis/dw-hdmi-cec. - Speed up s/r in nouveau by not restoring some big bo's. - Assorted nouveau display rework in preparation for GSP-RM, especially related to how the modeset sequence works and the DP sequence in relation to link training. - Update anx7816 panel. - Support NVSYNC and NHSYNC in tegra. - Allow multiple power domains in simple driver. Signed-off-by: Dave Airlie <airlied@redhat.com> From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com> Link: https://patchwork.freedesktop.org/patch/msgid/f1fae5eb-25b8-192a-9a53-215e1184ce81@linux.intel.com
This commit is contained in:
commit
79fb229b88
@ -123,6 +123,16 @@ DRM_IOCTL_QAIC_PART_DEV
|
||||
AIC100 device and can be used for limiting a process to some subset of
|
||||
resources.
|
||||
|
||||
DRM_IOCTL_QAIC_DETACH_SLICE_BO
|
||||
This IOCTL allows userspace to remove the slicing information from a BO that
|
||||
was originally provided by a call to DRM_IOCTL_QAIC_ATTACH_SLICE_BO. This
|
||||
is the inverse of DRM_IOCTL_QAIC_ATTACH_SLICE_BO. The BO must be idle for
|
||||
DRM_IOCTL_QAIC_DETACH_SLICE_BO to be called. After a successful detach slice
|
||||
operation the BO may have new slicing information attached with a new call
|
||||
to DRM_IOCTL_QAIC_ATTACH_SLICE_BO. After detach slice, the BO cannot be
|
||||
executed until after a new attach slice operation. Combining attach slice
|
||||
and detach slice calls allows userspace to use a BO with multiple workloads.
|
||||
|
||||
Userspace Client Isolation
|
||||
==========================
|
||||
|
||||
|
@ -17,6 +17,7 @@ properties:
|
||||
- analogix,anx7808
|
||||
- analogix,anx7812
|
||||
- analogix,anx7814
|
||||
- analogix,anx7816
|
||||
- analogix,anx7818
|
||||
|
||||
reg:
|
||||
|
@ -6908,7 +6908,9 @@ T: git git://anongit.freedesktop.org/drm/drm-misc
|
||||
F: Documentation/devicetree/bindings/display/bridge/
|
||||
F: drivers/gpu/drm/bridge/
|
||||
F: drivers/gpu/drm/drm_bridge.c
|
||||
F: drivers/gpu/drm/drm_bridge_connector.c
|
||||
F: include/drm/drm_bridge.h
|
||||
F: include/drm/drm_bridge_connector.h
|
||||
|
||||
DRM DRIVERS FOR EXYNOS
|
||||
M: Inki Dae <inki.dae@samsung.com>
|
||||
@ -6932,10 +6934,12 @@ F: Documentation/devicetree/bindings/display/fsl,dcu.txt
|
||||
F: Documentation/devicetree/bindings/display/fsl,tcon.txt
|
||||
F: drivers/gpu/drm/fsl-dcu/
|
||||
|
||||
DRM DRIVERS FOR FREESCALE IMX
|
||||
DRM DRIVERS FOR FREESCALE IMX 5/6
|
||||
M: Philipp Zabel <p.zabel@pengutronix.de>
|
||||
L: dri-devel@lists.freedesktop.org
|
||||
S: Maintained
|
||||
T: git git://anongit.freedesktop.org/drm/drm-misc
|
||||
T: git git://git.pengutronix.de/git/pza/linux
|
||||
F: Documentation/devicetree/bindings/display/imx/
|
||||
F: drivers/gpu/drm/imx/ipuv3/
|
||||
F: drivers/gpu/ipu-v3/
|
||||
@ -6954,7 +6958,7 @@ DRM DRIVERS FOR GMA500 (Poulsbo, Moorestown and derivative chipsets)
|
||||
M: Patrik Jakobsson <patrik.r.jakobsson@gmail.com>
|
||||
L: dri-devel@lists.freedesktop.org
|
||||
S: Maintained
|
||||
T: git git://github.com/patjak/drm-gma500
|
||||
T: git git://anongit.freedesktop.org/drm/drm-misc
|
||||
F: drivers/gpu/drm/gma500/
|
||||
|
||||
DRM DRIVERS FOR HISILICON
|
||||
|
@ -27,6 +27,9 @@
|
||||
#define QAIC_DBC_OFF(i) ((i) * QAIC_DBC_SIZE + QAIC_DBC_BASE)
|
||||
|
||||
#define to_qaic_bo(obj) container_of(obj, struct qaic_bo, base)
|
||||
#define to_qaic_drm_device(dev) container_of(dev, struct qaic_drm_device, drm)
|
||||
#define to_drm(qddev) (&(qddev)->drm)
|
||||
#define to_accel_kdev(qddev) (to_drm(qddev)->accel->kdev) /* Return Linux device of accel node */
|
||||
|
||||
extern bool datapath_polling;
|
||||
|
||||
@ -137,6 +140,8 @@ struct qaic_device {
|
||||
};
|
||||
|
||||
struct qaic_drm_device {
|
||||
/* The drm device struct of this drm device */
|
||||
struct drm_device drm;
|
||||
/* Pointer to the root device struct driven by this driver */
|
||||
struct qaic_device *qdev;
|
||||
/*
|
||||
@ -146,8 +151,6 @@ struct qaic_drm_device {
|
||||
* device is the actual physical device
|
||||
*/
|
||||
s32 partition_id;
|
||||
/* Pointer to the drm device struct of this drm device */
|
||||
struct drm_device *ddev;
|
||||
/* Head in list of users who have opened this drm device */
|
||||
struct list_head users;
|
||||
/* Synchronizes access to users list */
|
||||
@ -158,8 +161,6 @@ struct qaic_bo {
|
||||
struct drm_gem_object base;
|
||||
/* Scatter/gather table for allocate/imported BO */
|
||||
struct sg_table *sgt;
|
||||
/* BO size requested by user. GEM object might be bigger in size. */
|
||||
u64 size;
|
||||
/* Head in list of slices of this BO */
|
||||
struct list_head slices;
|
||||
/* Total nents, for all slices of this BO */
|
||||
@ -221,7 +222,8 @@ struct qaic_bo {
|
||||
*/
|
||||
u32 queue_level_before;
|
||||
} perf_stats;
|
||||
|
||||
/* Synchronizes BO operations */
|
||||
struct mutex lock;
|
||||
};
|
||||
|
||||
struct bo_slice {
|
||||
@ -277,6 +279,7 @@ int qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *f
|
||||
int qaic_partial_execute_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
|
||||
int qaic_wait_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
|
||||
int qaic_perf_stats_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
|
||||
int qaic_detach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
|
||||
void irq_polling_work(struct work_struct *work);
|
||||
|
||||
#endif /* _QAIC_H_ */
|
||||
|
@ -154,6 +154,7 @@ static void free_slice(struct kref *kref)
|
||||
{
|
||||
struct bo_slice *slice = container_of(kref, struct bo_slice, ref_count);
|
||||
|
||||
slice->bo->total_slice_nents -= slice->nents;
|
||||
list_del(&slice->slice);
|
||||
drm_gem_object_put(&slice->bo->base);
|
||||
sg_free_table(slice->sgt);
|
||||
@ -579,7 +580,7 @@ static void qaic_gem_print_info(struct drm_printer *p, unsigned int indent,
|
||||
{
|
||||
struct qaic_bo *bo = to_qaic_bo(obj);
|
||||
|
||||
drm_printf_indent(p, indent, "user requested size=%llu\n", bo->size);
|
||||
drm_printf_indent(p, indent, "BO DMA direction %d\n", bo->dir);
|
||||
}
|
||||
|
||||
static const struct vm_operations_struct drm_vm_ops = {
|
||||
@ -623,6 +624,7 @@ static void qaic_free_object(struct drm_gem_object *obj)
|
||||
qaic_free_sgt(bo->sgt);
|
||||
}
|
||||
|
||||
mutex_destroy(&bo->lock);
|
||||
drm_gem_object_release(obj);
|
||||
kfree(bo);
|
||||
}
|
||||
@ -634,6 +636,19 @@ static const struct drm_gem_object_funcs qaic_gem_funcs = {
|
||||
.vm_ops = &drm_vm_ops,
|
||||
};
|
||||
|
||||
static void qaic_init_bo(struct qaic_bo *bo, bool reinit)
|
||||
{
|
||||
if (reinit) {
|
||||
bo->sliced = false;
|
||||
reinit_completion(&bo->xfer_done);
|
||||
} else {
|
||||
mutex_init(&bo->lock);
|
||||
init_completion(&bo->xfer_done);
|
||||
}
|
||||
complete_all(&bo->xfer_done);
|
||||
INIT_LIST_HEAD(&bo->slices);
|
||||
}
|
||||
|
||||
static struct qaic_bo *qaic_alloc_init_bo(void)
|
||||
{
|
||||
struct qaic_bo *bo;
|
||||
@ -642,9 +657,7 @@ static struct qaic_bo *qaic_alloc_init_bo(void)
|
||||
if (!bo)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
INIT_LIST_HEAD(&bo->slices);
|
||||
init_completion(&bo->xfer_done);
|
||||
complete_all(&bo->xfer_done);
|
||||
qaic_init_bo(bo, false);
|
||||
|
||||
return bo;
|
||||
}
|
||||
@ -695,8 +708,6 @@ int qaic_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
|
||||
if (ret)
|
||||
goto free_bo;
|
||||
|
||||
bo->size = args->size;
|
||||
|
||||
ret = drm_gem_handle_create(file_priv, obj, &args->handle);
|
||||
if (ret)
|
||||
goto free_sgt;
|
||||
@ -828,7 +839,6 @@ static int qaic_prepare_import_bo(struct qaic_bo *bo, struct qaic_attach_slice_h
|
||||
}
|
||||
|
||||
bo->sgt = sgt;
|
||||
bo->size = hdr->size;
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -838,7 +848,7 @@ static int qaic_prepare_export_bo(struct qaic_device *qdev, struct qaic_bo *bo,
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (bo->size != hdr->size)
|
||||
if (bo->base.size < hdr->size)
|
||||
return -EINVAL;
|
||||
|
||||
ret = dma_map_sgtable(&qdev->pdev->dev, bo->sgt, hdr->dir, 0);
|
||||
@ -857,9 +867,9 @@ static int qaic_prepare_bo(struct qaic_device *qdev, struct qaic_bo *bo,
|
||||
ret = qaic_prepare_import_bo(bo, hdr);
|
||||
else
|
||||
ret = qaic_prepare_export_bo(qdev, bo, hdr);
|
||||
|
||||
if (ret == 0)
|
||||
bo->dir = hdr->dir;
|
||||
bo->dir = hdr->dir;
|
||||
bo->dbc = &qdev->dbc[hdr->dbc_id];
|
||||
bo->nr_slice = hdr->count;
|
||||
|
||||
return ret;
|
||||
}
|
||||
@ -868,7 +878,6 @@ static void qaic_unprepare_import_bo(struct qaic_bo *bo)
|
||||
{
|
||||
dma_buf_unmap_attachment(bo->base.import_attach, bo->sgt, bo->dir);
|
||||
bo->sgt = NULL;
|
||||
bo->size = 0;
|
||||
}
|
||||
|
||||
static void qaic_unprepare_export_bo(struct qaic_device *qdev, struct qaic_bo *bo)
|
||||
@ -884,6 +893,8 @@ static void qaic_unprepare_bo(struct qaic_device *qdev, struct qaic_bo *bo)
|
||||
qaic_unprepare_export_bo(qdev, bo);
|
||||
|
||||
bo->dir = 0;
|
||||
bo->dbc = NULL;
|
||||
bo->nr_slice = 0;
|
||||
}
|
||||
|
||||
static void qaic_free_slices_bo(struct qaic_bo *bo)
|
||||
@ -892,6 +903,9 @@ static void qaic_free_slices_bo(struct qaic_bo *bo)
|
||||
|
||||
list_for_each_entry_safe(slice, temp, &bo->slices, slice)
|
||||
kref_put(&slice->ref_count, free_slice);
|
||||
if (WARN_ON_ONCE(bo->total_slice_nents != 0))
|
||||
bo->total_slice_nents = 0;
|
||||
bo->nr_slice = 0;
|
||||
}
|
||||
|
||||
static int qaic_attach_slicing_bo(struct qaic_device *qdev, struct qaic_bo *bo,
|
||||
@ -908,15 +922,11 @@ static int qaic_attach_slicing_bo(struct qaic_device *qdev, struct qaic_bo *bo,
|
||||
}
|
||||
}
|
||||
|
||||
if (bo->total_slice_nents > qdev->dbc[hdr->dbc_id].nelem) {
|
||||
if (bo->total_slice_nents > bo->dbc->nelem) {
|
||||
qaic_free_slices_bo(bo);
|
||||
return -ENOSPC;
|
||||
}
|
||||
|
||||
bo->sliced = true;
|
||||
bo->nr_slice = hdr->count;
|
||||
list_add_tail(&bo->bo_list, &qdev->dbc[hdr->dbc_id].bo_lists);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -994,10 +1004,13 @@ int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi
|
||||
}
|
||||
|
||||
bo = to_qaic_bo(obj);
|
||||
ret = mutex_lock_interruptible(&bo->lock);
|
||||
if (ret)
|
||||
goto put_bo;
|
||||
|
||||
if (bo->sliced) {
|
||||
ret = -EINVAL;
|
||||
goto put_bo;
|
||||
goto unlock_bo;
|
||||
}
|
||||
|
||||
dbc = &qdev->dbc[args->hdr.dbc_id];
|
||||
@ -1018,9 +1031,10 @@ int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_fi
|
||||
if (args->hdr.dir == DMA_TO_DEVICE)
|
||||
dma_sync_sgtable_for_cpu(&qdev->pdev->dev, bo->sgt, args->hdr.dir);
|
||||
|
||||
bo->dbc = dbc;
|
||||
bo->sliced = true;
|
||||
list_add_tail(&bo->bo_list, &bo->dbc->bo_lists);
|
||||
srcu_read_unlock(&dbc->ch_lock, rcu_id);
|
||||
drm_gem_object_put(obj);
|
||||
mutex_unlock(&bo->lock);
|
||||
kfree(slice_ent);
|
||||
srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
|
||||
srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
|
||||
@ -1031,6 +1045,8 @@ unprepare_bo:
|
||||
qaic_unprepare_bo(qdev, bo);
|
||||
unlock_ch_srcu:
|
||||
srcu_read_unlock(&dbc->ch_lock, rcu_id);
|
||||
unlock_bo:
|
||||
mutex_unlock(&bo->lock);
|
||||
put_bo:
|
||||
drm_gem_object_put(obj);
|
||||
free_slice_ent:
|
||||
@ -1185,15 +1201,18 @@ static int send_bo_list_to_device(struct qaic_device *qdev, struct drm_file *fil
|
||||
}
|
||||
|
||||
bo = to_qaic_bo(obj);
|
||||
ret = mutex_lock_interruptible(&bo->lock);
|
||||
if (ret)
|
||||
goto failed_to_send_bo;
|
||||
|
||||
if (!bo->sliced) {
|
||||
ret = -EINVAL;
|
||||
goto failed_to_send_bo;
|
||||
goto unlock_bo;
|
||||
}
|
||||
|
||||
if (is_partial && pexec[i].resize > bo->size) {
|
||||
if (is_partial && pexec[i].resize > bo->base.size) {
|
||||
ret = -EINVAL;
|
||||
goto failed_to_send_bo;
|
||||
goto unlock_bo;
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&dbc->xfer_lock, flags);
|
||||
@ -1202,7 +1221,7 @@ static int send_bo_list_to_device(struct qaic_device *qdev, struct drm_file *fil
|
||||
if (queued) {
|
||||
spin_unlock_irqrestore(&dbc->xfer_lock, flags);
|
||||
ret = -EINVAL;
|
||||
goto failed_to_send_bo;
|
||||
goto unlock_bo;
|
||||
}
|
||||
|
||||
bo->req_id = dbc->next_req_id++;
|
||||
@ -1233,17 +1252,20 @@ static int send_bo_list_to_device(struct qaic_device *qdev, struct drm_file *fil
|
||||
if (ret) {
|
||||
bo->queued = false;
|
||||
spin_unlock_irqrestore(&dbc->xfer_lock, flags);
|
||||
goto failed_to_send_bo;
|
||||
goto unlock_bo;
|
||||
}
|
||||
}
|
||||
reinit_completion(&bo->xfer_done);
|
||||
list_add_tail(&bo->xfer_list, &dbc->xfer_list);
|
||||
spin_unlock_irqrestore(&dbc->xfer_lock, flags);
|
||||
dma_sync_sgtable_for_device(&qdev->pdev->dev, bo->sgt, bo->dir);
|
||||
mutex_unlock(&bo->lock);
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
unlock_bo:
|
||||
mutex_unlock(&bo->lock);
|
||||
failed_to_send_bo:
|
||||
if (likely(obj))
|
||||
drm_gem_object_put(obj);
|
||||
@ -1799,6 +1821,91 @@ unlock_usr_srcu:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void detach_slice_bo(struct qaic_device *qdev, struct qaic_bo *bo)
|
||||
{
|
||||
qaic_free_slices_bo(bo);
|
||||
qaic_unprepare_bo(qdev, bo);
|
||||
qaic_init_bo(bo, true);
|
||||
list_del(&bo->bo_list);
|
||||
drm_gem_object_put(&bo->base);
|
||||
}
|
||||
|
||||
int qaic_detach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
|
||||
{
|
||||
struct qaic_detach_slice *args = data;
|
||||
int rcu_id, usr_rcu_id, qdev_rcu_id;
|
||||
struct dma_bridge_chan *dbc;
|
||||
struct drm_gem_object *obj;
|
||||
struct qaic_device *qdev;
|
||||
struct qaic_user *usr;
|
||||
unsigned long flags;
|
||||
struct qaic_bo *bo;
|
||||
int ret;
|
||||
|
||||
if (args->pad != 0)
|
||||
return -EINVAL;
|
||||
|
||||
usr = file_priv->driver_priv;
|
||||
usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
|
||||
if (!usr->qddev) {
|
||||
ret = -ENODEV;
|
||||
goto unlock_usr_srcu;
|
||||
}
|
||||
|
||||
qdev = usr->qddev->qdev;
|
||||
qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
|
||||
if (qdev->in_reset) {
|
||||
ret = -ENODEV;
|
||||
goto unlock_dev_srcu;
|
||||
}
|
||||
|
||||
obj = drm_gem_object_lookup(file_priv, args->handle);
|
||||
if (!obj) {
|
||||
ret = -ENOENT;
|
||||
goto unlock_dev_srcu;
|
||||
}
|
||||
|
||||
bo = to_qaic_bo(obj);
|
||||
ret = mutex_lock_interruptible(&bo->lock);
|
||||
if (ret)
|
||||
goto put_bo;
|
||||
|
||||
if (!bo->sliced) {
|
||||
ret = -EINVAL;
|
||||
goto unlock_bo;
|
||||
}
|
||||
|
||||
dbc = bo->dbc;
|
||||
rcu_id = srcu_read_lock(&dbc->ch_lock);
|
||||
if (dbc->usr != usr) {
|
||||
ret = -EINVAL;
|
||||
goto unlock_ch_srcu;
|
||||
}
|
||||
|
||||
/* Check if BO is committed to H/W for DMA */
|
||||
spin_lock_irqsave(&dbc->xfer_lock, flags);
|
||||
if (bo->queued) {
|
||||
spin_unlock_irqrestore(&dbc->xfer_lock, flags);
|
||||
ret = -EBUSY;
|
||||
goto unlock_ch_srcu;
|
||||
}
|
||||
spin_unlock_irqrestore(&dbc->xfer_lock, flags);
|
||||
|
||||
detach_slice_bo(qdev, bo);
|
||||
|
||||
unlock_ch_srcu:
|
||||
srcu_read_unlock(&dbc->ch_lock, rcu_id);
|
||||
unlock_bo:
|
||||
mutex_unlock(&bo->lock);
|
||||
put_bo:
|
||||
drm_gem_object_put(obj);
|
||||
unlock_dev_srcu:
|
||||
srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
|
||||
unlock_usr_srcu:
|
||||
srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void empty_xfer_list(struct qaic_device *qdev, struct dma_bridge_chan *dbc)
|
||||
{
|
||||
unsigned long flags;
|
||||
@ -1810,6 +1917,12 @@ static void empty_xfer_list(struct qaic_device *qdev, struct dma_bridge_chan *db
|
||||
bo->queued = false;
|
||||
list_del(&bo->xfer_list);
|
||||
spin_unlock_irqrestore(&dbc->xfer_lock, flags);
|
||||
bo->nr_slice_xfer_done = 0;
|
||||
bo->req_id = 0;
|
||||
bo->perf_stats.req_received_ts = 0;
|
||||
bo->perf_stats.req_submit_ts = 0;
|
||||
bo->perf_stats.req_processed_ts = 0;
|
||||
bo->perf_stats.queue_level_before = 0;
|
||||
dma_sync_sgtable_for_cpu(&qdev->pdev->dev, bo->sgt, bo->dir);
|
||||
complete_all(&bo->xfer_done);
|
||||
drm_gem_object_put(&bo->base);
|
||||
@ -1857,7 +1970,6 @@ void wakeup_dbc(struct qaic_device *qdev, u32 dbc_id)
|
||||
|
||||
void release_dbc(struct qaic_device *qdev, u32 dbc_id)
|
||||
{
|
||||
struct bo_slice *slice, *slice_temp;
|
||||
struct qaic_bo *bo, *bo_temp;
|
||||
struct dma_bridge_chan *dbc;
|
||||
|
||||
@ -1875,24 +1987,11 @@ void release_dbc(struct qaic_device *qdev, u32 dbc_id)
|
||||
dbc->usr = NULL;
|
||||
|
||||
list_for_each_entry_safe(bo, bo_temp, &dbc->bo_lists, bo_list) {
|
||||
list_for_each_entry_safe(slice, slice_temp, &bo->slices, slice)
|
||||
kref_put(&slice->ref_count, free_slice);
|
||||
bo->sliced = false;
|
||||
INIT_LIST_HEAD(&bo->slices);
|
||||
bo->total_slice_nents = 0;
|
||||
bo->dir = 0;
|
||||
bo->dbc = NULL;
|
||||
bo->nr_slice = 0;
|
||||
bo->nr_slice_xfer_done = 0;
|
||||
bo->queued = false;
|
||||
bo->req_id = 0;
|
||||
init_completion(&bo->xfer_done);
|
||||
complete_all(&bo->xfer_done);
|
||||
list_del(&bo->bo_list);
|
||||
bo->perf_stats.req_received_ts = 0;
|
||||
bo->perf_stats.req_submit_ts = 0;
|
||||
bo->perf_stats.req_processed_ts = 0;
|
||||
bo->perf_stats.queue_level_before = 0;
|
||||
drm_gem_object_get(&bo->base);
|
||||
mutex_lock(&bo->lock);
|
||||
detach_slice_bo(qdev, bo);
|
||||
mutex_unlock(&bo->lock);
|
||||
drm_gem_object_put(&bo->base);
|
||||
}
|
||||
|
||||
dbc->in_use = false;
|
||||
|
@ -22,6 +22,7 @@
|
||||
#include <drm/drm_file.h>
|
||||
#include <drm/drm_gem.h>
|
||||
#include <drm/drm_ioctl.h>
|
||||
#include <drm/drm_managed.h>
|
||||
#include <uapi/drm/qaic_accel.h>
|
||||
|
||||
#include "mhi_controller.h"
|
||||
@ -55,7 +56,7 @@ static void free_usr(struct kref *kref)
|
||||
|
||||
static int qaic_open(struct drm_device *dev, struct drm_file *file)
|
||||
{
|
||||
struct qaic_drm_device *qddev = dev->dev_private;
|
||||
struct qaic_drm_device *qddev = to_qaic_drm_device(dev);
|
||||
struct qaic_device *qdev = qddev->qdev;
|
||||
struct qaic_user *usr;
|
||||
int rcu_id;
|
||||
@ -150,6 +151,7 @@ static const struct drm_ioctl_desc qaic_drm_ioctls[] = {
|
||||
DRM_IOCTL_DEF_DRV(QAIC_PARTIAL_EXECUTE_BO, qaic_partial_execute_bo_ioctl, 0),
|
||||
DRM_IOCTL_DEF_DRV(QAIC_WAIT_BO, qaic_wait_bo_ioctl, 0),
|
||||
DRM_IOCTL_DEF_DRV(QAIC_PERF_STATS_BO, qaic_perf_stats_bo_ioctl, 0),
|
||||
DRM_IOCTL_DEF_DRV(QAIC_DETACH_SLICE_BO, qaic_detach_slice_bo_ioctl, 0),
|
||||
};
|
||||
|
||||
static const struct drm_driver qaic_accel_driver = {
|
||||
@ -170,64 +172,39 @@ static const struct drm_driver qaic_accel_driver = {
|
||||
|
||||
static int qaic_create_drm_device(struct qaic_device *qdev, s32 partition_id)
|
||||
{
|
||||
struct qaic_drm_device *qddev;
|
||||
struct drm_device *ddev;
|
||||
struct device *pdev;
|
||||
struct qaic_drm_device *qddev = qdev->qddev;
|
||||
struct drm_device *drm = to_drm(qddev);
|
||||
int ret;
|
||||
|
||||
/* Hold off implementing partitions until the uapi is determined */
|
||||
if (partition_id != QAIC_NO_PARTITION)
|
||||
return -EINVAL;
|
||||
|
||||
pdev = &qdev->pdev->dev;
|
||||
|
||||
qddev = kzalloc(sizeof(*qddev), GFP_KERNEL);
|
||||
if (!qddev)
|
||||
return -ENOMEM;
|
||||
|
||||
ddev = drm_dev_alloc(&qaic_accel_driver, pdev);
|
||||
if (IS_ERR(ddev)) {
|
||||
ret = PTR_ERR(ddev);
|
||||
goto ddev_fail;
|
||||
}
|
||||
|
||||
ddev->dev_private = qddev;
|
||||
qddev->ddev = ddev;
|
||||
|
||||
qddev->qdev = qdev;
|
||||
qddev->partition_id = partition_id;
|
||||
INIT_LIST_HEAD(&qddev->users);
|
||||
mutex_init(&qddev->users_mutex);
|
||||
|
||||
qdev->qddev = qddev;
|
||||
/*
|
||||
* drm_dev_unregister() sets the driver data to NULL and
|
||||
* drm_dev_register() does not update the driver data. During a SOC
|
||||
* reset drm dev is unregistered and registered again leaving the
|
||||
* driver data to NULL.
|
||||
*/
|
||||
dev_set_drvdata(to_accel_kdev(qddev), drm->accel);
|
||||
ret = drm_dev_register(drm, 0);
|
||||
if (ret)
|
||||
pci_dbg(qdev->pdev, "drm_dev_register failed %d\n", ret);
|
||||
|
||||
ret = drm_dev_register(ddev, 0);
|
||||
if (ret) {
|
||||
pci_dbg(qdev->pdev, "%s: drm_dev_register failed %d\n", __func__, ret);
|
||||
goto drm_reg_fail;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
drm_reg_fail:
|
||||
mutex_destroy(&qddev->users_mutex);
|
||||
qdev->qddev = NULL;
|
||||
drm_dev_put(ddev);
|
||||
ddev_fail:
|
||||
kfree(qddev);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void qaic_destroy_drm_device(struct qaic_device *qdev, s32 partition_id)
|
||||
{
|
||||
struct qaic_drm_device *qddev;
|
||||
struct qaic_drm_device *qddev = qdev->qddev;
|
||||
struct drm_device *drm = to_drm(qddev);
|
||||
struct qaic_user *usr;
|
||||
|
||||
qddev = qdev->qddev;
|
||||
qdev->qddev = NULL;
|
||||
if (!qddev)
|
||||
return;
|
||||
|
||||
drm_dev_get(drm);
|
||||
drm_dev_unregister(drm);
|
||||
qddev->partition_id = 0;
|
||||
/*
|
||||
* Existing users get unresolvable errors till they close FDs.
|
||||
* Need to sync carefully with users calling close(). The
|
||||
@ -254,13 +231,7 @@ static void qaic_destroy_drm_device(struct qaic_device *qdev, s32 partition_id)
|
||||
mutex_lock(&qddev->users_mutex);
|
||||
}
|
||||
mutex_unlock(&qddev->users_mutex);
|
||||
|
||||
if (qddev->ddev) {
|
||||
drm_dev_unregister(qddev->ddev);
|
||||
drm_dev_put(qddev->ddev);
|
||||
}
|
||||
|
||||
kfree(qddev);
|
||||
drm_dev_put(drm);
|
||||
}
|
||||
|
||||
static int qaic_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id)
|
||||
@ -344,8 +315,20 @@ void qaic_dev_reset_clean_local_state(struct qaic_device *qdev, bool exit_reset)
|
||||
qdev->in_reset = false;
|
||||
}
|
||||
|
||||
static void cleanup_qdev(struct qaic_device *qdev)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < qdev->num_dbc; ++i)
|
||||
cleanup_srcu_struct(&qdev->dbc[i].ch_lock);
|
||||
cleanup_srcu_struct(&qdev->dev_lock);
|
||||
pci_set_drvdata(qdev->pdev, NULL);
|
||||
destroy_workqueue(qdev->cntl_wq);
|
||||
}
|
||||
|
||||
static struct qaic_device *create_qdev(struct pci_dev *pdev, const struct pci_device_id *id)
|
||||
{
|
||||
struct qaic_drm_device *qddev;
|
||||
struct qaic_device *qdev;
|
||||
int i;
|
||||
|
||||
@ -381,20 +364,20 @@ static struct qaic_device *create_qdev(struct pci_dev *pdev, const struct pci_de
|
||||
INIT_LIST_HEAD(&qdev->dbc[i].bo_lists);
|
||||
}
|
||||
|
||||
qddev = devm_drm_dev_alloc(&pdev->dev, &qaic_accel_driver, struct qaic_drm_device, drm);
|
||||
if (IS_ERR(qddev)) {
|
||||
cleanup_qdev(qdev);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
drmm_mutex_init(to_drm(qddev), &qddev->users_mutex);
|
||||
INIT_LIST_HEAD(&qddev->users);
|
||||
qddev->qdev = qdev;
|
||||
qdev->qddev = qddev;
|
||||
|
||||
return qdev;
|
||||
}
|
||||
|
||||
static void cleanup_qdev(struct qaic_device *qdev)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < qdev->num_dbc; ++i)
|
||||
cleanup_srcu_struct(&qdev->dbc[i].ch_lock);
|
||||
cleanup_srcu_struct(&qdev->dev_lock);
|
||||
pci_set_drvdata(qdev->pdev, NULL);
|
||||
destroy_workqueue(qdev->cntl_wq);
|
||||
}
|
||||
|
||||
static int init_pci(struct qaic_device *qdev, struct pci_dev *pdev)
|
||||
{
|
||||
int bars;
|
||||
@ -591,22 +574,22 @@ static int __init qaic_init(void)
|
||||
{
|
||||
int ret;
|
||||
|
||||
ret = mhi_driver_register(&qaic_mhi_driver);
|
||||
if (ret) {
|
||||
pr_debug("qaic: mhi_driver_register failed %d\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = pci_register_driver(&qaic_pci_driver);
|
||||
if (ret) {
|
||||
pr_debug("qaic: pci_register_driver failed %d\n", ret);
|
||||
goto free_mhi;
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = mhi_driver_register(&qaic_mhi_driver);
|
||||
if (ret) {
|
||||
pr_debug("qaic: mhi_driver_register failed %d\n", ret);
|
||||
goto free_pci;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
free_mhi:
|
||||
mhi_driver_unregister(&qaic_mhi_driver);
|
||||
free_pci:
|
||||
pci_unregister_driver(&qaic_pci_driver);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -628,8 +611,8 @@ static void __exit qaic_exit(void)
|
||||
* reinitializing the link_up state after the cleanup is done.
|
||||
*/
|
||||
link_up = true;
|
||||
pci_unregister_driver(&qaic_pci_driver);
|
||||
mhi_driver_unregister(&qaic_mhi_driver);
|
||||
pci_unregister_driver(&qaic_pci_driver);
|
||||
}
|
||||
|
||||
module_init(qaic_init);
|
||||
|
@ -216,6 +216,13 @@ config DRM_EXEC
|
||||
help
|
||||
Execution context for command submissions
|
||||
|
||||
config DRM_GPUVM
|
||||
tristate
|
||||
depends on DRM
|
||||
help
|
||||
GPU-VM representation providing helpers to manage a GPUs virtual
|
||||
address space
|
||||
|
||||
config DRM_BUDDY
|
||||
tristate
|
||||
depends on DRM
|
||||
|
@ -45,7 +45,6 @@ drm-y := \
|
||||
drm_vblank.o \
|
||||
drm_vblank_work.o \
|
||||
drm_vma_manager.o \
|
||||
drm_gpuva_mgr.o \
|
||||
drm_writeback.o
|
||||
drm-$(CONFIG_DRM_LEGACY) += \
|
||||
drm_agpsupport.o \
|
||||
@ -81,6 +80,7 @@ obj-$(CONFIG_DRM_PANEL_ORIENTATION_QUIRKS) += drm_panel_orientation_quirks.o
|
||||
#
|
||||
#
|
||||
obj-$(CONFIG_DRM_EXEC) += drm_exec.o
|
||||
obj-$(CONFIG_DRM_GPUVM) += drm_gpuvm.o
|
||||
|
||||
obj-$(CONFIG_DRM_BUDDY) += drm_buddy.o
|
||||
|
||||
|
@ -962,6 +962,7 @@ static int amdgpu_debugfs_gem_info_show(struct seq_file *m, void *unused)
|
||||
list_for_each_entry(file, &dev->filelist, lhead) {
|
||||
struct task_struct *task;
|
||||
struct drm_gem_object *gobj;
|
||||
struct pid *pid;
|
||||
int id;
|
||||
|
||||
/*
|
||||
@ -971,8 +972,9 @@ static int amdgpu_debugfs_gem_info_show(struct seq_file *m, void *unused)
|
||||
* Therefore, we need to protect this ->comm access using RCU.
|
||||
*/
|
||||
rcu_read_lock();
|
||||
task = pid_task(file->pid, PIDTYPE_TGID);
|
||||
seq_printf(m, "pid %8d command %s:\n", pid_nr(file->pid),
|
||||
pid = rcu_dereference(file->pid);
|
||||
task = pid_task(pid, PIDTYPE_TGID);
|
||||
seq_printf(m, "pid %8d command %s:\n", pid_nr(pid),
|
||||
task ? task->comm : "<unknown>");
|
||||
rcu_read_unlock();
|
||||
|
||||
|
@ -424,9 +424,9 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
|
||||
const struct ttm_place *place,
|
||||
struct ttm_resource **res)
|
||||
{
|
||||
u64 vis_usage = 0, max_bytes, cur_size, min_block_size;
|
||||
struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
|
||||
struct amdgpu_device *adev = to_amdgpu_device(mgr);
|
||||
u64 vis_usage = 0, max_bytes, min_block_size;
|
||||
struct amdgpu_vram_mgr_resource *vres;
|
||||
u64 size, remaining_size, lpfn, fpfn;
|
||||
struct drm_buddy *mm = &mgr->mm;
|
||||
@ -474,6 +474,9 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
|
||||
if (place->flags & TTM_PL_FLAG_TOPDOWN)
|
||||
vres->flags |= DRM_BUDDY_TOPDOWN_ALLOCATION;
|
||||
|
||||
if (place->flags & TTM_PL_FLAG_CONTIGUOUS)
|
||||
vres->flags |= DRM_BUDDY_CONTIGUOUS_ALLOCATION;
|
||||
|
||||
if (fpfn || lpfn != mgr->mm.size)
|
||||
/* Allocate blocks in desired range */
|
||||
vres->flags |= DRM_BUDDY_RANGE_ALLOCATION;
|
||||
@ -496,25 +499,6 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
|
||||
!(size & (((u64)pages_per_block << PAGE_SHIFT) - 1)))
|
||||
min_block_size = (u64)pages_per_block << PAGE_SHIFT;
|
||||
|
||||
cur_size = size;
|
||||
|
||||
if (fpfn + size != (u64)place->lpfn << PAGE_SHIFT) {
|
||||
/*
|
||||
* Except for actual range allocation, modify the size and
|
||||
* min_block_size conforming to continuous flag enablement
|
||||
*/
|
||||
if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
|
||||
size = roundup_pow_of_two(size);
|
||||
min_block_size = size;
|
||||
/*
|
||||
* Modify the size value if size is not
|
||||
* aligned with min_block_size
|
||||
*/
|
||||
} else if (!IS_ALIGNED(size, min_block_size)) {
|
||||
size = round_up(size, min_block_size);
|
||||
}
|
||||
}
|
||||
|
||||
r = drm_buddy_alloc_blocks(mm, fpfn,
|
||||
lpfn,
|
||||
size,
|
||||
@ -531,40 +515,6 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
|
||||
}
|
||||
mutex_unlock(&mgr->lock);
|
||||
|
||||
if (cur_size != size) {
|
||||
struct drm_buddy_block *block;
|
||||
struct list_head *trim_list;
|
||||
u64 original_size;
|
||||
LIST_HEAD(temp);
|
||||
|
||||
trim_list = &vres->blocks;
|
||||
original_size = (u64)vres->base.size;
|
||||
|
||||
/*
|
||||
* If size value is rounded up to min_block_size, trim the last
|
||||
* block to the required size
|
||||
*/
|
||||
if (!list_is_singular(&vres->blocks)) {
|
||||
block = list_last_entry(&vres->blocks, typeof(*block), link);
|
||||
list_move_tail(&block->link, &temp);
|
||||
trim_list = &temp;
|
||||
/*
|
||||
* Compute the original_size value by subtracting the
|
||||
* last block size with (aligned size - original size)
|
||||
*/
|
||||
original_size = amdgpu_vram_mgr_block_size(block) - (size - cur_size);
|
||||
}
|
||||
|
||||
mutex_lock(&mgr->lock);
|
||||
drm_buddy_block_trim(mm,
|
||||
original_size,
|
||||
trim_list);
|
||||
mutex_unlock(&mgr->lock);
|
||||
|
||||
if (!list_empty(&temp))
|
||||
list_splice_tail(trim_list, &vres->blocks);
|
||||
}
|
||||
|
||||
vres->base.start = 0;
|
||||
list_for_each_entry(block, &vres->blocks, link) {
|
||||
unsigned long start;
|
||||
|
@ -343,7 +343,7 @@ bool dm_helpers_dp_mst_send_payload_allocation(
|
||||
struct amdgpu_dm_connector *aconnector;
|
||||
struct drm_dp_mst_topology_state *mst_state;
|
||||
struct drm_dp_mst_topology_mgr *mst_mgr;
|
||||
struct drm_dp_mst_atomic_payload *new_payload, *old_payload;
|
||||
struct drm_dp_mst_atomic_payload *new_payload, old_payload;
|
||||
enum mst_progress_status set_flag = MST_ALLOCATE_NEW_PAYLOAD;
|
||||
enum mst_progress_status clr_flag = MST_CLEAR_ALLOCATED_PAYLOAD;
|
||||
int ret = 0;
|
||||
@ -367,8 +367,8 @@ bool dm_helpers_dp_mst_send_payload_allocation(
|
||||
ret = drm_dp_add_payload_part2(mst_mgr, mst_state->base.state, new_payload);
|
||||
} else {
|
||||
dm_helpers_construct_old_payload(stream->link, mst_state->pbn_div,
|
||||
new_payload, old_payload);
|
||||
drm_dp_remove_payload_part2(mst_mgr, mst_state, old_payload, new_payload);
|
||||
new_payload, &old_payload);
|
||||
drm_dp_remove_payload_part2(mst_mgr, mst_state, &old_payload, new_payload);
|
||||
}
|
||||
|
||||
if (ret) {
|
||||
|
@ -521,7 +521,7 @@ static struct komeda_format_caps d71_format_caps_table[] = {
|
||||
{__HW_ID(5, 1), DRM_FORMAT_YUYV, RICH, Rot_ALL_H_V, LYT_NM, AFB_TH}, /* afbc */
|
||||
{__HW_ID(5, 2), DRM_FORMAT_YUYV, RICH, Flip_H_V, 0, 0},
|
||||
{__HW_ID(5, 3), DRM_FORMAT_UYVY, RICH, Flip_H_V, 0, 0},
|
||||
{__HW_ID(5, 6), DRM_FORMAT_NV12, RICH, Flip_H_V, 0, 0},
|
||||
{__HW_ID(5, 6), DRM_FORMAT_NV12, RICH_WB, Flip_H_V, 0, 0},
|
||||
{__HW_ID(5, 6), DRM_FORMAT_YUV420_8BIT, RICH, Rot_ALL_H_V, LYT_NM, AFB_TH}, /* afbc */
|
||||
{__HW_ID(5, 7), DRM_FORMAT_YUV420, RICH, Flip_H_V, 0, 0},
|
||||
/* YUV 10bit*/
|
||||
|
@ -45,6 +45,14 @@ static void komeda_platform_remove(struct platform_device *pdev)
|
||||
devm_kfree(dev, mdrv);
|
||||
}
|
||||
|
||||
static void komeda_platform_shutdown(struct platform_device *pdev)
|
||||
{
|
||||
struct device *dev = &pdev->dev;
|
||||
struct komeda_drv *mdrv = dev_get_drvdata(dev);
|
||||
|
||||
komeda_kms_shutdown(mdrv->kms);
|
||||
}
|
||||
|
||||
static int komeda_platform_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct device *dev = &pdev->dev;
|
||||
@ -142,6 +150,7 @@ static const struct dev_pm_ops komeda_pm_ops = {
|
||||
static struct platform_driver komeda_platform_driver = {
|
||||
.probe = komeda_platform_probe,
|
||||
.remove_new = komeda_platform_remove,
|
||||
.shutdown = komeda_platform_shutdown,
|
||||
.driver = {
|
||||
.name = "komeda",
|
||||
.of_match_table = komeda_of_match,
|
||||
|
@ -340,3 +340,10 @@ void komeda_kms_detach(struct komeda_kms_dev *kms)
|
||||
komeda_kms_cleanup_private_objs(kms);
|
||||
drm->dev_private = NULL;
|
||||
}
|
||||
|
||||
void komeda_kms_shutdown(struct komeda_kms_dev *kms)
|
||||
{
|
||||
struct drm_device *drm = &kms->base;
|
||||
|
||||
drm_atomic_helper_shutdown(drm);
|
||||
}
|
||||
|
@ -190,5 +190,6 @@ void komeda_crtc_flush_and_wait_for_flip_done(struct komeda_crtc *kcrtc,
|
||||
|
||||
struct komeda_kms_dev *komeda_kms_attach(struct komeda_dev *mdev);
|
||||
void komeda_kms_detach(struct komeda_kms_dev *kms);
|
||||
void komeda_kms_shutdown(struct komeda_kms_dev *kms);
|
||||
|
||||
#endif /*_KOMEDA_KMS_H_*/
|
||||
|
@ -372,6 +372,11 @@ static void hdlcd_remove(struct platform_device *pdev)
|
||||
component_master_del(&pdev->dev, &hdlcd_master_ops);
|
||||
}
|
||||
|
||||
static void hdlcd_shutdown(struct platform_device *pdev)
|
||||
{
|
||||
drm_atomic_helper_shutdown(platform_get_drvdata(pdev));
|
||||
}
|
||||
|
||||
static const struct of_device_id hdlcd_of_match[] = {
|
||||
{ .compatible = "arm,hdlcd" },
|
||||
{},
|
||||
@ -399,6 +404,7 @@ static SIMPLE_DEV_PM_OPS(hdlcd_pm_ops, hdlcd_pm_suspend, hdlcd_pm_resume);
|
||||
static struct platform_driver hdlcd_platform_driver = {
|
||||
.probe = hdlcd_probe,
|
||||
.remove_new = hdlcd_remove,
|
||||
.shutdown = hdlcd_shutdown,
|
||||
.driver = {
|
||||
.name = "hdlcd",
|
||||
.pm = &hdlcd_pm_ops,
|
||||
|
@ -941,6 +941,11 @@ static void malidp_platform_remove(struct platform_device *pdev)
|
||||
component_master_del(&pdev->dev, &malidp_master_ops);
|
||||
}
|
||||
|
||||
static void malidp_platform_shutdown(struct platform_device *pdev)
|
||||
{
|
||||
drm_atomic_helper_shutdown(platform_get_drvdata(pdev));
|
||||
}
|
||||
|
||||
static int __maybe_unused malidp_pm_suspend(struct device *dev)
|
||||
{
|
||||
struct drm_device *drm = dev_get_drvdata(dev);
|
||||
@ -982,6 +987,7 @@ static const struct dev_pm_ops malidp_pm_ops = {
|
||||
static struct platform_driver malidp_platform_driver = {
|
||||
.probe = malidp_platform_probe,
|
||||
.remove_new = malidp_platform_remove,
|
||||
.shutdown = malidp_platform_shutdown,
|
||||
.driver = {
|
||||
.name = "mali-dp",
|
||||
.pm = &malidp_pm_ops,
|
||||
|
@ -148,6 +148,7 @@ static int armada_drm_bind(struct device *dev)
|
||||
err_kms:
|
||||
drm_mode_config_cleanup(&priv->drm);
|
||||
drm_mm_takedown(&priv->linear);
|
||||
dev_set_drvdata(dev, NULL);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -166,6 +167,7 @@ static void armada_drm_unbind(struct device *dev)
|
||||
|
||||
drm_mode_config_cleanup(&priv->drm);
|
||||
drm_mm_takedown(&priv->linear);
|
||||
dev_set_drvdata(dev, NULL);
|
||||
}
|
||||
|
||||
static void armada_add_endpoints(struct device *dev,
|
||||
@ -230,6 +232,11 @@ static int armada_drm_remove(struct platform_device *pdev)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void armada_drm_shutdown(struct platform_device *pdev)
|
||||
{
|
||||
drm_atomic_helper_shutdown(platform_get_drvdata(pdev));
|
||||
}
|
||||
|
||||
static const struct platform_device_id armada_drm_platform_ids[] = {
|
||||
{
|
||||
.name = "armada-drm",
|
||||
@ -243,6 +250,7 @@ MODULE_DEVICE_TABLE(platform, armada_drm_platform_ids);
|
||||
static struct platform_driver armada_drm_platform_driver = {
|
||||
.probe = armada_drm_probe,
|
||||
.remove = armada_drm_remove,
|
||||
.shutdown = armada_drm_shutdown,
|
||||
.driver = {
|
||||
.name = "armada-drm",
|
||||
},
|
||||
|
@ -358,11 +358,18 @@ static void aspeed_gfx_remove(struct platform_device *pdev)
|
||||
sysfs_remove_group(&pdev->dev.kobj, &aspeed_sysfs_attr_group);
|
||||
drm_dev_unregister(drm);
|
||||
aspeed_gfx_unload(drm);
|
||||
drm_atomic_helper_shutdown(drm);
|
||||
}
|
||||
|
||||
static void aspeed_gfx_shutdown(struct platform_device *pdev)
|
||||
{
|
||||
drm_atomic_helper_shutdown(platform_get_drvdata(pdev));
|
||||
}
|
||||
|
||||
static struct platform_driver aspeed_gfx_platform_driver = {
|
||||
.probe = aspeed_gfx_probe,
|
||||
.remove_new = aspeed_gfx_remove,
|
||||
.shutdown = aspeed_gfx_shutdown,
|
||||
.driver = {
|
||||
.name = "aspeed_gfx",
|
||||
.of_match_table = aspeed_gfx_match,
|
||||
|
@ -125,6 +125,11 @@ static void ast_pci_remove(struct pci_dev *pdev)
|
||||
drm_atomic_helper_shutdown(dev);
|
||||
}
|
||||
|
||||
static void ast_pci_shutdown(struct pci_dev *pdev)
|
||||
{
|
||||
drm_atomic_helper_shutdown(pci_get_drvdata(pdev));
|
||||
}
|
||||
|
||||
static int ast_drm_freeze(struct drm_device *dev)
|
||||
{
|
||||
int error;
|
||||
@ -209,6 +214,7 @@ static struct pci_driver ast_pci_driver = {
|
||||
.id_table = ast_pciidlist,
|
||||
.probe = ast_pci_probe,
|
||||
.remove = ast_pci_remove,
|
||||
.shutdown = ast_pci_shutdown,
|
||||
.driver.pm = &ast_pm_ops,
|
||||
};
|
||||
|
||||
|
@ -782,6 +782,11 @@ static void atmel_hlcdc_dc_drm_remove(struct platform_device *pdev)
|
||||
drm_dev_put(ddev);
|
||||
}
|
||||
|
||||
static void atmel_hlcdc_dc_drm_shutdown(struct platform_device *pdev)
|
||||
{
|
||||
drm_atomic_helper_shutdown(platform_get_drvdata(pdev));
|
||||
}
|
||||
|
||||
static int atmel_hlcdc_dc_drm_suspend(struct device *dev)
|
||||
{
|
||||
struct drm_device *drm_dev = dev_get_drvdata(dev);
|
||||
@ -825,6 +830,7 @@ static const struct of_device_id atmel_hlcdc_dc_of_match[] = {
|
||||
static struct platform_driver atmel_hlcdc_dc_platform_driver = {
|
||||
.probe = atmel_hlcdc_dc_drm_probe,
|
||||
.remove_new = atmel_hlcdc_dc_drm_remove,
|
||||
.shutdown = atmel_hlcdc_dc_drm_shutdown,
|
||||
.driver = {
|
||||
.name = "atmel-hlcdc-display-controller",
|
||||
.pm = pm_sleep_ptr(&atmel_hlcdc_dc_drm_pm_ops),
|
||||
|
@ -1211,6 +1211,7 @@ static const u16 anx78xx_chipid_list[] = {
|
||||
0x7808,
|
||||
0x7812,
|
||||
0x7814,
|
||||
0x7816,
|
||||
0x7818,
|
||||
};
|
||||
|
||||
@ -1369,6 +1370,7 @@ static const struct of_device_id anx78xx_match_table[] = {
|
||||
{ .compatible = "analogix,anx7808", .data = anx7808_i2c_addresses },
|
||||
{ .compatible = "analogix,anx7812", .data = anx781x_i2c_addresses },
|
||||
{ .compatible = "analogix,anx7814", .data = anx781x_i2c_addresses },
|
||||
{ .compatible = "analogix,anx7816", .data = anx781x_i2c_addresses },
|
||||
{ .compatible = "analogix,anx7818", .data = anx781x_i2c_addresses },
|
||||
{ /* sentinel */ },
|
||||
};
|
||||
|
@ -1464,6 +1464,9 @@ static int _anx7625_hpd_polling(struct anx7625_data *ctx,
|
||||
if (ctx->pdata.intp_irq)
|
||||
return 0;
|
||||
|
||||
/* Delay 200ms for FW HPD de-bounce */
|
||||
msleep(200);
|
||||
|
||||
ret = readx_poll_timeout(anx7625_read_hpd_status_p0,
|
||||
ctx, val,
|
||||
((val & HPD_STATUS) || (val < 0)),
|
||||
|
@ -884,14 +884,14 @@ static struct edid *it66121_bridge_get_edid(struct drm_bridge *bridge,
|
||||
mutex_lock(&ctx->lock);
|
||||
ret = it66121_preamble_ddc(ctx);
|
||||
if (ret) {
|
||||
edid = ERR_PTR(ret);
|
||||
edid = NULL;
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
ret = regmap_write(ctx->regmap, IT66121_DDC_HEADER_REG,
|
||||
IT66121_DDC_HEADER_EDID);
|
||||
if (ret) {
|
||||
edid = ERR_PTR(ret);
|
||||
edid = NULL;
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
|
@ -67,14 +67,6 @@ static int panel_bridge_attach(struct drm_bridge *bridge,
|
||||
struct drm_device *drm_dev = bridge->dev;
|
||||
int ret;
|
||||
|
||||
if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)
|
||||
return 0;
|
||||
|
||||
if (!bridge->encoder) {
|
||||
DRM_ERROR("Missing encoder\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
panel_bridge->link = device_link_add(drm_dev->dev, panel->dev,
|
||||
DL_FLAG_STATELESS);
|
||||
if (!panel_bridge->link) {
|
||||
@ -83,6 +75,15 @@ static int panel_bridge_attach(struct drm_bridge *bridge,
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)
|
||||
return 0;
|
||||
|
||||
if (!bridge->encoder) {
|
||||
DRM_ERROR("Missing encoder\n");
|
||||
device_link_del(panel_bridge->link);
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
drm_connector_helper_add(connector,
|
||||
&panel_bridge_connector_helper_funcs);
|
||||
|
||||
|
@ -145,6 +145,10 @@ static irqreturn_t dw_hdmi_cec_hardirq(int irq, void *data)
|
||||
cec->tx_status = CEC_TX_STATUS_NACK;
|
||||
cec->tx_done = true;
|
||||
ret = IRQ_WAKE_THREAD;
|
||||
} else if (stat & CEC_STAT_ARBLOST) {
|
||||
cec->tx_status = CEC_TX_STATUS_ARB_LOST;
|
||||
cec->tx_done = true;
|
||||
ret = IRQ_WAKE_THREAD;
|
||||
}
|
||||
|
||||
if (stat & CEC_STAT_EOM) {
|
||||
@ -209,7 +213,7 @@ static int dw_hdmi_cec_enable(struct cec_adapter *adap, bool enable)
|
||||
cec->ops->enable(cec->hdmi);
|
||||
|
||||
irqs = CEC_STAT_ERROR_INIT | CEC_STAT_NACK | CEC_STAT_EOM |
|
||||
CEC_STAT_DONE;
|
||||
CEC_STAT_ARBLOST | CEC_STAT_DONE;
|
||||
dw_hdmi_write(cec, irqs, HDMI_CEC_POLARITY);
|
||||
dw_hdmi_write(cec, ~irqs, HDMI_CEC_MASK);
|
||||
dw_hdmi_write(cec, ~irqs, HDMI_IH_MUTE_CEC_STAT0);
|
||||
|
@ -9,12 +9,14 @@
|
||||
#include <linux/gpio/consumer.h>
|
||||
#include <linux/i2c.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/math64.h>
|
||||
#include <linux/media-bus-format.h>
|
||||
#include <linux/minmax.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/regmap.h>
|
||||
#include <linux/regulator/consumer.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/units.h>
|
||||
|
||||
#include <drm/drm_atomic_helper.h>
|
||||
#include <drm/drm_drv.h>
|
||||
@ -156,6 +158,7 @@ struct tc358768_priv {
|
||||
u32 frs; /* PLL Freqency range for HSCK (post divider) */
|
||||
|
||||
u32 dsiclk; /* pll_clk / 2 */
|
||||
u32 pclk; /* incoming pclk rate */
|
||||
};
|
||||
|
||||
static inline struct tc358768_priv *dsi_host_to_tc358768(struct mipi_dsi_host
|
||||
@ -216,6 +219,10 @@ static void tc358768_update_bits(struct tc358768_priv *priv, u32 reg, u32 mask,
|
||||
u32 tmp, orig;
|
||||
|
||||
tc358768_read(priv, reg, &orig);
|
||||
|
||||
if (priv->error)
|
||||
return;
|
||||
|
||||
tmp = orig & ~mask;
|
||||
tmp |= val & mask;
|
||||
if (tmp != orig)
|
||||
@ -312,7 +319,7 @@ static int tc358768_calc_pll(struct tc358768_priv *priv,
|
||||
|
||||
target_pll = tc358768_pclk_to_pll(priv, mode->clock * 1000);
|
||||
|
||||
/* pll_clk = RefClk * [(FBD + 1)/ (PRD + 1)] * [1 / (2^FRS)] */
|
||||
/* pll_clk = RefClk * FBD / PRD * (1 / (2^FRS)) */
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(frs_limits); i++)
|
||||
if (target_pll >= frs_limits[i])
|
||||
@ -332,19 +339,19 @@ static int tc358768_calc_pll(struct tc358768_priv *priv,
|
||||
best_prd = 0;
|
||||
best_fbd = 0;
|
||||
|
||||
for (prd = 0; prd < 16; ++prd) {
|
||||
u32 divisor = (prd + 1) * (1 << frs);
|
||||
for (prd = 1; prd <= 16; ++prd) {
|
||||
u32 divisor = prd * (1 << frs);
|
||||
u32 fbd;
|
||||
|
||||
for (fbd = 0; fbd < 512; ++fbd) {
|
||||
for (fbd = 1; fbd <= 512; ++fbd) {
|
||||
u32 pll, diff, pll_in;
|
||||
|
||||
pll = (u32)div_u64((u64)refclk * (fbd + 1), divisor);
|
||||
pll = (u32)div_u64((u64)refclk * fbd, divisor);
|
||||
|
||||
if (pll >= max_pll || pll < min_pll)
|
||||
continue;
|
||||
|
||||
pll_in = (u32)div_u64((u64)refclk, prd + 1);
|
||||
pll_in = (u32)div_u64((u64)refclk, prd);
|
||||
if (pll_in < 4000000)
|
||||
continue;
|
||||
|
||||
@ -375,6 +382,7 @@ found:
|
||||
priv->prd = best_prd;
|
||||
priv->frs = frs;
|
||||
priv->dsiclk = best_pll / 2;
|
||||
priv->pclk = mode->clock * 1000;
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -600,14 +608,14 @@ static int tc358768_setup_pll(struct tc358768_priv *priv,
|
||||
|
||||
dev_dbg(priv->dev, "PLL: refclk %lu, fbd %u, prd %u, frs %u\n",
|
||||
clk_get_rate(priv->refclk), fbd, prd, frs);
|
||||
dev_dbg(priv->dev, "PLL: pll_clk: %u, DSIClk %u, DSIByteClk %u\n",
|
||||
dev_dbg(priv->dev, "PLL: pll_clk: %u, DSIClk %u, HSByteClk %u\n",
|
||||
priv->dsiclk * 2, priv->dsiclk, priv->dsiclk / 4);
|
||||
dev_dbg(priv->dev, "PLL: pclk %u (panel: %u)\n",
|
||||
tc358768_pll_to_pclk(priv, priv->dsiclk * 2),
|
||||
mode->clock * 1000);
|
||||
|
||||
/* PRD[15:12] FBD[8:0] */
|
||||
tc358768_write(priv, TC358768_PLLCTL0, (prd << 12) | fbd);
|
||||
tc358768_write(priv, TC358768_PLLCTL0, ((prd - 1) << 12) | (fbd - 1));
|
||||
|
||||
/* FRS[11:10] LBWS[9:8] CKEN[4] RESETB[1] EN[0] */
|
||||
tc358768_write(priv, TC358768_PLLCTL1,
|
||||
@ -623,15 +631,36 @@ static int tc358768_setup_pll(struct tc358768_priv *priv,
|
||||
return tc358768_clear_error(priv);
|
||||
}
|
||||
|
||||
#define TC358768_PRECISION 1000
|
||||
static u32 tc358768_ns_to_cnt(u32 ns, u32 period_nsk)
|
||||
static u32 tc358768_ns_to_cnt(u32 ns, u32 period_ps)
|
||||
{
|
||||
return (ns * TC358768_PRECISION + period_nsk) / period_nsk;
|
||||
return DIV_ROUND_UP(ns * 1000, period_ps);
|
||||
}
|
||||
|
||||
static u32 tc358768_to_ns(u32 nsk)
|
||||
static u32 tc358768_ps_to_ns(u32 ps)
|
||||
{
|
||||
return (nsk / TC358768_PRECISION);
|
||||
return ps / 1000;
|
||||
}
|
||||
|
||||
static u32 tc358768_dpi_to_ns(u32 val, u32 pclk)
|
||||
{
|
||||
return (u32)div_u64((u64)val * NANO, pclk);
|
||||
}
|
||||
|
||||
/* Convert value in DPI pixel clock units to DSI byte count */
|
||||
static u32 tc358768_dpi_to_dsi_bytes(struct tc358768_priv *priv, u32 val)
|
||||
{
|
||||
u64 m = (u64)val * priv->dsiclk / 4 * priv->dsi_lanes;
|
||||
u64 n = priv->pclk;
|
||||
|
||||
return (u32)div_u64(m + n - 1, n);
|
||||
}
|
||||
|
||||
static u32 tc358768_dsi_bytes_to_ns(struct tc358768_priv *priv, u32 val)
|
||||
{
|
||||
u64 m = (u64)val * NANO;
|
||||
u64 n = priv->dsiclk / 4 * priv->dsi_lanes;
|
||||
|
||||
return (u32)div_u64(m, n);
|
||||
}
|
||||
|
||||
static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
|
||||
@ -642,13 +671,23 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
|
||||
u32 val, val2, lptxcnt, hact, data_type;
|
||||
s32 raw_val;
|
||||
const struct drm_display_mode *mode;
|
||||
u32 dsibclk_nsk, dsiclk_nsk, ui_nsk;
|
||||
u32 dsiclk, dsibclk, video_start;
|
||||
const u32 internal_delay = 40;
|
||||
u32 hsbyteclk_ps, dsiclk_ps, ui_ps;
|
||||
u32 dsiclk, hsbyteclk;
|
||||
int ret, i;
|
||||
struct videomode vm;
|
||||
struct device *dev = priv->dev;
|
||||
/* In pixelclock units */
|
||||
u32 dpi_htot, dpi_data_start;
|
||||
/* In byte units */
|
||||
u32 dsi_dpi_htot, dsi_dpi_data_start;
|
||||
u32 dsi_hsw, dsi_hbp, dsi_hact, dsi_hfp;
|
||||
const u32 dsi_hss = 4; /* HSS is a short packet (4 bytes) */
|
||||
/* In hsbyteclk units */
|
||||
u32 dsi_vsdly;
|
||||
const u32 internal_dly = 40;
|
||||
|
||||
if (mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) {
|
||||
dev_warn_once(priv->dev, "Non-continuous mode unimplemented, falling back to continuous\n");
|
||||
dev_warn_once(dev, "Non-continuous mode unimplemented, falling back to continuous\n");
|
||||
mode_flags &= ~MIPI_DSI_CLOCK_NON_CONTINUOUS;
|
||||
}
|
||||
|
||||
@ -656,7 +695,7 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
|
||||
|
||||
ret = tc358768_sw_reset(priv);
|
||||
if (ret) {
|
||||
dev_err(priv->dev, "Software reset failed: %d\n", ret);
|
||||
dev_err(dev, "Software reset failed: %d\n", ret);
|
||||
tc358768_hw_disable(priv);
|
||||
return;
|
||||
}
|
||||
@ -664,53 +703,194 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
|
||||
mode = &bridge->encoder->crtc->state->adjusted_mode;
|
||||
ret = tc358768_setup_pll(priv, mode);
|
||||
if (ret) {
|
||||
dev_err(priv->dev, "PLL setup failed: %d\n", ret);
|
||||
dev_err(dev, "PLL setup failed: %d\n", ret);
|
||||
tc358768_hw_disable(priv);
|
||||
return;
|
||||
}
|
||||
|
||||
drm_display_mode_to_videomode(mode, &vm);
|
||||
|
||||
dsiclk = priv->dsiclk;
|
||||
dsibclk = dsiclk / 4;
|
||||
hsbyteclk = dsiclk / 4;
|
||||
|
||||
/* Data Format Control Register */
|
||||
val = BIT(2) | BIT(1) | BIT(0); /* rdswap_en | dsitx_en | txdt_en */
|
||||
switch (dsi_dev->format) {
|
||||
case MIPI_DSI_FMT_RGB888:
|
||||
val |= (0x3 << 4);
|
||||
hact = mode->hdisplay * 3;
|
||||
video_start = (mode->htotal - mode->hsync_start) * 3;
|
||||
hact = vm.hactive * 3;
|
||||
data_type = MIPI_DSI_PACKED_PIXEL_STREAM_24;
|
||||
break;
|
||||
case MIPI_DSI_FMT_RGB666:
|
||||
val |= (0x4 << 4);
|
||||
hact = mode->hdisplay * 3;
|
||||
video_start = (mode->htotal - mode->hsync_start) * 3;
|
||||
hact = vm.hactive * 3;
|
||||
data_type = MIPI_DSI_PACKED_PIXEL_STREAM_18;
|
||||
break;
|
||||
|
||||
case MIPI_DSI_FMT_RGB666_PACKED:
|
||||
val |= (0x4 << 4) | BIT(3);
|
||||
hact = mode->hdisplay * 18 / 8;
|
||||
video_start = (mode->htotal - mode->hsync_start) * 18 / 8;
|
||||
hact = vm.hactive * 18 / 8;
|
||||
data_type = MIPI_DSI_PIXEL_STREAM_3BYTE_18;
|
||||
break;
|
||||
|
||||
case MIPI_DSI_FMT_RGB565:
|
||||
val |= (0x5 << 4);
|
||||
hact = mode->hdisplay * 2;
|
||||
video_start = (mode->htotal - mode->hsync_start) * 2;
|
||||
hact = vm.hactive * 2;
|
||||
data_type = MIPI_DSI_PACKED_PIXEL_STREAM_16;
|
||||
break;
|
||||
default:
|
||||
dev_err(priv->dev, "Invalid data format (%u)\n",
|
||||
dev_err(dev, "Invalid data format (%u)\n",
|
||||
dsi_dev->format);
|
||||
tc358768_hw_disable(priv);
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* There are three important things to make TC358768 work correctly,
|
||||
* which are not trivial to manage:
|
||||
*
|
||||
* 1. Keep the DPI line-time and the DSI line-time as close to each
|
||||
* other as possible.
|
||||
* 2. TC358768 goes to LP mode after each line's active area. The DSI
|
||||
* HFP period has to be long enough for entering and exiting LP mode.
|
||||
* But it is not clear how to calculate this.
|
||||
* 3. VSDly (video start delay) has to be long enough to ensure that the
|
||||
* DSI TX does not start transmitting until we have started receiving
|
||||
* pixel data from the DPI input. It is not clear how to calculate
|
||||
* this either.
|
||||
*/
|
||||
|
||||
dpi_htot = vm.hactive + vm.hfront_porch + vm.hsync_len + vm.hback_porch;
|
||||
dpi_data_start = vm.hsync_len + vm.hback_porch;
|
||||
|
||||
dev_dbg(dev, "dpi horiz timing (pclk): %u + %u + %u + %u = %u\n",
|
||||
vm.hsync_len, vm.hback_porch, vm.hactive, vm.hfront_porch,
|
||||
dpi_htot);
|
||||
|
||||
dev_dbg(dev, "dpi horiz timing (ns): %u + %u + %u + %u = %u\n",
|
||||
tc358768_dpi_to_ns(vm.hsync_len, vm.pixelclock),
|
||||
tc358768_dpi_to_ns(vm.hback_porch, vm.pixelclock),
|
||||
tc358768_dpi_to_ns(vm.hactive, vm.pixelclock),
|
||||
tc358768_dpi_to_ns(vm.hfront_porch, vm.pixelclock),
|
||||
tc358768_dpi_to_ns(dpi_htot, vm.pixelclock));
|
||||
|
||||
dev_dbg(dev, "dpi data start (ns): %u + %u = %u\n",
|
||||
tc358768_dpi_to_ns(vm.hsync_len, vm.pixelclock),
|
||||
tc358768_dpi_to_ns(vm.hback_porch, vm.pixelclock),
|
||||
tc358768_dpi_to_ns(dpi_data_start, vm.pixelclock));
|
||||
|
||||
dsi_dpi_htot = tc358768_dpi_to_dsi_bytes(priv, dpi_htot);
|
||||
dsi_dpi_data_start = tc358768_dpi_to_dsi_bytes(priv, dpi_data_start);
|
||||
|
||||
if (dsi_dev->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) {
|
||||
dsi_hsw = tc358768_dpi_to_dsi_bytes(priv, vm.hsync_len);
|
||||
dsi_hbp = tc358768_dpi_to_dsi_bytes(priv, vm.hback_porch);
|
||||
} else {
|
||||
/* HBP is included in HSW in event mode */
|
||||
dsi_hbp = 0;
|
||||
dsi_hsw = tc358768_dpi_to_dsi_bytes(priv,
|
||||
vm.hsync_len +
|
||||
vm.hback_porch);
|
||||
|
||||
/*
|
||||
* The pixel packet includes the actual pixel data, and:
|
||||
* DSI packet header = 4 bytes
|
||||
* DCS code = 1 byte
|
||||
* DSI packet footer = 2 bytes
|
||||
*/
|
||||
dsi_hact = hact + 4 + 1 + 2;
|
||||
|
||||
dsi_hfp = dsi_dpi_htot - dsi_hact - dsi_hsw - dsi_hss;
|
||||
|
||||
/*
|
||||
* Here we should check if HFP is long enough for entering LP
|
||||
* and exiting LP, but it's not clear how to calculate that.
|
||||
* Instead, this is a naive algorithm that just adjusts the HFP
|
||||
* and HSW so that HFP is (at least) roughly 2/3 of the total
|
||||
* blanking time.
|
||||
*/
|
||||
if (dsi_hfp < (dsi_hfp + dsi_hsw + dsi_hss) * 2 / 3) {
|
||||
u32 old_hfp = dsi_hfp;
|
||||
u32 old_hsw = dsi_hsw;
|
||||
u32 tot = dsi_hfp + dsi_hsw + dsi_hss;
|
||||
|
||||
dsi_hsw = tot / 3;
|
||||
|
||||
/*
|
||||
* Seems like sometimes HSW has to be divisible by num-lanes, but
|
||||
* not always...
|
||||
*/
|
||||
dsi_hsw = roundup(dsi_hsw, priv->dsi_lanes);
|
||||
|
||||
dsi_hfp = dsi_dpi_htot - dsi_hact - dsi_hsw - dsi_hss;
|
||||
|
||||
dev_dbg(dev,
|
||||
"hfp too short, adjusting dsi hfp and dsi hsw from %u, %u to %u, %u\n",
|
||||
old_hfp, old_hsw, dsi_hfp, dsi_hsw);
|
||||
}
|
||||
|
||||
dev_dbg(dev,
|
||||
"dsi horiz timing (bytes): %u, %u + %u + %u + %u = %u\n",
|
||||
dsi_hss, dsi_hsw, dsi_hbp, dsi_hact, dsi_hfp,
|
||||
dsi_hss + dsi_hsw + dsi_hbp + dsi_hact + dsi_hfp);
|
||||
|
||||
dev_dbg(dev, "dsi horiz timing (ns): %u + %u + %u + %u + %u = %u\n",
|
||||
tc358768_dsi_bytes_to_ns(priv, dsi_hss),
|
||||
tc358768_dsi_bytes_to_ns(priv, dsi_hsw),
|
||||
tc358768_dsi_bytes_to_ns(priv, dsi_hbp),
|
||||
tc358768_dsi_bytes_to_ns(priv, dsi_hact),
|
||||
tc358768_dsi_bytes_to_ns(priv, dsi_hfp),
|
||||
tc358768_dsi_bytes_to_ns(priv, dsi_hss + dsi_hsw +
|
||||
dsi_hbp + dsi_hact + dsi_hfp));
|
||||
}
|
||||
|
||||
/* VSDly calculation */
|
||||
|
||||
/* Start with the HW internal delay */
|
||||
dsi_vsdly = internal_dly;
|
||||
|
||||
/* Convert to byte units as the other variables are in byte units */
|
||||
dsi_vsdly *= priv->dsi_lanes;
|
||||
|
||||
/* Do we need more delay, in addition to the internal? */
|
||||
if (dsi_dpi_data_start > dsi_vsdly + dsi_hss + dsi_hsw + dsi_hbp) {
|
||||
dsi_vsdly = dsi_dpi_data_start - dsi_hss - dsi_hsw - dsi_hbp;
|
||||
dsi_vsdly = roundup(dsi_vsdly, priv->dsi_lanes);
|
||||
}
|
||||
|
||||
dev_dbg(dev, "dsi data start (bytes) %u + %u + %u + %u = %u\n",
|
||||
dsi_vsdly, dsi_hss, dsi_hsw, dsi_hbp,
|
||||
dsi_vsdly + dsi_hss + dsi_hsw + dsi_hbp);
|
||||
|
||||
dev_dbg(dev, "dsi data start (ns) %u + %u + %u + %u = %u\n",
|
||||
tc358768_dsi_bytes_to_ns(priv, dsi_vsdly),
|
||||
tc358768_dsi_bytes_to_ns(priv, dsi_hss),
|
||||
tc358768_dsi_bytes_to_ns(priv, dsi_hsw),
|
||||
tc358768_dsi_bytes_to_ns(priv, dsi_hbp),
|
||||
tc358768_dsi_bytes_to_ns(priv, dsi_vsdly + dsi_hss + dsi_hsw + dsi_hbp));
|
||||
|
||||
/* Convert back to hsbyteclk */
|
||||
dsi_vsdly /= priv->dsi_lanes;
|
||||
|
||||
/*
|
||||
* The docs say that there is an internal delay of 40 cycles.
|
||||
* However, we get underflows if we follow that rule. If we
|
||||
* instead ignore the internal delay, things work. So either
|
||||
* the docs are wrong or the calculations are wrong.
|
||||
*
|
||||
* As a temporary fix, add the internal delay here, to counter
|
||||
* the subtraction when writing the register.
|
||||
*/
|
||||
dsi_vsdly += internal_dly;
|
||||
|
||||
/* Clamp to the register max */
|
||||
if (dsi_vsdly - internal_dly > 0x3ff) {
|
||||
dev_warn(dev, "VSDly too high, underflows likely\n");
|
||||
dsi_vsdly = 0x3ff + internal_dly;
|
||||
}
|
||||
|
||||
/* VSDly[9:0] */
|
||||
video_start = max(video_start, internal_delay + 1) - internal_delay;
|
||||
tc358768_write(priv, TC358768_VSDLY, video_start);
|
||||
tc358768_write(priv, TC358768_VSDLY, dsi_vsdly - internal_dly);
|
||||
|
||||
tc358768_write(priv, TC358768_DATAFMT, val);
|
||||
tc358768_write(priv, TC358768_DSITX_DT, data_type);
|
||||
@ -722,67 +902,67 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
|
||||
tc358768_write(priv, TC358768_D0W_CNTRL + i * 4, 0x0000);
|
||||
|
||||
/* DSI Timings */
|
||||
dsibclk_nsk = (u32)div_u64((u64)1000000000 * TC358768_PRECISION,
|
||||
dsibclk);
|
||||
dsiclk_nsk = (u32)div_u64((u64)1000000000 * TC358768_PRECISION, dsiclk);
|
||||
ui_nsk = dsiclk_nsk / 2;
|
||||
dev_dbg(priv->dev, "dsiclk_nsk: %u\n", dsiclk_nsk);
|
||||
dev_dbg(priv->dev, "ui_nsk: %u\n", ui_nsk);
|
||||
dev_dbg(priv->dev, "dsibclk_nsk: %u\n", dsibclk_nsk);
|
||||
hsbyteclk_ps = (u32)div_u64(PICO, hsbyteclk);
|
||||
dsiclk_ps = (u32)div_u64(PICO, dsiclk);
|
||||
ui_ps = dsiclk_ps / 2;
|
||||
dev_dbg(dev, "dsiclk: %u ps, ui %u ps, hsbyteclk %u ps\n", dsiclk_ps,
|
||||
ui_ps, hsbyteclk_ps);
|
||||
|
||||
/* LP11 > 100us for D-PHY Rx Init */
|
||||
val = tc358768_ns_to_cnt(100 * 1000, dsibclk_nsk) - 1;
|
||||
dev_dbg(priv->dev, "LINEINITCNT: 0x%x\n", val);
|
||||
val = tc358768_ns_to_cnt(100 * 1000, hsbyteclk_ps) - 1;
|
||||
dev_dbg(dev, "LINEINITCNT: %u\n", val);
|
||||
tc358768_write(priv, TC358768_LINEINITCNT, val);
|
||||
|
||||
/* LPTimeCnt > 50ns */
|
||||
val = tc358768_ns_to_cnt(50, dsibclk_nsk) - 1;
|
||||
val = tc358768_ns_to_cnt(50, hsbyteclk_ps) - 1;
|
||||
lptxcnt = val;
|
||||
dev_dbg(priv->dev, "LPTXTIMECNT: 0x%x\n", val);
|
||||
dev_dbg(dev, "LPTXTIMECNT: %u\n", val);
|
||||
tc358768_write(priv, TC358768_LPTXTIMECNT, val);
|
||||
|
||||
/* 38ns < TCLK_PREPARE < 95ns */
|
||||
val = tc358768_ns_to_cnt(65, dsibclk_nsk) - 1;
|
||||
val = tc358768_ns_to_cnt(65, hsbyteclk_ps) - 1;
|
||||
dev_dbg(dev, "TCLK_PREPARECNT %u\n", val);
|
||||
/* TCLK_PREPARE + TCLK_ZERO > 300ns */
|
||||
val2 = tc358768_ns_to_cnt(300 - tc358768_to_ns(2 * ui_nsk),
|
||||
dsibclk_nsk) - 2;
|
||||
val2 = tc358768_ns_to_cnt(300 - tc358768_ps_to_ns(2 * ui_ps),
|
||||
hsbyteclk_ps) - 2;
|
||||
dev_dbg(dev, "TCLK_ZEROCNT %u\n", val2);
|
||||
val |= val2 << 8;
|
||||
dev_dbg(priv->dev, "TCLK_HEADERCNT: 0x%x\n", val);
|
||||
tc358768_write(priv, TC358768_TCLK_HEADERCNT, val);
|
||||
|
||||
/* TCLK_TRAIL > 60ns AND TEOT <= 105 ns + 12*UI */
|
||||
raw_val = tc358768_ns_to_cnt(60 + tc358768_to_ns(2 * ui_nsk), dsibclk_nsk) - 5;
|
||||
raw_val = tc358768_ns_to_cnt(60 + tc358768_ps_to_ns(2 * ui_ps), hsbyteclk_ps) - 5;
|
||||
val = clamp(raw_val, 0, 127);
|
||||
dev_dbg(priv->dev, "TCLK_TRAILCNT: 0x%x\n", val);
|
||||
dev_dbg(dev, "TCLK_TRAILCNT: %u\n", val);
|
||||
tc358768_write(priv, TC358768_TCLK_TRAILCNT, val);
|
||||
|
||||
/* 40ns + 4*UI < THS_PREPARE < 85ns + 6*UI */
|
||||
val = 50 + tc358768_to_ns(4 * ui_nsk);
|
||||
val = tc358768_ns_to_cnt(val, dsibclk_nsk) - 1;
|
||||
val = 50 + tc358768_ps_to_ns(4 * ui_ps);
|
||||
val = tc358768_ns_to_cnt(val, hsbyteclk_ps) - 1;
|
||||
dev_dbg(dev, "THS_PREPARECNT %u\n", val);
|
||||
/* THS_PREPARE + THS_ZERO > 145ns + 10*UI */
|
||||
raw_val = tc358768_ns_to_cnt(145 - tc358768_to_ns(3 * ui_nsk), dsibclk_nsk) - 10;
|
||||
raw_val = tc358768_ns_to_cnt(145 - tc358768_ps_to_ns(3 * ui_ps), hsbyteclk_ps) - 10;
|
||||
val2 = clamp(raw_val, 0, 127);
|
||||
dev_dbg(dev, "THS_ZEROCNT %u\n", val2);
|
||||
val |= val2 << 8;
|
||||
dev_dbg(priv->dev, "THS_HEADERCNT: 0x%x\n", val);
|
||||
tc358768_write(priv, TC358768_THS_HEADERCNT, val);
|
||||
|
||||
/* TWAKEUP > 1ms in lptxcnt steps */
|
||||
val = tc358768_ns_to_cnt(1020000, dsibclk_nsk);
|
||||
val = tc358768_ns_to_cnt(1020000, hsbyteclk_ps);
|
||||
val = val / (lptxcnt + 1) - 1;
|
||||
dev_dbg(priv->dev, "TWAKEUP: 0x%x\n", val);
|
||||
dev_dbg(dev, "TWAKEUP: %u\n", val);
|
||||
tc358768_write(priv, TC358768_TWAKEUP, val);
|
||||
|
||||
/* TCLK_POSTCNT > 60ns + 52*UI */
|
||||
val = tc358768_ns_to_cnt(60 + tc358768_to_ns(52 * ui_nsk),
|
||||
dsibclk_nsk) - 3;
|
||||
dev_dbg(priv->dev, "TCLK_POSTCNT: 0x%x\n", val);
|
||||
val = tc358768_ns_to_cnt(60 + tc358768_ps_to_ns(52 * ui_ps),
|
||||
hsbyteclk_ps) - 3;
|
||||
dev_dbg(dev, "TCLK_POSTCNT: %u\n", val);
|
||||
tc358768_write(priv, TC358768_TCLK_POSTCNT, val);
|
||||
|
||||
/* max(60ns + 4*UI, 8*UI) < THS_TRAILCNT < 105ns + 12*UI */
|
||||
raw_val = tc358768_ns_to_cnt(60 + tc358768_to_ns(18 * ui_nsk),
|
||||
dsibclk_nsk) - 4;
|
||||
raw_val = tc358768_ns_to_cnt(60 + tc358768_ps_to_ns(18 * ui_ps),
|
||||
hsbyteclk_ps) - 4;
|
||||
val = clamp(raw_val, 0, 15);
|
||||
dev_dbg(priv->dev, "THS_TRAILCNT: 0x%x\n", val);
|
||||
dev_dbg(dev, "THS_TRAILCNT: %u\n", val);
|
||||
tc358768_write(priv, TC358768_THS_TRAILCNT, val);
|
||||
|
||||
val = BIT(0);
|
||||
@ -790,16 +970,17 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
|
||||
val |= BIT(i + 1);
|
||||
tc358768_write(priv, TC358768_HSTXVREGEN, val);
|
||||
|
||||
if (!(mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS))
|
||||
tc358768_write(priv, TC358768_TXOPTIONCNTRL, 0x1);
|
||||
tc358768_write(priv, TC358768_TXOPTIONCNTRL,
|
||||
(mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) ? 0 : BIT(0));
|
||||
|
||||
/* TXTAGOCNT[26:16] RXTASURECNT[10:0] */
|
||||
val = tc358768_to_ns((lptxcnt + 1) * dsibclk_nsk * 4);
|
||||
val = tc358768_ns_to_cnt(val, dsibclk_nsk) / 4 - 1;
|
||||
val2 = tc358768_ns_to_cnt(tc358768_to_ns((lptxcnt + 1) * dsibclk_nsk),
|
||||
dsibclk_nsk) - 2;
|
||||
val = tc358768_ps_to_ns((lptxcnt + 1) * hsbyteclk_ps * 4);
|
||||
val = tc358768_ns_to_cnt(val, hsbyteclk_ps) / 4 - 1;
|
||||
dev_dbg(dev, "TXTAGOCNT: %u\n", val);
|
||||
val2 = tc358768_ns_to_cnt(tc358768_ps_to_ns((lptxcnt + 1) * hsbyteclk_ps),
|
||||
hsbyteclk_ps) - 2;
|
||||
dev_dbg(dev, "RXTASURECNT: %u\n", val2);
|
||||
val = val << 16 | val2;
|
||||
dev_dbg(priv->dev, "BTACNTRL1: 0x%x\n", val);
|
||||
tc358768_write(priv, TC358768_BTACNTRL1, val);
|
||||
|
||||
/* START[0] */
|
||||
@ -810,58 +991,44 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
|
||||
tc358768_write(priv, TC358768_DSI_EVENT, 0);
|
||||
|
||||
/* vact */
|
||||
tc358768_write(priv, TC358768_DSI_VACT, mode->vdisplay);
|
||||
tc358768_write(priv, TC358768_DSI_VACT, vm.vactive);
|
||||
|
||||
/* vsw */
|
||||
tc358768_write(priv, TC358768_DSI_VSW,
|
||||
mode->vsync_end - mode->vsync_start);
|
||||
tc358768_write(priv, TC358768_DSI_VSW, vm.vsync_len);
|
||||
|
||||
/* vbp */
|
||||
tc358768_write(priv, TC358768_DSI_VBPR,
|
||||
mode->vtotal - mode->vsync_end);
|
||||
|
||||
/* hsw * byteclk * ndl / pclk */
|
||||
val = (u32)div_u64((mode->hsync_end - mode->hsync_start) *
|
||||
((u64)priv->dsiclk / 4) * priv->dsi_lanes,
|
||||
mode->clock * 1000);
|
||||
tc358768_write(priv, TC358768_DSI_HSW, val);
|
||||
|
||||
/* hbp * byteclk * ndl / pclk */
|
||||
val = (u32)div_u64((mode->htotal - mode->hsync_end) *
|
||||
((u64)priv->dsiclk / 4) * priv->dsi_lanes,
|
||||
mode->clock * 1000);
|
||||
tc358768_write(priv, TC358768_DSI_HBPR, val);
|
||||
tc358768_write(priv, TC358768_DSI_VBPR, vm.vback_porch);
|
||||
} else {
|
||||
/* Set event mode */
|
||||
tc358768_write(priv, TC358768_DSI_EVENT, 1);
|
||||
|
||||
/* vact */
|
||||
tc358768_write(priv, TC358768_DSI_VACT, mode->vdisplay);
|
||||
tc358768_write(priv, TC358768_DSI_VACT, vm.vactive);
|
||||
|
||||
/* vsw (+ vbp) */
|
||||
tc358768_write(priv, TC358768_DSI_VSW,
|
||||
mode->vtotal - mode->vsync_start);
|
||||
vm.vsync_len + vm.vback_porch);
|
||||
|
||||
/* vbp (not used in event mode) */
|
||||
tc358768_write(priv, TC358768_DSI_VBPR, 0);
|
||||
|
||||
/* (hsw + hbp) * byteclk * ndl / pclk */
|
||||
val = (u32)div_u64((mode->htotal - mode->hsync_start) *
|
||||
((u64)priv->dsiclk / 4) * priv->dsi_lanes,
|
||||
mode->clock * 1000);
|
||||
tc358768_write(priv, TC358768_DSI_HSW, val);
|
||||
|
||||
/* hbp (not used in event mode) */
|
||||
tc358768_write(priv, TC358768_DSI_HBPR, 0);
|
||||
}
|
||||
|
||||
/* hsw (bytes) */
|
||||
tc358768_write(priv, TC358768_DSI_HSW, dsi_hsw);
|
||||
|
||||
/* hbp (bytes) */
|
||||
tc358768_write(priv, TC358768_DSI_HBPR, dsi_hbp);
|
||||
|
||||
/* hact (bytes) */
|
||||
tc358768_write(priv, TC358768_DSI_HACT, hact);
|
||||
|
||||
/* VSYNC polarity */
|
||||
if (!(mode->flags & DRM_MODE_FLAG_NVSYNC))
|
||||
tc358768_update_bits(priv, TC358768_CONFCTL, BIT(5), BIT(5));
|
||||
tc358768_update_bits(priv, TC358768_CONFCTL, BIT(5),
|
||||
(mode->flags & DRM_MODE_FLAG_PVSYNC) ? BIT(5) : 0);
|
||||
|
||||
/* HSYNC polarity */
|
||||
if (mode->flags & DRM_MODE_FLAG_PHSYNC)
|
||||
tc358768_update_bits(priv, TC358768_PP_MISC, BIT(0), BIT(0));
|
||||
tc358768_update_bits(priv, TC358768_PP_MISC, BIT(0),
|
||||
(mode->flags & DRM_MODE_FLAG_PHSYNC) ? BIT(0) : 0);
|
||||
|
||||
/* Start DSI Tx */
|
||||
tc358768_write(priv, TC358768_DSI_START, 0x1);
|
||||
@ -891,7 +1058,7 @@ static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
|
||||
|
||||
ret = tc358768_clear_error(priv);
|
||||
if (ret) {
|
||||
dev_err(priv->dev, "Bridge pre_enable failed: %d\n", ret);
|
||||
dev_err(dev, "Bridge pre_enable failed: %d\n", ret);
|
||||
tc358768_bridge_disable(bridge);
|
||||
tc358768_bridge_post_disable(bridge);
|
||||
}
|
||||
@ -959,9 +1126,27 @@ tc358768_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
|
||||
return input_fmts;
|
||||
}
|
||||
|
||||
static bool tc358768_mode_fixup(struct drm_bridge *bridge,
				const struct drm_display_mode *mode,
				struct drm_display_mode *adjusted_mode)
{
	/* Default to positive sync */

	if (!(adjusted_mode->flags &
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
		adjusted_mode->flags |= DRM_MODE_FLAG_PHSYNC;

	if (!(adjusted_mode->flags &
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
		adjusted_mode->flags |= DRM_MODE_FLAG_PVSYNC;

	return true;
}
|
||||
|
||||
static const struct drm_bridge_funcs tc358768_bridge_funcs = {
|
||||
.attach = tc358768_bridge_attach,
|
||||
.mode_valid = tc358768_bridge_mode_valid,
|
||||
.mode_fixup = tc358768_mode_fixup,
|
||||
.pre_enable = tc358768_bridge_pre_enable,
|
||||
.enable = tc358768_bridge_enable,
|
||||
.disable = tc358768_bridge_disable,
|
||||
|
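A recurring step in the tc358768 hunks above is turning a D-PHY time budget given in nanoseconds into a count of HS byte-clock cycles, now computed from a period in picoseconds instead of the old fixed-point nanosecond value. A minimal standalone sketch of that arithmetic, with a hypothetical ns_to_cnt() helper and an assumed 500 Mbit/s-per-lane link (so roughly the value LINEINITCNT ends up with), not the driver's own helpers:

```c
#include <stdio.h>
#include <stdint.h>

#define PICO 1000000000000ULL

/* Hypothetical helper mirroring the ps-based conversions above. */
static uint32_t ns_to_cnt(uint32_t ns, uint32_t period_ps)
{
	/* Round up so the programmed interval is never shorter than 'ns'. */
	return (ns * 1000 + period_ps - 1) / period_ps;
}

int main(void)
{
	uint64_t hsbyteclk = 62500000;            /* 500 Mbps link -> 62.5 MHz byte clock */
	uint32_t hsbyteclk_ps = PICO / hsbyteclk; /* 16000 ps per byte-clock cycle */

	/* LP-11 period before D-PHY Rx init must exceed 100 us. */
	uint32_t lineinitcnt = ns_to_cnt(100 * 1000, hsbyteclk_ps) - 1;

	printf("hsbyteclk period: %u ps, LINEINITCNT: %u\n", hsbyteclk_ps, lineinitcnt);
	return 0;
}
```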
@ -3308,8 +3308,7 @@ int drm_dp_add_payload_part1(struct drm_dp_mst_topology_mgr *mgr,
|
||||
struct drm_dp_mst_atomic_payload *payload)
|
||||
{
|
||||
struct drm_dp_mst_port *port;
|
||||
int ret = 0;
|
||||
bool allocate = true;
|
||||
int ret;
|
||||
|
||||
/* Update mst mgr info */
|
||||
if (mgr->payload_count == 0)
|
||||
@ -3320,27 +3319,27 @@ int drm_dp_add_payload_part1(struct drm_dp_mst_topology_mgr *mgr,
|
||||
mgr->payload_count++;
|
||||
mgr->next_start_slot += payload->time_slots;
|
||||
|
||||
payload->payload_allocation_status = DRM_DP_MST_PAYLOAD_ALLOCATION_LOCAL;
|
||||
|
||||
/* Allocate payload to immediate downstream facing port */
|
||||
port = drm_dp_mst_topology_get_port_validated(mgr, payload->port);
|
||||
if (!port) {
|
||||
drm_dbg_kms(mgr->dev,
|
||||
"VCPI %d for port %p not in topology, not creating a payload to remote\n",
|
||||
payload->vcpi, payload->port);
|
||||
allocate = false;
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
if (allocate) {
|
||||
ret = drm_dp_create_payload_at_dfp(mgr, payload);
|
||||
if (ret < 0)
|
||||
drm_warn(mgr->dev, "Failed to create MST payload for port %p: %d\n",
|
||||
payload->port, ret);
|
||||
|
||||
ret = drm_dp_create_payload_at_dfp(mgr, payload);
|
||||
if (ret < 0) {
|
||||
drm_dbg_kms(mgr->dev, "Failed to create MST payload for port %p: %d\n",
|
||||
payload->port, ret);
|
||||
goto put_port;
|
||||
}
|
||||
|
||||
payload->payload_allocation_status =
|
||||
(!allocate || ret < 0) ? DRM_DP_MST_PAYLOAD_ALLOCATION_LOCAL :
|
||||
DRM_DP_MST_PAYLOAD_ALLOCATION_DFP;
|
||||
payload->payload_allocation_status = DRM_DP_MST_PAYLOAD_ALLOCATION_DFP;
|
||||
|
||||
put_port:
|
||||
drm_dp_mst_topology_put_port(port);
|
||||
|
||||
return ret;
|
||||
|
@ -3339,6 +3339,9 @@ void drm_atomic_helper_shutdown(struct drm_device *dev)
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	if (dev == NULL)
		return;

	DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);

	ret = drm_atomic_helper_disable_all(dev, &ctx);
|
||||
|
@ -235,7 +235,8 @@ static int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv)
static int
drm_master_check_perm(struct drm_device *dev, struct drm_file *file_priv)
{
	if (file_priv->pid == task_pid(current) && file_priv->was_master)
	if (file_priv->was_master &&
	    rcu_access_pointer(file_priv->pid) == task_pid(current))
		return 0;

	if (!capable(CAP_SYS_ADMIN))
|
||||
|
@ -480,10 +480,12 @@ err_undo:
|
||||
static int __alloc_range(struct drm_buddy *mm,
|
||||
struct list_head *dfs,
|
||||
u64 start, u64 size,
|
||||
struct list_head *blocks)
|
||||
struct list_head *blocks,
|
||||
u64 *total_allocated_on_err)
|
||||
{
|
||||
struct drm_buddy_block *block;
|
||||
struct drm_buddy_block *buddy;
|
||||
u64 total_allocated = 0;
|
||||
LIST_HEAD(allocated);
|
||||
u64 end;
|
||||
int err;
|
||||
@ -520,6 +522,7 @@ static int __alloc_range(struct drm_buddy *mm,
|
||||
}
|
||||
|
||||
mark_allocated(block);
|
||||
total_allocated += drm_buddy_block_size(mm, block);
|
||||
mm->avail -= drm_buddy_block_size(mm, block);
|
||||
list_add_tail(&block->link, &allocated);
|
||||
continue;
|
||||
@ -551,13 +554,20 @@ err_undo:
|
||||
__drm_buddy_free(mm, block);
|
||||
|
||||
err_free:
|
||||
drm_buddy_free_list(mm, &allocated);
|
||||
if (err == -ENOSPC && total_allocated_on_err) {
|
||||
list_splice_tail(&allocated, blocks);
|
||||
*total_allocated_on_err = total_allocated;
|
||||
} else {
|
||||
drm_buddy_free_list(mm, &allocated);
|
||||
}
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static int __drm_buddy_alloc_range(struct drm_buddy *mm,
|
||||
u64 start,
|
||||
u64 size,
|
||||
u64 *total_allocated_on_err,
|
||||
struct list_head *blocks)
|
||||
{
|
||||
LIST_HEAD(dfs);
|
||||
@ -566,7 +576,62 @@ static int __drm_buddy_alloc_range(struct drm_buddy *mm,
|
||||
for (i = 0; i < mm->n_roots; ++i)
|
||||
list_add_tail(&mm->roots[i]->tmp_link, &dfs);
|
||||
|
||||
return __alloc_range(mm, &dfs, start, size, blocks);
|
||||
return __alloc_range(mm, &dfs, start, size,
|
||||
blocks, total_allocated_on_err);
|
||||
}
|
||||
|
||||
static int __alloc_contig_try_harder(struct drm_buddy *mm,
|
||||
u64 size,
|
||||
u64 min_block_size,
|
||||
struct list_head *blocks)
|
||||
{
|
||||
u64 rhs_offset, lhs_offset, lhs_size, filled;
|
||||
struct drm_buddy_block *block;
|
||||
struct list_head *list;
|
||||
LIST_HEAD(blocks_lhs);
|
||||
unsigned long pages;
|
||||
unsigned int order;
|
||||
u64 modify_size;
|
||||
int err;
|
||||
|
||||
modify_size = rounddown_pow_of_two(size);
|
||||
pages = modify_size >> ilog2(mm->chunk_size);
|
||||
order = fls(pages) - 1;
|
||||
if (order == 0)
|
||||
return -ENOSPC;
|
||||
|
||||
list = &mm->free_list[order];
|
||||
if (list_empty(list))
|
||||
return -ENOSPC;
|
||||
|
||||
list_for_each_entry_reverse(block, list, link) {
|
||||
/* Allocate blocks traversing RHS */
|
||||
rhs_offset = drm_buddy_block_offset(block);
|
||||
err = __drm_buddy_alloc_range(mm, rhs_offset, size,
|
||||
&filled, blocks);
|
||||
if (!err || err != -ENOSPC)
|
||||
return err;
|
||||
|
||||
lhs_size = max((size - filled), min_block_size);
|
||||
if (!IS_ALIGNED(lhs_size, min_block_size))
|
||||
lhs_size = round_up(lhs_size, min_block_size);
|
||||
|
||||
/* Allocate blocks traversing LHS */
|
||||
lhs_offset = drm_buddy_block_offset(block) - lhs_size;
|
||||
err = __drm_buddy_alloc_range(mm, lhs_offset, lhs_size,
|
||||
NULL, &blocks_lhs);
|
||||
if (!err) {
|
||||
list_splice(&blocks_lhs, blocks);
|
||||
return 0;
|
||||
} else if (err != -ENOSPC) {
|
||||
drm_buddy_free_list(mm, blocks);
|
||||
return err;
|
||||
}
|
||||
/* Free blocks for the next iteration */
|
||||
drm_buddy_free_list(mm, blocks);
|
||||
}
|
||||
|
||||
return -ENOSPC;
|
||||
}
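The try-harder path above services a contiguous request that is not a power of two by picking a free-list order from the power-of-two floor of the size, filling as much as possible rightwards from a candidate block, and then covering the shortfall immediately to its left, aligned to the minimum block size. A standalone sketch of just that size arithmetic (made-up sizes and an assumed shortfall; not the allocator itself):

```c
#include <stdio.h>
#include <stdint.h>

static uint64_t rounddown_pow_of_two(uint64_t x)
{
	uint64_t p = 1;

	while (p * 2 <= x)
		p *= 2;
	return p;
}

static uint64_t round_up(uint64_t x, uint64_t step)
{
	return (x + step - 1) / step * step;
}

int main(void)
{
	uint64_t size = 768 << 10;          /* 768 KiB request, not a power of two */
	uint64_t min_block_size = 64 << 10; /* alignment of the allocation */
	uint64_t chunk_size = 4 << 10;      /* buddy chunk (page) size */

	/* Free-list order is derived from the power-of-two floor of the request. */
	uint64_t modify_size = rounddown_pow_of_two(size);
	unsigned int order = 0;

	for (uint64_t pages = modify_size / chunk_size; pages > 1; pages >>= 1)
		order++;

	/* Suppose the rightward pass placed only 640 KiB before hitting -ENOSPC. */
	uint64_t filled = 640 << 10;
	uint64_t lhs_size = size - filled;

	if (lhs_size < min_block_size)
		lhs_size = min_block_size;
	lhs_size = round_up(lhs_size, min_block_size);

	printf("order %u (from %llu KiB), lhs to allocate: %llu KiB\n",
	       order, (unsigned long long)(modify_size >> 10),
	       (unsigned long long)(lhs_size >> 10));
	return 0;
}
```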
|
||||
|
||||
/**
|
||||
@ -626,7 +691,7 @@ int drm_buddy_block_trim(struct drm_buddy *mm,
|
||||
|
||||
new_start = drm_buddy_block_offset(block);
|
||||
list_add(&block->tmp_link, &dfs);
|
||||
err = __alloc_range(mm, &dfs, new_start, new_size, blocks);
|
||||
err = __alloc_range(mm, &dfs, new_start, new_size, blocks, NULL);
|
||||
if (err) {
|
||||
mark_allocated(block);
|
||||
mm->avail -= drm_buddy_block_size(mm, block);
|
||||
@ -645,7 +710,7 @@ EXPORT_SYMBOL(drm_buddy_block_trim);
|
||||
* @start: start of the allowed range for this block
|
||||
* @end: end of the allowed range for this block
|
||||
* @size: size of the allocation
|
||||
* @min_page_size: alignment of the allocation
|
||||
* @min_block_size: alignment of the allocation
|
||||
* @blocks: output list head to add allocated blocks
|
||||
* @flags: DRM_BUDDY_*_ALLOCATION flags
|
||||
*
|
||||
@ -660,23 +725,24 @@ EXPORT_SYMBOL(drm_buddy_block_trim);
|
||||
*/
|
||||
int drm_buddy_alloc_blocks(struct drm_buddy *mm,
|
||||
u64 start, u64 end, u64 size,
|
||||
u64 min_page_size,
|
||||
u64 min_block_size,
|
||||
struct list_head *blocks,
|
||||
unsigned long flags)
|
||||
{
|
||||
struct drm_buddy_block *block = NULL;
|
||||
u64 original_size, original_min_size;
|
||||
unsigned int min_order, order;
|
||||
unsigned long pages;
|
||||
LIST_HEAD(allocated);
|
||||
unsigned long pages;
|
||||
int err;
|
||||
|
||||
if (size < mm->chunk_size)
|
||||
return -EINVAL;
|
||||
|
||||
if (min_page_size < mm->chunk_size)
|
||||
if (min_block_size < mm->chunk_size)
|
||||
return -EINVAL;
|
||||
|
||||
if (!is_power_of_2(min_page_size))
|
||||
if (!is_power_of_2(min_block_size))
|
||||
return -EINVAL;
|
||||
|
||||
if (!IS_ALIGNED(start | end | size, mm->chunk_size))
|
||||
@ -690,14 +756,23 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
|
||||
|
||||
/* Actual range allocation */
|
||||
if (start + size == end)
|
||||
return __drm_buddy_alloc_range(mm, start, size, blocks);
|
||||
return __drm_buddy_alloc_range(mm, start, size, NULL, blocks);
|
||||
|
||||
if (!IS_ALIGNED(size, min_page_size))
|
||||
return -EINVAL;
|
||||
original_size = size;
|
||||
original_min_size = min_block_size;
|
||||
|
||||
/* Roundup the size to power of 2 */
|
||||
if (flags & DRM_BUDDY_CONTIGUOUS_ALLOCATION) {
|
||||
size = roundup_pow_of_two(size);
|
||||
min_block_size = size;
|
||||
/* Align size value to min_block_size */
|
||||
} else if (!IS_ALIGNED(size, min_block_size)) {
|
||||
size = round_up(size, min_block_size);
|
||||
}
|
||||
|
||||
pages = size >> ilog2(mm->chunk_size);
|
||||
order = fls(pages) - 1;
|
||||
min_order = ilog2(min_page_size) - ilog2(mm->chunk_size);
|
||||
min_order = ilog2(min_block_size) - ilog2(mm->chunk_size);
|
||||
|
||||
do {
|
||||
order = min(order, (unsigned int)fls(pages) - 1);
|
||||
@ -716,6 +791,16 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
|
||||
break;
|
||||
|
||||
if (order-- == min_order) {
|
||||
if (flags & DRM_BUDDY_CONTIGUOUS_ALLOCATION &&
|
||||
!(flags & DRM_BUDDY_RANGE_ALLOCATION))
|
||||
/*
|
||||
* Try contiguous block allocation through
|
||||
* try harder method
|
||||
*/
|
||||
return __alloc_contig_try_harder(mm,
|
||||
original_size,
|
||||
original_min_size,
|
||||
blocks);
|
||||
err = -ENOSPC;
|
||||
goto err_free;
|
||||
}
|
||||
@ -732,6 +817,31 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
|
||||
break;
|
||||
} while (1);
|
||||
|
||||
/* Trim the allocated block to the required size */
|
||||
if (original_size != size) {
|
||||
struct list_head *trim_list;
|
||||
LIST_HEAD(temp);
|
||||
u64 trim_size;
|
||||
|
||||
trim_list = &allocated;
|
||||
trim_size = original_size;
|
||||
|
||||
if (!list_is_singular(&allocated)) {
|
||||
block = list_last_entry(&allocated, typeof(*block), link);
|
||||
list_move(&block->link, &temp);
|
||||
trim_list = &temp;
|
||||
trim_size = drm_buddy_block_size(mm, block) -
|
||||
(size - original_size);
|
||||
}
|
||||
|
||||
drm_buddy_block_trim(mm,
|
||||
trim_size,
|
||||
trim_list);
|
||||
|
||||
if (!list_empty(&temp))
|
||||
list_splice_tail(trim_list, &allocated);
|
||||
}
|
||||
|
||||
list_splice_tail(&allocated, blocks);
|
||||
return 0;
|
||||
|
||||
|
@ -40,7 +40,7 @@
|
||||
#include <drm/drm_file.h>
|
||||
#include <drm/drm_gem.h>
|
||||
#include <drm/drm_managed.h>
|
||||
#include <drm/drm_gpuva_mgr.h>
|
||||
#include <drm/drm_gpuvm.h>
|
||||
|
||||
#include "drm_crtc_internal.h"
|
||||
#include "drm_internal.h"
|
||||
@ -92,15 +92,17 @@ static int drm_clients_info(struct seq_file *m, void *data)
|
||||
*/
|
||||
mutex_lock(&dev->filelist_mutex);
|
||||
list_for_each_entry_reverse(priv, &dev->filelist, lhead) {
|
||||
struct task_struct *task;
|
||||
bool is_current_master = drm_is_current_master(priv);
|
||||
struct task_struct *task;
|
||||
struct pid *pid;
|
||||
|
||||
rcu_read_lock(); /* locks pid_task()->comm */
|
||||
task = pid_task(priv->pid, PIDTYPE_TGID);
|
||||
rcu_read_lock(); /* Locks priv->pid and pid_task()->comm! */
|
||||
pid = rcu_dereference(priv->pid);
|
||||
task = pid_task(pid, PIDTYPE_TGID);
|
||||
uid = task ? __task_cred(task)->euid : GLOBAL_ROOT_UID;
|
||||
seq_printf(m, "%20s %5d %3d %c %c %5d %10u\n",
|
||||
task ? task->comm : "<unknown>",
|
||||
pid_vnr(priv->pid),
|
||||
pid_vnr(pid),
|
||||
priv->minor->index,
|
||||
is_current_master ? 'y' : 'n',
|
||||
priv->authenticated ? 'y' : 'n',
|
||||
@ -187,31 +189,31 @@ static const struct file_operations drm_debugfs_fops = {
|
||||
/**
|
||||
* drm_debugfs_gpuva_info - dump the given DRM GPU VA space
|
||||
* @m: pointer to the &seq_file to write
|
||||
* @mgr: the &drm_gpuva_manager representing the GPU VA space
|
||||
* @gpuvm: the &drm_gpuvm representing the GPU VA space
|
||||
*
|
||||
* Dumps the GPU VA mappings of a given DRM GPU VA manager.
|
||||
*
|
||||
* For each DRM GPU VA space drivers should call this function from their
|
||||
* &drm_info_list's show callback.
|
||||
*
|
||||
* Returns: 0 on success, -ENODEV if the &mgr is not initialized
|
||||
* Returns: 0 on success, -ENODEV if the &gpuvm is not initialized
|
||||
*/
|
||||
int drm_debugfs_gpuva_info(struct seq_file *m,
|
||||
struct drm_gpuva_manager *mgr)
|
||||
struct drm_gpuvm *gpuvm)
|
||||
{
|
||||
struct drm_gpuva *va, *kva = &mgr->kernel_alloc_node;
|
||||
struct drm_gpuva *va, *kva = &gpuvm->kernel_alloc_node;
|
||||
|
||||
if (!mgr->name)
|
||||
if (!gpuvm->name)
|
||||
return -ENODEV;
|
||||
|
||||
seq_printf(m, "DRM GPU VA space (%s) [0x%016llx;0x%016llx]\n",
|
||||
mgr->name, mgr->mm_start, mgr->mm_start + mgr->mm_range);
|
||||
gpuvm->name, gpuvm->mm_start, gpuvm->mm_start + gpuvm->mm_range);
|
||||
seq_printf(m, "Kernel reserved node [0x%016llx;0x%016llx]\n",
|
||||
kva->va.addr, kva->va.addr + kva->va.range);
|
||||
seq_puts(m, "\n");
|
||||
seq_puts(m, " VAs | start | range | end | object | object offset\n");
|
||||
seq_puts(m, "-------------------------------------------------------------------------------------------------------------\n");
|
||||
drm_gpuva_for_each_va(va, mgr) {
|
||||
drm_gpuvm_for_each_va(va, gpuvm) {
|
||||
if (unlikely(va == kva))
|
||||
continue;
|
||||
|
||||
|
@ -3496,11 +3496,19 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_connector *connecto
	mode->vsync_end = mode->vsync_start + vsync_pulse_width;
	mode->vtotal = mode->vdisplay + vblank;

	/* Some EDIDs have bogus h/vtotal values */
	if (mode->hsync_end > mode->htotal)
		mode->htotal = mode->hsync_end + 1;
	if (mode->vsync_end > mode->vtotal)
		mode->vtotal = mode->vsync_end + 1;
	/* Some EDIDs have bogus h/vsync_end values */
	if (mode->hsync_end > mode->htotal) {
		drm_dbg_kms(dev, "[CONNECTOR:%d:%s] reducing hsync_end %d->%d\n",
			    connector->base.id, connector->name,
			    mode->hsync_end, mode->htotal);
		mode->hsync_end = mode->htotal;
	}
	if (mode->vsync_end > mode->vtotal) {
		drm_dbg_kms(dev, "[CONNECTOR:%d:%s] reducing vsync_end %d->%d\n",
			    connector->base.id, connector->name,
			    mode->vsync_end, mode->vtotal);
		mode->vsync_end = mode->vtotal;
	}
|
||||
|
||||
drm_mode_do_interlace_quirk(mode, pt);
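With hypothetical numbers, the effect of the new clamp compared with the old htotal stretch looks like this (a toy illustration, not the EDID parser):

```c
#include <stdio.h>

struct mode { int hdisplay, hsync_start, hsync_end, htotal; };

int main(void)
{
	/* Made-up detailed timing where the EDID claims hsync ends past htotal. */
	struct mode m = { .hdisplay = 1920, .hsync_start = 2008,
			  .hsync_end = 2062, .htotal = 2052 };

	/* Old behaviour stretched htotal; the new code clips hsync_end instead. */
	if (m.hsync_end > m.htotal) {
		printf("reducing hsync_end %d->%d\n", m.hsync_end, m.htotal);
		m.hsync_end = m.htotal;
	}

	printf("hdisplay %d, hsync %d-%d, htotal %d\n",
	       m.hdisplay, m.hsync_start, m.hsync_end, m.htotal);
	return 0;
}
```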
|
||||
|
||||
|
@ -160,7 +160,7 @@ struct drm_file *drm_file_alloc(struct drm_minor *minor)
|
||||
|
||||
/* Get a unique identifier for fdinfo: */
|
||||
file->client_id = atomic64_inc_return(&ident);
|
||||
file->pid = get_pid(task_tgid(current));
|
||||
rcu_assign_pointer(file->pid, get_pid(task_tgid(current)));
|
||||
file->minor = minor;
|
||||
|
||||
/* for compatibility root is always authenticated */
|
||||
@ -200,7 +200,7 @@ out_prime_destroy:
|
||||
drm_syncobj_release(file);
|
||||
if (drm_core_check_feature(dev, DRIVER_GEM))
|
||||
drm_gem_release(dev, file);
|
||||
put_pid(file->pid);
|
||||
put_pid(rcu_access_pointer(file->pid));
|
||||
kfree(file);
|
||||
|
||||
return ERR_PTR(ret);
|
||||
@ -291,7 +291,7 @@ void drm_file_free(struct drm_file *file)
|
||||
|
||||
WARN_ON(!list_empty(&file->event_list));
|
||||
|
||||
put_pid(file->pid);
|
||||
put_pid(rcu_access_pointer(file->pid));
|
||||
kfree(file);
|
||||
}
|
||||
|
||||
@ -505,6 +505,40 @@ int drm_release(struct inode *inode, struct file *filp)
|
||||
}
|
||||
EXPORT_SYMBOL(drm_release);
|
||||
|
||||
void drm_file_update_pid(struct drm_file *filp)
{
	struct drm_device *dev;
	struct pid *pid, *old;

	/*
	 * Master nodes need to keep the original ownership in order for
	 * drm_master_check_perm to keep working correctly. (See comment in
	 * drm_auth.c.)
	 */
	if (filp->was_master)
		return;

	pid = task_tgid(current);

	/*
	 * Quick unlocked check since the model is a single handover followed by
	 * exclusive repeated use.
	 */
	if (pid == rcu_access_pointer(filp->pid))
		return;

	dev = filp->minor->dev;
	mutex_lock(&dev->filelist_mutex);
	old = rcu_replace_pointer(filp->pid, pid, 1);
	mutex_unlock(&dev->filelist_mutex);

	if (pid != old) {
		get_pid(pid);
		synchronize_rcu();
		put_pid(old);
	}
}
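Stripped of the RCU machinery, the handover above amounts to swapping the recorded owner and moving one pid reference from the old owner to the new one. A toy, single-threaded sketch of that bookkeeping (made-up types, not the kernel API; the kernel additionally relies on RCU and synchronize_rcu() so lockless readers never see a freed pid):

```c
#include <stdio.h>

/* Toy stand-ins for struct pid reference counting. */
struct toy_pid { int nr; int refs; };

static void get_pid(struct toy_pid *p) { if (p) p->refs++; }
static void put_pid(struct toy_pid *p) { if (p) p->refs--; }

int main(void)
{
	struct toy_pid server = { .nr = 100, .refs = 1 }; /* owner recorded at open() */
	struct toy_pid client = { .nr = 200, .refs = 0 }; /* process the fd was passed to */
	struct toy_pid *owner = &server;

	/* First ioctl from the new process: hand the fd's ownership over. */
	struct toy_pid *current_pid = &client;

	if (current_pid != owner) {
		struct toy_pid *old = owner;

		owner = current_pid;  /* kernel: rcu_replace_pointer() under filelist_mutex */
		get_pid(current_pid); /* pin the new owner */
		/* kernel: synchronize_rcu() before dropping the old reference */
		put_pid(old);
	}

	printf("owner pid %d (refs %d), old owner refs %d\n",
	       owner->nr, owner->refs, server.refs);
	return 0;
}
```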
|
||||
|
||||
/**
|
||||
* drm_release_noglobal - release method for DRM file
|
||||
* @inode: device inode
|
||||
|
File diff suppressed because it is too large
@ -776,6 +776,9 @@ long drm_ioctl_kernel(struct file *file, drm_ioctl_t *func, void *kdata,
	struct drm_device *dev = file_priv->minor->dev;
	int retcode;

	/* Update drm_file owner if fd was passed along. */
	drm_file_update_pid(file_priv);

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;
|
||||
|
||||
|
@ -356,9 +356,17 @@ static void fsl_dcu_drm_remove(struct platform_device *pdev)
|
||||
clk_unregister(fsl_dev->pix_clk);
|
||||
}
|
||||
|
||||
static void fsl_dcu_drm_shutdown(struct platform_device *pdev)
|
||||
{
|
||||
struct fsl_dcu_drm_device *fsl_dev = platform_get_drvdata(pdev);
|
||||
|
||||
drm_atomic_helper_shutdown(fsl_dev->drm);
|
||||
}
|
||||
|
||||
static struct platform_driver fsl_dcu_drm_platform_driver = {
|
||||
.probe = fsl_dcu_drm_probe,
|
||||
.remove_new = fsl_dcu_drm_remove,
|
||||
.shutdown = fsl_dcu_drm_shutdown,
|
||||
.driver = {
|
||||
.name = "fsl-dcu",
|
||||
.pm = &fsl_dcu_drm_pm_ops,
|
||||
|
@ -357,6 +357,11 @@ static void hibmc_pci_remove(struct pci_dev *pdev)
|
||||
hibmc_unload(dev);
|
||||
}
|
||||
|
||||
static void hibmc_pci_shutdown(struct pci_dev *pdev)
|
||||
{
|
||||
drm_atomic_helper_shutdown(pci_get_drvdata(pdev));
|
||||
}
|
||||
|
||||
static const struct pci_device_id hibmc_pci_table[] = {
|
||||
{ PCI_VDEVICE(HUAWEI, 0x1711) },
|
||||
{0,}
|
||||
@ -367,6 +372,7 @@ static struct pci_driver hibmc_pci_driver = {
|
||||
.id_table = hibmc_pci_table,
|
||||
.probe = hibmc_pci_probe,
|
||||
.remove = hibmc_pci_remove,
|
||||
.shutdown = hibmc_pci_shutdown,
|
||||
.driver.pm = &hibmc_pm_ops,
|
||||
};
|
||||
|
||||
|
@ -206,6 +206,7 @@ err_mode_config_cleanup:
|
||||
static int kirin_drm_kms_cleanup(struct drm_device *dev)
|
||||
{
|
||||
drm_kms_helper_poll_fini(dev);
|
||||
drm_atomic_helper_shutdown(dev);
|
||||
kirin_drm_private_cleanup(dev);
|
||||
drm_mode_config_cleanup(dev);
|
||||
|
||||
@ -244,6 +245,7 @@ err_kms_cleanup:
|
||||
kirin_drm_kms_cleanup(drm_dev);
|
||||
err_drm_dev_put:
|
||||
drm_dev_put(drm_dev);
|
||||
dev_set_drvdata(dev, NULL);
|
||||
|
||||
return ret;
|
||||
}
|
||||
@ -255,6 +257,7 @@ static void kirin_drm_unbind(struct device *dev)
|
||||
drm_dev_unregister(drm_dev);
|
||||
kirin_drm_kms_cleanup(drm_dev);
|
||||
drm_dev_put(drm_dev);
|
||||
dev_set_drvdata(dev, NULL);
|
||||
}
|
||||
|
||||
static const struct component_master_ops kirin_drm_ops = {
|
||||
@ -284,6 +287,11 @@ static void kirin_drm_platform_remove(struct platform_device *pdev)
|
||||
component_master_del(&pdev->dev, &kirin_drm_ops);
|
||||
}
|
||||
|
||||
static void kirin_drm_platform_shutdown(struct platform_device *pdev)
|
||||
{
|
||||
drm_atomic_helper_shutdown(platform_get_drvdata(pdev));
|
||||
}
|
||||
|
||||
static const struct of_device_id kirin_drm_dt_ids[] = {
|
||||
{ .compatible = "hisilicon,hi6220-ade",
|
||||
.data = &ade_driver_data,
|
||||
@ -295,6 +303,7 @@ MODULE_DEVICE_TABLE(of, kirin_drm_dt_ids);
|
||||
static struct platform_driver kirin_drm_platform_driver = {
|
||||
.probe = kirin_drm_platform_probe,
|
||||
.remove_new = kirin_drm_platform_remove,
|
||||
.shutdown = kirin_drm_platform_shutdown,
|
||||
.driver = {
|
||||
.name = "kirin-drm",
|
||||
.of_match_table = kirin_drm_dt_ids,
|
||||
|
@ -178,6 +178,11 @@ static void hyperv_vmbus_remove(struct hv_device *hdev)
|
||||
vmbus_free_mmio(hv->mem->start, hv->fb_size);
|
||||
}
|
||||
|
||||
static void hyperv_vmbus_shutdown(struct hv_device *hdev)
|
||||
{
|
||||
drm_atomic_helper_shutdown(hv_get_drvdata(hdev));
|
||||
}
|
||||
|
||||
static int hyperv_vmbus_suspend(struct hv_device *hdev)
|
||||
{
|
||||
struct drm_device *dev = hv_get_drvdata(hdev);
|
||||
@ -220,6 +225,7 @@ static struct hv_driver hyperv_hv_driver = {
|
||||
.id_table = hyperv_vmbus_tbl,
|
||||
.probe = hyperv_vmbus_probe,
|
||||
.remove = hyperv_vmbus_remove,
|
||||
.shutdown = hyperv_vmbus_shutdown,
|
||||
.suspend = hyperv_vmbus_suspend,
|
||||
.resume = hyperv_vmbus_resume,
|
||||
.driver = {
|
||||
|
@ -59,6 +59,9 @@ static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man,
|
||||
if (place->flags & TTM_PL_FLAG_TOPDOWN)
|
||||
bman_res->flags |= DRM_BUDDY_TOPDOWN_ALLOCATION;
|
||||
|
||||
if (place->flags & TTM_PL_FLAG_CONTIGUOUS)
|
||||
bman_res->flags |= DRM_BUDDY_CONTIGUOUS_ALLOCATION;
|
||||
|
||||
if (place->fpfn || lpfn != man->size)
|
||||
bman_res->flags |= DRM_BUDDY_RANGE_ALLOCATION;
|
||||
|
||||
@ -72,18 +75,6 @@ static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man,
|
||||
GEM_BUG_ON(min_page_size < mm->chunk_size);
|
||||
GEM_BUG_ON(!IS_ALIGNED(size, min_page_size));
|
||||
|
||||
if (place->fpfn + PFN_UP(bman_res->base.size) != place->lpfn &&
|
||||
place->flags & TTM_PL_FLAG_CONTIGUOUS) {
|
||||
unsigned long pages;
|
||||
|
||||
size = roundup_pow_of_two(size);
|
||||
min_page_size = size;
|
||||
|
||||
pages = size >> ilog2(mm->chunk_size);
|
||||
if (pages > lpfn)
|
||||
lpfn = pages;
|
||||
}
|
||||
|
||||
if (size > lpfn << PAGE_SHIFT) {
|
||||
err = -E2BIG;
|
||||
goto err_free_res;
|
||||
@ -107,14 +98,6 @@ static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man,
|
||||
if (unlikely(err))
|
||||
goto err_free_blocks;
|
||||
|
||||
if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
|
||||
u64 original_size = (u64)bman_res->base.size;
|
||||
|
||||
drm_buddy_block_trim(mm,
|
||||
original_size,
|
||||
&bman_res->blocks);
|
||||
}
|
||||
|
||||
if (lpfn <= bman->visible_size) {
|
||||
bman_res->used_visible_size = PFN_UP(bman_res->base.size);
|
||||
} else {
|
||||
|
@ -92,6 +92,13 @@ static int dcss_drv_platform_remove(struct platform_device *pdev)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void dcss_drv_platform_shutdown(struct platform_device *pdev)
|
||||
{
|
||||
struct dcss_drv *mdrv = dev_get_drvdata(&pdev->dev);
|
||||
|
||||
dcss_kms_shutdown(mdrv->kms);
|
||||
}
|
||||
|
||||
static struct dcss_type_data dcss_types[] = {
|
||||
[DCSS_IMX8MQ] = {
|
||||
.name = "DCSS_IMX8MQ",
|
||||
@ -114,6 +121,7 @@ MODULE_DEVICE_TABLE(of, dcss_of_match);
|
||||
static struct platform_driver dcss_platform_driver = {
|
||||
.probe = dcss_drv_platform_probe,
|
||||
.remove = dcss_drv_platform_remove,
|
||||
.shutdown = dcss_drv_platform_shutdown,
|
||||
.driver = {
|
||||
.name = "imx-dcss",
|
||||
.of_match_table = dcss_of_match,
|
||||
|
@ -172,3 +172,10 @@ void dcss_kms_detach(struct dcss_kms_dev *kms)
|
||||
dcss_crtc_deinit(&kms->crtc, drm);
|
||||
drm->dev_private = NULL;
|
||||
}
|
||||
|
||||
void dcss_kms_shutdown(struct dcss_kms_dev *kms)
|
||||
{
|
||||
struct drm_device *drm = &kms->base;
|
||||
|
||||
drm_atomic_helper_shutdown(drm);
|
||||
}
|
||||
|
@ -34,6 +34,7 @@ struct dcss_kms_dev {
|
||||
|
||||
struct dcss_kms_dev *dcss_kms_attach(struct dcss_dev *dcss);
|
||||
void dcss_kms_detach(struct dcss_kms_dev *kms);
|
||||
void dcss_kms_shutdown(struct dcss_kms_dev *kms);
|
||||
int dcss_crtc_init(struct dcss_crtc *crtc, struct drm_device *drm);
|
||||
void dcss_crtc_deinit(struct dcss_crtc *crtc, struct drm_device *drm);
|
||||
struct dcss_plane *dcss_plane_init(struct drm_device *drm,
|
||||
|
@ -257,6 +257,7 @@ err_poll_fini:
|
||||
drm_kms_helper_poll_fini(drm);
|
||||
component_unbind_all(drm->dev, drm);
|
||||
err_kms:
|
||||
dev_set_drvdata(dev, NULL);
|
||||
drm_dev_put(drm);
|
||||
|
||||
return ret;
|
||||
@ -269,6 +270,7 @@ static void imx_drm_unbind(struct device *dev)
|
||||
drm_dev_unregister(drm);
|
||||
|
||||
drm_kms_helper_poll_fini(drm);
|
||||
drm_atomic_helper_shutdown(drm);
|
||||
|
||||
component_unbind_all(drm->dev, drm);
|
||||
|
||||
@ -297,6 +299,11 @@ static void imx_drm_platform_remove(struct platform_device *pdev)
|
||||
component_master_del(&pdev->dev, &imx_drm_ops);
|
||||
}
|
||||
|
||||
static void imx_drm_platform_shutdown(struct platform_device *pdev)
|
||||
{
|
||||
drm_atomic_helper_shutdown(platform_get_drvdata(pdev));
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PM_SLEEP
|
||||
static int imx_drm_suspend(struct device *dev)
|
||||
{
|
||||
@ -324,6 +331,7 @@ MODULE_DEVICE_TABLE(of, imx_drm_dt_ids);
|
||||
static struct platform_driver imx_drm_pdrv = {
|
||||
.probe = imx_drm_platform_probe,
|
||||
.remove_new = imx_drm_platform_remove,
|
||||
.shutdown = imx_drm_platform_shutdown,
|
||||
.driver = {
|
||||
.name = "imx-drm",
|
||||
.pm = &imx_drm_pm_ops,
|
||||
|
@ -1130,7 +1130,7 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
|
||||
|
||||
ret = drmm_mode_config_init(drm);
|
||||
if (ret)
|
||||
return ret;
|
||||
goto err_drvdata;
|
||||
|
||||
drm->mode_config.min_width = 0;
|
||||
drm->mode_config.min_height = 0;
|
||||
@ -1142,7 +1142,8 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
|
||||
base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
|
||||
if (IS_ERR(base)) {
|
||||
dev_err(dev, "Failed to get memory resource\n");
|
||||
return PTR_ERR(base);
|
||||
ret = PTR_ERR(base);
|
||||
goto err_drvdata;
|
||||
}
|
||||
|
||||
regmap_config = ingenic_drm_regmap_config;
|
||||
@ -1151,33 +1152,40 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
|
||||
®map_config);
|
||||
if (IS_ERR(priv->map)) {
|
||||
dev_err(dev, "Failed to create regmap\n");
|
||||
return PTR_ERR(priv->map);
|
||||
ret = PTR_ERR(priv->map);
|
||||
goto err_drvdata;
|
||||
}
|
||||
|
||||
irq = platform_get_irq(pdev, 0);
|
||||
if (irq < 0)
|
||||
return irq;
|
||||
if (irq < 0) {
|
||||
ret = irq;
|
||||
goto err_drvdata;
|
||||
}
|
||||
|
||||
if (soc_info->needs_dev_clk) {
|
||||
priv->lcd_clk = devm_clk_get(dev, "lcd");
|
||||
if (IS_ERR(priv->lcd_clk)) {
|
||||
dev_err(dev, "Failed to get lcd clock\n");
|
||||
return PTR_ERR(priv->lcd_clk);
|
||||
ret = PTR_ERR(priv->lcd_clk);
|
||||
goto err_drvdata;
|
||||
}
|
||||
}
|
||||
|
||||
priv->pix_clk = devm_clk_get(dev, "lcd_pclk");
|
||||
if (IS_ERR(priv->pix_clk)) {
|
||||
dev_err(dev, "Failed to get pixel clock\n");
|
||||
return PTR_ERR(priv->pix_clk);
|
||||
ret = PTR_ERR(priv->pix_clk);
|
||||
goto err_drvdata;
|
||||
}
|
||||
|
||||
priv->dma_hwdescs = dmam_alloc_coherent(dev,
|
||||
sizeof(*priv->dma_hwdescs),
|
||||
&priv->dma_hwdescs_phys,
|
||||
GFP_KERNEL);
|
||||
if (!priv->dma_hwdescs)
|
||||
return -ENOMEM;
|
||||
if (!priv->dma_hwdescs) {
|
||||
ret = -ENOMEM;
|
||||
goto err_drvdata;
|
||||
}
|
||||
|
||||
/* Configure DMA hwdesc for foreground0 plane */
|
||||
ingenic_drm_configure_hwdesc_plane(priv, 0);
|
||||
@ -1199,7 +1207,7 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
|
||||
NULL, DRM_PLANE_TYPE_PRIMARY, NULL);
|
||||
if (ret) {
|
||||
dev_err(dev, "Failed to register plane: %i\n", ret);
|
||||
return ret;
|
||||
goto err_drvdata;
|
||||
}
|
||||
|
||||
if (soc_info->map_noncoherent)
|
||||
@ -1211,7 +1219,7 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
|
||||
NULL, &ingenic_drm_crtc_funcs, NULL);
|
||||
if (ret) {
|
||||
dev_err(dev, "Failed to init CRTC: %i\n", ret);
|
||||
return ret;
|
||||
goto err_drvdata;
|
||||
}
|
||||
|
||||
drm_crtc_enable_color_mgmt(&priv->crtc, 0, false,
|
||||
@ -1230,7 +1238,7 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
|
||||
if (ret) {
|
||||
dev_err(dev, "Failed to register overlay plane: %i\n",
|
||||
ret);
|
||||
return ret;
|
||||
goto err_drvdata;
|
||||
}
|
||||
|
||||
if (soc_info->map_noncoherent)
|
||||
@ -1241,17 +1249,18 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
|
||||
if (ret) {
|
||||
if (ret != -EPROBE_DEFER)
|
||||
dev_err(dev, "Failed to bind components: %i\n", ret);
|
||||
return ret;
|
||||
goto err_drvdata;
|
||||
}
|
||||
|
||||
ret = devm_add_action_or_reset(dev, ingenic_drm_unbind_all, priv);
|
||||
if (ret)
|
||||
return ret;
|
||||
goto err_drvdata;
|
||||
|
||||
priv->ipu_plane = drm_plane_from_index(drm, 2);
|
||||
if (!priv->ipu_plane) {
|
||||
dev_err(dev, "Failed to retrieve IPU plane\n");
|
||||
return -EINVAL;
|
||||
ret = -EINVAL;
|
||||
goto err_drvdata;
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -1263,7 +1272,7 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
|
||||
break; /* we're done */
|
||||
if (ret != -EPROBE_DEFER)
|
||||
dev_err(dev, "Failed to get bridge handle\n");
|
||||
return ret;
|
||||
goto err_drvdata;
|
||||
}
|
||||
|
||||
if (panel)
|
||||
@ -1275,7 +1284,7 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
|
||||
if (IS_ERR(ib)) {
|
||||
ret = PTR_ERR(ib);
|
||||
dev_err(dev, "Failed to init encoder: %d\n", ret);
|
||||
return ret;
|
||||
goto err_drvdata;
|
||||
}
|
||||
|
||||
encoder = &ib->encoder;
|
||||
@ -1290,13 +1299,14 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
|
||||
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
|
||||
if (ret) {
|
||||
dev_err(dev, "Unable to attach bridge\n");
|
||||
return ret;
|
||||
goto err_drvdata;
|
||||
}
|
||||
|
||||
connector = drm_bridge_connector_init(drm, encoder);
|
||||
if (IS_ERR(connector)) {
|
||||
dev_err(dev, "Unable to init connector\n");
|
||||
return PTR_ERR(connector);
|
||||
ret = PTR_ERR(connector);
|
||||
goto err_drvdata;
|
||||
}
|
||||
|
||||
drm_connector_attach_encoder(connector, encoder);
|
||||
@ -1313,13 +1323,13 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
|
||||
ret = devm_request_irq(dev, irq, ingenic_drm_irq_handler, 0, drm->driver->name, drm);
|
||||
if (ret) {
|
||||
dev_err(dev, "Unable to install IRQ handler\n");
|
||||
return ret;
|
||||
goto err_drvdata;
|
||||
}
|
||||
|
||||
ret = drm_vblank_init(drm, 1);
|
||||
if (ret) {
|
||||
dev_err(dev, "Failed calling drm_vblank_init()\n");
|
||||
return ret;
|
||||
goto err_drvdata;
|
||||
}
|
||||
|
||||
drm_mode_config_reset(drm);
|
||||
@ -1327,7 +1337,7 @@ static int ingenic_drm_bind(struct device *dev, bool has_components)
|
||||
ret = clk_prepare_enable(priv->pix_clk);
|
||||
if (ret) {
|
||||
dev_err(dev, "Unable to start pixel clock\n");
|
||||
return ret;
|
||||
goto err_drvdata;
|
||||
}
|
||||
|
||||
if (priv->lcd_clk) {
|
||||
@ -1402,6 +1412,8 @@ err_devclk_disable:
|
||||
clk_disable_unprepare(priv->lcd_clk);
|
||||
err_pixclk_disable:
|
||||
clk_disable_unprepare(priv->pix_clk);
|
||||
err_drvdata:
|
||||
platform_set_drvdata(pdev, NULL);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -1422,6 +1434,7 @@ static void ingenic_drm_unbind(struct device *dev)
|
||||
|
||||
drm_dev_unregister(&priv->drm);
|
||||
drm_atomic_helper_shutdown(&priv->drm);
|
||||
dev_set_drvdata(dev, NULL);
|
||||
}
|
||||
|
||||
static const struct component_master_ops ingenic_master_ops = {
|
||||
@ -1459,6 +1472,14 @@ static void ingenic_drm_remove(struct platform_device *pdev)
|
||||
component_master_del(dev, &ingenic_master_ops);
|
||||
}
|
||||
|
||||
static void ingenic_drm_shutdown(struct platform_device *pdev)
|
||||
{
|
||||
struct ingenic_drm *priv = platform_get_drvdata(pdev);
|
||||
|
||||
if (priv)
|
||||
drm_atomic_helper_shutdown(&priv->drm);
|
||||
}
|
||||
|
||||
static int ingenic_drm_suspend(struct device *dev)
|
||||
{
|
||||
struct ingenic_drm *priv = dev_get_drvdata(dev);
|
||||
@ -1610,6 +1631,7 @@ static struct platform_driver ingenic_drm_driver = {
|
||||
},
|
||||
.probe = ingenic_drm_probe,
|
||||
.remove_new = ingenic_drm_remove,
|
||||
.shutdown = ingenic_drm_shutdown,
|
||||
};
|
||||
|
||||
static int ingenic_drm_init(void)
|
||||
|
@ -482,6 +482,14 @@ static void logicvc_drm_remove(struct platform_device *pdev)
|
||||
of_reserved_mem_device_release(dev);
|
||||
}
|
||||
|
||||
static void logicvc_drm_shutdown(struct platform_device *pdev)
|
||||
{
|
||||
struct logicvc_drm *logicvc = platform_get_drvdata(pdev);
|
||||
struct drm_device *drm_dev = &logicvc->drm_dev;
|
||||
|
||||
drm_atomic_helper_shutdown(drm_dev);
|
||||
}
|
||||
|
||||
static const struct of_device_id logicvc_drm_of_table[] = {
|
||||
{ .compatible = "xylon,logicvc-3.02.a-display" },
|
||||
{ .compatible = "xylon,logicvc-4.01.a-display" },
|
||||
@ -492,6 +500,7 @@ MODULE_DEVICE_TABLE(of, logicvc_drm_of_table);
|
||||
static struct platform_driver logicvc_drm_platform_driver = {
|
||||
.probe = logicvc_drm_probe,
|
||||
.remove_new = logicvc_drm_remove,
|
||||
.shutdown = logicvc_drm_shutdown,
|
||||
.driver = {
|
||||
.name = "logicvc-drm",
|
||||
.of_match_table = logicvc_drm_of_table,
|
||||
|
@ -327,6 +327,11 @@ static void lsdc_pci_remove(struct pci_dev *pdev)
|
||||
drm_atomic_helper_shutdown(ddev);
|
||||
}
|
||||
|
||||
static void lsdc_pci_shutdown(struct pci_dev *pdev)
|
||||
{
|
||||
drm_atomic_helper_shutdown(pci_get_drvdata(pdev));
|
||||
}
|
||||
|
||||
static int lsdc_drm_freeze(struct drm_device *ddev)
|
||||
{
|
||||
struct lsdc_device *ldev = to_lsdc(ddev);
|
||||
@ -447,6 +452,7 @@ struct pci_driver lsdc_pci_driver = {
|
||||
.id_table = lsdc_pciid_list,
|
||||
.probe = lsdc_pci_probe,
|
||||
.remove = lsdc_pci_remove,
|
||||
.shutdown = lsdc_pci_shutdown,
|
||||
.driver.pm = &lsdc_pm_ops,
|
||||
};
|
||||
|
||||
|
@ -459,6 +459,14 @@ static void mcde_remove(struct platform_device *pdev)
|
||||
regulator_disable(mcde->epod);
|
||||
}
|
||||
|
||||
static void mcde_shutdown(struct platform_device *pdev)
|
||||
{
|
||||
struct drm_device *drm = platform_get_drvdata(pdev);
|
||||
|
||||
if (drm->registered)
|
||||
drm_atomic_helper_shutdown(drm);
|
||||
}
|
||||
|
||||
static const struct of_device_id mcde_of_match[] = {
|
||||
{
|
||||
.compatible = "ste,mcde",
|
||||
@ -473,6 +481,7 @@ static struct platform_driver mcde_driver = {
|
||||
},
|
||||
.probe = mcde_probe,
|
||||
.remove_new = mcde_remove,
|
||||
.shutdown = mcde_shutdown,
|
||||
};
|
||||
|
||||
static struct platform_driver *const component_drivers[] = {
|
||||
|
@ -10,6 +10,7 @@
|
||||
#include <linux/pci.h>
|
||||
|
||||
#include <drm/drm_aperture.h>
|
||||
#include <drm/drm_atomic_helper.h>
|
||||
#include <drm/drm_drv.h>
|
||||
#include <drm/drm_fbdev_generic.h>
|
||||
#include <drm/drm_file.h>
|
||||
@ -278,6 +279,12 @@ static void mgag200_pci_remove(struct pci_dev *pdev)
|
||||
struct drm_device *dev = pci_get_drvdata(pdev);
|
||||
|
||||
drm_dev_unregister(dev);
|
||||
drm_atomic_helper_shutdown(dev);
|
||||
}
|
||||
|
||||
static void mgag200_pci_shutdown(struct pci_dev *pdev)
|
||||
{
|
||||
drm_atomic_helper_shutdown(pci_get_drvdata(pdev));
|
||||
}
|
||||
|
||||
static struct pci_driver mgag200_pci_driver = {
|
||||
@ -285,6 +292,7 @@ static struct pci_driver mgag200_pci_driver = {
|
||||
.id_table = mgag200_pciidlist,
|
||||
.probe = mgag200_pci_probe,
|
||||
.remove = mgag200_pci_remove,
|
||||
.shutdown = mgag200_pci_shutdown,
|
||||
};
|
||||
|
||||
drm_module_pci_driver_if_modeset(mgag200_pci_driver, mgag200_modeset);
|
||||
|
@ -11,6 +11,7 @@ config DRM_NOUVEAU
|
||||
select DRM_TTM
|
||||
select DRM_TTM_HELPER
|
||||
select DRM_EXEC
|
||||
select DRM_GPUVM
|
||||
select DRM_SCHED
|
||||
select I2C
|
||||
select I2C_ALGOBIT
|
||||
|
@ -256,7 +256,7 @@ nv04_display_create(struct drm_device *dev)
|
||||
for (i = 0; i < dcb->entries; i++) {
|
||||
struct dcb_output *dcbent = &dcb->entry[i];
|
||||
|
||||
connector = nouveau_connector_create(dev, dcbent);
|
||||
connector = nouveau_connector_create(dev, dcbent->connector);
|
||||
if (IS_ERR(connector))
|
||||
continue;
|
||||
|
||||
|
@ -66,8 +66,6 @@
|
||||
#include "nouveau_fence.h"
|
||||
#include "nv50_display.h"
|
||||
|
||||
#include <subdev/bios/dp.h>
|
||||
|
||||
/******************************************************************************
|
||||
* EVO channel
|
||||
*****************************************************************************/
|
||||
@ -477,7 +475,6 @@ nv50_dac_atomic_disable(struct drm_encoder *encoder, struct drm_atomic_state *st
|
||||
|
||||
core->func->dac->ctrl(core, nv_encoder->outp.or.id, ctrl, NULL);
|
||||
nv_encoder->crtc = NULL;
|
||||
nvif_outp_release(&nv_encoder->outp);
|
||||
}
|
||||
|
||||
static void
|
||||
@ -502,7 +499,8 @@ nv50_dac_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *sta
|
||||
|
||||
ctrl |= NVDEF(NV507D, DAC_SET_CONTROL, PROTOCOL, RGB_CRT);
|
||||
|
||||
nvif_outp_acquire_rgb_crt(&nv_encoder->outp);
|
||||
if (!nvif_outp_acquired(&nv_encoder->outp))
|
||||
nvif_outp_acquire_dac(&nv_encoder->outp);
|
||||
|
||||
core->func->dac->ctrl(core, nv_encoder->outp.or.id, ctrl, asyh);
|
||||
asyh->or.depth = 0;
|
||||
@ -553,34 +551,27 @@ nv50_dac_func = {
|
||||
};
|
||||
|
||||
static int
|
||||
nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
|
||||
nv50_dac_create(struct nouveau_encoder *nv_encoder)
|
||||
{
|
||||
struct drm_connector *connector = &nv_encoder->conn->base;
|
||||
struct nouveau_drm *drm = nouveau_drm(connector->dev);
|
||||
struct nv50_disp *disp = nv50_disp(connector->dev);
|
||||
struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
|
||||
struct nvkm_i2c_bus *bus;
|
||||
struct nouveau_encoder *nv_encoder;
|
||||
struct drm_encoder *encoder;
|
||||
struct dcb_output *dcbe = nv_encoder->dcb;
|
||||
int type = DRM_MODE_ENCODER_DAC;
|
||||
|
||||
nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
|
||||
if (!nv_encoder)
|
||||
return -ENOMEM;
|
||||
nv_encoder->dcb = dcbe;
|
||||
|
||||
bus = nvkm_i2c_bus_find(i2c, dcbe->i2c_index);
|
||||
if (bus)
|
||||
nv_encoder->i2c = &bus->i2c;
|
||||
|
||||
encoder = to_drm_encoder(nv_encoder);
|
||||
encoder->possible_crtcs = dcbe->heads;
|
||||
encoder->possible_clones = 0;
|
||||
drm_encoder_init(connector->dev, encoder, &nv50_dac_func, type,
|
||||
"dac-%04x-%04x", dcbe->hasht, dcbe->hashm);
|
||||
drm_encoder_helper_add(encoder, &nv50_dac_help);
|
||||
|
||||
drm_connector_attach_encoder(connector, encoder);
|
||||
return nvif_outp_ctor(disp->disp, nv_encoder->base.base.name, dcbe->id, &nv_encoder->outp);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -617,7 +608,7 @@ nv50_audio_component_get_eld(struct device *kdev, int port, int dev_id,
|
||||
continue; /* TODO */
|
||||
|
||||
nv_encoder = nouveau_encoder(encoder);
|
||||
nv_connector = nouveau_connector(nv_encoder->audio.connector);
|
||||
nv_connector = nv_encoder->conn;
|
||||
nv_crtc = nouveau_crtc(nv_encoder->crtc);
|
||||
|
||||
if (!nv_crtc || nv_encoder->outp.or.id != port || nv_crtc->index != dev_id)
|
||||
@ -713,6 +704,18 @@ nv50_audio_supported(struct drm_encoder *encoder)
|
||||
disp->disp->object.oclass == GT206_DISP)
|
||||
return false;
|
||||
|
||||
if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
|
||||
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
|
||||
|
||||
switch (nv_encoder->dcb->type) {
|
||||
case DCB_OUTPUT_TMDS:
|
||||
case DCB_OUTPUT_DP:
|
||||
break;
|
||||
default:
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -729,7 +732,6 @@ nv50_audio_disable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc)
|
||||
mutex_lock(&drm->audio.lock);
|
||||
if (nv_encoder->audio.enabled) {
|
||||
nv_encoder->audio.enabled = false;
|
||||
nv_encoder->audio.connector = NULL;
|
||||
nvif_outp_hda_eld(&nv_encoder->outp, nv_crtc->index, NULL, 0);
|
||||
}
|
||||
mutex_unlock(&drm->audio.lock);
|
||||
@ -754,7 +756,6 @@ nv50_audio_enable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc,
|
||||
nvif_outp_hda_eld(&nv_encoder->outp, nv_crtc->index, nv_connector->base.eld,
|
||||
drm_eld_size(nv_connector->base.eld));
|
||||
nv_encoder->audio.enabled = true;
|
||||
nv_encoder->audio.connector = &nv_connector->base;
|
||||
|
||||
mutex_unlock(&drm->audio.lock);
|
||||
|
||||
@ -774,7 +775,6 @@ nv50_hdmi_enable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc,
|
||||
struct drm_hdmi_info *hdmi = &nv_connector->base.display_info.hdmi;
|
||||
union hdmi_infoframe infoframe = { 0 };
|
||||
const u8 rekey = 56; /* binary driver, and tegra, constant */
|
||||
u8 scdc = 0;
|
||||
u32 max_ac_packet;
|
||||
struct {
|
||||
struct nvif_outp_infoframe_v0 infoframe;
|
||||
@ -787,8 +787,9 @@ nv50_hdmi_enable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc,
|
||||
max_ac_packet -= 18; /* constant from tegra */
|
||||
max_ac_packet /= 32;
|
||||
|
||||
if (hdmi->scdc.scrambling.supported) {
|
||||
if (nv_encoder->i2c && hdmi->scdc.scrambling.supported) {
|
||||
const bool high_tmds_clock_ratio = mode->clock > 340000;
|
||||
u8 scdc;
|
||||
|
||||
ret = drm_scdc_readb(nv_encoder->i2c, SCDC_TMDS_CONFIG, &scdc);
|
||||
if (ret < 0) {
|
||||
@ -808,8 +809,9 @@ nv50_hdmi_enable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc,
|
||||
scdc, ret);
|
||||
}
|
||||
|
||||
ret = nvif_outp_acquire_tmds(&nv_encoder->outp, nv_crtc->index, true,
|
||||
max_ac_packet, rekey, scdc, hda);
|
||||
ret = nvif_outp_hdmi(&nv_encoder->outp, nv_crtc->index, true, max_ac_packet, rekey,
|
||||
mode->clock, hdmi->scdc.supported, hdmi->scdc.scrambling.supported,
|
||||
hdmi->scdc.scrambling.low_rates);
|
||||
if (ret)
|
||||
return;
|
||||
|
||||
@ -838,7 +840,7 @@ nv50_hdmi_enable(struct drm_encoder *encoder, struct nouveau_crtc *nv_crtc,
|
||||
|
||||
nvif_outp_infoframe(&nv_encoder->outp, NVIF_OUTP_INFOFRAME_V0_VSI, &args.infoframe, size);
|
||||
|
||||
nv50_audio_enable(encoder, nv_crtc, nv_connector, state, mode);
|
||||
nv_encoder->hdmi.enabled = true;
|
||||
}
|
||||
|
||||
/******************************************************************************
|
||||
@ -865,6 +867,8 @@ struct nv50_msto {
|
||||
struct nv50_mstc *mstc;
|
||||
bool disabled;
|
||||
bool enabled;
|
||||
|
||||
u32 display_id;
|
||||
};
|
||||
|
||||
struct nouveau_encoder *nv50_real_outp(struct drm_encoder *encoder)
|
||||
@ -893,10 +897,17 @@ nv50_msto_cleanup(struct drm_atomic_state *state,
|
||||
drm_atomic_get_old_mst_topology_state(state, mgr);
|
||||
const struct drm_dp_mst_atomic_payload *old_payload =
|
||||
drm_atomic_get_mst_payload_state(old_mst_state, msto->mstc->port);
|
||||
struct nv50_mstc *mstc = msto->mstc;
|
||||
struct nv50_mstm *mstm = mstc->mstm;
|
||||
|
||||
NV_ATOMIC(drm, "%s: msto cleanup\n", msto->encoder.name);
|
||||
|
||||
if (msto->disabled) {
|
||||
if (msto->head->func->display_id) {
|
||||
nvif_outp_dp_mst_id_put(&mstm->outp->outp, msto->display_id);
|
||||
msto->display_id = 0;
|
||||
}
|
||||
|
||||
msto->mstc = NULL;
|
||||
msto->disabled = false;
|
||||
drm_dp_remove_payload_part2(mgr, new_mst_state, old_payload, new_payload);
|
||||
@ -916,23 +927,27 @@ nv50_msto_prepare(struct drm_atomic_state *state,
|
||||
struct nv50_mstc *mstc = msto->mstc;
|
||||
struct nv50_mstm *mstm = mstc->mstm;
|
||||
struct drm_dp_mst_atomic_payload *payload;
|
||||
int ret = 0;
|
||||
|
||||
NV_ATOMIC(drm, "%s: msto prepare\n", msto->encoder.name);
|
||||
|
||||
payload = drm_atomic_get_mst_payload_state(mst_state, mstc->port);
|
||||
|
||||
// TODO: Figure out if we want to do a better job of handling VCPI allocation failures here?
|
||||
if (msto->disabled) {
|
||||
drm_dp_remove_payload_part1(mgr, mst_state, payload);
|
||||
|
||||
nvif_outp_dp_mst_vcpi(&mstm->outp->outp, msto->head->base.index, 0, 0, 0, 0);
|
||||
ret = 1;
|
||||
} else {
|
||||
if (msto->enabled)
|
||||
drm_dp_add_payload_part1(mgr, mst_state, payload);
|
||||
ret = drm_dp_add_payload_part1(mgr, mst_state, payload);
|
||||
}
|
||||
|
||||
if (ret == 0) {
|
||||
nvif_outp_dp_mst_vcpi(&mstm->outp->outp, msto->head->base.index,
|
||||
payload->vc_start_slot, payload->time_slots,
|
||||
payload->pbn, payload->time_slots * mst_state->pbn_div);
|
||||
} else {
|
||||
nvif_outp_dp_mst_vcpi(&mstm->outp->outp, msto->head->base.index, 0, 0, 0, 0);
|
||||
}
|
||||
}
|
||||
|
||||
@ -1029,8 +1044,13 @@ nv50_msto_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *st
|
||||
return;
|
||||
|
||||
if (!mstm->links++) {
|
||||
/*XXX: MST audio. */
|
||||
nvif_outp_acquire_dp(&mstm->outp->outp, mstm->outp->dp.dpcd, 0, 0, false, true);
|
||||
nvif_outp_acquire_sor(&mstm->outp->outp, false /*TODO: MST audio... */);
|
||||
nouveau_dp_train(mstm->outp, true, 0, 0);
|
||||
}
|
||||
|
||||
if (head->func->display_id) {
|
||||
if (!WARN_ON(nvif_outp_dp_mst_id_get(&mstm->outp->outp, &msto->display_id)))
|
||||
head->func->display_id(head, msto->display_id);
|
||||
}
|
||||
|
||||
if (mstm->outp->outp.or.link & 1)
|
||||
@ -1053,6 +1073,9 @@ nv50_msto_atomic_disable(struct drm_encoder *encoder, struct drm_atomic_state *s
|
||||
struct nv50_mstc *mstc = msto->mstc;
|
||||
struct nv50_mstm *mstm = mstc->mstm;
|
||||
|
||||
if (msto->head->func->display_id)
|
||||
msto->head->func->display_id(msto->head, 0);
|
||||
|
||||
mstm->outp->update(mstm->outp, msto->head->base.index, NULL, 0, 0);
|
||||
mstm->modified = true;
|
||||
if (!--mstm->links)
|
||||
@ -1291,6 +1314,12 @@ nv50_mstm_cleanup(struct drm_atomic_state *state,
|
||||
}
|
||||
}
|
||||
|
||||
if (mstm->disabled) {
|
||||
nouveau_dp_power_down(mstm->outp);
|
||||
nvif_outp_release(&mstm->outp->outp);
|
||||
mstm->disabled = false;
|
||||
}
|
||||
|
||||
mstm->modified = false;
|
||||
}
|
||||
|
||||
@ -1325,12 +1354,6 @@ nv50_mstm_prepare(struct drm_atomic_state *state,
|
||||
nv50_msto_prepare(state, mst_state, &mstm->mgr, msto);
|
||||
}
|
||||
}
|
||||
|
||||
if (mstm->disabled) {
|
||||
if (!mstm->links)
|
||||
nvif_outp_release(&mstm->outp->outp);
|
||||
mstm->disabled = false;
|
||||
}
|
||||
}
|
||||
|
||||
static struct drm_connector *
|
||||
@ -1536,7 +1559,7 @@ static void
|
||||
nv50_sor_atomic_disable(struct drm_encoder *encoder, struct drm_atomic_state *state)
|
||||
{
|
||||
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
|
||||
struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);
|
||||
struct nv50_head *head = nv50_head(nv_encoder->crtc);
|
||||
struct nouveau_connector *nv_connector = nv50_outp_get_old_connector(state, nv_encoder);
|
||||
#ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
|
||||
struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
|
||||
@ -1544,7 +1567,6 @@ nv50_sor_atomic_disable(struct drm_encoder *encoder, struct drm_atomic_state *st
|
||||
#endif
|
||||
struct drm_dp_aux *aux = &nv_connector->aux;
|
||||
int ret;
|
||||
u8 pwr;
|
||||
|
||||
#ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
|
||||
if (backlight && backlight->uses_dpcd) {
|
||||
@ -1555,19 +1577,20 @@ nv50_sor_atomic_disable(struct drm_encoder *encoder, struct drm_atomic_state *st
|
||||
}
|
||||
#endif
|
||||
|
||||
if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
|
||||
ret = drm_dp_dpcd_readb(aux, DP_SET_POWER, &pwr);
|
||||
|
||||
if (ret == 0) {
|
||||
pwr &= ~DP_SET_POWER_MASK;
|
||||
pwr |= DP_SET_POWER_D3;
|
||||
drm_dp_dpcd_writeb(aux, DP_SET_POWER, pwr);
|
||||
}
|
||||
if (nv_encoder->dcb->type == DCB_OUTPUT_TMDS && nv_encoder->hdmi.enabled) {
|
||||
nvif_outp_hdmi(&nv_encoder->outp, head->base.index,
|
||||
false, 0, 0, 0, false, false, false);
|
||||
nv_encoder->hdmi.enabled = false;
|
||||
}
|
||||
|
||||
nv_encoder->update(nv_encoder, nv_crtc->index, NULL, 0, 0);
|
||||
nv50_audio_disable(encoder, nv_crtc);
|
||||
nvif_outp_release(&nv_encoder->outp);
|
||||
if (nv_encoder->dcb->type == DCB_OUTPUT_DP)
|
||||
nouveau_dp_power_down(nv_encoder);
|
||||
|
||||
if (head->func->display_id)
|
||||
head->func->display_id(head, 0);
|
||||
|
||||
nv_encoder->update(nv_encoder, head->base.index, NULL, 0, 0);
|
||||
nv50_audio_disable(encoder, &head->base);
|
||||
nv_encoder->crtc = NULL;
|
||||
}
|
||||
|
||||
@ -1580,6 +1603,7 @@ nv50_sor_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *sta
|
||||
nv50_head_atom(drm_atomic_get_new_crtc_state(state, &nv_crtc->base));
|
||||
struct drm_display_mode *mode = &asyh->state.adjusted_mode;
|
||||
struct nv50_disp *disp = nv50_disp(encoder->dev);
|
||||
struct nv50_head *head = nv50_head(&nv_crtc->base);
|
||||
struct nvif_outp *outp = &nv_encoder->outp;
|
||||
struct drm_device *dev = encoder->dev;
|
||||
struct nouveau_drm *drm = nouveau_drm(dev);
|
||||
@ -1597,15 +1621,17 @@ nv50_sor_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *sta
|
||||
|
||||
if ((disp->disp->object.oclass == GT214_DISP ||
|
||||
disp->disp->object.oclass >= GF110_DISP) &&
|
||||
nv_encoder->dcb->type != DCB_OUTPUT_LVDS &&
|
||||
drm_detect_monitor_audio(nv_connector->edid))
|
||||
hda = true;
|
||||
|
||||
if (!nvif_outp_acquired(outp))
|
||||
nvif_outp_acquire_sor(outp, hda);
|
||||
|
||||
switch (nv_encoder->dcb->type) {
|
||||
case DCB_OUTPUT_TMDS:
|
||||
if (disp->disp->object.oclass == NV50_DISP ||
|
||||
!drm_detect_hdmi_monitor(nv_connector->edid))
|
||||
nvif_outp_acquire_tmds(outp, nv_crtc->index, false, 0, 0, 0, false);
|
||||
else
|
||||
if (disp->disp->object.oclass != NV50_DISP &&
|
||||
drm_detect_hdmi_monitor(nv_connector->edid))
|
||||
nv50_hdmi_enable(encoder, nv_crtc, nv_connector, state, mode, hda);
|
||||
|
||||
if (nv_encoder->outp.or.link & 1) {
|
||||
@ -1651,10 +1677,10 @@ nv50_sor_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *sta
|
||||
lvds_8bpc = true;
|
||||
}
|
||||
|
||||
nvif_outp_acquire_lvds(&nv_encoder->outp, lvds_dual, lvds_8bpc);
|
||||
nvif_outp_lvds(&nv_encoder->outp, lvds_dual, lvds_8bpc);
|
||||
break;
|
||||
case DCB_OUTPUT_DP:
|
||||
nvif_outp_acquire_dp(&nv_encoder->outp, nv_encoder->dp.dpcd, 0, 0, hda, false);
|
||||
nouveau_dp_train(nv_encoder, false, mode->clock, asyh->or.bpc);
|
||||
depth = nv50_dp_bpc_to_depth(asyh->or.bpc);
|
||||
|
||||
if (nv_encoder->outp.or.link & 1)
|
||||
@ -1662,8 +1688,6 @@ nv50_sor_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *sta
|
||||
else
|
||||
proto = NV887D_SOR_SET_CONTROL_PROTOCOL_DP_B;
|
||||
|
||||
nv50_audio_enable(encoder, nv_crtc, nv_connector, state, mode);
|
||||
|
||||
#ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
|
||||
backlight = nv_connector->backlight;
|
||||
if (backlight && backlight->uses_dpcd)
|
||||
@ -1677,6 +1701,9 @@ nv50_sor_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *sta
|
||||
break;
|
||||
}
|
||||
|
||||
if (head->func->display_id)
|
||||
head->func->display_id(head, BIT(nv_encoder->outp.id));
|
||||
|
||||
nv_encoder->update(nv_encoder, nv_crtc->index, asyh, proto, depth);
|
||||
}
|
||||
|
||||
@ -1692,14 +1719,13 @@ nv50_sor_destroy(struct drm_encoder *encoder)
|
||||
{
|
||||
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
|
||||
|
||||
nvif_outp_dtor(&nv_encoder->outp);
|
||||
|
||||
nv50_mstm_del(&nv_encoder->dp.mstm);
|
||||
drm_encoder_cleanup(encoder);
|
||||
|
||||
if (nv_encoder->dcb->type == DCB_OUTPUT_DP)
|
||||
mutex_destroy(&nv_encoder->dp.hpd_irq_lock);
|
||||
|
||||
nvif_outp_dtor(&nv_encoder->outp);
|
||||
kfree(encoder);
|
||||
}
|
||||
|
||||
@ -1708,24 +1734,15 @@ nv50_sor_func = {
|
||||
.destroy = nv50_sor_destroy,
|
||||
};
|
||||
|
||||
bool nv50_has_mst(struct nouveau_drm *drm)
|
||||
{
|
||||
struct nvkm_bios *bios = nvxx_bios(&drm->client.device);
|
||||
u32 data;
|
||||
u8 ver, hdr, cnt, len;
|
||||
|
||||
data = nvbios_dp_table(bios, &ver, &hdr, &cnt, &len);
|
||||
return data && ver >= 0x40 && (nvbios_rd08(bios, data + 0x08) & 0x04);
|
||||
}
|
||||
|
||||
static int
|
||||
nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
|
||||
nv50_sor_create(struct nouveau_encoder *nv_encoder)
|
||||
{
|
||||
struct drm_connector *connector = &nv_encoder->conn->base;
|
||||
struct nouveau_connector *nv_connector = nouveau_connector(connector);
|
||||
struct nouveau_drm *drm = nouveau_drm(connector->dev);
|
||||
struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
|
||||
struct nouveau_encoder *nv_encoder;
|
||||
struct drm_encoder *encoder;
|
||||
struct dcb_output *dcbe = nv_encoder->dcb;
|
||||
struct nv50_disp *disp = nv50_disp(connector->dev);
|
||||
int type, ret;
|
||||
|
||||
@ -1738,15 +1755,9 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
|
||||
break;
|
||||
}
|
||||
|
||||
nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
|
||||
if (!nv_encoder)
|
||||
return -ENOMEM;
|
||||
nv_encoder->dcb = dcbe;
|
||||
nv_encoder->update = nv50_sor_update;
|
||||
|
||||
encoder = to_drm_encoder(nv_encoder);
|
||||
encoder->possible_crtcs = dcbe->heads;
|
||||
encoder->possible_clones = 0;
|
||||
drm_encoder_init(connector->dev, encoder, &nv50_sor_func, type,
|
||||
"sor-%04x-%04x", dcbe->hasht, dcbe->hashm);
|
||||
drm_encoder_helper_add(encoder, &nv50_sor_help);
|
||||
@ -1757,40 +1768,40 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
|
||||
nv50_outp_dump_caps(drm, nv_encoder);
|
||||
|
||||
if (dcbe->type == DCB_OUTPUT_DP) {
|
||||
struct nvkm_i2c_aux *aux =
|
||||
nvkm_i2c_aux_find(i2c, dcbe->i2c_index);
|
||||
|
||||
mutex_init(&nv_encoder->dp.hpd_irq_lock);
|
||||
|
||||
if (aux) {
|
||||
if (disp->disp->object.oclass < GF110_DISP) {
|
||||
/* HW has no support for address-only
|
||||
* transactions, so we're required to
|
||||
* use custom I2C-over-AUX code.
|
||||
*/
|
||||
nv_encoder->i2c = &aux->i2c;
|
||||
} else {
|
||||
nv_encoder->i2c = &nv_connector->aux.ddc;
|
||||
}
|
||||
nv_encoder->aux = aux;
|
||||
if (disp->disp->object.oclass < GF110_DISP) {
|
||||
/* HW has no support for address-only
|
||||
* transactions, so we're required to
|
||||
* use custom I2C-over-AUX code.
|
||||
*/
|
||||
struct nvkm_i2c_aux *aux;
|
||||
|
||||
aux = nvkm_i2c_aux_find(i2c, dcbe->i2c_index);
|
||||
if (!aux)
|
||||
return -EINVAL;
|
||||
|
||||
nv_encoder->i2c = &aux->i2c;
|
||||
} else {
|
||||
nv_encoder->i2c = &nv_connector->aux.ddc;
|
||||
}
|
||||
|
||||
if (nv_connector->type != DCB_CONNECTOR_eDP &&
|
||||
nv50_has_mst(drm)) {
|
||||
if (nv_connector->type != DCB_CONNECTOR_eDP && nv_encoder->outp.info.dp.mst) {
|
||||
ret = nv50_mstm_new(nv_encoder, &nv_connector->aux,
|
||||
16, nv_connector->base.base.id,
|
||||
&nv_encoder->dp.mstm);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
} else {
|
||||
} else
|
||||
if (nv_encoder->outp.info.ddc != NVIF_OUTP_DDC_INVALID) {
|
||||
struct nvkm_i2c_bus *bus =
|
||||
nvkm_i2c_bus_find(i2c, dcbe->i2c_index);
|
||||
if (bus)
|
||||
nv_encoder->i2c = &bus->i2c;
|
||||
}
|
||||
|
||||
return nvif_outp_ctor(disp->disp, nv_encoder->base.base.name, dcbe->id, &nv_encoder->outp);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/******************************************************************************
|
||||
@ -1817,7 +1828,6 @@ nv50_pior_atomic_disable(struct drm_encoder *encoder, struct drm_atomic_state *s
|
||||
|
||||
core->func->pior->ctrl(core, nv_encoder->outp.or.id, ctrl, NULL);
|
||||
nv_encoder->crtc = NULL;
|
||||
nvif_outp_release(&nv_encoder->outp);
|
||||
}
|
||||
|
||||
static void
|
||||
@ -1845,14 +1855,16 @@ nv50_pior_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *st
|
||||
default: asyh->or.depth = NV837D_PIOR_SET_CONTROL_PIXEL_DEPTH_DEFAULT; break;
|
||||
}
|
||||
|
||||
if (!nvif_outp_acquired(&nv_encoder->outp))
|
||||
nvif_outp_acquire_pior(&nv_encoder->outp);
|
||||
|
||||
switch (nv_encoder->dcb->type) {
|
||||
case DCB_OUTPUT_TMDS:
|
||||
ctrl |= NVDEF(NV507D, PIOR_SET_CONTROL, PROTOCOL, EXT_TMDS_ENC);
|
||||
nvif_outp_acquire_tmds(&nv_encoder->outp, false, false, 0, 0, 0, false);
|
||||
break;
|
||||
case DCB_OUTPUT_DP:
|
||||
ctrl |= NVDEF(NV507D, PIOR_SET_CONTROL, PROTOCOL, EXT_TMDS_ENC);
|
||||
nvif_outp_acquire_dp(&nv_encoder->outp, nv_encoder->dp.dpcd, 0, 0, false, false);
|
||||
nouveau_dp_train(nv_encoder, false, asyh->state.adjusted_mode.clock, 6);
|
||||
break;
|
||||
default:
|
||||
BUG();
|
||||
@ -1889,8 +1901,9 @@ nv50_pior_func = {
|
||||
};
|
||||
|
||||
static int
|
||||
nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
|
||||
nv50_pior_create(struct nouveau_encoder *nv_encoder)
|
||||
{
|
||||
struct drm_connector *connector = &nv_encoder->conn->base;
|
||||
struct drm_device *dev = connector->dev;
|
||||
struct nouveau_drm *drm = nouveau_drm(dev);
|
||||
struct nv50_disp *disp = nv50_disp(dev);
|
||||
@ -1898,18 +1911,18 @@ nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
|
||||
struct nvkm_i2c_bus *bus = NULL;
|
||||
struct nvkm_i2c_aux *aux = NULL;
|
||||
struct i2c_adapter *ddc;
|
||||
struct nouveau_encoder *nv_encoder;
|
||||
struct drm_encoder *encoder;
|
||||
struct dcb_output *dcbe = nv_encoder->dcb;
|
||||
int type;
|
||||
|
||||
switch (dcbe->type) {
|
||||
case DCB_OUTPUT_TMDS:
|
||||
bus = nvkm_i2c_bus_find(i2c, NVKM_I2C_BUS_EXT(dcbe->extdev));
|
||||
bus = nvkm_i2c_bus_find(i2c, nv_encoder->outp.info.ddc);
|
||||
ddc = bus ? &bus->i2c : NULL;
|
||||
type = DRM_MODE_ENCODER_TMDS;
|
||||
break;
|
||||
case DCB_OUTPUT_DP:
|
||||
aux = nvkm_i2c_aux_find(i2c, NVKM_I2C_AUX_EXT(dcbe->extdev));
|
||||
aux = nvkm_i2c_aux_find(i2c, nv_encoder->outp.info.dp.aux);
|
||||
ddc = aux ? &aux->i2c : NULL;
|
||||
type = DRM_MODE_ENCODER_TMDS;
|
||||
break;
|
||||
@ -1917,18 +1930,11 @@ nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
|
||||
if (!nv_encoder)
|
||||
return -ENOMEM;
|
||||
nv_encoder->dcb = dcbe;
|
||||
nv_encoder->i2c = ddc;
|
||||
nv_encoder->aux = aux;
|
||||
|
||||
mutex_init(&nv_encoder->dp.hpd_irq_lock);
|
||||
|
||||
encoder = to_drm_encoder(nv_encoder);
|
||||
encoder->possible_crtcs = dcbe->heads;
|
||||
encoder->possible_clones = 0;
|
||||
drm_encoder_init(connector->dev, encoder, &nv50_pior_func, type,
|
||||
"pior-%04x-%04x", dcbe->hasht, dcbe->hashm);
|
||||
drm_encoder_helper_add(encoder, &nv50_pior_help);
|
||||
@ -1938,7 +1944,7 @@ nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
|
||||
disp->core->func->pior->get_caps(disp, nv_encoder, ffs(dcbe->or) - 1);
|
||||
nv50_outp_dump_caps(drm, nv_encoder);
|
||||
|
||||
return nvif_outp_ctor(disp->disp, nv_encoder->base.base.name, dcbe->id, &nv_encoder->outp);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/******************************************************************************
|
||||
@ -1952,7 +1958,9 @@ nv50_disp_atomic_commit_core(struct drm_atomic_state *state, u32 *interlock)
|
||||
struct drm_dp_mst_topology_state *mst_state;
|
||||
struct nouveau_drm *drm = nouveau_drm(state->dev);
|
||||
struct nv50_disp *disp = nv50_disp(drm->dev);
|
||||
struct nv50_atom *atom = nv50_atom(state);
|
||||
struct nv50_core *core = disp->core;
|
||||
struct nv50_outp_atom *outp;
|
||||
struct nv50_mstm *mstm;
|
||||
int i;
|
||||
|
||||
@ -1975,6 +1983,23 @@ nv50_disp_atomic_commit_core(struct drm_atomic_state *state, u32 *interlock)
|
||||
if (mstm->modified)
|
||||
nv50_mstm_cleanup(state, mst_state, mstm);
|
||||
}
|
||||
|
||||
list_for_each_entry(outp, &atom->outp, head) {
|
||||
if (outp->encoder->encoder_type != DRM_MODE_ENCODER_DPMST) {
|
||||
struct nouveau_encoder *nv_encoder = nouveau_encoder(outp->encoder);
|
||||
|
||||
if (outp->enabled) {
|
||||
nv50_audio_enable(outp->encoder, nouveau_crtc(nv_encoder->crtc),
|
||||
nv_encoder->conn, NULL, NULL);
|
||||
outp->enabled = outp->disabled = false;
|
||||
} else {
|
||||
if (outp->disabled) {
|
||||
nvif_outp_release(&nv_encoder->outp);
|
||||
outp->disabled = false;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
@ -2066,14 +2091,8 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
|
||||
|
||||
if (outp->clr.mask) {
|
||||
help->atomic_disable(encoder, state);
|
||||
outp->disabled = true;
|
||||
interlock[NV50_DISP_INTERLOCK_CORE] |= 1;
|
||||
if (outp->flush_disable) {
|
||||
nv50_disp_atomic_commit_wndw(state, interlock);
|
||||
nv50_disp_atomic_commit_core(state, interlock);
|
||||
memset(interlock, 0x00, sizeof(interlock));
|
||||
|
||||
flushed = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -2093,7 +2112,7 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
|
||||
nv50_crc_atomic_init_notifier_contexts(state);
|
||||
|
||||
/* Update output path(s). */
|
||||
list_for_each_entry_safe(outp, outt, &atom->outp, head) {
|
||||
list_for_each_entry(outp, &atom->outp, head) {
|
||||
const struct drm_encoder_helper_funcs *help;
|
||||
struct drm_encoder *encoder;
|
||||
|
||||
@ -2105,11 +2124,9 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
|
||||
|
||||
if (outp->set.mask) {
|
||||
help->atomic_enable(encoder, state);
|
||||
outp->enabled = true;
|
||||
interlock[NV50_DISP_INTERLOCK_CORE] = 1;
|
||||
}
|
||||
|
||||
list_del(&outp->head);
|
||||
kfree(outp);
|
||||
}
|
||||
|
||||
/* Update head(s). */
|
||||
@ -2207,6 +2224,11 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
|
||||
if (atom->lock_core)
|
||||
mutex_unlock(&disp->mutex);
|
||||
|
||||
list_for_each_entry_safe(outp, outt, &atom->outp, head) {
|
||||
list_del(&outp->head);
|
||||
kfree(outp);
|
||||
}
|
||||
|
||||
/* Wait for HW to signal completion. */
|
||||
for_each_new_plane_in_state(state, plane, new_plane_state, i) {
|
||||
struct nv50_wndw_atom *asyw = nv50_wndw_atom(new_plane_state);
|
||||
@ -2355,10 +2377,9 @@ nv50_disp_outp_atomic_check_clr(struct nv50_atom *atom,
|
||||
if (IS_ERR(outp))
|
||||
return PTR_ERR(outp);
|
||||
|
||||
if (outp->encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
|
||||
outp->flush_disable = true;
|
||||
if (outp->encoder->encoder_type == DRM_MODE_ENCODER_DPMST ||
|
||||
nouveau_encoder(outp->encoder)->dcb->type == DCB_OUTPUT_DP)
|
||||
atom->flush_disable = true;
|
||||
}
|
||||
outp->clr.ctrl = true;
|
||||
atom->lock_core = true;
|
||||
}
|
||||
@ -2519,6 +2540,104 @@ nv50_display_fini(struct drm_device *dev, bool runtime, bool suspend)
|
||||
cancel_work_sync(&drm->hpd_work);
|
||||
}
|
||||
|
||||
static inline void
|
||||
nv50_display_read_hw_or_state(struct drm_device *dev, struct nv50_disp *disp,
|
||||
struct nouveau_encoder *outp)
|
||||
{
|
||||
struct drm_crtc *crtc;
|
||||
struct drm_connector_list_iter conn_iter;
|
||||
struct drm_connector *conn;
|
||||
struct nv50_head_atom *armh;
|
||||
const u32 encoder_mask = drm_encoder_mask(&outp->base.base);
|
||||
bool found_conn = false, found_head = false;
|
||||
u8 proto;
|
||||
int head_idx;
|
||||
int ret;
|
||||
|
||||
switch (outp->dcb->type) {
|
||||
case DCB_OUTPUT_TMDS:
|
||||
ret = nvif_outp_inherit_tmds(&outp->outp, &proto);
|
||||
break;
|
||||
case DCB_OUTPUT_DP:
|
||||
ret = nvif_outp_inherit_dp(&outp->outp, &proto);
|
||||
break;
|
||||
case DCB_OUTPUT_LVDS:
|
||||
ret = nvif_outp_inherit_lvds(&outp->outp, &proto);
|
||||
break;
|
||||
case DCB_OUTPUT_ANALOG:
|
||||
ret = nvif_outp_inherit_rgb_crt(&outp->outp, &proto);
|
||||
break;
|
||||
default:
|
||||
drm_dbg_kms(dev, "Readback for %s not implemented yet, skipping\n",
|
||||
outp->base.base.name);
|
||||
drm_WARN_ON(dev, true);
|
||||
return;
|
||||
}
|
||||
|
||||
if (ret < 0)
|
||||
return;
|
||||
|
||||
head_idx = ret;
|
||||
|
||||
drm_for_each_crtc(crtc, dev) {
|
||||
if (crtc->index != head_idx)
|
||||
continue;
|
||||
|
||||
armh = nv50_head_atom(crtc->state);
|
||||
found_head = true;
|
||||
break;
|
||||
}
|
||||
if (drm_WARN_ON(dev, !found_head))
|
||||
return;
|
||||
|
||||
/* Figure out which connector is being used by this encoder */
|
||||
drm_connector_list_iter_begin(dev, &conn_iter);
|
||||
nouveau_for_each_non_mst_connector_iter(conn, &conn_iter) {
|
||||
if (nouveau_connector(conn)->index == outp->dcb->connector) {
|
||||
found_conn = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
drm_connector_list_iter_end(&conn_iter);
|
||||
if (drm_WARN_ON(dev, !found_conn))
|
||||
return;
|
||||
|
||||
armh->state.encoder_mask = encoder_mask;
|
||||
armh->state.connector_mask = drm_connector_mask(conn);
|
||||
armh->state.active = true;
|
||||
armh->state.enable = true;
|
||||
pm_runtime_get_noresume(dev->dev);
|
||||
|
||||
outp->crtc = crtc;
|
||||
outp->ctrl = NVVAL(NV507D, SOR_SET_CONTROL, PROTOCOL, proto) | BIT(crtc->index);
|
||||
|
||||
drm_connector_get(conn);
|
||||
conn->state->crtc = crtc;
|
||||
conn->state->best_encoder = &outp->base.base;
|
||||
}
|
||||
|
||||
/* Read back the currently programmed display state */
|
||||
static void
|
||||
nv50_display_read_hw_state(struct nouveau_drm *drm)
|
||||
{
|
||||
struct drm_device *dev = drm->dev;
|
||||
struct drm_encoder *encoder;
|
||||
struct drm_modeset_acquire_ctx ctx;
|
||||
struct nv50_disp *disp = nv50_disp(dev);
|
||||
int ret;
|
||||
|
||||
DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, 0, ret);
|
||||
|
||||
drm_for_each_encoder(encoder, dev) {
|
||||
if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST)
|
||||
continue;
|
||||
|
||||
nv50_display_read_hw_or_state(dev, disp, nouveau_encoder(encoder));
|
||||
}
|
||||
|
||||
DRM_MODESET_LOCK_ALL_END(dev, ctx, ret);
|
||||
}
|
||||
|
||||
static int
|
||||
nv50_display_init(struct drm_device *dev, bool resume, bool runtime)
|
||||
{
|
||||
@ -2536,6 +2655,9 @@ nv50_display_init(struct drm_device *dev, bool resume, bool runtime)
|
||||
}
|
||||
}
|
||||
|
||||
if (!resume)
|
||||
nv50_display_read_hw_state(nouveau_drm(dev));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -2562,14 +2684,11 @@ nv50_display_destroy(struct drm_device *dev)
|
||||
int
|
||||
nv50_display_create(struct drm_device *dev)
|
||||
{
|
||||
struct nvif_device *device = &nouveau_drm(dev)->client.device;
|
||||
struct nouveau_drm *drm = nouveau_drm(dev);
|
||||
struct dcb_table *dcb = &drm->vbios.dcb;
|
||||
struct drm_connector *connector, *tmp;
|
||||
struct nv50_disp *disp;
|
||||
struct dcb_output *dcbe;
|
||||
int crtcs, ret, i;
|
||||
bool has_mst = nv50_has_mst(drm);
|
||||
int ret, i;
|
||||
bool has_mst = false;
|
||||
|
||||
disp = kzalloc(sizeof(*disp), GFP_KERNEL);
|
||||
if (!disp)
|
||||
@ -2645,20 +2764,92 @@ nv50_display_create(struct drm_device *dev)
|
||||
dev->mode_config.cursor_height = 64;
|
||||
}
|
||||
|
||||
/* create crtc objects to represent the hw heads */
|
||||
if (disp->disp->object.oclass >= GV100_DISP)
|
||||
crtcs = nvif_rd32(&device->object, 0x610060) & 0xff;
|
||||
else
|
||||
if (disp->disp->object.oclass >= GF110_DISP)
|
||||
crtcs = nvif_rd32(&device->object, 0x612004) & 0xf;
|
||||
else
|
||||
crtcs = 0x3;
|
||||
/* create encoder/connector objects based on VBIOS DCB table */
|
||||
for_each_set_bit(i, &disp->disp->outp_mask, sizeof(disp->disp->outp_mask) * 8) {
|
||||
struct nouveau_encoder *outp;
|
||||
|
||||
for (i = 0; i < fls(crtcs); i++) {
|
||||
struct nv50_head *head;
|
||||
outp = kzalloc(sizeof(*outp), GFP_KERNEL);
|
||||
if (!outp)
|
||||
break;
|
||||
|
||||
if (!(crtcs & (1 << i)))
|
||||
ret = nvif_outp_ctor(disp->disp, "kmsOutp", i, &outp->outp);
|
||||
if (ret) {
|
||||
kfree(outp);
|
||||
continue;
|
||||
}
|
||||
|
||||
connector = nouveau_connector_create(dev, outp->outp.info.conn);
|
||||
if (IS_ERR(connector)) {
|
||||
nvif_outp_dtor(&outp->outp);
|
||||
kfree(outp);
|
||||
continue;
|
||||
}
|
||||
|
||||
outp->base.base.possible_crtcs = outp->outp.info.heads;
|
||||
outp->base.base.possible_clones = 0;
|
||||
outp->conn = nouveau_connector(connector);
|
||||
|
||||
outp->dcb = kzalloc(sizeof(*outp->dcb), GFP_KERNEL);
|
||||
if (!outp->dcb)
|
||||
break;
|
||||
|
||||
switch (outp->outp.info.proto) {
|
||||
case NVIF_OUTP_RGB_CRT:
|
||||
outp->dcb->type = DCB_OUTPUT_ANALOG;
|
||||
outp->dcb->crtconf.maxfreq = outp->outp.info.rgb_crt.freq_max;
|
||||
break;
|
||||
case NVIF_OUTP_TMDS:
|
||||
outp->dcb->type = DCB_OUTPUT_TMDS;
|
||||
outp->dcb->duallink_possible = outp->outp.info.tmds.dual;
|
||||
break;
|
||||
case NVIF_OUTP_LVDS:
|
||||
outp->dcb->type = DCB_OUTPUT_LVDS;
|
||||
outp->dcb->lvdsconf.use_acpi_for_edid = outp->outp.info.lvds.acpi_edid;
|
||||
break;
|
||||
case NVIF_OUTP_DP:
|
||||
outp->dcb->type = DCB_OUTPUT_DP;
|
||||
outp->dcb->dpconf.link_nr = outp->outp.info.dp.link_nr;
|
||||
outp->dcb->dpconf.link_bw = outp->outp.info.dp.link_bw;
|
||||
if (outp->outp.info.dp.mst)
|
||||
has_mst = true;
|
||||
break;
|
||||
default:
|
||||
WARN_ON(1);
|
||||
continue;
|
||||
}
|
||||
|
||||
outp->dcb->heads = outp->outp.info.heads;
|
||||
outp->dcb->connector = outp->outp.info.conn;
|
||||
outp->dcb->i2c_index = outp->outp.info.ddc;
|
||||
|
||||
switch (outp->outp.info.type) {
|
||||
case NVIF_OUTP_DAC : ret = nv50_dac_create(outp); break;
|
||||
case NVIF_OUTP_SOR : ret = nv50_sor_create(outp); break;
|
||||
case NVIF_OUTP_PIOR: ret = nv50_pior_create(outp); break;
|
||||
default:
|
||||
WARN_ON(1);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (ret) {
|
||||
NV_WARN(drm, "failed to create encoder %d/%d/%d: %d\n",
|
||||
i, outp->outp.info.type, outp->outp.info.proto, ret);
|
||||
}
|
||||
}
|
||||
|
||||
/* cull any connectors we created that don't have an encoder */
|
||||
list_for_each_entry_safe(connector, tmp, &dev->mode_config.connector_list, head) {
|
||||
if (connector->possible_encoders)
|
||||
continue;
|
||||
|
||||
NV_WARN(drm, "%s has no encoders, removing\n",
|
||||
connector->name);
|
||||
connector->funcs->destroy(connector);
|
||||
}
|
||||
|
||||
/* create crtc objects to represent the hw heads */
|
||||
for_each_set_bit(i, &disp->disp->head_mask, sizeof(disp->disp->head_mask) * 8) {
|
||||
struct nv50_head *head;
|
||||
|
||||
head = nv50_head_create(dev, i);
|
||||
if (IS_ERR(head)) {
|
||||
@ -2684,52 +2875,10 @@ nv50_display_create(struct drm_device *dev)
|
||||
* Once these issues are closed, this should be
|
||||
* removed
|
||||
*/
|
||||
head->msto->encoder.possible_crtcs = crtcs;
|
||||
head->msto->encoder.possible_crtcs = disp->disp->head_mask;
|
||||
}
|
||||
}
|
||||
|
||||
/* create encoder/connector objects based on VBIOS DCB table */
|
||||
for (i = 0, dcbe = &dcb->entry[0]; i < dcb->entries; i++, dcbe++) {
|
||||
connector = nouveau_connector_create(dev, dcbe);
|
||||
if (IS_ERR(connector))
|
||||
continue;
|
||||
|
||||
if (dcbe->location == DCB_LOC_ON_CHIP) {
|
||||
switch (dcbe->type) {
|
||||
case DCB_OUTPUT_TMDS:
|
||||
case DCB_OUTPUT_LVDS:
|
||||
case DCB_OUTPUT_DP:
|
||||
ret = nv50_sor_create(connector, dcbe);
|
||||
break;
|
||||
case DCB_OUTPUT_ANALOG:
|
||||
ret = nv50_dac_create(connector, dcbe);
|
||||
break;
|
||||
default:
|
||||
ret = -ENODEV;
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
ret = nv50_pior_create(connector, dcbe);
|
||||
}
|
||||
|
||||
if (ret) {
|
||||
NV_WARN(drm, "failed to create encoder %d/%d/%d: %d\n",
|
||||
dcbe->location, dcbe->type,
|
||||
ffs(dcbe->or) - 1, ret);
|
||||
ret = 0;
|
||||
}
|
||||
}
|
||||
|
||||
/* cull any connectors we created that don't have an encoder */
|
||||
list_for_each_entry_safe(connector, tmp, &dev->mode_config.connector_list, head) {
|
||||
if (connector->possible_encoders)
|
||||
continue;
|
||||
|
||||
NV_WARN(drm, "%s has no encoders, removing\n",
|
||||
connector->name);
|
||||
connector->funcs->destroy(connector);
|
||||
}
|
||||
|
||||
/* Disable vblank irqs aggressively for power-saving, safe on nv50+ */
|
||||
dev->vblank_disable_immediate = true;
|
||||
|
||||
|
@@ -83,7 +83,9 @@ struct nv50_outp_atom {
	struct list_head head;

	struct drm_encoder *encoder;
	bool flush_disable;

	bool disabled;
	bool enabled;

	union nv50_outp_atom_mask {
		struct {
@@ -106,8 +108,6 @@ void nv50_dmac_destroy(struct nv50_dmac *);
 */
struct nouveau_encoder *nv50_real_outp(struct drm_encoder *encoder);

bool nv50_has_mst(struct nouveau_drm *drm);

u32 *evo_wait(struct nv50_dmac *, int nr);
void evo_kick(u32 *, struct nv50_dmac *);

@@ -49,6 +49,7 @@ struct nv50_head_func {
	int (*procamp)(struct nv50_head *, struct nv50_head_atom *);
	int (*or)(struct nv50_head *, struct nv50_head_atom *);
	void (*static_wndw_map)(struct nv50_head *, struct nv50_head_atom *);
	int (*display_id)(struct nv50_head *, u32 display_id);
};

extern const struct nv50_head_func head507d;

@@ -27,6 +27,19 @@

#include <nvhw/class/clc57d.h>

static int
headc57d_display_id(struct nv50_head *head, u32 display_id)
{
	struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push;
	int ret;

	if ((ret = PUSH_WAIT(push, 2)))
		return ret;

	PUSH_NVSQ(push, NVC57D, 0x2020 + (head->base.index * 0x400), display_id);
	return 0;
}

static int
headc57d_or(struct nv50_head *head, struct nv50_head_atom *asyh)
{
@@ -250,4 +263,5 @@ headc57d = {
	.or = headc57d_or,
	/* TODO: flexible window mappings */
	.static_wndw_map = headc37d_static_wndw_map,
	.display_id = headc57d_display_id,
};
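The new per-head `display_id` hook above is only implemented by the C57D core (a single method at 0x2020 plus a 0x400 per-head stride in the core channel), so the output-path hunks earlier in this commit always check for it before calling. A minimal sketch of that guard, assuming the surrounding nv50 display types; the helper name is illustrative and not part of the patch:

/* Illustrative only: pre-C57D head implementations leave ->display_id NULL,
 * so output-path code must treat the hook as optional. */
static void example_head_set_display_id(struct nv50_head *head, u32 display_id)
{
	if (head->func->display_id)
		head->func->display_id(head, display_id); /* pass 0 to clear on disable */
}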
@ -7,6 +7,21 @@ struct nvif_disp;
|
||||
|
||||
struct nvif_conn {
|
||||
struct nvif_object object;
|
||||
u32 id;
|
||||
|
||||
struct {
|
||||
enum {
|
||||
NVIF_CONN_VGA,
|
||||
NVIF_CONN_TV,
|
||||
NVIF_CONN_DVI_I,
|
||||
NVIF_CONN_DVI_D,
|
||||
NVIF_CONN_LVDS,
|
||||
NVIF_CONN_LVDS_SPWG,
|
||||
NVIF_CONN_HDMI,
|
||||
NVIF_CONN_DP,
|
||||
NVIF_CONN_EDP,
|
||||
} type;
|
||||
} info;
|
||||
};
|
||||
|
||||
int nvif_conn_ctor(struct nvif_disp *, const char *name, int id, struct nvif_conn *);
|
||||
@ -18,11 +33,6 @@ nvif_conn_id(struct nvif_conn *conn)
|
||||
return conn->object.handle;
|
||||
}
|
||||
|
||||
#define NVIF_CONN_HPD_STATUS_UNSUPPORTED 0 /* negative if query fails */
|
||||
#define NVIF_CONN_HPD_STATUS_NOT_PRESENT 1
|
||||
#define NVIF_CONN_HPD_STATUS_PRESENT 2
|
||||
int nvif_conn_hpd_status(struct nvif_conn *);
|
||||
|
||||
int nvif_conn_event_ctor(struct nvif_conn *, const char *name, nvif_event_func, u8 types,
|
||||
struct nvif_event *);
|
||||
#endif
|
||||
|
@ -7,6 +7,16 @@ union nvif_conn_args {
|
||||
__u8 version;
|
||||
__u8 id; /* DCB connector table index. */
|
||||
__u8 pad02[6];
|
||||
#define NVIF_CONN_V0_VGA 0x00
|
||||
#define NVIF_CONN_V0_TV 0x01
|
||||
#define NVIF_CONN_V0_DVI_I 0x02
|
||||
#define NVIF_CONN_V0_DVI_D 0x03
|
||||
#define NVIF_CONN_V0_LVDS 0x04
|
||||
#define NVIF_CONN_V0_LVDS_SPWG 0x05
|
||||
#define NVIF_CONN_V0_HDMI 0x06
|
||||
#define NVIF_CONN_V0_DP 0x07
|
||||
#define NVIF_CONN_V0_EDP 0x08
|
||||
__u8 type;
|
||||
} v0;
|
||||
};
|
||||
|
||||
@ -20,15 +30,4 @@ union nvif_conn_event_args {
|
||||
__u8 pad02[6];
|
||||
} v0;
|
||||
};
|
||||
|
||||
#define NVIF_CONN_V0_HPD_STATUS 0x00000000
|
||||
|
||||
union nvif_conn_hpd_status_args {
|
||||
struct nvif_conn_hpd_status_v0 {
|
||||
__u8 version;
|
||||
__u8 support;
|
||||
__u8 present;
|
||||
__u8 pad03[5];
|
||||
} v0;
|
||||
};
|
||||
#endif
|
||||
|
@ -8,18 +8,86 @@ union nvif_outp_args {
|
||||
struct nvif_outp_v0 {
|
||||
__u8 version;
|
||||
__u8 id; /* DCB device index. */
|
||||
__u8 pad02[6];
|
||||
#define NVIF_OUTP_V0_TYPE_DAC 0x00
|
||||
#define NVIF_OUTP_V0_TYPE_SOR 0x01
|
||||
#define NVIF_OUTP_V0_TYPE_PIOR 0x02
|
||||
__u8 type;
|
||||
#define NVIF_OUTP_V0_PROTO_RGB_CRT 0x00
|
||||
#define NVIF_OUTP_V0_PROTO_TMDS 0x01
|
||||
#define NVIF_OUTP_V0_PROTO_LVDS 0x02
|
||||
#define NVIF_OUTP_V0_PROTO_DP 0x03
|
||||
__u8 proto;
|
||||
__u8 heads;
|
||||
__u8 ddc;
|
||||
__u8 conn;
|
||||
union {
|
||||
struct {
|
||||
__u32 freq_max;
|
||||
} rgb_crt;
|
||||
struct {
|
||||
__u8 dual;
|
||||
} tmds;
|
||||
struct {
|
||||
__u8 acpi_edid;
|
||||
} lvds;
|
||||
struct {
|
||||
__u8 aux;
|
||||
__u8 mst;
|
||||
__u8 increased_wm;
|
||||
__u8 link_nr;
|
||||
__u32 link_bw;
|
||||
} dp;
|
||||
};
|
||||
} v0;
|
||||
};
|
||||
|
||||
#define NVIF_OUTP_V0_LOAD_DETECT 0x00
|
||||
#define NVIF_OUTP_V0_ACQUIRE 0x01
|
||||
#define NVIF_OUTP_V0_RELEASE 0x02
|
||||
#define NVIF_OUTP_V0_INFOFRAME 0x03
|
||||
#define NVIF_OUTP_V0_HDA_ELD 0x04
|
||||
#define NVIF_OUTP_V0_DP_AUX_PWR 0x05
|
||||
#define NVIF_OUTP_V0_DP_RETRAIN 0x06
|
||||
#define NVIF_OUTP_V0_DP_MST_VCPI 0x07
|
||||
#define NVIF_OUTP_V0_DETECT 0x00
|
||||
#define NVIF_OUTP_V0_EDID_GET 0x01
|
||||
|
||||
#define NVIF_OUTP_V0_INHERIT 0x10
|
||||
#define NVIF_OUTP_V0_ACQUIRE 0x11
|
||||
#define NVIF_OUTP_V0_RELEASE 0x12
|
||||
|
||||
#define NVIF_OUTP_V0_LOAD_DETECT 0x20
|
||||
|
||||
#define NVIF_OUTP_V0_BL_GET 0x30
|
||||
#define NVIF_OUTP_V0_BL_SET 0x31
|
||||
|
||||
#define NVIF_OUTP_V0_LVDS 0x40
|
||||
|
||||
#define NVIF_OUTP_V0_HDMI 0x50
|
||||
|
||||
#define NVIF_OUTP_V0_INFOFRAME 0x60
|
||||
#define NVIF_OUTP_V0_HDA_ELD 0x61
|
||||
|
||||
#define NVIF_OUTP_V0_DP_AUX_PWR 0x70
|
||||
#define NVIF_OUTP_V0_DP_AUX_XFER 0x71
|
||||
#define NVIF_OUTP_V0_DP_RATES 0x72
|
||||
#define NVIF_OUTP_V0_DP_TRAIN 0x73
|
||||
#define NVIF_OUTP_V0_DP_DRIVE 0x74
|
||||
#define NVIF_OUTP_V0_DP_SST 0x75
|
||||
#define NVIF_OUTP_V0_DP_MST_ID_GET 0x76
|
||||
#define NVIF_OUTP_V0_DP_MST_ID_PUT 0x77
|
||||
#define NVIF_OUTP_V0_DP_MST_VCPI 0x78
|
||||
|
||||
union nvif_outp_detect_args {
|
||||
struct nvif_outp_detect_v0 {
|
||||
__u8 version;
|
||||
#define NVIF_OUTP_DETECT_V0_NOT_PRESENT 0x00
|
||||
#define NVIF_OUTP_DETECT_V0_PRESENT 0x01
|
||||
#define NVIF_OUTP_DETECT_V0_UNKNOWN 0x02
|
||||
__u8 status;
|
||||
} v0;
|
||||
};
|
||||
|
||||
union nvif_outp_edid_get_args {
|
||||
struct nvif_outp_edid_get_v0 {
|
||||
__u8 version;
|
||||
__u8 pad01;
|
||||
__u16 size;
|
||||
__u8 data[2048];
|
||||
} v0;
|
||||
};
|
||||
|
||||
union nvif_outp_load_detect_args {
|
||||
struct nvif_outp_load_detect_v0 {
|
||||
@ -33,40 +101,39 @@ union nvif_outp_load_detect_args {
|
||||
union nvif_outp_acquire_args {
|
||||
struct nvif_outp_acquire_v0 {
|
||||
__u8 version;
|
||||
#define NVIF_OUTP_ACQUIRE_V0_RGB_CRT 0x00
|
||||
#define NVIF_OUTP_ACQUIRE_V0_TV 0x01
|
||||
#define NVIF_OUTP_ACQUIRE_V0_TMDS 0x02
|
||||
#define NVIF_OUTP_ACQUIRE_V0_LVDS 0x03
|
||||
#define NVIF_OUTP_ACQUIRE_V0_DP 0x04
|
||||
__u8 proto;
|
||||
#define NVIF_OUTP_ACQUIRE_V0_DAC 0x00
|
||||
#define NVIF_OUTP_ACQUIRE_V0_SOR 0x01
|
||||
#define NVIF_OUTP_ACQUIRE_V0_PIOR 0x02
|
||||
__u8 type;
|
||||
__u8 or;
|
||||
__u8 link;
|
||||
__u8 pad04[4];
|
||||
union {
|
||||
struct {
|
||||
__u8 head;
|
||||
__u8 hdmi;
|
||||
__u8 hdmi_max_ac_packet;
|
||||
__u8 hdmi_rekey;
|
||||
#define NVIF_OUTP_ACQUIRE_V0_TMDS_HDMI_SCDC_SCRAMBLE (1 << 0)
|
||||
#define NVIF_OUTP_ACQUIRE_V0_TMDS_HDMI_SCDC_DIV_BY_4 (1 << 1)
|
||||
__u8 hdmi_scdc;
|
||||
__u8 hdmi_hda;
|
||||
__u8 pad06[2];
|
||||
} tmds;
|
||||
struct {
|
||||
__u8 dual;
|
||||
__u8 bpc8;
|
||||
__u8 pad02[6];
|
||||
} lvds;
|
||||
struct {
|
||||
__u8 link_nr; /* 0 = highest possible. */
|
||||
__u8 link_bw; /* 0 = highest possible, DP BW code otherwise. */
|
||||
__u8 hda;
|
||||
__u8 mst;
|
||||
__u8 pad04[4];
|
||||
__u8 dpcd[DP_RECEIVER_CAP_SIZE];
|
||||
} dp;
|
||||
} sor;
|
||||
};
|
||||
} v0;
|
||||
};
|
||||
|
||||
union nvif_outp_inherit_args {
|
||||
struct nvif_outp_inherit_v0 {
|
||||
__u8 version;
|
||||
#define NVIF_OUTP_INHERIT_V0_RGB_CRT 0x00
|
||||
#define NVIF_OUTP_INHERIT_V0_TV 0x01
|
||||
#define NVIF_OUTP_INHERIT_V0_TMDS 0x02
|
||||
#define NVIF_OUTP_INHERIT_V0_LVDS 0x03
|
||||
#define NVIF_OUTP_INHERIT_V0_DP 0x04
|
||||
// In/out. Input is one of the above values, output is the actual hw protocol
|
||||
__u8 proto;
|
||||
__u8 or;
|
||||
__u8 link;
|
||||
__u8 head;
|
||||
union {
|
||||
struct {
|
||||
// TODO: Figure out padding, and whether we even want this field
|
||||
__u8 hda;
|
||||
} tmds;
|
||||
};
|
||||
} v0;
|
||||
};
|
||||
@ -76,6 +143,42 @@ union nvif_outp_release_args {
|
||||
} vn;
|
||||
};
|
||||
|
||||
union nvif_outp_bl_get_args {
|
||||
struct nvif_outp_bl_get_v0 {
|
||||
__u8 version;
|
||||
__u8 level;
|
||||
} v0;
|
||||
};
|
||||
|
||||
union nvif_outp_bl_set_args {
|
||||
struct nvif_outp_bl_set_v0 {
|
||||
__u8 version;
|
||||
__u8 level;
|
||||
} v0;
|
||||
};
|
||||
|
||||
union nvif_outp_lvds_args {
|
||||
struct nvif_outp_lvds_v0 {
|
||||
__u8 version;
|
||||
__u8 dual;
|
||||
__u8 bpc8;
|
||||
} v0;
|
||||
};
|
||||
|
||||
union nvif_outp_hdmi_args {
|
||||
struct nvif_outp_hdmi_v0 {
|
||||
__u8 version;
|
||||
__u8 head;
|
||||
__u8 enable;
|
||||
__u8 max_ac_packet;
|
||||
__u8 rekey;
|
||||
__u8 scdc;
|
||||
__u8 scdc_scrambling;
|
||||
__u8 scdc_low_rates;
|
||||
__u32 khz;
|
||||
} v0;
|
||||
};
|
||||
|
||||
union nvif_outp_infoframe_args {
|
||||
struct nvif_outp_infoframe_v0 {
|
||||
__u8 version;
|
||||
@ -105,9 +208,77 @@ union nvif_outp_dp_aux_pwr_args {
|
||||
} v0;
|
||||
};
|
||||
|
||||
union nvif_outp_dp_retrain_args {
|
||||
struct nvif_outp_dp_retrain_vn {
|
||||
} vn;
|
||||
union nvif_outp_dp_aux_xfer_args {
|
||||
struct nvif_outp_dp_aux_xfer_v0 {
|
||||
__u8 version;
|
||||
__u8 pad01;
|
||||
__u8 type;
|
||||
__u8 size;
|
||||
__u32 addr;
|
||||
__u8 data[16];
|
||||
} v0;
|
||||
};
|
||||
|
||||
union nvif_outp_dp_rates_args {
|
||||
struct nvif_outp_dp_rates_v0 {
|
||||
__u8 version;
|
||||
__u8 pad01[6];
|
||||
__u8 rates;
|
||||
struct {
|
||||
__s8 dpcd;
|
||||
__u32 rate;
|
||||
} rate[8];
|
||||
} v0;
|
||||
};
|
||||
|
||||
union nvif_outp_dp_train_args {
|
||||
struct nvif_outp_dp_train_v0 {
|
||||
__u8 version;
|
||||
__u8 retrain;
|
||||
__u8 mst;
|
||||
__u8 lttprs;
|
||||
__u8 post_lt_adj;
|
||||
__u8 link_nr;
|
||||
__u32 link_bw;
|
||||
__u8 dpcd[DP_RECEIVER_CAP_SIZE];
|
||||
} v0;
|
||||
};
|
||||
|
||||
union nvif_outp_dp_drive_args {
|
||||
struct nvif_outp_dp_drive_v0 {
|
||||
__u8 version;
|
||||
__u8 pad01[2];
|
||||
__u8 lanes;
|
||||
__u8 pe[4];
|
||||
__u8 vs[4];
|
||||
} v0;
|
||||
};
|
||||
|
||||
union nvif_outp_dp_sst_args {
|
||||
struct nvif_outp_dp_sst_v0 {
|
||||
__u8 version;
|
||||
__u8 head;
|
||||
__u8 pad02[2];
|
||||
__u32 watermark;
|
||||
__u32 hblanksym;
|
||||
__u32 vblanksym;
|
||||
} v0;
|
||||
};
|
||||
|
||||
union nvif_outp_dp_mst_id_put_args {
|
||||
struct nvif_outp_dp_mst_id_put_v0 {
|
||||
__u8 version;
|
||||
__u8 pad01[3];
|
||||
__u32 id;
|
||||
} v0;
|
||||
};
|
||||
|
||||
union nvif_outp_dp_mst_id_get_args {
|
||||
struct nvif_outp_dp_mst_id_get_v0 {
|
||||
__u8 version;
|
||||
__u8 pad01[3];
|
||||
__u32 id;
|
||||
} v0;
|
||||
};
|
||||
|
||||
union nvif_outp_dp_mst_vcpi_args {
|
||||
|
@ -8,6 +8,46 @@ struct nvif_disp;
|
||||
|
||||
struct nvif_outp {
|
||||
struct nvif_object object;
|
||||
u32 id;
|
||||
|
||||
struct {
|
||||
enum {
|
||||
NVIF_OUTP_DAC,
|
||||
NVIF_OUTP_SOR,
|
||||
NVIF_OUTP_PIOR,
|
||||
} type;
|
||||
|
||||
enum {
|
||||
NVIF_OUTP_RGB_CRT,
|
||||
NVIF_OUTP_TMDS,
|
||||
NVIF_OUTP_LVDS,
|
||||
NVIF_OUTP_DP,
|
||||
} proto;
|
||||
|
||||
u8 heads;
|
||||
#define NVIF_OUTP_DDC_INVALID 0xff
|
||||
u8 ddc;
|
||||
u8 conn;
|
||||
|
||||
union {
|
||||
struct {
|
||||
u32 freq_max;
|
||||
} rgb_crt;
|
||||
struct {
|
||||
bool dual;
|
||||
} tmds;
|
||||
struct {
|
||||
bool acpi_edid;
|
||||
} lvds;
|
||||
struct {
|
||||
u8 aux;
|
||||
bool mst;
|
||||
bool increased_wm;
|
||||
u8 link_nr;
|
||||
u32 link_bw;
|
||||
} dp;
|
||||
};
|
||||
} info;
|
||||
|
||||
struct {
|
||||
int id;
|
||||
@@ -17,18 +57,60 @@ struct nvif_outp {

int nvif_outp_ctor(struct nvif_disp *, const char *name, int id, struct nvif_outp *);
void nvif_outp_dtor(struct nvif_outp *);

enum nvif_outp_detect_status {
	NOT_PRESENT,
	PRESENT,
	UNKNOWN,
};

enum nvif_outp_detect_status nvif_outp_detect(struct nvif_outp *);
int nvif_outp_edid_get(struct nvif_outp *, u8 **pedid);

int nvif_outp_load_detect(struct nvif_outp *, u32 loadval);
int nvif_outp_acquire_rgb_crt(struct nvif_outp *);
int nvif_outp_acquire_tmds(struct nvif_outp *, int head,
			   bool hdmi, u8 max_ac_packet, u8 rekey, u8 scdc, bool hda);
int nvif_outp_acquire_lvds(struct nvif_outp *, bool dual, bool bpc8);
int nvif_outp_acquire_dp(struct nvif_outp *outp, u8 dpcd[DP_RECEIVER_CAP_SIZE],
			 int link_nr, int link_bw, bool hda, bool mst);
int nvif_outp_acquire_dac(struct nvif_outp *);
int nvif_outp_acquire_sor(struct nvif_outp *, bool hda);
int nvif_outp_acquire_pior(struct nvif_outp *);
int nvif_outp_inherit_rgb_crt(struct nvif_outp *outp, u8 *proto_out);
int nvif_outp_inherit_lvds(struct nvif_outp *outp, u8 *proto_out);
int nvif_outp_inherit_tmds(struct nvif_outp *outp, u8 *proto_out);
int nvif_outp_inherit_dp(struct nvif_outp *outp, u8 *proto_out);

void nvif_outp_release(struct nvif_outp *);

static inline bool
nvif_outp_acquired(struct nvif_outp *outp)
{
	return outp->or.id >= 0;
}

int nvif_outp_bl_get(struct nvif_outp *);
int nvif_outp_bl_set(struct nvif_outp *, int level);

int nvif_outp_lvds(struct nvif_outp *, bool dual, bool bpc8);

int nvif_outp_hdmi(struct nvif_outp *, int head, bool enable, u8 max_ac_packet, u8 rekey, u32 khz,
		   bool scdc, bool scdc_scrambling, bool scdc_low_rates);

int nvif_outp_infoframe(struct nvif_outp *, u8 type, struct nvif_outp_infoframe_v0 *, u32 size);
int nvif_outp_hda_eld(struct nvif_outp *, int head, void *data, u32 size);

int nvif_outp_dp_aux_pwr(struct nvif_outp *, bool enable);
int nvif_outp_dp_retrain(struct nvif_outp *);
int nvif_outp_dp_aux_xfer(struct nvif_outp *, u8 type, u8 *size, u32 addr, u8 *data);

struct nvif_outp_dp_rate {
	int dpcd; /* -1 for non-indexed rates */
	u32 rate;
};

int nvif_outp_dp_rates(struct nvif_outp *, struct nvif_outp_dp_rate *rate, int rate_nr);
int nvif_outp_dp_train(struct nvif_outp *, u8 dpcd[DP_RECEIVER_CAP_SIZE],
		       u8 lttprs, u8 link_nr, u32 link_bw, bool mst, bool post_lt_adj,
		       bool retrain);
int nvif_outp_dp_drive(struct nvif_outp *, u8 link_nr, u8 pe[4], u8 vs[4]);
int nvif_outp_dp_sst(struct nvif_outp *, int head, u32 watermark, u32 hblanksym, u32 vblanksym);
int nvif_outp_dp_mst_id_get(struct nvif_outp *, u32 *id);
int nvif_outp_dp_mst_id_put(struct nvif_outp *, u32 id);
int nvif_outp_dp_mst_vcpi(struct nvif_outp *, int head,
			  u8 start_slot, u8 num_slots, u16 pbn, u16 aligned_pbn);
#endif
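Taken together, the header above replaces the old protocol-carrying acquire calls with a leaner flow: claim the OR once with nvif_outp_acquire_dac/sor/pior() (or take over a firmware-lit output via the inherit variants), then program the protocol separately through nvif_outp_lvds()/nvif_outp_hdmi() or DP link training, with nvif_outp_acquired() allowing the enable path to acquire lazily. A minimal sketch of the DP-SST ordering as the nv50_sor_atomic_enable() hunk earlier uses it; the helper name and the omitted error handling are illustrative, not part of the patch:

/* Sketch only: acquire the SOR once, then drive link training separately. */
static void example_sor_enable_dp(struct nouveau_encoder *nv_encoder, bool hda,
				  u32 clock_khz, u8 bpc)
{
	struct nvif_outp *outp = &nv_encoder->outp;

	if (!nvif_outp_acquired(outp))
		nvif_outp_acquire_sor(outp, hda);

	/* Link parameters now flow through the DP_TRAIN method (wrapped by
	 * nouveau_dp_train()) instead of being baked into the acquire call. */
	nouveau_dp_train(nv_encoder, false, clock_khz, bpc);
}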
@@ -12,6 +12,7 @@ struct nvkm_tags {
};

enum nvkm_memory_target {
	NVKM_MEM_TARGET_INST_SR_LOST, /* instance memory - not preserved across suspend */
	NVKM_MEM_TARGET_INST, /* instance memory */
	NVKM_MEM_TARGET_VRAM, /* video memory */
	NVKM_MEM_TARGET_HOST, /* coherent system memory */

@@ -26,7 +26,7 @@ struct nvkm_instmem {

u32 nvkm_instmem_rd32(struct nvkm_instmem *, u32 addr);
void nvkm_instmem_wr32(struct nvkm_instmem *, u32 addr, u32 data);
int nvkm_instobj_new(struct nvkm_instmem *, u32 size, u32 align, bool zero,
int nvkm_instobj_new(struct nvkm_instmem *, u32 size, u32 align, bool zero, bool preserve,
		     struct nvkm_memory **);
int nvkm_instobj_wrap(struct nvkm_device *, struct nvkm_memory *, struct nvkm_memory **);

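The widened nvkm_instobj_new() signature and the new NVKM_MEM_TARGET_INST_SR_LOST target let callers mark large, easily re-created instance objects as not worth saving and restoring, which is where the suspend/resume speed-up in this series comes from. A minimal sketch of the choice a call site makes, assuming a caller that can rebuild the buffer on resume; the helper name is illustrative:

/* Sketch only: zero the new object, but allow its contents to be lost
 * across suspend/resume (preserve = false). */
static int example_alloc_volatile_instobj(struct nvkm_instmem *imem, u32 size,
					  struct nvkm_memory **pmem)
{
	return nvkm_instobj_new(imem, size, 0x1000, true, false, pmem);
}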
@ -109,42 +109,6 @@ nv40_backlight_init(struct nouveau_encoder *encoder,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
nv50_get_intensity(struct backlight_device *bd)
|
||||
{
|
||||
struct nouveau_encoder *nv_encoder = bl_get_data(bd);
|
||||
struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
|
||||
struct nvif_object *device = &drm->client.device.object;
|
||||
int or = ffs(nv_encoder->dcb->or) - 1;
|
||||
u32 div = 1025;
|
||||
u32 val;
|
||||
|
||||
val = nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(or));
|
||||
val &= NV50_PDISP_SOR_PWM_CTL_VAL;
|
||||
return ((val * 100) + (div / 2)) / div;
|
||||
}
|
||||
|
||||
static int
|
||||
nv50_set_intensity(struct backlight_device *bd)
|
||||
{
|
||||
struct nouveau_encoder *nv_encoder = bl_get_data(bd);
|
||||
struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
|
||||
struct nvif_object *device = &drm->client.device.object;
|
||||
int or = ffs(nv_encoder->dcb->or) - 1;
|
||||
u32 div = 1025;
|
||||
u32 val = (bd->props.brightness * div) / 100;
|
||||
|
||||
nvif_wr32(device, NV50_PDISP_SOR_PWM_CTL(or),
|
||||
NV50_PDISP_SOR_PWM_CTL_NEW | val);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct backlight_ops nv50_bl_ops = {
|
||||
.options = BL_CORE_SUSPENDRESUME,
|
||||
.get_brightness = nv50_get_intensity,
|
||||
.update_status = nv50_set_intensity,
|
||||
};
|
||||
|
||||
/*
|
||||
* eDP brightness callbacks need to happen under lock, since we need to
|
||||
* enable/disable the backlight ourselves for modesets
|
||||
@ -238,53 +202,25 @@ static const struct backlight_ops nv50_edp_bl_ops = {
|
||||
};
|
||||
|
||||
static int
|
||||
nva3_get_intensity(struct backlight_device *bd)
|
||||
nv50_get_intensity(struct backlight_device *bd)
|
||||
{
|
||||
struct nouveau_encoder *nv_encoder = bl_get_data(bd);
|
||||
struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
|
||||
struct nvif_object *device = &drm->client.device.object;
|
||||
int or = ffs(nv_encoder->dcb->or) - 1;
|
||||
u32 div, val;
|
||||
|
||||
div = nvif_rd32(device, NV50_PDISP_SOR_PWM_DIV(or));
|
||||
val = nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(or));
|
||||
val &= NVA3_PDISP_SOR_PWM_CTL_VAL;
|
||||
if (div && div >= val)
|
||||
return ((val * 100) + (div / 2)) / div;
|
||||
|
||||
return 100;
|
||||
return nvif_outp_bl_get(&nv_encoder->outp);
|
||||
}
|
||||
|
||||
static int
|
||||
nva3_set_intensity(struct backlight_device *bd)
|
||||
nv50_set_intensity(struct backlight_device *bd)
|
||||
{
|
||||
struct nouveau_encoder *nv_encoder = bl_get_data(bd);
|
||||
struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
|
||||
struct nvif_object *device = &drm->client.device.object;
|
||||
int or = ffs(nv_encoder->dcb->or) - 1;
|
||||
u32 div, val;
|
||||
|
||||
div = nvif_rd32(device, NV50_PDISP_SOR_PWM_DIV(or));
|
||||
|
||||
val = backlight_get_brightness(bd);
|
||||
if (val)
|
||||
val = (val * div) / 100;
|
||||
|
||||
if (div) {
|
||||
nvif_wr32(device, NV50_PDISP_SOR_PWM_CTL(or),
|
||||
val |
|
||||
NV50_PDISP_SOR_PWM_CTL_NEW |
|
||||
NVA3_PDISP_SOR_PWM_CTL_UNK);
|
||||
return 0;
|
||||
}
|
||||
|
||||
return -EINVAL;
|
||||
return nvif_outp_bl_set(&nv_encoder->outp, backlight_get_brightness(bd));
|
||||
}
|
||||
|
||||
static const struct backlight_ops nva3_bl_ops = {
|
||||
static const struct backlight_ops nv50_bl_ops = {
|
||||
.options = BL_CORE_SUSPENDRESUME,
|
||||
.get_brightness = nva3_get_intensity,
|
||||
.update_status = nva3_set_intensity,
|
||||
.get_brightness = nv50_get_intensity,
|
||||
.update_status = nv50_set_intensity,
|
||||
};
|
||||
|
||||
/* FIXME: perform backlight probing for eDP _before_ this, this only gets called after connector
|
||||
@ -298,13 +234,12 @@ nv50_backlight_init(struct nouveau_backlight *bl,
|
||||
const struct backlight_ops **ops)
|
||||
{
|
||||
struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
|
||||
struct nvif_object *device = &drm->client.device.object;
|
||||
|
||||
/*
|
||||
* Note when this runs the connectors have not been probed yet,
|
||||
* so nv_conn->base.status is not set yet.
|
||||
*/
|
||||
if (!nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(ffs(nv_encoder->dcb->or) - 1)) ||
|
||||
if (nvif_outp_bl_get(&nv_encoder->outp) < 0 ||
|
||||
drm_helper_probe_detect(&nv_conn->base, NULL, false) != connector_status_connected)
|
||||
return -ENODEV;
|
||||
|
||||
@ -346,15 +281,8 @@ nv50_backlight_init(struct nouveau_backlight *bl,
|
||||
}
|
||||
}
|
||||
|
||||
if (drm->client.device.info.chipset <= 0xa0 ||
|
||||
drm->client.device.info.chipset == 0xaa ||
|
||||
drm->client.device.info.chipset == 0xac)
|
||||
*ops = &nv50_bl_ops;
|
||||
else
|
||||
*ops = &nva3_bl_ops;
|
||||
|
||||
*ops = &nv50_bl_ops;
|
||||
props->max_brightness = 100;
|
||||
|
||||
return 0;
|
||||
}
@@ -2093,9 +2093,11 @@ nouveau_bios_init(struct drm_device *dev)
	if (!NVInitVBIOS(dev))
		return -ENODEV;

	ret = parse_dcb_table(dev, bios);
	if (ret)
		return ret;
	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
		ret = parse_dcb_table(dev, bios);
		if (ret)
			return ret;
	}

	if (!bios->major_version) /* we don't run version 0 bios */
		return 0;

@ -400,10 +400,8 @@ nouveau_connector_destroy(struct drm_connector *connector)
|
||||
kfree(nv_connector->edid);
|
||||
drm_connector_unregister(connector);
|
||||
drm_connector_cleanup(connector);
|
||||
if (nv_connector->aux.transfer) {
|
||||
if (nv_connector->aux.transfer)
|
||||
drm_dp_cec_unregister_connector(&nv_connector->aux);
|
||||
kfree(nv_connector->aux.name);
|
||||
}
|
||||
nvif_conn_dtor(&nv_connector->conn);
|
||||
kfree(connector);
|
||||
}
|
||||
@ -413,6 +411,7 @@ nouveau_connector_ddc_detect(struct drm_connector *connector)
|
||||
{
|
||||
struct drm_device *dev = connector->dev;
|
||||
struct pci_dev *pdev = to_pci_dev(dev->dev);
|
||||
struct nouveau_connector *conn = nouveau_connector(connector);
|
||||
struct nouveau_encoder *nv_encoder = NULL, *found = NULL;
|
||||
struct drm_encoder *encoder;
|
||||
int ret;
|
||||
@ -421,33 +420,48 @@ nouveau_connector_ddc_detect(struct drm_connector *connector)
|
||||
drm_connector_for_each_possible_encoder(connector, encoder) {
|
||||
nv_encoder = nouveau_encoder(encoder);
|
||||
|
||||
switch (nv_encoder->dcb->type) {
|
||||
case DCB_OUTPUT_DP:
|
||||
ret = nouveau_dp_detect(nouveau_connector(connector),
|
||||
nv_encoder);
|
||||
if (ret == NOUVEAU_DP_MST)
|
||||
return NULL;
|
||||
else if (ret == NOUVEAU_DP_SST)
|
||||
found = nv_encoder;
|
||||
if (nvif_object_constructed(&nv_encoder->outp.object)) {
|
||||
enum nvif_outp_detect_status status;
|
||||
|
||||
break;
|
||||
case DCB_OUTPUT_LVDS:
|
||||
if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
|
||||
ret = nouveau_dp_detect(conn, nv_encoder);
|
||||
if (ret == NOUVEAU_DP_MST)
|
||||
return NULL;
|
||||
if (ret != NOUVEAU_DP_SST)
|
||||
continue;
|
||||
|
||||
return nv_encoder;
|
||||
} else {
|
||||
status = nvif_outp_detect(&nv_encoder->outp);
|
||||
switch (status) {
|
||||
case PRESENT:
|
||||
return nv_encoder;
|
||||
case NOT_PRESENT:
|
||||
continue;
|
||||
case UNKNOWN:
|
||||
break;
|
||||
default:
|
||||
WARN_ON(1);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (!nv_encoder->i2c)
|
||||
continue;
|
||||
|
||||
if (nv_encoder->dcb->type == DCB_OUTPUT_LVDS) {
|
||||
switcheroo_ddc = !!(vga_switcheroo_handler_flags() &
|
||||
VGA_SWITCHEROO_CAN_SWITCH_DDC);
|
||||
fallthrough;
|
||||
default:
|
||||
if (!nv_encoder->i2c)
|
||||
break;
|
||||
|
||||
if (switcheroo_ddc)
|
||||
vga_switcheroo_lock_ddc(pdev);
|
||||
if (nvkm_probe_i2c(nv_encoder->i2c, 0x50))
|
||||
found = nv_encoder;
|
||||
if (switcheroo_ddc)
|
||||
vga_switcheroo_unlock_ddc(pdev);
|
||||
|
||||
break;
|
||||
}
|
||||
|
||||
if (switcheroo_ddc)
|
||||
vga_switcheroo_lock_ddc(pdev);
|
||||
if (nvkm_probe_i2c(nv_encoder->i2c, 0x50))
|
||||
found = nv_encoder;
|
||||
if (switcheroo_ddc)
|
||||
vga_switcheroo_unlock_ddc(pdev);
|
||||
|
||||
if (found)
|
||||
break;
|
||||
}
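With the hunk above, connector probing prefers the per-output NVKM object when it exists: DP outputs keep their dedicated nouveau_dp_detect() path (which also routes MST), everything else asks nvif_outp_detect(), and the old I2C probe at address 0x50 remains the fallback. A simplified sketch of that ordering for a single candidate encoder; the real loop additionally handles vga_switcheroo DDC locking and keeps scanning other encoders, and the helper name is illustrative:

/* Sketch only: detection priority for one candidate encoder. */
static struct nouveau_encoder *
example_detect_one(struct nouveau_connector *conn, struct nouveau_encoder *nv_encoder)
{
	if (nvif_object_constructed(&nv_encoder->outp.object)) {
		if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
			/* DP keeps its own path; MST is handed to the topology mgr. */
			if (nouveau_dp_detect(conn, nv_encoder) == NOUVEAU_DP_SST)
				return nv_encoder;
			return NULL;
		}

		switch (nvif_outp_detect(&nv_encoder->outp)) {
		case PRESENT:
			return nv_encoder;
		case NOT_PRESENT:
			return NULL;
		default:
			break; /* UNKNOWN: fall back to DDC probing below */
		}
	}

	/* Fallback: probe for an EDID EEPROM over DDC, as before. */
	if (nv_encoder->i2c && nvkm_probe_i2c(nv_encoder->i2c, 0x50))
		return nv_encoder;

	return NULL;
}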
@ -554,7 +568,6 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
|
||||
struct nouveau_connector *nv_connector = nouveau_connector(connector);
|
||||
struct nouveau_encoder *nv_encoder = NULL;
|
||||
struct nouveau_encoder *nv_partner;
|
||||
struct i2c_adapter *i2c;
|
||||
int type;
|
||||
int ret;
|
||||
enum drm_connector_status conn_status = connector_status_disconnected;
|
||||
@ -577,15 +590,20 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
|
||||
}
|
||||
|
||||
nv_encoder = nouveau_connector_ddc_detect(connector);
|
||||
if (nv_encoder && (i2c = nv_encoder->i2c) != NULL) {
|
||||
struct edid *new_edid;
|
||||
if (nv_encoder) {
|
||||
struct edid *new_edid = NULL;
|
||||
|
||||
if ((vga_switcheroo_handler_flags() &
|
||||
VGA_SWITCHEROO_CAN_SWITCH_DDC) &&
|
||||
nv_connector->type == DCB_CONNECTOR_LVDS)
|
||||
new_edid = drm_get_edid_switcheroo(connector, i2c);
|
||||
else
|
||||
new_edid = drm_get_edid(connector, i2c);
|
||||
if (nv_encoder->i2c) {
|
||||
if ((vga_switcheroo_handler_flags() & VGA_SWITCHEROO_CAN_SWITCH_DDC) &&
|
||||
nv_connector->type == DCB_CONNECTOR_LVDS)
|
||||
new_edid = drm_get_edid_switcheroo(connector, nv_encoder->i2c);
|
||||
else
|
||||
new_edid = drm_get_edid(connector, nv_encoder->i2c);
|
||||
} else {
|
||||
ret = nvif_outp_edid_get(&nv_encoder->outp, (u8 **)&new_edid);
|
||||
if (ret < 0)
|
||||
return connector_status_disconnected;
|
||||
}
|
||||
|
||||
nouveau_connector_set_edid(nv_connector, new_edid);
|
||||
if (!nv_connector->edid) {
|
||||
@ -1117,7 +1135,7 @@ nouveau_connector_atomic_check(struct drm_connector *connector, struct drm_atomi
|
||||
struct drm_connector_state *conn_state =
|
||||
drm_atomic_get_new_connector_state(state, connector);
|
||||
|
||||
if (!nv_conn->dp_encoder || !nv50_has_mst(nouveau_drm(connector->dev)))
|
||||
if (!nv_conn->dp_encoder || !nv_conn->dp_encoder->dp.mstm)
|
||||
return 0;
|
||||
|
||||
return drm_dp_mst_root_conn_atomic_check(conn_state, &nv_conn->dp_encoder->dp.mstm->mgr);
|
||||
@ -1206,23 +1224,17 @@ nouveau_connector_aux_xfer(struct drm_dp_aux *obj, struct drm_dp_aux_msg *msg)
|
||||
struct nouveau_connector *nv_connector =
|
||||
container_of(obj, typeof(*nv_connector), aux);
|
||||
struct nouveau_encoder *nv_encoder;
|
||||
struct nvkm_i2c_aux *aux;
|
||||
u8 size = msg->size;
|
||||
int ret;
|
||||
|
||||
nv_encoder = find_encoder(&nv_connector->base, DCB_OUTPUT_DP);
|
||||
if (!nv_encoder || !(aux = nv_encoder->aux))
|
||||
if (!nv_encoder)
|
||||
return -ENODEV;
|
||||
if (WARN_ON(msg->size > 16))
|
||||
return -E2BIG;
|
||||
|
||||
ret = nvkm_i2c_aux_acquire(aux);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = nvkm_i2c_aux_xfer(aux, false, msg->request, msg->address,
|
||||
msg->buffer, &size);
|
||||
nvkm_i2c_aux_release(aux);
|
||||
ret = nvif_outp_dp_aux_xfer(&nv_encoder->outp,
|
||||
msg->request, &size, msg->address, msg->buffer);
|
||||
if (ret >= 0) {
|
||||
msg->reply = ret;
|
||||
return size;
|
||||
@ -1263,17 +1275,13 @@ drm_conntype_from_dcb(enum dcb_connector_type dcb)
|
||||
}
|
||||
|
||||
struct drm_connector *
|
||||
nouveau_connector_create(struct drm_device *dev,
|
||||
const struct dcb_output *dcbe)
|
||||
nouveau_connector_create(struct drm_device *dev, int index)
|
||||
{
|
||||
const struct drm_connector_funcs *funcs = &nouveau_connector_funcs;
|
||||
struct nouveau_drm *drm = nouveau_drm(dev);
|
||||
struct nouveau_display *disp = nouveau_display(dev);
|
||||
struct nouveau_connector *nv_connector = NULL;
|
||||
struct drm_connector *connector;
|
||||
struct drm_connector_list_iter conn_iter;
|
||||
char aux_name[48] = {0};
|
||||
int index = dcbe->connector;
|
||||
int type, ret = 0;
|
||||
bool dummy;
|
||||
|
||||
@ -1295,74 +1303,86 @@ nouveau_connector_create(struct drm_device *dev,
|
||||
nv_connector->index = index;
|
||||
INIT_WORK(&nv_connector->irq_work, nouveau_dp_irq);
|
||||
|
||||
/* attempt to parse vbios connector type and hotplug gpio */
|
||||
nv_connector->dcb = olddcb_conn(dev, index);
|
||||
if (nv_connector->dcb) {
|
||||
u32 entry = ROM16(nv_connector->dcb[0]);
|
||||
if (olddcb_conntab(dev)[3] >= 4)
|
||||
entry |= (u32)ROM16(nv_connector->dcb[2]) << 16;
|
||||
|
||||
nv_connector->type = nv_connector->dcb[0];
|
||||
if (drm_conntype_from_dcb(nv_connector->type) ==
|
||||
DRM_MODE_CONNECTOR_Unknown) {
|
||||
NV_WARN(drm, "unknown connector type %02x\n",
|
||||
nv_connector->type);
|
||||
nv_connector->type = DCB_CONNECTOR_NONE;
|
||||
if (disp->disp.conn_mask & BIT(nv_connector->index)) {
|
||||
ret = nvif_conn_ctor(&disp->disp, nv_connector->base.name, nv_connector->index,
|
||||
&nv_connector->conn);
|
||||
if (ret) {
|
||||
kfree(nv_connector);
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
|
||||
/* Gigabyte NX85T */
|
||||
if (nv_match_device(dev, 0x0421, 0x1458, 0x344c)) {
|
||||
if (nv_connector->type == DCB_CONNECTOR_HDMI_1)
|
||||
nv_connector->type = DCB_CONNECTOR_DVI_I;
|
||||
switch (nv_connector->conn.info.type) {
|
||||
case NVIF_CONN_VGA : type = DCB_CONNECTOR_VGA; break;
|
||||
case NVIF_CONN_DVI_I : type = DCB_CONNECTOR_DVI_I; break;
|
||||
case NVIF_CONN_DVI_D : type = DCB_CONNECTOR_DVI_D; break;
|
||||
case NVIF_CONN_LVDS : type = DCB_CONNECTOR_LVDS; break;
|
||||
case NVIF_CONN_LVDS_SPWG: type = DCB_CONNECTOR_LVDS_SPWG; break;
|
||||
case NVIF_CONN_DP : type = DCB_CONNECTOR_DP; break;
|
||||
case NVIF_CONN_EDP : type = DCB_CONNECTOR_eDP; break;
|
||||
case NVIF_CONN_HDMI : type = DCB_CONNECTOR_HDMI_0; break;
|
||||
default:
|
||||
WARN_ON(1);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/* Gigabyte GV-NX86T512H */
|
||||
if (nv_match_device(dev, 0x0402, 0x1458, 0x3455)) {
|
||||
if (nv_connector->type == DCB_CONNECTOR_HDMI_1)
|
||||
nv_connector->type = DCB_CONNECTOR_DVI_I;
|
||||
}
|
||||
nv_connector->type = type;
|
||||
} else {
|
||||
nv_connector->type = DCB_CONNECTOR_NONE;
|
||||
}
|
||||
u8 *dcb = olddcb_conn(dev, nv_connector->index);
|
||||
|
||||
/* no vbios data, or an unknown dcb connector type - attempt to
|
||||
* figure out something suitable ourselves
|
||||
*/
|
||||
if (nv_connector->type == DCB_CONNECTOR_NONE) {
|
||||
struct nouveau_drm *drm = nouveau_drm(dev);
|
||||
struct dcb_table *dcbt = &drm->vbios.dcb;
|
||||
u32 encoders = 0;
|
||||
int i;
|
||||
if (dcb)
|
||||
nv_connector->type = dcb[0];
|
||||
else
|
||||
nv_connector->type = DCB_CONNECTOR_NONE;
|
||||
|
||||
for (i = 0; i < dcbt->entries; i++) {
|
||||
if (dcbt->entry[i].connector == nv_connector->index)
|
||||
encoders |= (1 << dcbt->entry[i].type);
|
||||
/* attempt to parse vbios connector type and hotplug gpio */
|
||||
if (nv_connector->type != DCB_CONNECTOR_NONE) {
|
||||
if (drm_conntype_from_dcb(nv_connector->type) ==
|
||||
DRM_MODE_CONNECTOR_Unknown) {
|
||||
NV_WARN(drm, "unknown connector type %02x\n",
|
||||
nv_connector->type);
|
||||
nv_connector->type = DCB_CONNECTOR_NONE;
|
||||
}
|
||||
}
|
||||
|
||||
if (encoders & (1 << DCB_OUTPUT_DP)) {
|
||||
if (encoders & (1 << DCB_OUTPUT_TMDS))
|
||||
nv_connector->type = DCB_CONNECTOR_DP;
|
||||
else
|
||||
nv_connector->type = DCB_CONNECTOR_eDP;
|
||||
} else
|
||||
if (encoders & (1 << DCB_OUTPUT_TMDS)) {
|
||||
if (encoders & (1 << DCB_OUTPUT_ANALOG))
|
||||
nv_connector->type = DCB_CONNECTOR_DVI_I;
|
||||
else
|
||||
nv_connector->type = DCB_CONNECTOR_DVI_D;
|
||||
} else
|
||||
if (encoders & (1 << DCB_OUTPUT_ANALOG)) {
|
||||
nv_connector->type = DCB_CONNECTOR_VGA;
|
||||
} else
|
||||
if (encoders & (1 << DCB_OUTPUT_LVDS)) {
|
||||
nv_connector->type = DCB_CONNECTOR_LVDS;
|
||||
} else
|
||||
if (encoders & (1 << DCB_OUTPUT_TV)) {
|
||||
nv_connector->type = DCB_CONNECTOR_TV_0;
|
||||
/* no vbios data, or an unknown dcb connector type - attempt to
|
||||
* figure out something suitable ourselves
|
||||
*/
|
||||
if (nv_connector->type == DCB_CONNECTOR_NONE &&
|
||||
!WARN_ON(drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)) {
|
||||
struct dcb_table *dcbt = &drm->vbios.dcb;
|
||||
u32 encoders = 0;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < dcbt->entries; i++) {
|
||||
if (dcbt->entry[i].connector == nv_connector->index)
|
||||
encoders |= (1 << dcbt->entry[i].type);
|
||||
}
|
||||
|
||||
if (encoders & (1 << DCB_OUTPUT_TMDS)) {
|
||||
if (encoders & (1 << DCB_OUTPUT_ANALOG))
|
||||
nv_connector->type = DCB_CONNECTOR_DVI_I;
|
||||
else
|
||||
nv_connector->type = DCB_CONNECTOR_DVI_D;
|
||||
} else
|
||||
if (encoders & (1 << DCB_OUTPUT_ANALOG)) {
|
||||
nv_connector->type = DCB_CONNECTOR_VGA;
|
||||
} else
|
||||
if (encoders & (1 << DCB_OUTPUT_LVDS)) {
|
||||
nv_connector->type = DCB_CONNECTOR_LVDS;
|
||||
} else
|
||||
if (encoders & (1 << DCB_OUTPUT_TV)) {
|
||||
nv_connector->type = DCB_CONNECTOR_TV_0;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
switch ((type = drm_conntype_from_dcb(nv_connector->type))) {
type = drm_conntype_from_dcb(nv_connector->type);
if (type == DRM_MODE_CONNECTOR_LVDS)
drm_connector_init(dev, connector, &nouveau_connector_funcs_lvds, type);
else
drm_connector_init(dev, connector, &nouveau_connector_funcs, type);

switch (type) {
case DRM_MODE_CONNECTOR_LVDS:
ret = nouveau_bios_parse_lvds_table(dev, 0, &dummy, &dummy);
if (ret) {
@ -1371,24 +1391,16 @@ nouveau_connector_create(struct drm_device *dev,
return ERR_PTR(ret);
}

funcs = &nouveau_connector_funcs_lvds;
break;
case DRM_MODE_CONNECTOR_DisplayPort:
case DRM_MODE_CONNECTOR_eDP:
nv_connector->aux.dev = connector->kdev;
nv_connector->aux.drm_dev = dev;
nv_connector->aux.transfer = nouveau_connector_aux_xfer;
snprintf(aux_name, sizeof(aux_name), "sor-%04x-%04x",
dcbe->hasht, dcbe->hashm);
nv_connector->aux.name = kstrdup(aux_name, GFP_KERNEL);
if (!nv_connector->aux.name) {
kfree(nv_connector);
return ERR_PTR(-ENOMEM);
}
nv_connector->aux.name = connector->name;
drm_dp_aux_init(&nv_connector->aux);
break;
default:
funcs = &nouveau_connector_funcs;
break;
}

@ -1403,17 +1415,10 @@ nouveau_connector_create(struct drm_device *dev,
connector->interlace_allowed = false;
connector->doublescan_allowed = false;

drm_connector_init(dev, connector, funcs, type);
drm_connector_helper_add(connector, &nouveau_connector_helper_funcs);
connector->polled = DRM_CONNECTOR_POLL_CONNECT;

if (nv_connector->dcb && (disp->disp.conn_mask & BIT(nv_connector->index))) {
ret = nvif_conn_ctor(&disp->disp, nv_connector->base.name, nv_connector->index,
&nv_connector->conn);
if (ret) {
goto drm_conn_err;
}

if (nvif_object_constructed(&nv_connector->conn.object)) {
ret = nvif_conn_event_ctor(&nv_connector->conn, "kmsHotplug",
nouveau_connector_hotplug,
NVIF_CONN_EVENT_V0_PLUG | NVIF_CONN_EVENT_V0_UNPLUG,

@ -121,7 +121,6 @@ struct nouveau_connector {
struct drm_connector base;
enum dcb_connector_type type;
u8 index;
u8 *dcb;

struct nvif_conn conn;
u64 hpd_pending;
@ -200,7 +199,7 @@ nouveau_crtc_connector_get(struct nouveau_crtc *nv_crtc)
}

struct drm_connector *
nouveau_connector_create(struct drm_device *, const struct dcb_output *);
nouveau_connector_create(struct drm_device *, int id);
void nouveau_connector_hpd(struct nouveau_connector *, u64 bits);

extern int nouveau_tv_disable;

@ -231,7 +231,7 @@ nouveau_debugfs_gpuva(struct seq_file *m, void *data)
continue;

nouveau_uvmm_lock(uvmm);
drm_debugfs_gpuva_info(m, &uvmm->umgr);
drm_debugfs_gpuva_info(m, &uvmm->base);
seq_puts(m, "\n");
nouveau_debugfs_gpuva_regions(m, uvmm);
nouveau_uvmm_unlock(uvmm);

@ -724,10 +724,10 @@ nouveau_display_create(struct drm_device *dev)
drm_kms_helper_poll_init(dev);
drm_kms_helper_poll_disable(dev);

if (nouveau_modeset != 2 && drm->vbios.dcb.entries) {
ret = nvif_disp_ctor(&drm->client.device, "kmsDisp", 0,
&disp->disp);
if (ret == 0) {
if (nouveau_modeset != 2) {
ret = nvif_disp_ctor(&drm->client.device, "kmsDisp", 0, &disp->disp);

if (!ret && (disp->disp.outp_mask || drm->vbios.dcb.entries)) {
nouveau_display_create_properties(dev);
if (disp->disp.object.oclass < NV50_DISP) {
dev->mode_config.fb_modifiers_not_supported = true;

@ -42,6 +42,21 @@ nouveau_dp_has_sink_count(struct drm_connector *connector,
return drm_dp_read_sink_count_cap(connector, outp->dp.dpcd, &outp->dp.desc);
}

static bool
nouveau_dp_probe_lttpr(struct nouveau_encoder *outp)
{
u8 rev, size = sizeof(rev);
int ret;

ret = nvif_outp_dp_aux_xfer(&outp->outp, DP_AUX_NATIVE_READ, &size,
DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV,
&rev);
if (ret || size < sizeof(rev) || rev < 0x14)
return false;

return true;
}

static enum drm_connector_status
nouveau_dp_probe_dpcd(struct nouveau_connector *nv_connector,
struct nouveau_encoder *outp)
@ -53,10 +68,112 @@ nouveau_dp_probe_dpcd(struct nouveau_connector *nv_connector,
int ret;
u8 *dpcd = outp->dp.dpcd;

outp->dp.lttpr.nr = 0;
outp->dp.rate_nr = 0;
outp->dp.link_nr = 0;
outp->dp.link_bw = 0;

if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
nouveau_dp_probe_lttpr(outp) &&
!drm_dp_read_dpcd_caps(aux, dpcd) &&
!drm_dp_read_lttpr_common_caps(aux, dpcd, outp->dp.lttpr.caps)) {
int nr = drm_dp_lttpr_count(outp->dp.lttpr.caps);

if (nr) {
drm_dp_dpcd_writeb(aux, DP_PHY_REPEATER_MODE,
DP_PHY_REPEATER_MODE_TRANSPARENT);

if (nr > 0) {
ret = drm_dp_dpcd_writeb(aux, DP_PHY_REPEATER_MODE,
DP_PHY_REPEATER_MODE_NON_TRANSPARENT);
if (ret != 1) {
drm_dp_dpcd_writeb(aux, DP_PHY_REPEATER_MODE,
DP_PHY_REPEATER_MODE_TRANSPARENT);
} else {
outp->dp.lttpr.nr = nr;
}
}
}
}

ret = drm_dp_read_dpcd_caps(aux, dpcd);
if (ret < 0)
goto out;

outp->dp.link_nr = dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
if (outp->dcb->dpconf.link_nr < outp->dp.link_nr)
outp->dp.link_nr = outp->dcb->dpconf.link_nr;

if (outp->dp.lttpr.nr) {
int links = drm_dp_lttpr_max_lane_count(outp->dp.lttpr.caps);

if (links && links < outp->dp.link_nr)
outp->dp.link_nr = links;
}

if (connector->connector_type == DRM_MODE_CONNECTOR_eDP && dpcd[DP_DPCD_REV] >= 0x13) {
__le16 rates[DP_MAX_SUPPORTED_RATES];

ret = drm_dp_dpcd_read(aux, DP_SUPPORTED_LINK_RATES, rates, sizeof(rates));
if (ret == sizeof(rates)) {
for (int i = 0; i < ARRAY_SIZE(rates); i++) {
u32 rate = (le16_to_cpu(rates[i]) * 200) / 10;
int j;

if (!rate)
break;

for (j = 0; j < outp->dp.rate_nr; j++) {
if (rate > outp->dp.rate[j].rate) {
for (int k = outp->dp.rate_nr; k > j; k--)
outp->dp.rate[k] = outp->dp.rate[k - 1];
break;
}
}

outp->dp.rate[j].dpcd = i;
outp->dp.rate[j].rate = rate;
outp->dp.rate_nr++;
}
}
}

if (!outp->dp.rate_nr) {
const u32 rates[] = { 810000, 540000, 270000, 162000 };
u32 max_rate = dpcd[DP_MAX_LINK_RATE] * 27000;

if (outp->dp.lttpr.nr) {
int rate = drm_dp_lttpr_max_link_rate(outp->dp.lttpr.caps);

if (rate && rate < max_rate)
max_rate = rate;
}

max_rate = min_t(int, max_rate, outp->dcb->dpconf.link_bw);

for (int i = 0; i < ARRAY_SIZE(rates); i++) {
if (rates[i] <= max_rate) {
outp->dp.rate[outp->dp.rate_nr].dpcd = -1;
outp->dp.rate[outp->dp.rate_nr].rate = rates[i];
outp->dp.rate_nr++;
}
}

if (WARN_ON(!outp->dp.rate_nr))
goto out;
}

ret = nvif_outp_dp_rates(&outp->outp, outp->dp.rate, outp->dp.rate_nr);
if (ret)
goto out;

for (int i = 0; i < outp->dp.rate_nr; i++) {
u32 link_bw = outp->dp.rate[i].rate;

if (link_bw > outp->dp.link_bw)
outp->dp.link_bw = link_bw;
}

ret = drm_dp_read_desc(aux, &outp->dp.desc, drm_dp_is_branch(dpcd));
if (ret < 0)
goto out;
@ -132,14 +249,8 @@ nouveau_dp_detect(struct nouveau_connector *nv_connector,
}
}

/* Check status of HPD pin before attempting an AUX transaction that
* would result in a number of (futile) retries on a connector which
* has no display plugged.
*
* TODO: look into checking this before probing I2C to detect DVI/HDMI
*/
hpd = nvif_conn_hpd_status(&nv_connector->conn);
if (hpd == NVIF_CONN_HPD_STATUS_NOT_PRESENT) {
hpd = nvif_outp_detect(&nv_encoder->outp);
if (hpd == NOT_PRESENT) {
nvif_outp_dp_aux_pwr(&nv_encoder->outp, false);
goto out;
}
@ -157,39 +268,14 @@ nouveau_dp_detect(struct nouveau_connector *nv_connector,
goto out;
}

nv_encoder->dp.link_bw = 27000 * dpcd[DP_MAX_LINK_RATE];
nv_encoder->dp.link_nr =
dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
NV_DEBUG(drm, "sink dpcd version: 0x%02x\n", dpcd[DP_DPCD_REV]);
for (int i = 0; i < nv_encoder->dp.rate_nr; i++)
NV_DEBUG(drm, "sink rate %d: %d\n", i, nv_encoder->dp.rate[i].rate);

if (connector->connector_type == DRM_MODE_CONNECTOR_eDP && dpcd[DP_DPCD_REV] >= 0x13) {
struct drm_dp_aux *aux = &nv_connector->aux;
int ret, i;
u8 sink_rates[16];

ret = drm_dp_dpcd_read(aux, DP_SUPPORTED_LINK_RATES, sink_rates, sizeof(sink_rates));
if (ret == sizeof(sink_rates)) {
for (i = 0; i < ARRAY_SIZE(sink_rates); i += 2) {
int val = ((sink_rates[i + 1] << 8) | sink_rates[i]) * 200 / 10;
if (val && (i == 0 || val > nv_encoder->dp.link_bw))
nv_encoder->dp.link_bw = val;
}
}
}

NV_DEBUG(drm, "display: %dx%d dpcd 0x%02x\n",
nv_encoder->dp.link_nr, nv_encoder->dp.link_bw,
dpcd[DP_DPCD_REV]);
NV_DEBUG(drm, "encoder: %dx%d\n",
nv_encoder->dcb->dpconf.link_nr,
nv_encoder->dcb->dpconf.link_bw);

if (nv_encoder->dcb->dpconf.link_nr < nv_encoder->dp.link_nr)
nv_encoder->dp.link_nr = nv_encoder->dcb->dpconf.link_nr;
if (nv_encoder->dcb->dpconf.link_bw < nv_encoder->dp.link_bw)
nv_encoder->dp.link_bw = nv_encoder->dcb->dpconf.link_bw;

NV_DEBUG(drm, "maximum: %dx%d\n",
nv_encoder->dp.link_nr, nv_encoder->dp.link_bw);
NV_DEBUG(drm, "encoder: %dx%d\n", nv_encoder->dcb->dpconf.link_nr,
nv_encoder->dcb->dpconf.link_bw);
NV_DEBUG(drm, "maximum: %dx%d\n", nv_encoder->dp.link_nr,
nv_encoder->dp.link_bw);

if (mstm && mstm->can_mst) {
ret = nv50_mstm_detect(nv_encoder);
@ -211,15 +297,186 @@ out:
return ret;
}

void
nouveau_dp_power_down(struct nouveau_encoder *outp)
{
struct drm_dp_aux *aux = &outp->conn->aux;
int ret;
u8 pwr;

mutex_lock(&outp->dp.hpd_irq_lock);

ret = drm_dp_dpcd_readb(aux, DP_SET_POWER, &pwr);
if (ret == 1) {
pwr &= ~DP_SET_POWER_MASK;
pwr |= DP_SET_POWER_D3;
drm_dp_dpcd_writeb(aux, DP_SET_POWER, pwr);
}

outp->dp.lt.nr = 0;
mutex_unlock(&outp->dp.hpd_irq_lock);
}

static bool
nouveau_dp_train_link(struct nouveau_encoder *outp, bool retrain)
{
struct drm_dp_aux *aux = &outp->conn->aux;
bool post_lt = false;
int ret, retries = 0;

if ( (outp->dp.dpcd[DP_MAX_LANE_COUNT] & 0x20) &&
!(outp->dp.dpcd[DP_MAX_DOWNSPREAD] & DP_TPS4_SUPPORTED))
post_lt = true;

retry:
ret = nvif_outp_dp_train(&outp->outp, outp->dp.dpcd,
outp->dp.lttpr.nr,
outp->dp.lt.nr,
outp->dp.lt.bw,
outp->dp.lt.mst,
post_lt,
retrain);
if (ret)
return false;

if (post_lt) {
u8 stat[DP_LINK_STATUS_SIZE];
u8 prev[2];
u8 time = 0, adjusts = 0, tmp;

ret = drm_dp_dpcd_read_phy_link_status(aux, DP_PHY_DPRX, stat);
if (ret)
return false;

for (;;) {
if (!drm_dp_channel_eq_ok(stat, outp->dp.lt.nr)) {
ret = 1;
break;
}

if (!(stat[2] & 0x02))
break;

msleep(5);
time += 5;

memcpy(prev, &stat[4], sizeof(prev));
ret = drm_dp_dpcd_read_phy_link_status(aux, DP_PHY_DPRX, stat);
if (ret)
break;

if (!memcmp(prev, &stat[4], sizeof(prev))) {
if (time > 200)
break;
} else {
u8 pe[4], vs[4];

if (adjusts++ == 6)
break;

for (int i = 0; i < outp->dp.lt.nr; i++) {
pe[i] = drm_dp_get_adjust_request_pre_emphasis(stat, i) >>
DP_TRAIN_PRE_EMPHASIS_SHIFT;
vs[i] = drm_dp_get_adjust_request_voltage(stat, i) >>
DP_TRAIN_VOLTAGE_SWING_SHIFT;
}

ret = nvif_outp_dp_drive(&outp->outp, outp->dp.lt.nr, pe, vs);
if (ret)
break;

time = 0;
}
}

if (drm_dp_dpcd_readb(aux, DP_LANE_COUNT_SET, &tmp) == 1) {
tmp &= ~0x20;
drm_dp_dpcd_writeb(aux, DP_LANE_COUNT_SET, tmp);
}
}

if (ret == 1 && retries++ < 3)
goto retry;

return ret == 0;
}

bool
nouveau_dp_train(struct nouveau_encoder *outp, bool mst, u32 khz, u8 bpc)
{
struct nouveau_drm *drm = nouveau_drm(outp->base.base.dev);
struct drm_dp_aux *aux = &outp->conn->aux;
u32 min_rate;
u8 pwr;
bool ret = true;

if (mst)
min_rate = outp->dp.link_nr * outp->dp.rate[0].rate;
else
min_rate = DIV_ROUND_UP(khz * bpc * 3, 8);

NV_DEBUG(drm, "%s link training (mst:%d min_rate:%d)\n",
outp->base.base.name, mst, min_rate);

mutex_lock(&outp->dp.hpd_irq_lock);

if (drm_dp_dpcd_readb(aux, DP_SET_POWER, &pwr) == 1) {
if ((pwr & DP_SET_POWER_MASK) != DP_SET_POWER_D0) {
pwr &= ~DP_SET_POWER_MASK;
pwr |= DP_SET_POWER_D0;
drm_dp_dpcd_writeb(aux, DP_SET_POWER, pwr);
}
}

for (int nr = outp->dp.link_nr; nr; nr >>= 1) {
for (int rate = 0; rate < outp->dp.rate_nr; rate++) {
if (outp->dp.rate[rate].rate * nr >= min_rate) {
outp->dp.lt.nr = nr;
outp->dp.lt.bw = outp->dp.rate[rate].rate;
outp->dp.lt.mst = mst;
if (nouveau_dp_train_link(outp, false))
goto done;
}
}
}

ret = false;
done:
mutex_unlock(&outp->dp.hpd_irq_lock);
return ret;
}

static bool
nouveau_dp_link_check_locked(struct nouveau_encoder *outp)
{
u8 link_status[DP_LINK_STATUS_SIZE];

if (!outp || !outp->dp.lt.nr)
return true;

if (drm_dp_dpcd_read_phy_link_status(&outp->conn->aux, DP_PHY_DPRX, link_status) < 0)
return false;

if (drm_dp_channel_eq_ok(link_status, outp->dp.lt.nr))
return true;

return nouveau_dp_train_link(outp, true);
}

bool
nouveau_dp_link_check(struct nouveau_connector *nv_connector)
{
struct nouveau_encoder *nv_encoder = find_encoder(&nv_connector->base, DCB_OUTPUT_DP);
struct nouveau_encoder *outp = nv_connector->dp_encoder;
bool link_ok = true;

if (!nv_encoder || nv_encoder->outp.or.id < 0)
return true;
if (outp) {
mutex_lock(&outp->dp.hpd_irq_lock);
if (outp->dp.lt.nr)
link_ok = nouveau_dp_link_check_locked(outp);
mutex_unlock(&outp->dp.hpd_irq_lock);
}

return nvif_outp_dp_retrain(&nv_encoder->outp) == 0;
return link_ok;
}

void

@ -1133,7 +1133,10 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
}

get_task_comm(tmpname, current);
snprintf(name, sizeof(name), "%s[%d]", tmpname, pid_nr(fpriv->pid));
rcu_read_lock();
snprintf(name, sizeof(name), "%s[%d]",
tmpname, pid_nr(rcu_dereference(fpriv->pid)));
rcu_read_unlock();

if (!(cli = kzalloc(sizeof(*cli), GFP_KERNEL))) {
ret = -ENOMEM;

@ -49,8 +49,9 @@ struct nouveau_encoder {
struct nvif_outp outp;
int or;

struct nouveau_connector *conn;

struct i2c_adapter *i2c;
struct nvkm_i2c_aux *aux;

/* different to drm_encoder.crtc, this reflects what's
* actually programmed on the hw, not the proposed crtc */
@ -60,7 +61,6 @@ struct nouveau_encoder {
/* Protected by nouveau_drm.audio.lock */
struct {
bool enabled;
struct drm_connector *connector;
} audio;

struct drm_display_mode mode;
@ -68,18 +68,38 @@ struct nouveau_encoder {

struct nv04_output_reg restore;

union {
struct {
struct {
bool enabled;
} hdmi;

struct {
struct nv50_mstm *mstm;

struct {
u8 caps[DP_LTTPR_COMMON_CAP_SIZE];
u8 nr;
} lttpr;

u8 dpcd[DP_RECEIVER_CAP_SIZE];

struct nvif_outp_dp_rate rate[8];
int rate_nr;

int link_nr;
int link_bw;

struct {
bool mst;
u8 nr;
u32 bw;
} lt;

/* Protects DP state that needs to be accessed outside
* connector reprobing contexts
*/
struct mutex hpd_irq_lock;

u8 dpcd[DP_RECEIVER_CAP_SIZE];
u8 downstream_ports[DP_MAX_DOWNSTREAM_PORTS];
struct drm_dp_desc desc;

@ -141,6 +161,8 @@ enum nouveau_dp_status {
};

int nouveau_dp_detect(struct nouveau_connector *, struct nouveau_encoder *);
bool nouveau_dp_train(struct nouveau_encoder *, bool mst, u32 khz, u8 bpc);
void nouveau_dp_power_down(struct nouveau_encoder *);
bool nouveau_dp_link_check(struct nouveau_connector *);
void nouveau_dp_irq(struct work_struct *);
enum drm_mode_status nv50_dp_mode_valid(struct nouveau_encoder *,

@ -107,8 +107,8 @@ nouveau_exec_job_submit(struct nouveau_job *job)
drm_exec_until_all_locked(exec) {
struct drm_gpuva *va;

drm_gpuva_for_each_va(va, &uvmm->umgr) {
if (unlikely(va == &uvmm->umgr.kernel_alloc_node))
drm_gpuvm_for_each_va(va, &uvmm->base) {
if (unlikely(va == &uvmm->base.kernel_alloc_node))
continue;

ret = drm_exec_prepare_obj(exec, va->gem.obj, 1);

@ -329,7 +329,7 @@ nouveau_uvma_region_create(struct nouveau_uvmm *uvmm,
|
||||
struct nouveau_uvma_region *reg;
|
||||
int ret;
|
||||
|
||||
if (!drm_gpuva_interval_empty(&uvmm->umgr, addr, range))
|
||||
if (!drm_gpuvm_interval_empty(&uvmm->base, addr, range))
|
||||
return -ENOSPC;
|
||||
|
||||
ret = nouveau_uvma_region_alloc(®);
|
||||
@ -384,7 +384,7 @@ nouveau_uvma_region_empty(struct nouveau_uvma_region *reg)
|
||||
{
|
||||
struct nouveau_uvmm *uvmm = reg->uvmm;
|
||||
|
||||
return drm_gpuva_interval_empty(&uvmm->umgr,
|
||||
return drm_gpuvm_interval_empty(&uvmm->base,
|
||||
reg->va.addr,
|
||||
reg->va.range);
|
||||
}
|
||||
@ -444,7 +444,7 @@ op_map_prepare_unwind(struct nouveau_uvma *uvma)
|
||||
static void
|
||||
op_unmap_prepare_unwind(struct drm_gpuva *va)
|
||||
{
|
||||
drm_gpuva_insert(va->mgr, va);
|
||||
drm_gpuva_insert(va->vm, va);
|
||||
}
|
||||
|
||||
static void
|
||||
@ -589,7 +589,7 @@ op_map_prepare(struct nouveau_uvmm *uvmm,
|
||||
uvma->region = args->region;
|
||||
uvma->kind = args->kind;
|
||||
|
||||
drm_gpuva_map(&uvmm->umgr, &uvma->va, op);
|
||||
drm_gpuva_map(&uvmm->base, &uvma->va, op);
|
||||
|
||||
/* Keep a reference until this uvma is destroyed. */
|
||||
nouveau_uvma_gem_get(uvma);
|
||||
@ -1194,7 +1194,7 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job)
|
||||
goto unwind_continue;
|
||||
}
|
||||
|
||||
op->ops = drm_gpuva_sm_unmap_ops_create(&uvmm->umgr,
|
||||
op->ops = drm_gpuvm_sm_unmap_ops_create(&uvmm->base,
|
||||
op->va.addr,
|
||||
op->va.range);
|
||||
if (IS_ERR(op->ops)) {
|
||||
@ -1205,7 +1205,7 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job)
|
||||
ret = nouveau_uvmm_sm_unmap_prepare(uvmm, &op->new,
|
||||
op->ops);
|
||||
if (ret) {
|
||||
drm_gpuva_ops_free(&uvmm->umgr, op->ops);
|
||||
drm_gpuva_ops_free(&uvmm->base, op->ops);
|
||||
op->ops = NULL;
|
||||
op->reg = NULL;
|
||||
goto unwind_continue;
|
||||
@ -1240,7 +1240,7 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job)
|
||||
}
|
||||
}
|
||||
|
||||
op->ops = drm_gpuva_sm_map_ops_create(&uvmm->umgr,
|
||||
op->ops = drm_gpuvm_sm_map_ops_create(&uvmm->base,
|
||||
op->va.addr,
|
||||
op->va.range,
|
||||
op->gem.obj,
|
||||
@ -1256,7 +1256,7 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job)
|
||||
op->va.range,
|
||||
op->flags & 0xff);
|
||||
if (ret) {
|
||||
drm_gpuva_ops_free(&uvmm->umgr, op->ops);
|
||||
drm_gpuva_ops_free(&uvmm->base, op->ops);
|
||||
op->ops = NULL;
|
||||
goto unwind_continue;
|
||||
}
|
||||
@ -1264,7 +1264,7 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job)
|
||||
break;
|
||||
}
|
||||
case OP_UNMAP:
|
||||
op->ops = drm_gpuva_sm_unmap_ops_create(&uvmm->umgr,
|
||||
op->ops = drm_gpuvm_sm_unmap_ops_create(&uvmm->base,
|
||||
op->va.addr,
|
||||
op->va.range);
|
||||
if (IS_ERR(op->ops)) {
|
||||
@ -1275,7 +1275,7 @@ nouveau_uvmm_bind_job_submit(struct nouveau_job *job)
|
||||
ret = nouveau_uvmm_sm_unmap_prepare(uvmm, &op->new,
|
||||
op->ops);
|
||||
if (ret) {
|
||||
drm_gpuva_ops_free(&uvmm->umgr, op->ops);
|
||||
drm_gpuva_ops_free(&uvmm->base, op->ops);
|
||||
op->ops = NULL;
|
||||
goto unwind_continue;
|
||||
}
|
||||
@ -1404,7 +1404,7 @@ unwind:
|
||||
break;
|
||||
}
|
||||
|
||||
drm_gpuva_ops_free(&uvmm->umgr, op->ops);
|
||||
drm_gpuva_ops_free(&uvmm->base, op->ops);
|
||||
op->ops = NULL;
|
||||
op->reg = NULL;
|
||||
}
|
||||
@ -1509,7 +1509,7 @@ nouveau_uvmm_bind_job_free_work_fn(struct work_struct *work)
|
||||
}
|
||||
|
||||
if (!IS_ERR_OR_NULL(op->ops))
|
||||
drm_gpuva_ops_free(&uvmm->umgr, op->ops);
|
||||
drm_gpuva_ops_free(&uvmm->base, op->ops);
|
||||
|
||||
if (obj)
|
||||
drm_gem_object_put(obj);
|
||||
@ -1836,11 +1836,11 @@ nouveau_uvmm_init(struct nouveau_uvmm *uvmm, struct nouveau_cli *cli,
|
||||
uvmm->kernel_managed_addr = kernel_managed_addr;
|
||||
uvmm->kernel_managed_size = kernel_managed_size;
|
||||
|
||||
drm_gpuva_manager_init(&uvmm->umgr, cli->name,
|
||||
NOUVEAU_VA_SPACE_START,
|
||||
NOUVEAU_VA_SPACE_END,
|
||||
kernel_managed_addr, kernel_managed_size,
|
||||
NULL);
|
||||
drm_gpuvm_init(&uvmm->base, cli->name,
|
||||
NOUVEAU_VA_SPACE_START,
|
||||
NOUVEAU_VA_SPACE_END,
|
||||
kernel_managed_addr, kernel_managed_size,
|
||||
NULL);
|
||||
|
||||
ret = nvif_vmm_ctor(&cli->mmu, "uvmm",
|
||||
cli->vmm.vmm.object.oclass, RAW,
|
||||
@ -1855,7 +1855,7 @@ nouveau_uvmm_init(struct nouveau_uvmm *uvmm, struct nouveau_cli *cli,
|
||||
return 0;
|
||||
|
||||
out_free_gpuva_mgr:
|
||||
drm_gpuva_manager_destroy(&uvmm->umgr);
|
||||
drm_gpuvm_destroy(&uvmm->base);
|
||||
out_unlock:
|
||||
mutex_unlock(&cli->mutex);
|
||||
return ret;
|
||||
@ -1877,11 +1877,11 @@ nouveau_uvmm_fini(struct nouveau_uvmm *uvmm)
|
||||
wait_event(entity->job.wq, list_empty(&entity->job.list.head));
|
||||
|
||||
nouveau_uvmm_lock(uvmm);
|
||||
drm_gpuva_for_each_va_safe(va, next, &uvmm->umgr) {
|
||||
drm_gpuvm_for_each_va_safe(va, next, &uvmm->base) {
|
||||
struct nouveau_uvma *uvma = uvma_from_va(va);
|
||||
struct drm_gem_object *obj = va->gem.obj;
|
||||
|
||||
if (unlikely(va == &uvmm->umgr.kernel_alloc_node))
|
||||
if (unlikely(va == &uvmm->base.kernel_alloc_node))
|
||||
continue;
|
||||
|
||||
drm_gpuva_remove(va);
|
||||
@ -1910,7 +1910,7 @@ nouveau_uvmm_fini(struct nouveau_uvmm *uvmm)
|
||||
|
||||
mutex_lock(&cli->mutex);
|
||||
nouveau_vmm_fini(&uvmm->vmm);
|
||||
drm_gpuva_manager_destroy(&uvmm->umgr);
|
||||
drm_gpuvm_destroy(&uvmm->base);
|
||||
mutex_unlock(&cli->mutex);
|
||||
|
||||
dma_resv_fini(&uvmm->resv);
|
||||
|
@ -3,13 +3,13 @@
|
||||
#ifndef __NOUVEAU_UVMM_H__
|
||||
#define __NOUVEAU_UVMM_H__
|
||||
|
||||
#include <drm/drm_gpuva_mgr.h>
|
||||
#include <drm/drm_gpuvm.h>
|
||||
|
||||
#include "nouveau_drv.h"
|
||||
|
||||
struct nouveau_uvmm {
|
||||
struct drm_gpuvm base;
|
||||
struct nouveau_vmm vmm;
|
||||
struct drm_gpuva_manager umgr;
|
||||
struct maple_tree region_mt;
|
||||
struct mutex mutex;
|
||||
struct dma_resv resv;
|
||||
@ -41,10 +41,10 @@ struct nouveau_uvma {
|
||||
u8 kind;
|
||||
};
|
||||
|
||||
#define uvmm_from_mgr(x) container_of((x), struct nouveau_uvmm, umgr)
|
||||
#define uvmm_from_gpuvm(x) container_of((x), struct nouveau_uvmm, base)
|
||||
#define uvma_from_va(x) container_of((x), struct nouveau_uvma, va)
|
||||
|
||||
#define to_uvmm(x) uvmm_from_mgr((x)->va.mgr)
|
||||
#define to_uvmm(x) uvmm_from_gpuvm((x)->va.vm)
|
||||
|
||||
struct nouveau_uvmm_bind_job {
|
||||
struct nouveau_job base;
|
||||
|
@ -69,7 +69,7 @@ nvif_client_ctor(struct nvif_client *parent, const char *name, u64 device,
|
||||
} nop = {};
|
||||
int ret;
|
||||
|
||||
strncpy(args.name, name, sizeof(args.name));
|
||||
strscpy_pad(args.name, name, sizeof(args.name));
|
||||
ret = nvif_object_ctor(parent != client ? &parent->object : NULL,
|
||||
name ? name : "nvifClient", 0,
|
||||
NVIF_CLASS_CLIENT, &args, sizeof(args),
|
||||
|
@ -45,20 +45,6 @@ nvif_conn_event_ctor(struct nvif_conn *conn, const char *name, nvif_event_func f
|
||||
return ret;
|
||||
}
|
||||
|
||||
int
|
||||
nvif_conn_hpd_status(struct nvif_conn *conn)
|
||||
{
|
||||
struct nvif_conn_hpd_status_v0 args;
|
||||
int ret;
|
||||
|
||||
args.version = 0;
|
||||
|
||||
ret = nvif_mthd(&conn->object, NVIF_CONN_V0_HPD_STATUS, &args, sizeof(args));
|
||||
NVIF_ERRON(ret, &conn->object, "[HPD_STATUS] support:%d present:%d",
|
||||
args.support, args.present);
|
||||
return ret ? ret : !!args.support + !!args.present;
|
||||
}
|
||||
|
||||
void
|
||||
nvif_conn_dtor(struct nvif_conn *conn)
|
||||
{
|
||||
@ -77,5 +63,25 @@ nvif_conn_ctor(struct nvif_disp *disp, const char *name, int id, struct nvif_con
|
||||
ret = nvif_object_ctor(&disp->object, name ?: "nvifConn", id, NVIF_CLASS_CONN,
|
||||
&args, sizeof(args), &conn->object);
|
||||
NVIF_ERRON(ret, &disp->object, "[NEW conn id:%d]", id);
|
||||
return ret;
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
conn->id = id;
|
||||
|
||||
switch (args.type) {
|
||||
case NVIF_CONN_V0_VGA : conn->info.type = NVIF_CONN_VGA; break;
|
||||
case NVIF_CONN_V0_TV : conn->info.type = NVIF_CONN_TV; break;
|
||||
case NVIF_CONN_V0_DVI_I : conn->info.type = NVIF_CONN_DVI_I; break;
|
||||
case NVIF_CONN_V0_DVI_D : conn->info.type = NVIF_CONN_DVI_D; break;
|
||||
case NVIF_CONN_V0_LVDS : conn->info.type = NVIF_CONN_LVDS; break;
|
||||
case NVIF_CONN_V0_LVDS_SPWG: conn->info.type = NVIF_CONN_LVDS_SPWG; break;
|
||||
case NVIF_CONN_V0_HDMI : conn->info.type = NVIF_CONN_HDMI; break;
|
||||
case NVIF_CONN_V0_DP : conn->info.type = NVIF_CONN_DP; break;
|
||||
case NVIF_CONN_V0_EDP : conn->info.type = NVIF_CONN_EDP; break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
}
|
||||
|
@ -60,7 +60,7 @@ nvif_disp_ctor(struct nvif_device *device, const char *name, s32 oclass, struct
|
||||
cid = nvif_sclass(&device->object, disps, oclass);
|
||||
disp->object.client = NULL;
|
||||
if (cid < 0) {
|
||||
NVIF_ERRON(cid, &device->object, "[NEW disp%04x] not supported", oclass);
|
||||
NVIF_DEBUG(&device->object, "[NEW disp%04x] not supported", oclass);
|
||||
return cid;
|
||||
}
|
||||
|
||||
|
@ -47,10 +47,134 @@ nvif_outp_dp_mst_vcpi(struct nvif_outp *outp, int head,
|
||||
}
|
||||
|
||||
int
|
||||
nvif_outp_dp_retrain(struct nvif_outp *outp)
|
||||
nvif_outp_dp_mst_id_put(struct nvif_outp *outp, u32 id)
|
||||
{
|
||||
int ret = nvif_object_mthd(&outp->object, NVIF_OUTP_V0_DP_RETRAIN, NULL, 0);
|
||||
NVIF_ERRON(ret, &outp->object, "[DP_RETRAIN]");
|
||||
struct nvif_outp_dp_mst_id_get_v0 args;
|
||||
int ret;
|
||||
|
||||
args.version = 0;
|
||||
args.id = id;
|
||||
ret = nvif_object_mthd(&outp->object, NVIF_OUTP_V0_DP_MST_ID_PUT, &args, sizeof(args));
|
||||
NVIF_ERRON(ret, &outp->object, "[DP_MST_ID_PUT id:%08x]", args.id);
|
||||
return ret;
|
||||
}
|
||||
|
||||
int
|
||||
nvif_outp_dp_mst_id_get(struct nvif_outp *outp, u32 *id)
|
||||
{
|
||||
struct nvif_outp_dp_mst_id_get_v0 args;
|
||||
int ret;
|
||||
|
||||
args.version = 0;
|
||||
ret = nvif_object_mthd(&outp->object, NVIF_OUTP_V0_DP_MST_ID_GET, &args, sizeof(args));
|
||||
NVIF_ERRON(ret, &outp->object, "[DP_MST_ID_GET] id:%08x", args.id);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
*id = args.id;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int
|
||||
nvif_outp_dp_sst(struct nvif_outp *outp, int head, u32 watermark, u32 hblanksym, u32 vblanksym)
|
||||
{
|
||||
struct nvif_outp_dp_sst_v0 args;
|
||||
int ret;
|
||||
|
||||
args.version = 0;
|
||||
args.head = head;
|
||||
args.watermark = watermark;
|
||||
args.hblanksym = hblanksym;
|
||||
args.vblanksym = vblanksym;
|
||||
ret = nvif_object_mthd(&outp->object, NVIF_OUTP_V0_DP_SST, &args, sizeof(args));
|
||||
NVIF_ERRON(ret, &outp->object,
|
||||
"[DP_SST head:%d watermark:%d hblanksym:%d vblanksym:%d]",
|
||||
args.head, args.watermark, args.hblanksym, args.vblanksym);
|
||||
return ret;
|
||||
}
|
||||
|
||||
int
|
||||
nvif_outp_dp_drive(struct nvif_outp *outp, u8 link_nr, u8 pe[4], u8 vs[4])
|
||||
{
|
||||
struct nvif_outp_dp_drive_v0 args;
|
||||
int ret;
|
||||
|
||||
args.version = 0;
|
||||
args.lanes = link_nr;
|
||||
memcpy(args.pe, pe, sizeof(args.pe));
|
||||
memcpy(args.vs, vs, sizeof(args.vs));
|
||||
|
||||
ret = nvif_object_mthd(&outp->object, NVIF_OUTP_V0_DP_DRIVE, &args, sizeof(args));
|
||||
NVIF_ERRON(ret, &outp->object, "[DP_DRIVE lanes:%d]", args.lanes);
|
||||
return ret;
|
||||
}
|
||||
|
||||
int
|
||||
nvif_outp_dp_train(struct nvif_outp *outp, u8 dpcd[DP_RECEIVER_CAP_SIZE], u8 lttprs,
|
||||
u8 link_nr, u32 link_bw, bool mst, bool post_lt_adj, bool retrain)
|
||||
{
|
||||
struct nvif_outp_dp_train_v0 args;
|
||||
int ret;
|
||||
|
||||
args.version = 0;
|
||||
args.retrain = retrain;
|
||||
args.mst = mst;
|
||||
args.lttprs = lttprs;
|
||||
args.post_lt_adj = post_lt_adj;
|
||||
args.link_nr = link_nr;
|
||||
args.link_bw = link_bw;
|
||||
memcpy(args.dpcd, dpcd, sizeof(args.dpcd));
|
||||
|
||||
ret = nvif_object_mthd(&outp->object, NVIF_OUTP_V0_DP_TRAIN, &args, sizeof(args));
|
||||
NVIF_ERRON(ret, &outp->object,
|
||||
"[DP_TRAIN retrain:%d mst:%d lttprs:%d post_lt_adj:%d nr:%d bw:%d]",
|
||||
args.retrain, args.mst, args.lttprs, args.post_lt_adj, args.link_nr,
|
||||
args.link_bw);
|
||||
return ret;
|
||||
}
|
||||
|
||||
int
|
||||
nvif_outp_dp_rates(struct nvif_outp *outp, struct nvif_outp_dp_rate *rate, int rate_nr)
|
||||
{
|
||||
struct nvif_outp_dp_rates_v0 args;
|
||||
int ret;
|
||||
|
||||
if (rate_nr > ARRAY_SIZE(args.rate))
|
||||
return -EINVAL;
|
||||
|
||||
args.version = 0;
|
||||
args.rates = rate_nr;
|
||||
for (int i = 0; i < args.rates; i++, rate++) {
|
||||
args.rate[i].dpcd = rate->dpcd;
|
||||
args.rate[i].rate = rate->rate;
|
||||
}
|
||||
|
||||
ret = nvif_object_mthd(&outp->object, NVIF_OUTP_V0_DP_RATES, &args, sizeof(args));
|
||||
NVIF_ERRON(ret, &outp->object, "[DP_RATES rates:%d]", args.rates);
|
||||
return ret;
|
||||
}
|
||||
|
||||
int
|
||||
nvif_outp_dp_aux_xfer(struct nvif_outp *outp, u8 type, u8 *psize, u32 addr, u8 *data)
|
||||
{
|
||||
struct nvif_outp_dp_aux_xfer_v0 args;
|
||||
u8 size = *psize;
|
||||
int ret;
|
||||
|
||||
args.version = 0;
|
||||
args.type = type;
|
||||
args.size = size;
|
||||
args.addr = addr;
|
||||
memcpy(args.data, data, size);
|
||||
ret = nvif_object_mthd(&outp->object, NVIF_OUTP_V0_DP_AUX_XFER, &args, sizeof(args));
|
||||
NVIF_DEBUG(&outp->object, "[DP_AUX_XFER type:%d size:%d addr:%05x] %d size:%d (ret: %d)",
|
||||
args.type, size, args.addr, ret, args.size, ret);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
*psize = args.size;
|
||||
|
||||
memcpy(data, args.data, size);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -101,6 +225,74 @@ nvif_outp_infoframe(struct nvif_outp *outp, u8 type, struct nvif_outp_infoframe_
|
||||
return ret;
|
||||
}
|
||||
|
||||
int
|
||||
nvif_outp_hdmi(struct nvif_outp *outp, int head, bool enable, u8 max_ac_packet, u8 rekey,
|
||||
u32 khz, bool scdc, bool scdc_scrambling, bool scdc_low_rates)
|
||||
{
|
||||
struct nvif_outp_hdmi_v0 args;
|
||||
int ret;
|
||||
|
||||
args.version = 0;
|
||||
args.head = head;
|
||||
args.enable = enable;
|
||||
args.max_ac_packet = max_ac_packet;
|
||||
args.rekey = rekey;
|
||||
args.khz = khz;
|
||||
args.scdc = scdc;
|
||||
args.scdc_scrambling = scdc_scrambling;
|
||||
args.scdc_low_rates = scdc_low_rates;
|
||||
|
||||
ret = nvif_mthd(&outp->object, NVIF_OUTP_V0_HDMI, &args, sizeof(args));
|
||||
NVIF_ERRON(ret, &outp->object,
|
||||
"[HDMI head:%d enable:%d max_ac_packet:%d rekey:%d khz:%d scdc:%d "
|
||||
"scdc_scrambling:%d scdc_low_rates:%d]",
|
||||
args.head, args.enable, args.max_ac_packet, args.rekey, args.khz,
|
||||
args.scdc, args.scdc_scrambling, args.scdc_low_rates);
|
||||
return ret;
|
||||
}
|
||||
|
||||
int
|
||||
nvif_outp_lvds(struct nvif_outp *outp, bool dual, bool bpc8)
|
||||
{
|
||||
struct nvif_outp_lvds_v0 args;
|
||||
int ret;
|
||||
|
||||
args.version = 0;
|
||||
args.dual = dual;
|
||||
args.bpc8 = bpc8;
|
||||
|
||||
ret = nvif_mthd(&outp->object, NVIF_OUTP_V0_LVDS, &args, sizeof(args));
|
||||
NVIF_ERRON(ret, &outp->object, "[LVDS dual:%d 8bpc:%d]", args.dual, args.bpc8);
|
||||
return ret;
|
||||
}
|
||||
|
||||
int
|
||||
nvif_outp_bl_set(struct nvif_outp *outp, int level)
|
||||
{
|
||||
struct nvif_outp_bl_set_v0 args;
|
||||
int ret;
|
||||
|
||||
args.version = 0;
|
||||
args.level = level;
|
||||
|
||||
ret = nvif_object_mthd(&outp->object, NVIF_OUTP_V0_BL_SET, &args, sizeof(args));
|
||||
NVIF_ERRON(ret, &outp->object, "[BL_SET level:%d]", args.level);
|
||||
return ret;
|
||||
}
|
||||
|
||||
int
|
||||
nvif_outp_bl_get(struct nvif_outp *outp)
|
||||
{
|
||||
struct nvif_outp_bl_get_v0 args;
|
||||
int ret;
|
||||
|
||||
args.version = 0;
|
||||
|
||||
ret = nvif_object_mthd(&outp->object, NVIF_OUTP_V0_BL_GET, &args, sizeof(args));
|
||||
NVIF_ERRON(ret, &outp->object, "[BL_GET level:%d]", args.level);
|
||||
return ret ? ret : args.level;
|
||||
}
|
||||
|
||||
void
|
||||
nvif_outp_release(struct nvif_outp *outp)
|
||||
{
|
||||
@ -110,12 +302,12 @@ nvif_outp_release(struct nvif_outp *outp)
|
||||
}
|
||||
|
||||
static inline int
|
||||
nvif_outp_acquire(struct nvif_outp *outp, u8 proto, struct nvif_outp_acquire_v0 *args)
|
||||
nvif_outp_acquire(struct nvif_outp *outp, u8 type, struct nvif_outp_acquire_v0 *args)
|
||||
{
|
||||
int ret;
|
||||
|
||||
args->version = 0;
|
||||
args->proto = proto;
|
||||
args->type = type;
|
||||
|
||||
ret = nvif_mthd(&outp->object, NVIF_OUTP_V0_ACQUIRE, args, sizeof(*args));
|
||||
if (ret)
|
||||
@ -127,73 +319,106 @@ nvif_outp_acquire(struct nvif_outp *outp, u8 proto, struct nvif_outp_acquire_v0
|
||||
}
|
||||
|
||||
int
|
||||
nvif_outp_acquire_dp(struct nvif_outp *outp, u8 dpcd[DP_RECEIVER_CAP_SIZE],
|
||||
int link_nr, int link_bw, bool hda, bool mst)
|
||||
nvif_outp_acquire_pior(struct nvif_outp *outp)
|
||||
{
|
||||
struct nvif_outp_acquire_v0 args;
|
||||
int ret;
|
||||
|
||||
args.dp.link_nr = link_nr;
|
||||
args.dp.link_bw = link_bw;
|
||||
args.dp.hda = hda;
|
||||
args.dp.mst = mst;
|
||||
memcpy(args.dp.dpcd, dpcd, sizeof(args.dp.dpcd));
|
||||
|
||||
ret = nvif_outp_acquire(outp, NVIF_OUTP_ACQUIRE_V0_DP, &args);
|
||||
NVIF_ERRON(ret, &outp->object,
|
||||
"[ACQUIRE proto:DP link_nr:%d link_bw:%02x hda:%d mst:%d] or:%d link:%d",
|
||||
args.dp.link_nr, args.dp.link_bw, args.dp.hda, args.dp.mst, args.or, args.link);
|
||||
ret = nvif_outp_acquire(outp, NVIF_OUTP_ACQUIRE_V0_PIOR, &args);
|
||||
NVIF_ERRON(ret, &outp->object, "[ACQUIRE PIOR] or:%d", args.or);
|
||||
return ret;
|
||||
}
|
||||
|
||||
int
|
||||
nvif_outp_acquire_lvds(struct nvif_outp *outp, bool dual, bool bpc8)
|
||||
nvif_outp_acquire_sor(struct nvif_outp *outp, bool hda)
|
||||
{
|
||||
struct nvif_outp_acquire_v0 args;
|
||||
int ret;
|
||||
|
||||
args.lvds.dual = dual;
|
||||
args.lvds.bpc8 = bpc8;
|
||||
args.sor.hda = hda;
|
||||
|
||||
ret = nvif_outp_acquire(outp, NVIF_OUTP_ACQUIRE_V0_LVDS, &args);
|
||||
NVIF_ERRON(ret, &outp->object,
|
||||
"[ACQUIRE proto:LVDS dual:%d 8bpc:%d] or:%d link:%d",
|
||||
args.lvds.dual, args.lvds.bpc8, args.or, args.link);
|
||||
ret = nvif_outp_acquire(outp, NVIF_OUTP_ACQUIRE_V0_SOR, &args);
|
||||
NVIF_ERRON(ret, &outp->object, "[ACQUIRE SOR] or:%d link:%d", args.or, args.link);
|
||||
return ret;
|
||||
}
|
||||
|
||||
int
|
||||
nvif_outp_acquire_tmds(struct nvif_outp *outp, int head,
|
||||
bool hdmi, u8 max_ac_packet, u8 rekey, u8 scdc, bool hda)
|
||||
nvif_outp_acquire_dac(struct nvif_outp *outp)
|
||||
{
|
||||
struct nvif_outp_acquire_v0 args;
|
||||
int ret;
|
||||
|
||||
args.tmds.head = head;
|
||||
args.tmds.hdmi = hdmi;
|
||||
args.tmds.hdmi_max_ac_packet = max_ac_packet;
|
||||
args.tmds.hdmi_rekey = rekey;
|
||||
args.tmds.hdmi_scdc = scdc;
|
||||
args.tmds.hdmi_hda = hda;
|
||||
|
||||
ret = nvif_outp_acquire(outp, NVIF_OUTP_ACQUIRE_V0_TMDS, &args);
|
||||
NVIF_ERRON(ret, &outp->object,
|
||||
"[ACQUIRE proto:TMDS head:%d hdmi:%d max_ac_packet:%d rekey:%d scdc:%d hda:%d]"
|
||||
" or:%d link:%d", args.tmds.head, args.tmds.hdmi, args.tmds.hdmi_max_ac_packet,
|
||||
args.tmds.hdmi_rekey, args.tmds.hdmi_scdc, args.tmds.hdmi_hda,
|
||||
args.or, args.link);
|
||||
ret = nvif_outp_acquire(outp, NVIF_OUTP_ACQUIRE_V0_DAC, &args);
|
||||
NVIF_ERRON(ret, &outp->object, "[ACQUIRE DAC] or:%d", args.or);
|
||||
return ret;
|
||||
}
|
||||
|
||||
int
|
||||
nvif_outp_acquire_rgb_crt(struct nvif_outp *outp)
|
||||
static int
|
||||
nvif_outp_inherit(struct nvif_outp *outp,
|
||||
u8 proto,
|
||||
struct nvif_outp_inherit_v0 *args,
|
||||
u8 *proto_out)
|
||||
{
|
||||
struct nvif_outp_acquire_v0 args;
|
||||
int ret;
|
||||
|
||||
ret = nvif_outp_acquire(outp, NVIF_OUTP_ACQUIRE_V0_RGB_CRT, &args);
|
||||
NVIF_ERRON(ret, &outp->object, "[ACQUIRE proto:RGB_CRT] or:%d", args.or);
|
||||
return ret;
|
||||
args->version = 0;
|
||||
args->proto = proto;
|
||||
|
||||
ret = nvif_mthd(&outp->object, NVIF_OUTP_V0_INHERIT, args, sizeof(*args));
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
outp->or.id = args->or;
|
||||
outp->or.link = args->link;
|
||||
*proto_out = args->proto;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int
|
||||
nvif_outp_inherit_lvds(struct nvif_outp *outp, u8 *proto_out)
|
||||
{
|
||||
struct nvif_outp_inherit_v0 args;
|
||||
int ret;
|
||||
|
||||
ret = nvif_outp_inherit(outp, NVIF_OUTP_INHERIT_V0_LVDS, &args, proto_out);
|
||||
NVIF_ERRON(ret && ret != -ENODEV, &outp->object, "[INHERIT proto:LVDS] ret:%d", ret);
|
||||
return ret ?: args.head;
|
||||
}
|
||||
|
||||
int
|
||||
nvif_outp_inherit_tmds(struct nvif_outp *outp, u8 *proto_out)
|
||||
{
|
||||
struct nvif_outp_inherit_v0 args;
|
||||
int ret;
|
||||
|
||||
ret = nvif_outp_inherit(outp, NVIF_OUTP_INHERIT_V0_TMDS, &args, proto_out);
|
||||
NVIF_ERRON(ret && ret != -ENODEV, &outp->object, "[INHERIT proto:TMDS] ret:%d", ret);
|
||||
return ret ?: args.head;
|
||||
}
|
||||
|
||||
int
|
||||
nvif_outp_inherit_dp(struct nvif_outp *outp, u8 *proto_out)
|
||||
{
|
||||
struct nvif_outp_inherit_v0 args;
|
||||
int ret;
|
||||
|
||||
ret = nvif_outp_inherit(outp, NVIF_OUTP_INHERIT_V0_DP, &args, proto_out);
|
||||
NVIF_ERRON(ret && ret != -ENODEV, &outp->object, "[INHERIT proto:DP] ret:%d", ret);
|
||||
|
||||
// TODO: Get current link info
|
||||
|
||||
return ret ?: args.head;
|
||||
}
|
||||
|
||||
int
|
||||
nvif_outp_inherit_rgb_crt(struct nvif_outp *outp, u8 *proto_out)
|
||||
{
|
||||
struct nvif_outp_inherit_v0 args;
|
||||
int ret;
|
||||
|
||||
ret = nvif_outp_inherit(outp, NVIF_OUTP_INHERIT_V0_RGB_CRT, &args, proto_out);
|
||||
NVIF_ERRON(ret && ret != -ENODEV, &outp->object, "[INHERIT proto:RGB_CRT] ret:%d", ret);
|
||||
return ret ?: args.head;
|
||||
}
|
||||
|
||||
int
|
||||
@ -210,6 +435,61 @@ nvif_outp_load_detect(struct nvif_outp *outp, u32 loadval)
|
||||
return ret < 0 ? ret : args.load;
|
||||
}
|
||||
|
||||
int
|
||||
nvif_outp_edid_get(struct nvif_outp *outp, u8 **pedid)
|
||||
{
|
||||
struct nvif_outp_edid_get_v0 *args;
|
||||
int ret;
|
||||
|
||||
args = kmalloc(sizeof(*args), GFP_KERNEL);
|
||||
if (!args)
|
||||
return -ENOMEM;
|
||||
|
||||
args->version = 0;
|
||||
|
||||
ret = nvif_mthd(&outp->object, NVIF_OUTP_V0_EDID_GET, args, sizeof(*args));
|
||||
NVIF_ERRON(ret, &outp->object, "[EDID_GET] size:%d", args->size);
|
||||
if (ret)
|
||||
goto done;
|
||||
|
||||
*pedid = kmalloc(args->size, GFP_KERNEL);
|
||||
if (!*pedid) {
|
||||
ret = -ENOMEM;
|
||||
goto done;
|
||||
}
|
||||
|
||||
memcpy(*pedid, args->data, args->size);
|
||||
ret = args->size;
|
||||
done:
|
||||
kfree(args);
|
||||
return ret;
|
||||
}
|
||||
|
||||
enum nvif_outp_detect_status
|
||||
nvif_outp_detect(struct nvif_outp *outp)
|
||||
{
|
||||
struct nvif_outp_detect_v0 args;
|
||||
int ret;
|
||||
|
||||
args.version = 0;
|
||||
|
||||
ret = nvif_mthd(&outp->object, NVIF_OUTP_V0_DETECT, &args, sizeof(args));
|
||||
NVIF_ERRON(ret, &outp->object, "[DETECT] status:%02x", args.status);
|
||||
if (ret)
|
||||
return UNKNOWN;
|
||||
|
||||
switch (args.status) {
|
||||
case NVIF_OUTP_DETECT_V0_NOT_PRESENT: return NOT_PRESENT;
|
||||
case NVIF_OUTP_DETECT_V0_PRESENT: return PRESENT;
|
||||
case NVIF_OUTP_DETECT_V0_UNKNOWN: return UNKNOWN;
|
||||
default:
|
||||
WARN_ON(1);
|
||||
break;
|
||||
}
|
||||
|
||||
return UNKNOWN;
|
||||
}
|
||||
|
||||
void
|
||||
nvif_outp_dtor(struct nvif_outp *outp)
|
||||
{
|
||||
@ -231,6 +511,50 @@ nvif_outp_ctor(struct nvif_disp *disp, const char *name, int id, struct nvif_out
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
outp->id = args.id;
|
||||
|
||||
switch (args.type) {
|
||||
case NVIF_OUTP_V0_TYPE_DAC : outp->info.type = NVIF_OUTP_DAC; break;
|
||||
case NVIF_OUTP_V0_TYPE_SOR : outp->info.type = NVIF_OUTP_SOR; break;
|
||||
case NVIF_OUTP_V0_TYPE_PIOR: outp->info.type = NVIF_OUTP_PIOR; break;
|
||||
break;
|
||||
default:
|
||||
WARN_ON(1);
|
||||
nvif_outp_dtor(outp);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
switch (args.proto) {
|
||||
case NVIF_OUTP_V0_PROTO_RGB_CRT:
|
||||
outp->info.proto = NVIF_OUTP_RGB_CRT;
|
||||
outp->info.rgb_crt.freq_max = args.rgb_crt.freq_max;
|
||||
break;
|
||||
case NVIF_OUTP_V0_PROTO_TMDS:
|
||||
outp->info.proto = NVIF_OUTP_TMDS;
|
||||
outp->info.tmds.dual = args.tmds.dual;
|
||||
break;
|
||||
case NVIF_OUTP_V0_PROTO_LVDS:
|
||||
outp->info.proto = NVIF_OUTP_LVDS;
|
||||
outp->info.lvds.acpi_edid = args.lvds.acpi_edid;
|
||||
break;
|
||||
case NVIF_OUTP_V0_PROTO_DP:
|
||||
outp->info.proto = NVIF_OUTP_DP;
|
||||
outp->info.dp.aux = args.dp.aux;
|
||||
outp->info.dp.mst = args.dp.mst;
|
||||
outp->info.dp.increased_wm = args.dp.increased_wm;
|
||||
outp->info.dp.link_nr = args.dp.link_nr;
|
||||
outp->info.dp.link_bw = args.dp.link_bw;
|
||||
break;
|
||||
default:
|
||||
WARN_ON(1);
|
||||
nvif_outp_dtor(outp);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
outp->info.heads = args.heads;
|
||||
outp->info.ddc = args.ddc;
|
||||
outp->info.conn = args.conn;
|
||||
|
||||
outp->or.id = -1;
|
||||
return 0;
|
||||
}
|
||||
|
@ -79,8 +79,7 @@ nvkm_firmware_get(const struct nvkm_subdev *subdev, const char *fwname, int ver,
|
||||
int i;
|
||||
|
||||
/* Convert device name to lowercase */
|
||||
strncpy(cname, device->chip->name, sizeof(cname));
|
||||
cname[sizeof(cname) - 1] = '\0';
|
||||
strscpy(cname, device->chip->name, sizeof(cname));
|
||||
i = strlen(cname);
|
||||
while (i) {
|
||||
--i;
|
||||
|
@ -140,12 +140,23 @@ nvkm_memory_new(struct nvkm_device *device, enum nvkm_memory_target target,
|
||||
{
|
||||
struct nvkm_instmem *imem = device->imem;
|
||||
struct nvkm_memory *memory;
|
||||
bool preserve = true;
|
||||
int ret;
|
||||
|
||||
if (unlikely(target != NVKM_MEM_TARGET_INST || !imem))
|
||||
if (unlikely(!imem))
|
||||
return -ENOSYS;
|
||||
|
||||
ret = nvkm_instobj_new(imem, size, align, zero, &memory);
|
||||
switch (target) {
|
||||
case NVKM_MEM_TARGET_INST_SR_LOST:
|
||||
preserve = false;
|
||||
break;
|
||||
case NVKM_MEM_TARGET_INST:
|
||||
break;
|
||||
default:
|
||||
return -ENOSYS;
|
||||
}
|
||||
|
||||
ret = nvkm_instobj_new(imem, size, align, zero, preserve, &memory);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
|
@ -23,15 +23,12 @@
|
||||
*/
|
||||
#include "priv.h"
|
||||
#include "conn.h"
|
||||
#include "dp.h"
|
||||
#include "head.h"
|
||||
#include "ior.h"
|
||||
#include "outp.h"
|
||||
|
||||
#include <core/client.h>
|
||||
#include <core/ramht.h>
|
||||
#include <subdev/bios.h>
|
||||
#include <subdev/bios/dcb.h>
|
||||
|
||||
#include <nvif/class.h>
|
||||
#include <nvif/cl0046.h>
|
||||
@ -105,18 +102,14 @@ static int
|
||||
nvkm_disp_fini(struct nvkm_engine *engine, bool suspend)
|
||||
{
|
||||
struct nvkm_disp *disp = nvkm_disp(engine);
|
||||
struct nvkm_conn *conn;
|
||||
struct nvkm_outp *outp;
|
||||
|
||||
if (disp->func->fini)
|
||||
disp->func->fini(disp);
|
||||
|
||||
list_for_each_entry(outp, &disp->outps, head) {
|
||||
nvkm_outp_fini(outp);
|
||||
}
|
||||
|
||||
list_for_each_entry(conn, &disp->conns, head) {
|
||||
nvkm_conn_fini(conn);
|
||||
if (outp->func->fini)
|
||||
outp->func->fini(outp);
|
||||
}
|
||||
|
||||
return 0;
|
||||
@ -126,16 +119,12 @@ static int
|
||||
nvkm_disp_init(struct nvkm_engine *engine)
|
||||
{
|
||||
struct nvkm_disp *disp = nvkm_disp(engine);
|
||||
struct nvkm_conn *conn;
|
||||
struct nvkm_outp *outp;
|
||||
struct nvkm_ior *ior;
|
||||
|
||||
list_for_each_entry(conn, &disp->conns, head) {
|
||||
nvkm_conn_init(conn);
|
||||
}
|
||||
|
||||
list_for_each_entry(outp, &disp->outps, head) {
|
||||
nvkm_outp_init(outp);
|
||||
if (outp->func->init)
|
||||
outp->func->init(outp);
|
||||
}
|
||||
|
||||
if (disp->func->init) {
|
||||
@ -159,142 +148,15 @@ nvkm_disp_oneinit(struct nvkm_engine *engine)
|
||||
{
|
||||
struct nvkm_disp *disp = nvkm_disp(engine);
|
||||
struct nvkm_subdev *subdev = &disp->engine.subdev;
|
||||
struct nvkm_bios *bios = subdev->device->bios;
|
||||
struct nvkm_outp *outp, *outt, *pair;
|
||||
struct nvkm_conn *conn;
|
||||
struct nvkm_head *head;
|
||||
struct nvkm_ior *ior;
|
||||
struct nvbios_connE connE;
|
||||
struct dcb_output dcbE;
|
||||
u8 hpd = 0, ver, hdr;
|
||||
u32 data;
|
||||
int ret, i;
|
||||
|
||||
/* Create output path objects for each VBIOS display path. */
|
||||
i = -1;
|
||||
while ((data = dcb_outp_parse(bios, ++i, &ver, &hdr, &dcbE))) {
|
||||
if (ver < 0x40) /* No support for chipsets prior to NV50. */
|
||||
break;
|
||||
if (dcbE.type == DCB_OUTPUT_UNUSED)
|
||||
continue;
|
||||
if (dcbE.type == DCB_OUTPUT_EOL)
|
||||
break;
|
||||
outp = NULL;
|
||||
|
||||
switch (dcbE.type) {
|
||||
case DCB_OUTPUT_ANALOG:
|
||||
case DCB_OUTPUT_TV:
|
||||
case DCB_OUTPUT_TMDS:
|
||||
case DCB_OUTPUT_LVDS:
|
||||
ret = nvkm_outp_new(disp, i, &dcbE, &outp);
|
||||
break;
|
||||
case DCB_OUTPUT_DP:
|
||||
ret = nvkm_dp_new(disp, i, &dcbE, &outp);
|
||||
break;
|
||||
case DCB_OUTPUT_WFD:
|
||||
/* No support for WFD yet. */
|
||||
ret = -ENODEV;
|
||||
continue;
|
||||
default:
|
||||
nvkm_warn(subdev, "dcb %d type %d unknown\n",
|
||||
i, dcbE.type);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (ret) {
|
||||
if (outp) {
|
||||
if (ret != -ENODEV)
|
||||
OUTP_ERR(outp, "ctor failed: %d", ret);
|
||||
else
|
||||
OUTP_DBG(outp, "not supported");
|
||||
nvkm_outp_del(&outp);
|
||||
continue;
|
||||
}
|
||||
nvkm_error(subdev, "failed to create outp %d\n", i);
|
||||
continue;
|
||||
}
|
||||
|
||||
list_add_tail(&outp->head, &disp->outps);
|
||||
hpd = max(hpd, (u8)(dcbE.connector + 1));
|
||||
}
|
||||
|
||||
/* Create connector objects based on available output paths. */
|
||||
list_for_each_entry_safe(outp, outt, &disp->outps, head) {
|
||||
/* VBIOS data *should* give us the most useful information. */
|
||||
data = nvbios_connEp(bios, outp->info.connector, &ver, &hdr,
|
||||
&connE);
|
||||
|
||||
/* No bios connector data... */
|
||||
if (!data) {
|
||||
/* Heuristic: anything with the same ccb index is
|
||||
* considered to be on the same connector, any
|
||||
* output path without an associated ccb entry will
|
||||
* be put on its own connector.
|
||||
*/
|
||||
int ccb_index = outp->info.i2c_index;
|
||||
if (ccb_index != 0xf) {
|
||||
list_for_each_entry(pair, &disp->outps, head) {
|
||||
if (pair->info.i2c_index == ccb_index) {
|
||||
outp->conn = pair->conn;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* Connector shared with another output path. */
|
||||
if (outp->conn)
|
||||
continue;
|
||||
|
||||
memset(&connE, 0x00, sizeof(connE));
|
||||
connE.type = DCB_CONNECTOR_NONE;
|
||||
i = -1;
|
||||
} else {
|
||||
i = outp->info.connector;
|
||||
}
|
||||
|
||||
/* Check that we haven't already created this connector. */
|
||||
list_for_each_entry(conn, &disp->conns, head) {
|
||||
if (conn->index == outp->info.connector) {
|
||||
outp->conn = conn;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (outp->conn)
|
||||
continue;
|
||||
|
||||
/* Apparently we need to create a new one! */
|
||||
ret = nvkm_conn_new(disp, i, &connE, &outp->conn);
|
||||
if (ret) {
|
||||
nvkm_error(subdev, "failed to create outp %d conn: %d\n", outp->index, ret);
|
||||
nvkm_conn_del(&outp->conn);
|
||||
list_del(&outp->head);
|
||||
nvkm_outp_del(&outp);
|
||||
continue;
|
||||
}
|
||||
|
||||
list_add_tail(&outp->conn->head, &disp->conns);
|
||||
}
|
||||
|
||||
if (disp->func->oneinit) {
|
||||
ret = disp->func->oneinit(disp);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Enforce identity-mapped SOR assignment for panels, which have
|
||||
* certain bits (ie. backlight controls) wired to a specific SOR.
|
||||
*/
|
||||
list_for_each_entry(outp, &disp->outps, head) {
|
||||
if (outp->conn->info.type == DCB_CONNECTOR_LVDS ||
|
||||
outp->conn->info.type == DCB_CONNECTOR_eDP) {
|
||||
ior = nvkm_ior_find(disp, SOR, ffs(outp->info.or) - 1);
|
||||
if (!WARN_ON(!ior))
|
||||
ior->identity = true;
|
||||
outp->identity = true;
|
||||
}
|
||||
}
|
||||
|
||||
i = 0;
|
||||
list_for_each_entry(head, &disp->heads, head)
|
||||
i = max(i, head->id + 1);
|
||||
|
@ -29,16 +29,6 @@
|
||||
|
||||
#include <nvif/event.h>
|
||||
|
||||
void
|
||||
nvkm_conn_fini(struct nvkm_conn *conn)
|
||||
{
|
||||
}
|
||||
|
||||
void
|
||||
nvkm_conn_init(struct nvkm_conn *conn)
|
||||
{
|
||||
}
|
||||
|
||||
void
|
||||
nvkm_conn_del(struct nvkm_conn **pconn)
|
||||
{
|
||||
|
@ -19,8 +19,6 @@ struct nvkm_conn {
|
||||
int nvkm_conn_new(struct nvkm_disp *, int index, struct nvbios_connE *,
|
||||
struct nvkm_conn **);
|
||||
void nvkm_conn_del(struct nvkm_conn **);
|
||||
void nvkm_conn_init(struct nvkm_conn *);
|
||||
void nvkm_conn_fini(struct nvkm_conn *);
|
||||
|
||||
#define CONN_MSG(c,l,f,a...) do { \
|
||||
struct nvkm_conn *_conn = (c); \
|
||||
|
@ -41,6 +41,40 @@
|
||||
*/
|
||||
#define AMPERE_IED_HACK(disp) ((disp)->engine.subdev.device->card_type >= GA100)
|
||||
|
||||
static int
|
||||
nvkm_dp_mst_id_put(struct nvkm_outp *outp, u32 id)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
nvkm_dp_mst_id_get(struct nvkm_outp *outp, u32 *pid)
|
||||
{
|
||||
*pid = BIT(outp->index);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
nvkm_dp_aux_xfer(struct nvkm_outp *outp, u8 type, u32 addr, u8 *data, u8 *size)
|
||||
{
|
||||
int ret = nvkm_i2c_aux_acquire(outp->dp.aux);
|
||||
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = nvkm_i2c_aux_xfer(outp->dp.aux, false, type, addr, data, size);
|
||||
nvkm_i2c_aux_release(outp->dp.aux);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int
|
||||
nvkm_dp_aux_pwr(struct nvkm_outp *outp, bool pu)
|
||||
{
|
||||
outp->dp.enabled = pu;
|
||||
nvkm_dp_enable(outp, outp->dp.enabled);
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct lt_state {
|
||||
struct nvkm_outp *outp;
|
||||
|
||||
@ -282,31 +316,20 @@ nvkm_dp_train_link(struct nvkm_outp *outp, int rate)
|
||||
struct lt_state lt = {
|
||||
.outp = outp,
|
||||
.pc2 = outp->dp.dpcd[DPCD_RC02] & DPCD_RC02_TPS3_SUPPORTED,
|
||||
.repeaters = outp->dp.lttprs,
|
||||
};
|
||||
u8 sink[2], data;
|
||||
u8 sink[2];
|
||||
int ret;
|
||||
|
||||
OUTP_DBG(outp, "training %dx%02x", ior->dp.nr, ior->dp.bw);
|
||||
|
||||
/* Select LTTPR non-transparent mode if we have a valid configuration,
|
||||
* use transparent mode otherwise.
|
||||
*/
|
||||
if (outp->dp.lttpr[0] >= 0x14) {
|
||||
data = DPCD_LTTPR_MODE_TRANSPARENT;
|
||||
nvkm_wraux(outp->dp.aux, DPCD_LTTPR_MODE, &data, sizeof(data));
|
||||
|
||||
if (outp->dp.lttprs) {
|
||||
data = DPCD_LTTPR_MODE_NON_TRANSPARENT;
|
||||
nvkm_wraux(outp->dp.aux, DPCD_LTTPR_MODE, &data, sizeof(data));
|
||||
lt.repeaters = outp->dp.lttprs;
|
||||
}
|
||||
}
|
||||
|
||||
/* Set desired link configuration on the sink. */
|
||||
sink[0] = (outp->dp.rate[rate].dpcd < 0) ? ior->dp.bw : 0;
|
||||
sink[1] = ior->dp.nr;
|
||||
if (ior->dp.ef)
|
||||
sink[1] |= DPCD_LC01_ENHANCED_FRAME_EN;
|
||||
if (outp->dp.lt.post_adj)
|
||||
sink[1] |= 0x20;
|
||||
|
||||
ret = nvkm_wraux(outp->dp.aux, DPCD_LC00_LINK_BW_SET, sink, 2);
|
||||
if (ret)
|
||||
@ -447,71 +470,58 @@ nvkm_dp_train_init(struct nvkm_outp *outp)
|
||||
}
|
||||
|
||||
static int
|
||||
nvkm_dp_train(struct nvkm_outp *outp, u32 dataKBps)
|
||||
nvkm_dp_drive(struct nvkm_outp *outp, u8 lanes, u8 pe[4], u8 vs[4])
|
||||
{
|
||||
struct lt_state lt = {
|
||||
.outp = outp,
|
||||
.stat[4] = (pe[0] << 2) | (vs[0] << 0) |
|
||||
(pe[1] << 6) | (vs[1] << 4),
|
||||
.stat[5] = (pe[2] << 2) | (vs[2] << 0) |
|
||||
(pe[3] << 6) | (vs[3] << 4),
|
||||
};
|
||||
|
||||
return nvkm_dp_train_drive(<, false);
|
||||
}
|
||||
|
||||
static int
|
||||
nvkm_dp_train(struct nvkm_outp *outp, bool retrain)
|
||||
{
|
||||
struct nvkm_ior *ior = outp->ior;
|
||||
int ret = -EINVAL, nr, rate;
|
||||
u8 pwr;
|
||||
int ret, rate;
|
||||
|
||||
for (rate = 0; rate < outp->dp.rates; rate++) {
|
||||
if (outp->dp.rate[rate].rate == (retrain ? ior->dp.bw : outp->dp.lt.bw) * 27000)
|
||||
break;
|
||||
}
|
||||
|
||||
if (WARN_ON(rate == outp->dp.rates))
|
||||
return -EINVAL;
|
||||
|
||||
/* Retraining link? Skip source configuration, it can mess up the active modeset. */
|
||||
if (atomic_read(&outp->dp.lt.done)) {
|
||||
for (rate = 0; rate < outp->dp.rates; rate++) {
|
||||
if (outp->dp.rate[rate].rate == ior->dp.bw * 27000)
|
||||
return nvkm_dp_train_link(outp, ret);
|
||||
}
|
||||
WARN_ON(1);
|
||||
return -EINVAL;
|
||||
if (retrain) {
|
||||
mutex_lock(&outp->dp.mutex);
|
||||
ret = nvkm_dp_train_link(outp, rate);
|
||||
mutex_unlock(&outp->dp.mutex);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Ensure sink is not in a low-power state. */
|
||||
if (!nvkm_rdaux(outp->dp.aux, DPCD_SC00, &pwr, 1)) {
|
||||
if ((pwr & DPCD_SC00_SET_POWER) != DPCD_SC00_SET_POWER_D0) {
|
||||
pwr &= ~DPCD_SC00_SET_POWER;
|
||||
pwr |= DPCD_SC00_SET_POWER_D0;
|
||||
nvkm_wraux(outp->dp.aux, DPCD_SC00, &pwr, 1);
|
||||
}
|
||||
}
|
||||
mutex_lock(&outp->dp.mutex);
|
||||
OUTP_DBG(outp, "training");
|
||||
|
||||
ior->dp.mst = outp->dp.lt.mst;
|
||||
ior->dp.ef = outp->dp.dpcd[DPCD_RC02] & DPCD_RC02_ENHANCED_FRAME_CAP;
|
||||
ior->dp.nr = 0;
|
||||
ior->dp.bw = outp->dp.lt.bw;
|
||||
ior->dp.nr = outp->dp.lt.nr;
|
||||
|
||||
/* Link training. */
|
||||
OUTP_DBG(outp, "training");
|
||||
nvkm_dp_train_init(outp);
|
||||
|
||||
/* Validate and train at configuration requested (if any) on ACQUIRE. */
|
||||
if (outp->dp.lt.nr) {
|
||||
for (nr = outp->dp.links; ret < 0 && nr; nr >>= 1) {
|
||||
for (rate = 0; nr == outp->dp.lt.nr && rate < outp->dp.rates; rate++) {
|
||||
if (outp->dp.rate[rate].rate / 27000 == outp->dp.lt.bw) {
|
||||
ior->dp.bw = outp->dp.rate[rate].rate / 27000;
|
||||
ior->dp.nr = nr;
|
||||
ret = nvkm_dp_train_links(outp, rate);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* Otherwise, loop through all valid link configurations that support the data rate. */
|
||||
for (nr = outp->dp.links; ret < 0 && nr; nr >>= 1) {
|
||||
for (rate = 0; ret < 0 && rate < outp->dp.rates; rate++) {
|
||||
if (outp->dp.rate[rate].rate * nr >= dataKBps || WARN_ON(!ior->dp.nr)) {
|
||||
/* Program selected link configuration. */
|
||||
ior->dp.bw = outp->dp.rate[rate].rate / 27000;
|
||||
ior->dp.nr = nr;
|
||||
ret = nvkm_dp_train_links(outp, rate);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* Finish up. */
|
||||
ret = nvkm_dp_train_links(outp, rate);
|
||||
nvkm_dp_train_fini(outp);
|
||||
if (ret < 0)
|
||||
OUTP_ERR(outp, "training failed");
|
||||
else
|
||||
OUTP_DBG(outp, "training done");
|
||||
atomic_set(&outp->dp.lt.done, 1);
|
||||
|
||||
mutex_unlock(&outp->dp.mutex);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -529,155 +539,10 @@ nvkm_dp_disable(struct nvkm_outp *outp, struct nvkm_ior *ior)

static void
nvkm_dp_release(struct nvkm_outp *outp)
{
    /* Prevent link from being retrained if sink sends an IRQ. */
    atomic_set(&outp->dp.lt.done, 0);
    outp->ior->dp.nr = 0;
}
    nvkm_dp_disable(outp, outp->ior);

static int
nvkm_dp_acquire(struct nvkm_outp *outp)
{
    struct nvkm_ior *ior = outp->ior;
    struct nvkm_head *head;
    bool retrain = true;
    u32 datakbps = 0;
    u32 dataKBps;
    u32 linkKBps;
    u8 stat[3];
    int ret, i;

    mutex_lock(&outp->dp.mutex);

    /* Check that link configuration meets current requirements. */
    list_for_each_entry(head, &outp->disp->heads, head) {
        if (ior->asy.head & (1 << head->id)) {
            u32 khz = (head->asy.hz >> ior->asy.rgdiv) / 1000;
            datakbps += khz * head->asy.or.depth;
        }
    }

    linkKBps = ior->dp.bw * 27000 * ior->dp.nr;
    dataKBps = DIV_ROUND_UP(datakbps, 8);
    OUTP_DBG(outp, "data %d KB/s link %d KB/s mst %d->%d",
             dataKBps, linkKBps, ior->dp.mst, outp->dp.lt.mst);
    if (linkKBps < dataKBps || ior->dp.mst != outp->dp.lt.mst) {
        OUTP_DBG(outp, "link requirements changed");
        goto done;
    }

    /* Check that link is still trained. */
    ret = nvkm_rdaux(outp->dp.aux, DPCD_LS02, stat, 3);
    if (ret) {
        OUTP_DBG(outp, "failed to read link status, assuming no sink");
        goto done;
    }

    if (stat[2] & DPCD_LS04_INTERLANE_ALIGN_DONE) {
        for (i = 0; i < ior->dp.nr; i++) {
            u8 lane = (stat[i >> 1] >> ((i & 1) * 4)) & 0x0f;
            if (!(lane & DPCD_LS02_LANE0_CR_DONE) ||
                !(lane & DPCD_LS02_LANE0_CHANNEL_EQ_DONE) ||
                !(lane & DPCD_LS02_LANE0_SYMBOL_LOCKED)) {
                OUTP_DBG(outp, "lane %d not equalised", lane);
                goto done;
            }
        }
        retrain = false;
    } else {
        OUTP_DBG(outp, "no inter-lane alignment");
    }

done:
    if (retrain || !atomic_read(&outp->dp.lt.done))
        ret = nvkm_dp_train(outp, dataKBps);
    mutex_unlock(&outp->dp.mutex);
    return ret;
}

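A small editorial sketch (not in the patch) of the unit handling in the bandwidth check above: the per-head pixel clock (in kHz, after the RG divider) times the bits-per-pixel depth gives kbit/s, which is rounded up to KB/s before being compared against lanes * bandwidth-code * 27000. The helper below is hypothetical.

/* Hypothetical helper: payload rate in KB/s from pixel clock and bpp. */
static unsigned int payload_data_kBps(unsigned int pclk_khz, unsigned int depth_bpp)
{
    unsigned int kbps = pclk_khz * depth_bpp;   /* kbit/s on the link */
    return (kbps + 7) / 8;                      /* DIV_ROUND_UP(kbps, 8) */
}
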
static bool
nvkm_dp_enable_supported_link_rates(struct nvkm_outp *outp)
{
    u8 sink_rates[DPCD_RC10_SUPPORTED_LINK_RATES__SIZE];
    int i, j, k;

    if (outp->conn->info.type != DCB_CONNECTOR_eDP ||
        outp->dp.dpcd[DPCD_RC00_DPCD_REV] < 0x13 ||
        nvkm_rdaux(outp->dp.aux, DPCD_RC10_SUPPORTED_LINK_RATES(0),
                   sink_rates, sizeof(sink_rates)))
        return false;

    for (i = 0; i < ARRAY_SIZE(sink_rates); i += 2) {
        const u32 rate = ((sink_rates[i + 1] << 8) | sink_rates[i]) * 200 / 10;

        if (!rate || WARN_ON(outp->dp.rates == ARRAY_SIZE(outp->dp.rate)))
            break;

        if (rate > outp->info.dpconf.link_bw * 27000) {
            OUTP_DBG(outp, "rate %d !outp", rate);
            continue;
        }

        for (j = 0; j < outp->dp.rates; j++) {
            if (rate > outp->dp.rate[j].rate) {
                for (k = outp->dp.rates; k > j; k--)
                    outp->dp.rate[k] = outp->dp.rate[k - 1];
                break;
            }
        }

        outp->dp.rate[j].dpcd = i / 2;
        outp->dp.rate[j].rate = rate;
        outp->dp.rates++;
    }

    for (i = 0; i < outp->dp.rates; i++)
        OUTP_DBG(outp, "link_rate[%d] = %d", outp->dp.rate[i].dpcd, outp->dp.rate[i].rate);

    return outp->dp.rates != 0;
}

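An editorial sketch (not in the patch) of how one DPCD SUPPORTED_LINK_RATES entry decodes in the loop above: each entry is a little-endian 16-bit value in units of 200 kHz, and multiplying by 200 then dividing by 10 yields the same unit the driver uses elsewhere (bandwidth code * 27000). The function below is illustrative only.

/* Illustrative only: e.g. 0x34bc (13500 * 200 kHz = 2.7 GHz) -> 270000. */
static unsigned int edp_link_rate_entry(unsigned char lo, unsigned char hi)
{
    unsigned int khz = (((unsigned int)hi << 8) | lo) * 200;  /* link clock, kHz */
    return khz / 10;                                          /* driver rate units */
}
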
/* XXX: This is a big fat hack, and this is just drm_dp_read_dpcd_caps()
 * converted to work inside nvkm. This is a temporary holdover until we start
 * passing the drm_dp_aux device through NVKM
 */
static int
nvkm_dp_read_dpcd_caps(struct nvkm_outp *outp)
{
    struct nvkm_i2c_aux *aux = outp->dp.aux;
    u8 dpcd_ext[DP_RECEIVER_CAP_SIZE];
    int ret;

    ret = nvkm_rdaux(aux, DPCD_RC00_DPCD_REV, outp->dp.dpcd, DP_RECEIVER_CAP_SIZE);
    if (ret < 0)
        return ret;

    /*
     * Prior to DP1.3 the bit represented by
     * DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT was reserved.
     * If it is set DP_DPCD_REV at 0000h could be at a value less than
     * the true capability of the panel. The only way to check is to
     * then compare 0000h and 2200h.
     */
    if (!(outp->dp.dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
          DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT))
        return 0;

    ret = nvkm_rdaux(aux, DP_DP13_DPCD_REV, dpcd_ext, sizeof(dpcd_ext));
    if (ret < 0)
        return ret;

    if (outp->dp.dpcd[DP_DPCD_REV] > dpcd_ext[DP_DPCD_REV]) {
        OUTP_DBG(outp, "Extended DPCD rev less than base DPCD rev (%d > %d)\n",
                 outp->dp.dpcd[DP_DPCD_REV], dpcd_ext[DP_DPCD_REV]);
        return 0;
    }

    if (!memcmp(outp->dp.dpcd, dpcd_ext, sizeof(dpcd_ext)))
        return 0;

    memcpy(outp->dp.dpcd, dpcd_ext, sizeof(dpcd_ext));

    return 0;
    nvkm_outp_release(outp);
}

void
@ -711,66 +576,11 @@ nvkm_dp_enable(struct nvkm_outp *outp, bool auxpwr)
        OUTP_DBG(outp, "aux power -> always");
        nvkm_i2c_aux_monitor(aux, true);
        outp->dp.aux_pwr = true;

        /* Detect any LTTPRs before reading DPCD receiver caps. */
        if (!nvkm_rdaux(aux, DPCD_LTTPR_REV, outp->dp.lttpr, sizeof(outp->dp.lttpr)) &&
            outp->dp.lttpr[0] >= 0x14 && outp->dp.lttpr[2]) {
            switch (outp->dp.lttpr[2]) {
            case 0x80: outp->dp.lttprs = 1; break;
            case 0x40: outp->dp.lttprs = 2; break;
            case 0x20: outp->dp.lttprs = 3; break;
            case 0x10: outp->dp.lttprs = 4; break;
            case 0x08: outp->dp.lttprs = 5; break;
            case 0x04: outp->dp.lttprs = 6; break;
            case 0x02: outp->dp.lttprs = 7; break;
            case 0x01: outp->dp.lttprs = 8; break;
            default:
                /* Unknown LTTPR count, we'll switch to transparent mode. */
                WARN_ON(1);
                outp->dp.lttprs = 0;
                break;
            }
        } else {
            /* No LTTPR support, or zero LTTPR count - don't touch it at all. */
            memset(outp->dp.lttpr, 0x00, sizeof(outp->dp.lttpr));
        }
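        /* Editorial note, not part of the patch: DPCD register 0xf0002
         * (PHY_REPEATER_CNT) encodes N repeaters as 0x80 >> (N - 1), which is
         * what the switch above decodes.  For power-of-two values this is
         * equivalent to something like:
         *
         *     outp->dp.lttprs = 8 - ilog2(outp->dp.lttpr[2]);
         */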

        if (!nvkm_dp_read_dpcd_caps(outp)) {
            const u8 rates[] = { 0x1e, 0x14, 0x0a, 0x06, 0 };
            const u8 *rate;
            int rate_max;

            outp->dp.rates = 0;
            outp->dp.links = outp->dp.dpcd[DPCD_RC02] & DPCD_RC02_MAX_LANE_COUNT;
            outp->dp.links = min(outp->dp.links, outp->info.dpconf.link_nr);
            if (outp->dp.lttprs && outp->dp.lttpr[4])
                outp->dp.links = min_t(int, outp->dp.links, outp->dp.lttpr[4]);

            rate_max = outp->dp.dpcd[DPCD_RC01_MAX_LINK_RATE];
            rate_max = min(rate_max, outp->info.dpconf.link_bw);
            if (outp->dp.lttprs && outp->dp.lttpr[1])
                rate_max = min_t(int, rate_max, outp->dp.lttpr[1]);

            if (!nvkm_dp_enable_supported_link_rates(outp)) {
                for (rate = rates; *rate; rate++) {
                    if (*rate > rate_max)
                        continue;

                    if (WARN_ON(outp->dp.rates == ARRAY_SIZE(outp->dp.rate)))
                        break;

                    outp->dp.rate[outp->dp.rates].dpcd = -1;
                    outp->dp.rate[outp->dp.rates].rate = *rate * 27000;
                    outp->dp.rates++;
                }
            }
        }
    } else
    if (!auxpwr && outp->dp.aux_pwr) {
        OUTP_DBG(outp, "aux power -> demand");
        nvkm_i2c_aux_monitor(aux, false);
        outp->dp.aux_pwr = false;
        atomic_set(&outp->dp.lt.done, 0);

        /* Restore eDP panel GPIO to its prior state if we changed it, as
         * it could potentially interfere with other outputs.
@ -793,6 +603,7 @@ nvkm_dp_fini(struct nvkm_outp *outp)
static void
nvkm_dp_init(struct nvkm_outp *outp)
{
    nvkm_outp_init(outp);
    nvkm_dp_enable(outp, outp->dp.enabled);
}

@ -807,9 +618,18 @@ nvkm_dp_func = {
    .dtor = nvkm_dp_dtor,
    .init = nvkm_dp_init,
    .fini = nvkm_dp_fini,
    .acquire = nvkm_dp_acquire,
    .detect = nvkm_outp_detect,
    .inherit = nvkm_outp_inherit,
    .acquire = nvkm_outp_acquire,
    .release = nvkm_dp_release,
    .disable = nvkm_dp_disable,
    .bl.get = nvkm_outp_bl_get,
    .bl.set = nvkm_outp_bl_set,
    .dp.aux_pwr = nvkm_dp_aux_pwr,
    .dp.aux_xfer = nvkm_dp_aux_xfer,
    .dp.train = nvkm_dp_train,
    .dp.drive = nvkm_dp_drive,
    .dp.mst_id_get = nvkm_dp_mst_id_get,
    .dp.mst_id_put = nvkm_dp_mst_id_put,
};

int
@ -819,7 +639,7 @@ nvkm_dp_new(struct nvkm_disp *disp, int index, struct dcb_output *dcbE, struct n
    struct nvkm_bios *bios = device->bios;
    struct nvkm_i2c *i2c = device->i2c;
    struct nvkm_outp *outp;
    u8 hdr, cnt, len;
    u8 ver, hdr, cnt, len;
    u32 data;
    int ret;

@ -847,7 +667,9 @@ nvkm_dp_new(struct nvkm_disp *disp, int index, struct dcb_output *dcbE, struct n

    OUTP_DBG(outp, "bios dp %02x %02x %02x %02x", outp->dp.version, hdr, cnt, len);

    data = nvbios_dp_table(bios, &ver, &hdr, &cnt, &len);
    outp->dp.mst = data && ver >= 0x40 && (nvbios_rd08(bios, data + 0x08) & 0x04);

    mutex_init(&outp->dp.mutex);
    atomic_set(&outp->dp.lt.done, 0);
    return 0;
}

@ -124,6 +124,7 @@ g84_sor = {
    .state = nv50_sor_state,
    .power = nv50_sor_power,
    .clock = nv50_sor_clock,
    .bl = &nv50_sor_bl,
    .hdmi = &g84_sor_hdmi,
};

@ -295,6 +295,7 @@ g94_sor = {
    .clock = nv50_sor_clock,
    .war_2 = g94_sor_war_2,
    .war_3 = g94_sor_war_3,
    .bl = &nv50_sor_bl,
    .hdmi = &g84_sor_hdmi,
    .dp = &g94_sor_dp,
};

@ -105,6 +105,7 @@ ga102_sor = {
    .state = gv100_sor_state,
    .power = nv50_sor_power,
    .clock = ga102_sor_clock,
    .bl = &gt215_sor_bl,
    .hdmi = &gv100_sor_hdmi,
    .dp = &ga102_sor_dp,
    .hda = &gv100_sor_hda,

@ -328,6 +328,7 @@ gf119_sor = {
    .state = gf119_sor_state,
    .power = nv50_sor_power,
    .clock = gf119_sor_clock,
    .bl = &gt215_sor_bl,
    .hdmi = &gf119_sor_hdmi,
    .dp = &gf119_sor_dp,
    .hda = &gf119_sor_hda,
@ -1038,7 +1039,6 @@ gf119_disp_super(struct work_struct *work)
            continue;
        nv50_disp_super_2_0(disp, head);
    }
    nvkm_outp_route(disp);
    list_for_each_entry(head, &disp->heads, head) {
        if (!(mask[head->id] & 0x00010000))
            continue;

@ -115,6 +115,7 @@ gk104_sor = {
    .state = gf119_sor_state,
    .power = nv50_sor_power,
    .clock = gf119_sor_clock,
    .bl = &gt215_sor_bl,
    .hdmi = &gk104_sor_hdmi,
    .dp = &gf119_sor_dp,
    .hda = &gf119_sor_hda,

@ -70,6 +70,7 @@ gm107_sor = {
    .state = gf119_sor_state,
    .power = nv50_sor_power,
    .clock = gf119_sor_clock,
    .bl = &gt215_sor_bl,
    .hdmi = &gk104_sor_hdmi,
    .dp = &gm107_sor_dp,
    .hda = &gf119_sor_hda,

@ -68,15 +68,23 @@ gm200_sor_dp = {
};

void
gm200_sor_hdmi_scdc(struct nvkm_ior *ior, u8 scdc)
gm200_sor_hdmi_scdc(struct nvkm_ior *ior, u32 khz, bool support, bool scrambling,
                    bool scrambling_low_rates)
{
    struct nvkm_device *device = ior->disp->engine.subdev.device;
    const u32 soff = nv50_ior_base(ior);
    const u32 ctrl = scdc & 0x3;
    u32 ctrl = 0;

    ior->tmds.high_speed = khz > 340000;

    if (support && scrambling) {
        if (ior->tmds.high_speed)
            ctrl |= 0x00000002;
        if (ior->tmds.high_speed || scrambling_low_rates)
            ctrl |= 0x00000001;
    }

    nvkm_mask(device, 0x61c5bc + soff, 0x00000003, ctrl);

    ior->tmds.high_speed = !!(scdc & 0x2);
}

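A short editorial sketch (not part of the patch) of the control value the reworked gm200_sor_hdmi_scdc() computes above: bit 0 enables scrambling and bit 1 selects the 1/40 TMDS clock ratio required above 340 MHz. The function and parameter names below are illustrative only.

static unsigned int scdc_ctrl(unsigned int khz, int support, int scrambling,
                              int scrambling_low_rates)
{
    int high_speed = khz > 340000;          /* HDMI 2.0 high TMDS clock threshold */
    unsigned int ctrl = 0;

    if (support && scrambling) {
        if (high_speed)
            ctrl |= 0x2;                    /* TMDS clock ratio 1/40 */
        if (high_speed || scrambling_low_rates)
            ctrl |= 0x1;                    /* enable scrambling */
    }
    return ctrl;
}
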
const struct nvkm_ior_func_hdmi
@ -139,6 +147,7 @@ gm200_sor = {
    .state = gf119_sor_state,
    .power = nv50_sor_power,
    .clock = gf119_sor_clock,
    .bl = &gt215_sor_bl,
    .hdmi = &gm200_sor_hdmi,
    .dp = &gm200_sor_dp,
    .hda = &gf119_sor_hda,

@ -37,6 +37,7 @@ gp100_sor = {
    .state = gf119_sor_state,
    .power = nv50_sor_power,
    .clock = gf119_sor_clock,
    .bl = &gt215_sor_bl,
    .hdmi = &gm200_sor_hdmi,
    .dp = &gm200_sor_dp,
    .hda = &gf119_sor_hda,

@ -182,11 +182,49 @@ gt215_sor_hdmi = {
    .infoframe_vsi = gt215_sor_hdmi_infoframe_vsi,
};

static int
gt215_sor_bl_set(struct nvkm_ior *ior, int lvl)
{
    struct nvkm_device *device = ior->disp->engine.subdev.device;
    const u32 soff = nv50_ior_base(ior);
    u32 div, val;

    div = nvkm_rd32(device, 0x61c080 + soff);
    val = (lvl * div) / 100;
    if (div)
        nvkm_wr32(device, 0x61c084 + soff, 0xc0000000 | val);

    return 0;
}

static int
gt215_sor_bl_get(struct nvkm_ior *ior)
{
    struct nvkm_device *device = ior->disp->engine.subdev.device;
    const u32 soff = nv50_ior_base(ior);
    u32 div, val;

    div = nvkm_rd32(device, 0x61c080 + soff);
    val = nvkm_rd32(device, 0x61c084 + soff);
    val &= 0x00ffffff;
    if (div && div >= val)
        return ((val * 100) + (div / 2)) / div;

    return 100;
}

const struct nvkm_ior_func_bl
gt215_sor_bl = {
    .get = gt215_sor_bl_get,
    .set = gt215_sor_bl_set,
};

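An editorial sketch (not in the patch) of the level/duty conversion the gt215 backlight hooks above perform: the PWM divider is read from the hardware, levels run 0..100, and the readback rounds to the nearest percent. The helper names are illustrative only.

static unsigned int bl_level_to_duty(unsigned int lvl, unsigned int div)
{
    return (lvl * div) / 100;               /* percent -> PWM duty value */
}

static unsigned int bl_duty_to_level(unsigned int val, unsigned int div)
{
    if (div && div >= val)
        return ((val * 100) + (div / 2)) / div;   /* round to nearest percent */
    return 100;                                   /* unknown or overrange: report full */
}
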
static const struct nvkm_ior_func
gt215_sor = {
    .state = g94_sor_state,
    .power = nv50_sor_power,
    .clock = nv50_sor_clock,
    .bl = &gt215_sor_bl,
    .hdmi = &gt215_sor_hdmi,
    .dp = &gt215_sor_dp,
    .hda = &gt215_sor_hda,

@ -212,6 +212,7 @@ gv100_sor = {
    .state = gv100_sor_state,
    .power = nv50_sor_power,
    .clock = gf119_sor_clock,
    .bl = &gt215_sor_bl,
    .hdmi = &gv100_sor_hdmi,
    .dp = &gv100_sor_dp,
    .hda = &gv100_sor_hda,
@ -863,7 +864,6 @@ gv100_disp_super(struct work_struct *work)
            continue;
        nv50_disp_super_2_0(disp, head);
    }
    nvkm_outp_route(disp);
    list_for_each_entry(head, &disp->heads, head) {
        if (!(mask[head->id] & 0x00010000))
            continue;

@ -63,11 +63,18 @@ struct nvkm_ior_func {
    void (*war_2)(struct nvkm_ior *);
    void (*war_3)(struct nvkm_ior *);

    const struct nvkm_ior_func_bl {
        int (*get)(struct nvkm_ior *);
        int (*set)(struct nvkm_ior *, int lvl);
    } *bl;

    const struct nvkm_ior_func_hdmi {
        void (*ctrl)(struct nvkm_ior *, int head, bool enable, u8 max_ac_packet, u8 rekey);
        void (*scdc)(struct nvkm_ior *, u8 scdc);
        void (*scdc)(struct nvkm_ior *, u32 khz, bool support, bool scrambling,
                     bool scrambling_low_rates);
        void (*infoframe_avi)(struct nvkm_ior *, int head, void *data, u32 size);
        void (*infoframe_vsi)(struct nvkm_ior *, int head, void *data, u32 size);
        void (*audio)(struct nvkm_ior *, int head, bool enable);
    } *hdmi;

    const struct nvkm_ior_func_dp {
@ -77,6 +84,8 @@ struct nvkm_ior_func {
        void (*pattern)(struct nvkm_ior *, int pattern);
        void (*drive)(struct nvkm_ior *, int ln, int pc,
                      int dc, int pe, int tx_pu);
        int (*sst)(struct nvkm_ior *, int head, bool ef,
                   u32 watermark, u32 hblanksym, u32 vblanksym);
        void (*vcpi)(struct nvkm_ior *, int head, u8 slot,
                     u8 slot_nr, u16 pbn, u16 aligned);
        void (*audio)(struct nvkm_ior *, int head, bool enable);
@ -122,6 +131,7 @@ int nv50_sor_cnt(struct nvkm_disp *, unsigned long *);
void nv50_sor_state(struct nvkm_ior *, struct nvkm_ior_state *);
void nv50_sor_power(struct nvkm_ior *, bool, bool, bool, bool, bool);
void nv50_sor_clock(struct nvkm_ior *);
extern const struct nvkm_ior_func_bl nv50_sor_bl;

int g84_sor_new(struct nvkm_disp *, int);
extern const struct nvkm_ior_func_hdmi g84_sor_hdmi;
@ -138,6 +148,7 @@ void g94_sor_dp_audio_sym(struct nvkm_ior *, int, u16, u32);
void g94_sor_dp_activesym(struct nvkm_ior *, int, u8, u8, u8, u8);
void g94_sor_dp_watermark(struct nvkm_ior *, int, u8);

extern const struct nvkm_ior_func_bl gt215_sor_bl;
extern const struct nvkm_ior_func_hdmi gt215_sor_hdmi;
void gt215_sor_dp_audio(struct nvkm_ior *, int, bool);
extern const struct nvkm_ior_func_hda gt215_sor_hda;
@ -167,7 +178,7 @@ void gm107_sor_dp_pattern(struct nvkm_ior *, int);
void gm200_sor_route_set(struct nvkm_outp *, struct nvkm_ior *);
int gm200_sor_route_get(struct nvkm_outp *, int *);
extern const struct nvkm_ior_func_hdmi gm200_sor_hdmi;
void gm200_sor_hdmi_scdc(struct nvkm_ior *, u8);
void gm200_sor_hdmi_scdc(struct nvkm_ior *, u32, bool, bool, bool);
extern const struct nvkm_ior_func_dp gm200_sor_dp;
void gm200_sor_dp_drive(struct nvkm_ior *, int, int, int, int, int);

@ -44,6 +44,7 @@ mcp89_sor = {
    .state = g94_sor_state,
    .power = nv50_sor_power,
    .clock = nv50_sor_clock,
    .bl = &gt215_sor_bl,
    .hdmi = &gt215_sor_hdmi,
    .dp = &mcp89_sor_dp,
    .hda = &gt215_sor_hda,

|
||||
*/
|
||||
#include "priv.h"
|
||||
#include "chan.h"
|
||||
#include "conn.h"
|
||||
#include "head.h"
|
||||
#include "dp.h"
|
||||
#include "ior.h"
|
||||
#include "outp.h"
|
||||
|
||||
@ -156,6 +158,37 @@ nv50_pior_cnt(struct nvkm_disp *disp, unsigned long *pmask)
|
||||
return 3;
|
||||
}
|
||||
|
||||
static int
|
||||
nv50_sor_bl_set(struct nvkm_ior *ior, int lvl)
|
||||
{
|
||||
struct nvkm_device *device = ior->disp->engine.subdev.device;
|
||||
const u32 soff = nv50_ior_base(ior);
|
||||
u32 div = 1025;
|
||||
u32 val = (lvl * div) / 100;
|
||||
|
||||
nvkm_wr32(device, 0x61c084 + soff, 0x80000000 | val);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
nv50_sor_bl_get(struct nvkm_ior *ior)
|
||||
{
|
||||
struct nvkm_device *device = ior->disp->engine.subdev.device;
|
||||
const u32 soff = nv50_ior_base(ior);
|
||||
u32 div = 1025;
|
||||
u32 val;
|
||||
|
||||
val = nvkm_rd32(device, 0x61c084 + soff);
|
||||
val &= 0x000007ff;
|
||||
return ((val * 100) + (div / 2)) / div;
|
||||
}
|
||||
|
||||
const struct nvkm_ior_func_bl
|
||||
nv50_sor_bl = {
|
||||
.get = nv50_sor_bl_get,
|
||||
.set = nv50_sor_bl_set,
|
||||
};
|
||||
|
||||
void
|
||||
nv50_sor_clock(struct nvkm_ior *sor)
|
||||
{
|
||||
@ -220,6 +253,7 @@ nv50_sor = {
|
||||
.state = nv50_sor_state,
|
||||
.power = nv50_sor_power,
|
||||
.clock = nv50_sor_clock,
|
||||
.bl = &nv50_sor_bl,
|
||||
};
|
||||
|
||||
static int
@ -1254,10 +1288,6 @@ nv50_disp_super_2_2(struct nvkm_disp *disp, struct nvkm_head *head)
            ior->asy.link = outp->lvds.dual ? 3 : 1;
    }

    /* Handle any link training, etc. */
    if (outp && outp->func->acquire)
        outp->func->acquire(outp);

    /* Execute OnInt2 IED script. */
    nv50_disp_super_ied_on(head, ior, 0, khz);

@ -1287,7 +1317,6 @@ nv50_disp_super_2_1(struct nvkm_disp *disp, struct nvkm_head *head)
void
nv50_disp_super_2_0(struct nvkm_disp *disp, struct nvkm_head *head)
{
    struct nvkm_outp *outp;
    struct nvkm_ior *ior;

    /* Determine which OR, if any, we're detaching from the head. */
@ -1298,14 +1327,6 @@ nv50_disp_super_2_0(struct nvkm_disp *disp, struct nvkm_head *head)

    /* Execute OffInt2 IED script. */
    nv50_disp_super_ied_off(head, ior, 2);

    /* If we're shutting down the OR's only active head, execute
     * the output path's disable function.
     */
    if (ior->arm.head == (1 << head->id)) {
        if ((outp = ior->arm.outp) && outp->func->disable)
            outp->func->disable(outp, ior);
    }
}

void
@ -1371,7 +1392,6 @@ nv50_disp_super(struct work_struct *work)
            continue;
        nv50_disp_super_2_0(disp, head);
    }
    nvkm_outp_route(disp);
    list_for_each_entry(head, &disp->heads, head) {
        if (!(super & (0x00000200 << head->id)))
            continue;
@ -1563,7 +1583,15 @@ nv50_disp_oneinit(struct nvkm_disp *disp)
    const struct nvkm_disp_func *func = disp->func;
    struct nvkm_subdev *subdev = &disp->engine.subdev;
    struct nvkm_device *device = subdev->device;
    struct nvkm_bios *bios = device->bios;
    struct nvkm_outp *outp, *outt, *pair;
    struct nvkm_conn *conn;
    struct nvkm_ior *ior;
    int ret, i;
    u8 ver, hdr;
    u32 data;
    struct dcb_output dcbE;
    struct nvbios_connE connE;

    if (func->wndw.cnt) {
        disp->wndw.nr = func->wndw.cnt(disp, &disp->wndw.mask);
@ -1610,8 +1638,130 @@ nv50_disp_oneinit(struct nvkm_disp *disp)
    if (ret)
        return ret;

    return nvkm_ramht_new(device, func->ramht_size ? func->ramht_size :
                          0x1000, 0, disp->inst, &disp->ramht);
    ret = nvkm_ramht_new(device, func->ramht_size ? func->ramht_size : 0x1000, 0, disp->inst,
                         &disp->ramht);
    if (ret)
        return ret;

    /* Create output path objects for each VBIOS display path. */
    i = -1;
    while ((data = dcb_outp_parse(bios, ++i, &ver, &hdr, &dcbE))) {
        if (WARN_ON((ver & 0xf0) != 0x40))
            return -EINVAL;
        if (dcbE.type == DCB_OUTPUT_UNUSED)
            continue;
        if (dcbE.type == DCB_OUTPUT_EOL)
            break;
        outp = NULL;

        switch (dcbE.type) {
        case DCB_OUTPUT_ANALOG:
        case DCB_OUTPUT_TMDS:
        case DCB_OUTPUT_LVDS:
            ret = nvkm_outp_new(disp, i, &dcbE, &outp);
            break;
        case DCB_OUTPUT_DP:
            ret = nvkm_dp_new(disp, i, &dcbE, &outp);
            break;
        case DCB_OUTPUT_TV:
        case DCB_OUTPUT_WFD:
            /* No support for WFD yet. */
            ret = -ENODEV;
            continue;
        default:
            nvkm_warn(subdev, "dcb %d type %d unknown\n",
                      i, dcbE.type);
            continue;
        }

        if (ret) {
            if (outp) {
                if (ret != -ENODEV)
                    OUTP_ERR(outp, "ctor failed: %d", ret);
                else
                    OUTP_DBG(outp, "not supported");
                nvkm_outp_del(&outp);
                continue;
            }
            nvkm_error(subdev, "failed to create outp %d\n", i);
            continue;
        }

        list_add_tail(&outp->head, &disp->outps);
    }

    /* Create connector objects based on available output paths. */
    list_for_each_entry_safe(outp, outt, &disp->outps, head) {
        /* VBIOS data *should* give us the most useful information. */
        data = nvbios_connEp(bios, outp->info.connector, &ver, &hdr,
                             &connE);

        /* No bios connector data... */
        if (!data) {
            /* Heuristic: anything with the same ccb index is
             * considered to be on the same connector, any
             * output path without an associated ccb entry will
             * be put on its own connector.
             */
            int ccb_index = outp->info.i2c_index;
            if (ccb_index != 0xf) {
                list_for_each_entry(pair, &disp->outps, head) {
                    if (pair->info.i2c_index == ccb_index) {
                        outp->conn = pair->conn;
                        break;
                    }
                }
            }

            /* Connector shared with another output path. */
            if (outp->conn)
                continue;

            memset(&connE, 0x00, sizeof(connE));
            connE.type = DCB_CONNECTOR_NONE;
            i = -1;
        } else {
            i = outp->info.connector;
        }

        /* Check that we haven't already created this connector. */
        list_for_each_entry(conn, &disp->conns, head) {
            if (conn->index == outp->info.connector) {
                outp->conn = conn;
                break;
            }
        }

        if (outp->conn)
            continue;

        /* Apparently we need to create a new one! */
        ret = nvkm_conn_new(disp, i, &connE, &outp->conn);
        if (ret) {
            nvkm_error(subdev, "failed to create outp %d conn: %d\n", outp->index, ret);
            nvkm_conn_del(&outp->conn);
            list_del(&outp->head);
            nvkm_outp_del(&outp);
            continue;
        }

        list_add_tail(&outp->conn->head, &disp->conns);
    }

    /* Enforce identity-mapped SOR assignment for panels, which have
     * certain bits (ie. backlight controls) wired to a specific SOR.
     */
    list_for_each_entry(outp, &disp->outps, head) {
        if (outp->conn->info.type == DCB_CONNECTOR_LVDS ||
            outp->conn->info.type == DCB_CONNECTOR_eDP) {
            ior = nvkm_ior_find(disp, SOR, ffs(outp->info.or) - 1);
            if (!WARN_ON(!ior))
                ior->identity = true;
            outp->identity = true;
        }
    }

    return 0;
}

static const struct nvkm_disp_func

@ -22,14 +22,16 @@
 * Authors: Ben Skeggs
 */
#include "outp.h"
#include "conn.h"
#include "dp.h"
#include "ior.h"

#include <subdev/bios.h>
#include <subdev/bios/dcb.h>
#include <subdev/gpio.h>
#include <subdev/i2c.h>

void
static void
nvkm_outp_route(struct nvkm_disp *disp)
{
    struct nvkm_outp *outp;
@ -46,8 +48,8 @@ nvkm_outp_route(struct nvkm_disp *disp)

    list_for_each_entry(ior, &disp->iors, head) {
        if ((outp = ior->asy.outp)) {
            OUTP_DBG(outp, "acquire %s", ior->name);
            if (ior->asy.outp != ior->arm.outp) {
                OUTP_DBG(outp, "acquire %s", ior->name);
                if (ior->func->route.set)
                    ior->func->route.set(outp, ior);
                ior->arm.outp = ior->asy.outp;
@ -87,22 +89,20 @@ nvkm_outp_xlat(struct nvkm_outp *outp, enum nvkm_ior_type *type)
}

void
nvkm_outp_release(struct nvkm_outp *outp, u8 user)
nvkm_outp_release_or(struct nvkm_outp *outp, u8 user)
{
    struct nvkm_ior *ior = outp->ior;
    OUTP_TRACE(outp, "release %02x &= %02x %p", outp->acquired, ~user, ior);
    if (ior) {
        outp->acquired &= ~user;
        if (!outp->acquired) {
            if (outp->func->release && outp->ior)
                outp->func->release(outp);
            outp->ior->asy.outp = NULL;
            outp->ior = NULL;
        }
    }
}

static inline int
int
nvkm_outp_acquire_ior(struct nvkm_outp *outp, u8 user, struct nvkm_ior *ior)
{
    outp->ior = ior;
@ -140,7 +140,7 @@ nvkm_outp_acquire_hda(struct nvkm_outp *outp, enum nvkm_ior_type type,
}

int
nvkm_outp_acquire(struct nvkm_outp *outp, u8 user, bool hda)
nvkm_outp_acquire_or(struct nvkm_outp *outp, u8 user, bool hda)
{
    struct nvkm_ior *ior = outp->ior;
    enum nvkm_ior_proto proto;
@ -207,39 +207,110 @@ nvkm_outp_acquire(struct nvkm_outp *outp, u8 user, bool hda)
    return nvkm_outp_acquire_hda(outp, type, user, false);
}

void
nvkm_outp_fini(struct nvkm_outp *outp)
int
nvkm_outp_bl_set(struct nvkm_outp *outp, int level)
{
    if (outp->func->fini)
        outp->func->fini(outp);
    int ret;

    ret = nvkm_outp_acquire_or(outp, NVKM_OUTP_PRIV, false);
    if (ret)
        return ret;

    if (outp->ior->func->bl)
        ret = outp->ior->func->bl->set(outp->ior, level);
    else
        ret = -EINVAL;

    nvkm_outp_release_or(outp, NVKM_OUTP_PRIV);
    return ret;
}

static void
nvkm_outp_init_route(struct nvkm_outp *outp)
int
nvkm_outp_bl_get(struct nvkm_outp *outp)
{
    int ret;

    ret = nvkm_outp_acquire_or(outp, NVKM_OUTP_PRIV, false);
    if (ret)
        return ret;

    if (outp->ior->func->bl)
        ret = outp->ior->func->bl->get(outp->ior);
    else
        ret = -EINVAL;

    nvkm_outp_release_or(outp, NVKM_OUTP_PRIV);
    return ret;
}

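Editorial sketch (not part of the patch): the two backlight helpers above share a claim-then-release pattern around the OR's function table. A hypothetical wrapper, using only identifiers already present in this file, would look roughly like the following.

static int with_private_or(struct nvkm_outp *outp, int (*fn)(struct nvkm_ior *))
{
    int ret = nvkm_outp_acquire_or(outp, NVKM_OUTP_PRIV, false);

    if (ret)
        return ret;

    ret = fn(outp->ior);                        /* safe: OR held for NVKM_OUTP_PRIV */
    nvkm_outp_release_or(outp, NVKM_OUTP_PRIV);
    return ret;
}
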
int
nvkm_outp_detect(struct nvkm_outp *outp)
{
    struct nvkm_gpio *gpio = outp->disp->engine.subdev.device->gpio;
    int ret = -EINVAL;

    if (outp->conn->info.hpd != DCB_GPIO_UNUSED) {
        ret = nvkm_gpio_get(gpio, 0, DCB_GPIO_UNUSED, outp->conn->info.hpd);
        if (ret < 0)
            return ret;
        if (ret)
            return 1;

        /*TODO: Look into returning NOT_PRESENT if !HPD on DVI/HDMI.
         *
         * It's uncertain whether this is accurate for all older chipsets,
         * so we're returning UNKNOWN, and the DRM will probe DDC instead.
         */
        if (outp->info.type == DCB_OUTPUT_DP)
            return 0;
    }

    return ret;
}

void
nvkm_outp_release(struct nvkm_outp *outp)
{
    nvkm_outp_release_or(outp, NVKM_OUTP_USER);
    nvkm_outp_route(outp->disp);
}

int
nvkm_outp_acquire(struct nvkm_outp *outp, bool hda)
{
    int ret = nvkm_outp_acquire_or(outp, NVKM_OUTP_USER, hda);

    if (ret)
        return ret;

    nvkm_outp_route(outp->disp);
    return 0;
}

struct nvkm_ior *
nvkm_outp_inherit(struct nvkm_outp *outp)
{
    struct nvkm_disp *disp = outp->disp;
    struct nvkm_ior *ior;
    enum nvkm_ior_proto proto;
    enum nvkm_ior_type type;
    struct nvkm_ior *ior;
    int id, link;

    /* Find any OR from the class that is able to support this device. */
    proto = nvkm_outp_xlat(outp, &type);
    if (proto == UNKNOWN)
        return;
        return NULL;

    ior = nvkm_ior_find(disp, type, -1);
    if (!ior) {
        WARN_ON(1);
        return;
    }
    if (WARN_ON(!ior))
        return NULL;

    /* Determine the specific OR, if any, this device is attached to. */
    if (ior->func->route.get) {
        id = ior->func->route.get(outp, &link);
        if (id < 0) {
            OUTP_DBG(outp, "no route");
            return;
            return NULL;
        }
    } else {
        /* Prior to DCB 4.1, this is hardwired like so. */
@ -248,10 +319,24 @@ nvkm_outp_init_route(struct nvkm_outp *outp)
    }

    ior = nvkm_ior_find(disp, type, id);
    if (!ior) {
        WARN_ON(1);
    if (WARN_ON(!ior))
        return NULL;

    return ior;
}

void
nvkm_outp_init(struct nvkm_outp *outp)
{
    enum nvkm_ior_proto proto;
    enum nvkm_ior_type type;
    struct nvkm_ior *ior;

    /* Find any OR from the class that is able to support this device. */
    proto = nvkm_outp_xlat(outp, &type);
    ior = outp->func->inherit(outp);
    if (!ior)
        return;
    }

    /* Determine if the OR is already configured for this device. */
    ior->func->state(ior, &ior->arm);
@ -273,14 +358,6 @@ nvkm_outp_init_route(struct nvkm_outp *outp)
        ior->arm.outp = outp;
}

void
nvkm_outp_init(struct nvkm_outp *outp)
{
    nvkm_outp_init_route(outp);
    if (outp->func->init)
        outp->func->init(outp);
}

void
nvkm_outp_del(struct nvkm_outp **poutp)
{
@ -328,6 +405,13 @@ nvkm_outp_new_(const struct nvkm_outp_func *func, struct nvkm_disp *disp,

static const struct nvkm_outp_func
nvkm_outp = {
    .init = nvkm_outp_init,
    .detect = nvkm_outp_detect,
    .inherit = nvkm_outp_inherit,
    .acquire = nvkm_outp_acquire,
    .release = nvkm_outp_release,
    .bl.get = nvkm_outp_bl_get,
    .bl.set = nvkm_outp_bl_set,
};

int

@ -35,6 +35,8 @@ struct nvkm_outp {
    struct {
        struct nvbios_dpout info;
        u8 version;
        bool mst;
        bool increased_wm;

        struct nvkm_i2c_aux *aux;

@ -50,14 +52,13 @@ struct nvkm_outp {
            u32 rate;
        } rate[8];
        int rates;
        int links;

        struct mutex mutex;
        struct {
            atomic_t done;
            u8 nr;
            u8 bw;
            bool mst;
            bool post_adj;
        } lt;
    } dp;
};
@ -74,17 +75,45 @@ int nvkm_outp_new(struct nvkm_disp *, int index, struct dcb_output *, struct nvk
void nvkm_outp_del(struct nvkm_outp **);
void nvkm_outp_init(struct nvkm_outp *);
void nvkm_outp_fini(struct nvkm_outp *);
int nvkm_outp_acquire(struct nvkm_outp *, u8 user, bool hda);
void nvkm_outp_release(struct nvkm_outp *, u8 user);
void nvkm_outp_route(struct nvkm_disp *);

int nvkm_outp_detect(struct nvkm_outp *);

struct nvkm_ior *nvkm_outp_inherit(struct nvkm_outp *);
int nvkm_outp_acquire(struct nvkm_outp *, bool hda);
int nvkm_outp_acquire_or(struct nvkm_outp *, u8 user, bool hda);
int nvkm_outp_acquire_ior(struct nvkm_outp *, u8 user, struct nvkm_ior *);
void nvkm_outp_release(struct nvkm_outp *);
void nvkm_outp_release_or(struct nvkm_outp *, u8 user);

int nvkm_outp_bl_get(struct nvkm_outp *);
int nvkm_outp_bl_set(struct nvkm_outp *, int level);

struct nvkm_outp_func {
    void *(*dtor)(struct nvkm_outp *);
    void (*init)(struct nvkm_outp *);
    void (*fini)(struct nvkm_outp *);
    int (*acquire)(struct nvkm_outp *);

    int (*detect)(struct nvkm_outp *);
    int (*edid_get)(struct nvkm_outp *, u8 *data, u16 *size);

    struct nvkm_ior *(*inherit)(struct nvkm_outp *);
    int (*acquire)(struct nvkm_outp *, bool hda);
    void (*release)(struct nvkm_outp *);
    void (*disable)(struct nvkm_outp *, struct nvkm_ior *);

    struct {
        int (*get)(struct nvkm_outp *);
        int (*set)(struct nvkm_outp *, int level);
    } bl;

    struct {
        int (*aux_pwr)(struct nvkm_outp *, bool pu);
        int (*aux_xfer)(struct nvkm_outp *, u8 type, u32 addr, u8 *data, u8 *size);
        int (*rates)(struct nvkm_outp *);
        int (*train)(struct nvkm_outp *, bool retrain);
        int (*drive)(struct nvkm_outp *, u8 lanes, u8 pe[4], u8 vs[4]);
        int (*mst_id_get)(struct nvkm_outp *, u32 *id);
        int (*mst_id_put)(struct nvkm_outp *, u32 id);
    } dp;
};

#define OUTP_MSG(o,l,f,a...) do { \

@ -88,6 +88,7 @@ tu102_sor = {
    .state = gv100_sor_state,
    .power = nv50_sor_power,
    .clock = gf119_sor_clock,
    .bl = &gt215_sor_bl,
    .hdmi = &gv100_sor_hdmi,
    .dp = &tu102_sor_dp,
    .hda = &gv100_sor_hda,
