Merge branch 'etnaviv/next' of https://git.pengutronix.de/git/lst/linux into drm-next

- make etnaviv work on IOMMU enabled systems
- fix mapping of command buffers on systems with more than 4GB RAM
- close a DoS vector
- fix spurious GPU resets

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Lucas Stach <l.stach@pengutronix.de>
Link: https://patchwork.freedesktop.org/patch/msgid/59619f8e9eb1d7ed7ea72cbead1f0aabc49f4e68.camel@pengutronix.de
commit 78942ae41d
Dave Airlie <airlied@redhat.com>  2021-12-24 06:01:39 +10:00
5 changed files with 42 additions and 12 deletions

drivers/gpu/drm/etnaviv/etnaviv_drv.c

@@ -589,6 +589,7 @@ static int compare_str(struct device *dev, void *data)
 static int etnaviv_pdev_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
+	struct device_node *first_node = NULL;
 	struct component_match *match = NULL;
 
 	if (!dev->platform_data) {
@@ -598,6 +599,9 @@ static int etnaviv_pdev_probe(struct platform_device *pdev)
 		if (!of_device_is_available(core_node))
 			continue;
 
+		if (!first_node)
+			first_node = core_node;
+
 		drm_of_component_match_add(&pdev->dev, &match,
 					   compare_of, core_node);
 	}
@@ -609,6 +613,32 @@ static int etnaviv_pdev_probe(struct platform_device *pdev)
 			component_match_add(dev, &match, compare_str, names[i]);
 	}
 
+	/*
+	 * PTA and MTLB can have 40 bit base addresses, but
+	 * unfortunately, an entry in the MTLB can only point to a
+	 * 32 bit base address of a STLB. Moreover, to initialize the
+	 * MMU we need a command buffer with a 32 bit address because
+	 * without an MMU there is only an identity mapping between
+	 * the internal 32 bit addresses and the bus addresses.
+	 *
+	 * To make things easy, we set the dma_coherent_mask to 32
+	 * bit to make sure we are allocating the command buffers and
+	 * TLBs in the lower 4 GiB address space.
+	 */
+	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(40)) ||
+	    dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32))) {
+		dev_dbg(&pdev->dev, "No suitable DMA available\n");
+		return -ENODEV;
+	}
+
+	/*
+	 * Apply the same DMA configuration to the virtual etnaviv
+	 * device as the GPU we found. This assumes that all Vivante
+	 * GPUs in the system share the same DMA constraints.
+	 */
+	if (first_node)
+		of_dma_configure(&pdev->dev, first_node, true);
+
 	return component_master_add_with_match(dev, &etnaviv_master_ops, match);
 }
@@ -653,21 +683,12 @@ static int __init etnaviv_init(void)
 		if (!of_device_is_available(np))
 			continue;
 
-		pdev = platform_device_alloc("etnaviv", -1);
+		pdev = platform_device_alloc("etnaviv", PLATFORM_DEVID_NONE);
 		if (!pdev) {
 			ret = -ENOMEM;
 			of_node_put(np);
 			goto unregister_platform_driver;
 		}
 
-		pdev->dev.coherent_dma_mask = DMA_BIT_MASK(40);
-		pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
-
-		/*
-		 * Apply the same DMA configuration to the virtual etnaviv
-		 * device as the GPU we found. This assumes that all Vivante
-		 * GPUs in the system share the same DMA constraints.
-		 */
-		of_dma_configure(&pdev->dev, np, true);
-
 		ret = platform_device_add(pdev);
 		if (ret) {
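
A note on the DMA mask split added to etnaviv_pdev_probe() above: the streaming mask is widened to 40 bits while the coherent mask stays at 32 bits, so dma_alloc_coherent() (which backs the command buffers and MMU page tables) only ever returns bus addresses that the FE and the MTLB entries can reference, while streaming mappings may still use the full 40-bit range. A minimal sketch of that pattern, with example_probe() as a hypothetical stand-in rather than etnaviv code:

#include <linux/dma-mapping.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	/* Streaming mappings (dma_map_page/dma_map_sg) may use 40-bit bus addresses. */
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(40)))
		return -ENODEV;

	/* Coherent allocations (dma_alloc_coherent) must stay below 4 GiB. */
	if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)))
		return -ENODEV;

	return 0;
}

Doing this once on the virtual etnaviv device, and inheriting the OF DMA setup of the first GPU node via of_dma_configure(), is what makes the per-device mask assignments removed from etnaviv_init() in the hunk above unnecessary.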

drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c

@@ -469,6 +469,12 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
 		return -EINVAL;
 	}
 
+	if (args->stream_size > SZ_64K || args->nr_relocs > SZ_64K ||
+	    args->nr_bos > SZ_64K || args->nr_pmrs > 128) {
+		DRM_ERROR("submit arguments out of size limits\n");
+		return -EINVAL;
+	}
+
 	/*
 	 * Copy the command submission and bo array to kernel space in
 	 * one go, and do this outside of any locks.
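
This is the DoS fix: the four counts checked above come straight from userspace and directly size the kernel-side allocations and copies that follow, so without an upper bound a single submit ioctl could ask the kernel to allocate and copy an arbitrarily large amount of memory. A self-contained sketch of the general bound-then-allocate-then-copy pattern; example_copy_user_array() and EXAMPLE_MAX_ENTRIES are hypothetical names, not etnaviv code:

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

/* Hypothetical cap; the hunk above uses SZ_64K for stream, reloc and BO counts. */
#define EXAMPLE_MAX_ENTRIES	SZ_64K

static void *example_copy_user_array(const void __user *src, u32 nr,
				     size_t elem_size)
{
	void *buf;

	/* Reject absurd counts before they size an allocation. */
	if (nr > EXAMPLE_MAX_ENTRIES)
		return ERR_PTR(-EINVAL);

	buf = kvmalloc_array(nr, elem_size, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(buf, src, (size_t)nr * elem_size)) {
		kvfree(buf);
		return ERR_PTR(-EFAULT);
	}

	return buf;
}

The check in the hunk above does the same thing inline, adding a limit of 128 for the performance monitor requests.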

drivers/gpu/drm/etnaviv/etnaviv_gpu.c

@@ -1658,7 +1658,7 @@ etnaviv_gpu_cooling_set_cur_state(struct thermal_cooling_device *cdev,
 	return 0;
 }
 
-static struct thermal_cooling_device_ops cooling_ops = {
+static const struct thermal_cooling_device_ops cooling_ops = {
 	.get_max_state = etnaviv_gpu_cooling_get_max_state,
 	.get_cur_state = etnaviv_gpu_cooling_get_cur_state,
 	.set_cur_state = etnaviv_gpu_cooling_set_cur_state,

drivers/gpu/drm/etnaviv/etnaviv_gpu.h

@@ -130,6 +130,7 @@ struct etnaviv_gpu {
 	/* hang detection */
 	u32 hangcheck_dma_addr;
+	u32 hangcheck_fence;
 
 	void __iomem *mmio;
 	int irq;

drivers/gpu/drm/etnaviv/etnaviv_sched.c

@@ -107,8 +107,10 @@ static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job
 	 */
 	dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
 	change = dma_addr - gpu->hangcheck_dma_addr;
-	if (change < 0 || change > 16) {
+	if (gpu->completed_fence != gpu->hangcheck_fence ||
+	    change < 0 || change > 16) {
 		gpu->hangcheck_dma_addr = dma_addr;
+		gpu->hangcheck_fence = gpu->completed_fence;
 		goto out_no_timeout;
 	}
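
This hunk is the spurious-reset fix: the FE DMA address on its own is a weak progress indicator, since it can read back within the same small window across two hangcheck periods even though the GPU retired work in between, so the timeout handler could reset a busy but healthy GPU. With the new hangcheck_fence field (added to struct etnaviv_gpu above), a job is only treated as hung if neither the DMA address nor the completed fence has moved. A hypothetical standalone helper mirroring that logic (example_gpu_made_progress() is illustrative, not part of the driver):

#include <linux/types.h>

static bool example_gpu_made_progress(u32 dma_addr, u32 completed_fence,
				      u32 *hangcheck_dma_addr,
				      u32 *hangcheck_fence)
{
	int change = dma_addr - *hangcheck_dma_addr;

	/* Progress if either the FE address or the completed fence moved. */
	if (completed_fence != *hangcheck_fence ||
	    change < 0 || change > 16) {
		/* Record the new snapshot for the next hangcheck period. */
		*hangcheck_dma_addr = dma_addr;
		*hangcheck_fence = completed_fence;
		return true;	/* still alive, skip the reset */
	}

	return false;		/* no progress, fall through to recovery */
}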