dmaengine fixes for v5.9-rc4

Core:
 - drop ACPI CSRT table reference after using it
 - fix of_dma_router_xlate() error handling

Drivers:
 Odd fixes in:
 - idxd
 - at_hdmac
 - pl330
 - dw-edma
 - jz4780
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEE+vs47OPLdNbVcHzyfBQHDyUjg0cFAl9SFUQACgkQfBQHDyUj
g0cSKxAApV4hJzU1hK1vZR3SeAN1T5n7Z6OowMR/gBqt6wjffJvh8FMUf3onOEDq
jjLDA106JAXKGnIvd0ZJ0B7U0F6sfC9uKjz8LTxoF+XK89HxTQL5REFpR7BG2oFs
sqoxrhgYxOqQ5czo0xSXk/psJmtJF8/KAfrXXxixe1CjIX6FeVP765w7DYl1CPT+
VQt0LGrtHCMwpSJJW3PJFCfwj6/LWhyBIZSEoE9c+U8LqdHt0T6NKvPtj7Q7XAnx
8L5YL+H68ZPmplZqUAtp9E2Ob5QxYNCxnbBrAk2izkALNpaN1WEJt/MVjNVXtn8l
xKivCRxjti41piyubEAvaqUnEmbf1INE/t64N8+iKcGfZqcLWPsNTYJI0eGNBjna
3uh4NxBNMOMjWU4lCp/1P5cnplB6sTc0Svepoft6ohCLiTkBR7P6HJ4R3HpalJTu
O5WpFoDQAqnnV9DAyR5XyJe65tioEqkttYiDPWSlgevJBLC5PA0kB9Ug7Fvi6E6J
OcPFTYSh0FdAf4Nq49FfV/4IqXLqL1jz9z5uehZL7g22y9h9FZ9Mxw0y+30c6VEA
akFWnMkTElOAU4F8MJ95+c0n8gXNPUhyPk3eIE2mq9hWFZiVWL+0SKyRZGWO2nWN
BB//CG8tMP6ryy/W/RuLe8pHuRiy/ZilqdNJ8eFSIv0CyOshTJc=
=DUvT
-----END PGP SIGNATURE-----

Merge tag 'dmaengine-fix-5.9-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine

Pull dmaengine fixes from Vinod Koul:
 "A couple of core fixes and odd driver fixes for dmaengine subsystem:

  Core:
   - drop ACPI CSRT table reference after using it
   - fix of_dma_router_xlate() error handling

  Driver fixes in idxd, at_hdmac, pl330, dw-edma and jz4780"

* tag 'dmaengine-fix-5.9-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine:
  dmaengine: ti: k3-udma: Update rchan_oes_offset for am654 SYSFW ABI 3.0
  drivers/dma/dma-jz4780: Fix race condition between probe and irq handler
  dmaengine: dw-edma: Fix scatter-gather address calculation
  dmaengine: ti: k3-udma: Fix the TR initialization for prep_slave_sg
  dmaengine: pl330: Fix burst length if burst size is smaller than bus width
  dmaengine: at_hdmac: add missing kfree() call in at_dma_xlate()
  dmaengine: at_hdmac: add missing put_device() call in at_dma_xlate()
  dmaengine: at_hdmac: check return value of of_find_device_by_node() in at_dma_xlate()
  dmaengine: of-dma: Fix of_dma_router_xlate's of_dma_xlate handling
  dmaengine: idxd: reset states after device disable or reset
  dmaengine: acpi: Put the CSRT table after using it
commit e2dacf6cd1
--- a/drivers/dma/acpi-dma.c
+++ b/drivers/dma/acpi-dma.c
@@ -135,11 +135,13 @@ static void acpi_dma_parse_csrt(struct acpi_device *adev, struct acpi_dma *adma)
 		if (ret < 0) {
 			dev_warn(&adev->dev,
 				 "error in parsing resource group\n");
-			return;
+			break;
 		}
 
 		grp = (struct acpi_csrt_group *)((void *)grp + grp->length);
 	}
+
+	acpi_put_table((struct acpi_table_header *)csrt);
 }
 
 /**
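The hunk above turns the early return into a break so acpi_dma_parse_csrt() always reaches the new acpi_put_table() call and drops its reference to the CSRT table, even when a resource group fails to parse. As a rough stand-alone illustration of that acquire/loop/release shape (get_table() and put_table() below are made-up stand-ins, not the ACPI API):

#include <stdio.h>

/* Stand-ins for acpi_get_table()/acpi_put_table(). */
static int table_refs;
static int  get_table(void) { table_refs++; return 0; }
static void put_table(void) { table_refs--; }

static int parse_group(int i) { return i == 2 ? -1 : 0; /* third group fails */ }

static void parse_all(void)
{
	int i;

	if (get_table())
		return;

	for (i = 0; i < 5; i++) {
		if (parse_group(i)) {
			fprintf(stderr, "error in parsing resource group\n");
			break;		/* not return: fall through to the release */
		}
	}

	put_table();			/* runs on both the error and success paths */
}

int main(void)
{
	parse_all();
	printf("table refs still held: %d\n", table_refs); /* expect 0 */
	return 0;
}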
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -1650,13 +1650,17 @@ static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
 		return NULL;
 
 	dmac_pdev = of_find_device_by_node(dma_spec->np);
+	if (!dmac_pdev)
+		return NULL;
 
 	dma_cap_zero(mask);
 	dma_cap_set(DMA_SLAVE, mask);
 
 	atslave = kmalloc(sizeof(*atslave), GFP_KERNEL);
-	if (!atslave)
+	if (!atslave) {
+		put_device(&dmac_pdev->dev);
 		return NULL;
+	}
 
 	atslave->cfg = ATC_DST_H2SEL_HW | ATC_SRC_H2SEL_HW;
 	/*
@@ -1685,8 +1689,11 @@ static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
 	atslave->dma_dev = &dmac_pdev->dev;
 
 	chan = dma_request_channel(mask, at_dma_filter, atslave);
-	if (!chan)
+	if (!chan) {
+		put_device(&dmac_pdev->dev);
+		kfree(atslave);
 		return NULL;
+	}
 
 	atchan = to_at_dma_chan(chan);
 	atchan->per_if = dma_spec->args[0] & 0xff;
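The at_dma_xlate() changes above follow the usual acquire/undo ordering for a translate callback: every exit after of_find_device_by_node() must drop the device reference, and every exit after the allocation must also free it. A minimal stand-alone C sketch of that unwind pattern (get_device_ref(), alloc_slave() and request_channel() are illustrative names, not the kernel API):

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-ins for of_find_device_by_node()/put_device(),
 * kmalloc()/kfree() and dma_request_channel(). */
struct device { int refcount; };
struct slave  { int cfg; };

static struct device dev_instance;

static struct device *get_device_ref(void)   { dev_instance.refcount++; return &dev_instance; }
static void put_device_ref(struct device *d) { d->refcount--; }
static struct slave *alloc_slave(void)       { return malloc(sizeof(struct slave)); }
static int request_channel(struct slave *s)  { (void)s; return -1; /* pretend it fails */ }

static int xlate(void)
{
	struct device *dev = get_device_ref();
	struct slave *slave;

	if (!dev)
		return -1;			/* nothing acquired yet */

	slave = alloc_slave();
	if (!slave) {
		put_device_ref(dev);		/* undo step 1 */
		return -1;
	}

	if (request_channel(slave) < 0) {
		put_device_ref(dev);		/* undo step 1 */
		free(slave);			/* undo step 2 */
		return -1;
	}

	return 0;
}

int main(void)
{
	xlate();
	printf("refcount after failed xlate: %d\n", dev_instance.refcount); /* expect 0 */
	return 0;
}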
--- a/drivers/dma/dma-jz4780.c
+++ b/drivers/dma/dma-jz4780.c
@@ -879,24 +879,11 @@ static int jz4780_dma_probe(struct platform_device *pdev)
 		return -EINVAL;
 	}
 
-	ret = platform_get_irq(pdev, 0);
-	if (ret < 0)
-		return ret;
-
-	jzdma->irq = ret;
-
-	ret = request_irq(jzdma->irq, jz4780_dma_irq_handler, 0, dev_name(dev),
-			  jzdma);
-	if (ret) {
-		dev_err(dev, "failed to request IRQ %u!\n", jzdma->irq);
-		return ret;
-	}
-
 	jzdma->clk = devm_clk_get(dev, NULL);
 	if (IS_ERR(jzdma->clk)) {
 		dev_err(dev, "failed to get clock\n");
 		ret = PTR_ERR(jzdma->clk);
-		goto err_free_irq;
+		return ret;
 	}
 
 	clk_prepare_enable(jzdma->clk);
@@ -949,10 +936,23 @@ static int jz4780_dma_probe(struct platform_device *pdev)
 		jzchan->vchan.desc_free = jz4780_dma_desc_free;
 	}
 
+	ret = platform_get_irq(pdev, 0);
+	if (ret < 0)
+		goto err_disable_clk;
+
+	jzdma->irq = ret;
+
+	ret = request_irq(jzdma->irq, jz4780_dma_irq_handler, 0, dev_name(dev),
+			  jzdma);
+	if (ret) {
+		dev_err(dev, "failed to request IRQ %u!\n", jzdma->irq);
+		goto err_disable_clk;
+	}
+
 	ret = dmaenginem_async_device_register(dd);
 	if (ret) {
 		dev_err(dev, "failed to register device\n");
-		goto err_disable_clk;
+		goto err_free_irq;
 	}
 
 	/* Register with OF DMA helpers. */
@@ -960,17 +960,17 @@ static int jz4780_dma_probe(struct platform_device *pdev)
 					 jzdma);
 	if (ret) {
 		dev_err(dev, "failed to register OF DMA controller\n");
-		goto err_disable_clk;
+		goto err_free_irq;
 	}
 
 	dev_info(dev, "JZ4780 DMA controller initialised\n");
 	return 0;
 
-err_disable_clk:
-	clk_disable_unprepare(jzdma->clk);
-
 err_free_irq:
 	free_irq(jzdma->irq, jzdma);
 
+err_disable_clk:
+	clk_disable_unprepare(jzdma->clk);
+
 	return ret;
 }
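The dma-jz4780 reordering above applies the usual probe rule: install the interrupt handler only once every piece of state it may touch is initialised, and tear resources down in the reverse order they were set up. A compact stand-alone sketch of that goto-unwind shape (setup_clock(), setup_irq() and friends are invented stubs, not the driver's functions):

#include <stdio.h>

/* Illustrative resource setup/teardown stubs. */
static int  setup_clock(void)    { puts("clock on");  return 0; }
static void teardown_clock(void) { puts("clock off"); }
static int  init_channels(void)  { puts("channels ready"); return 0; }
static int  setup_irq(void)      { puts("irq requested"); return 0; }
static void teardown_irq(void)   { puts("irq freed"); }
static int  register_device(void){ puts("register failed"); return -1; }

static int probe(void)
{
	int ret;

	ret = setup_clock();
	if (ret)
		return ret;

	ret = init_channels();
	if (ret)
		goto err_disable_clk;

	/* The IRQ is requested only after the channel state it uses exists. */
	ret = setup_irq();
	if (ret)
		goto err_disable_clk;

	ret = register_device();
	if (ret)
		goto err_free_irq;

	return 0;

err_free_irq:
	teardown_irq();
err_disable_clk:
	teardown_clock();
	return ret;
}

int main(void)
{
	return probe() ? 1 : 0;
}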
--- a/drivers/dma/dw-edma/dw-edma-core.c
+++ b/drivers/dma/dw-edma/dw-edma-core.c
@@ -405,7 +405,7 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
 			if (xfer->cyclic) {
 				burst->dar = xfer->xfer.cyclic.paddr;
 			} else {
-				burst->dar = sg_dma_address(sg);
+				burst->dar = dst_addr;
 				/* Unlike the typical assumption by other
 				 * drivers/IPs the peripheral memory isn't
 				 * a FIFO memory, in this case, it's a
@@ -413,14 +413,13 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
 				 * and destination addresses are increased
 				 * by the same portion (data length)
 				 */
-				src_addr += sg_dma_len(sg);
 			}
 		} else {
 			burst->dar = dst_addr;
 			if (xfer->cyclic) {
 				burst->sar = xfer->xfer.cyclic.paddr;
 			} else {
-				burst->sar = sg_dma_address(sg);
+				burst->sar = src_addr;
 				/* Unlike the typical assumption by other
 				 * drivers/IPs the peripheral memory isn't
 				 * a FIFO memory, in this case, it's a
@@ -428,12 +427,14 @@ dw_edma_device_transfer(struct dw_edma_transfer *xfer)
 				 * and destination addresses are increased
 				 * by the same portion (data length)
 				 */
-				dst_addr += sg_dma_len(sg);
 			}
 		}
 
-		if (!xfer->cyclic)
+		if (!xfer->cyclic) {
+			src_addr += sg_dma_len(sg);
+			dst_addr += sg_dma_len(sg);
 			sg = sg_next(sg);
+		}
 	}
 
 	return vchan_tx_prep(&chan->vc, &desc->vd, xfer->flags);
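The dw-edma change above moves the address bookkeeping out of the direction-specific branches: this controller advances through linear memory on both ends, so the running source and destination addresses must each grow by the segment length exactly once per scatter-gather element. A small stand-alone sketch of that bookkeeping (the segment lengths and base addresses are made up):

#include <stdio.h>

int main(void)
{
	/* Pretend scatter-gather segment lengths in bytes. */
	const unsigned long seg_len[] = { 4096, 2048, 1024 };
	unsigned long src_addr = 0x10000000UL;	/* running source address      */
	unsigned long dst_addr = 0x20000000UL;	/* running destination address */
	unsigned int i;

	for (i = 0; i < sizeof(seg_len) / sizeof(seg_len[0]); i++) {
		printf("burst %u: sar=0x%lx dar=0x%lx len=%lu\n",
		       i, src_addr, dst_addr, seg_len[i]);

		/* Non-cyclic case: both sides advance by the same amount,
		 * exactly once per segment. */
		src_addr += seg_len[i];
		dst_addr += seg_len[i];
	}
	return 0;
}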
--- a/drivers/dma/idxd/device.c
+++ b/drivers/dma/idxd/device.c
@@ -410,10 +410,27 @@ int idxd_device_enable(struct idxd_device *idxd)
 	return 0;
 }
 
+void idxd_device_wqs_clear_state(struct idxd_device *idxd)
+{
+	int i;
+
+	lockdep_assert_held(&idxd->dev_lock);
+
+	for (i = 0; i < idxd->max_wqs; i++) {
+		struct idxd_wq *wq = &idxd->wqs[i];
+
+		if (wq->state == IDXD_WQ_ENABLED) {
+			idxd_wq_disable_cleanup(wq);
+			wq->state = IDXD_WQ_DISABLED;
+		}
+	}
+}
+
 int idxd_device_disable(struct idxd_device *idxd)
 {
 	struct device *dev = &idxd->pdev->dev;
 	u32 status;
+	unsigned long flags;
 
 	if (!idxd_is_enabled(idxd)) {
 		dev_dbg(dev, "Device is not enabled\n");
@@ -429,13 +446,22 @@ int idxd_device_disable(struct idxd_device *idxd)
 		return -ENXIO;
 	}
 
+	spin_lock_irqsave(&idxd->dev_lock, flags);
+	idxd_device_wqs_clear_state(idxd);
 	idxd->state = IDXD_DEV_CONF_READY;
+	spin_unlock_irqrestore(&idxd->dev_lock, flags);
 	return 0;
 }
 
 void idxd_device_reset(struct idxd_device *idxd)
 {
+	unsigned long flags;
+
 	idxd_cmd_exec(idxd, IDXD_CMD_RESET_DEVICE, 0, NULL);
+	spin_lock_irqsave(&idxd->dev_lock, flags);
+	idxd_device_wqs_clear_state(idxd);
+	idxd->state = IDXD_DEV_CONF_READY;
+	spin_unlock_irqrestore(&idxd->dev_lock, flags);
 }
 
 /* Device configuration bits */
--- a/drivers/dma/idxd/irq.c
+++ b/drivers/dma/idxd/irq.c
@@ -11,18 +11,6 @@
 #include "idxd.h"
 #include "registers.h"
 
-void idxd_device_wqs_clear_state(struct idxd_device *idxd)
-{
-	int i;
-
-	lockdep_assert_held(&idxd->dev_lock);
-	for (i = 0; i < idxd->max_wqs; i++) {
-		struct idxd_wq *wq = &idxd->wqs[i];
-
-		wq->state = IDXD_WQ_DISABLED;
-	}
-}
-
 static void idxd_device_reinit(struct work_struct *work)
 {
 	struct idxd_device *idxd = container_of(work, struct idxd_device, work);
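The idxd hunks above (device.c plus the removal from irq.c) make both disable and reset walk the work queues under the device lock and return each enabled queue to the disabled state, so no stale per-queue state survives. A stand-alone sketch of the same idea, with a pthread mutex standing in for the driver's spinlock and invented type names:

#include <pthread.h>
#include <stdio.h>

enum wq_state { WQ_DISABLED, WQ_ENABLED };

struct wq { enum wq_state state; };

struct dev {
	pthread_mutex_t lock;
	struct wq wqs[4];
};

/* Must be called with dev->lock held, mirroring lockdep_assert_held(). */
static void dev_wqs_clear_state(struct dev *d)
{
	unsigned int i;

	for (i = 0; i < sizeof(d->wqs) / sizeof(d->wqs[0]); i++) {
		if (d->wqs[i].state == WQ_ENABLED) {
			/* per-queue cleanup would go here */
			d->wqs[i].state = WQ_DISABLED;
		}
	}
}

static void dev_disable(struct dev *d)
{
	pthread_mutex_lock(&d->lock);
	dev_wqs_clear_state(d);		/* device and queue state reset together */
	pthread_mutex_unlock(&d->lock);
}

int main(void)
{
	struct dev d = { .lock = PTHREAD_MUTEX_INITIALIZER };

	d.wqs[1].state = WQ_ENABLED;
	dev_disable(&d);
	printf("wq1 state after disable: %d\n", d.wqs[1].state); /* 0 == disabled */
	return 0;
}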
--- a/drivers/dma/of-dma.c
+++ b/drivers/dma/of-dma.c
@@ -71,12 +71,12 @@ static struct dma_chan *of_dma_router_xlate(struct of_phandle_args *dma_spec,
 		return NULL;
 
 	chan = ofdma_target->of_dma_xlate(&dma_spec_target, ofdma_target);
-	if (chan) {
-		chan->router = ofdma->dma_router;
-		chan->route_data = route_data;
-	} else {
+	if (IS_ERR_OR_NULL(chan)) {
 		ofdma->dma_router->route_free(ofdma->dma_router->dev,
 					      route_data);
+	} else {
+		chan->router = ofdma->dma_router;
+		chan->route_data = route_data;
 	}
 
 	/*
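The of-dma fix above accepts that a target's of_dma_xlate() may report failure either as NULL or as an encoded error pointer, and releases the router data in both cases. A stand-alone sketch of that IS_ERR_OR_NULL-style check, using a tiny simplified error-pointer helper rather than the kernel's err.h:

#include <stdio.h>
#include <stdint.h>

/* Simplified error-pointer helpers in the spirit of the kernel's err.h. */
#define MAX_ERRNO 4095
static void *err_ptr(long err)       { return (void *)(uintptr_t)err; }
static int   is_err_or_null(void *p) { return !p || (uintptr_t)p >= (uintptr_t)-MAX_ERRNO; }

struct chan { int routed; };

static void route_free(void *route_data) { (void)route_data; puts("route freed"); }

static struct chan *router_xlate(struct chan *from_target, void *route_data)
{
	if (is_err_or_null(from_target)) {
		/* Failure from the target xlate: give the route back. */
		route_free(route_data);
		return from_target;
	}
	from_target->routed = 1;	/* success: attach routing info */
	return from_target;
}

int main(void)
{
	int dummy_route;

	router_xlate(NULL, &dummy_route);		/* NULL failure           */
	router_xlate(err_ptr(-6), &dummy_route);	/* ERR_PTR(-ENXIO)-style  */
	return 0;
}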
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2797,6 +2797,7 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
 	while (burst != (1 << desc->rqcfg.brst_size))
 		desc->rqcfg.brst_size++;
 
+	desc->rqcfg.brst_len = get_burst_len(desc, len);
 	/*
 	 * If burst size is smaller than bus width then make sure we only
 	 * transfer one at a time to avoid a burst stradling an MFIFO entry.
@@ -2804,7 +2805,6 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
 	if (desc->rqcfg.brst_size * 8 < pl330->pcfg.data_bus_width)
 		desc->rqcfg.brst_len = 1;
 
-	desc->rqcfg.brst_len = get_burst_len(desc, len);
 	desc->bytes_requested = len;
 
 	desc->txd.flags = flags;
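The pl330 hunks above reorder the memcpy descriptor setup so the burst length is derived first and the bus-width clamp is applied last; before the fix the later get_burst_len() call could overwrite the clamp. A stand-alone sketch of why the ordering matters (burst_len_for() and the constants are invented for illustration):

#include <stdio.h>

/* Invented helper: pick a burst length from the transfer length. */
static unsigned int burst_len_for(unsigned int len, unsigned int brst_size)
{
	unsigned int burst_bytes = 1u << brst_size;
	unsigned int len_bursts = len / burst_bytes;

	return len_bursts > 16 ? 16 : (len_bursts ? len_bursts : 1);
}

int main(void)
{
	unsigned int brst_size = 0;		/* 2^0 = 1 byte bursts */
	unsigned int bus_width_bytes = 4;	/* 32-bit data bus     */
	unsigned int len = 256;
	unsigned int brst_len;

	/* Fixed order: derive the length first ... */
	brst_len = burst_len_for(len, brst_size);

	/* ... then clamp, so the clamp is what survives. */
	if ((1u << brst_size) < bus_width_bytes)
		brst_len = 1;

	printf("brst_len = %u (expected 1 when burst < bus width)\n", brst_len);
	return 0;
}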
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -2059,9 +2059,9 @@ udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl,
 			return NULL;
 		}
 
-		cppi5_tr_init(&tr_req[i].flags, CPPI5_TR_TYPE1, false, false,
-			      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
-		cppi5_tr_csf_set(&tr_req[i].flags, CPPI5_TR_CSF_SUPR_EVT);
+		cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, false,
+			      false, CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
+		cppi5_tr_csf_set(&tr_req[tr_idx].flags, CPPI5_TR_CSF_SUPR_EVT);
 
 		tr_req[tr_idx].addr = sg_addr;
 		tr_req[tr_idx].icnt0 = tr0_cnt0;
@@ -3101,14 +3101,14 @@ static struct udma_match_data am654_main_data = {
 	.psil_base = 0x1000,
 	.enable_memcpy_support = true,
 	.statictr_z_mask = GENMASK(11, 0),
-	.rchan_oes_offset = 0x2000,
+	.rchan_oes_offset = 0x200,
 };
 
 static struct udma_match_data am654_mcu_data = {
 	.psil_base = 0x6000,
 	.enable_memcpy_support = false,
 	.statictr_z_mask = GENMASK(11, 0),
-	.rchan_oes_offset = 0x2000,
+	.rchan_oes_offset = 0x200,
 };
 
 static struct udma_match_data j721e_main_data = {