Mirror of https://github.com/torvalds/linux.git
Synced 2024-11-28 07:01:32 +00:00
spi: spi-geni-qcom: Add support for SE DMA mode
SE DMA mode can be used for larger transfers and FIFO mode for smaller
transfers.

Signed-off-by: Vijaya Krishna Nivarthi <quic_vnivarth@quicinc.com>
Reviewed-by: Douglas Anderson <dianders@chromium.org>
Link: https://lore.kernel.org/r/1670509544-15977-1-git-send-email-quic_vnivarth@quicinc.com
Signed-off-by: Mark Brown <broonie@kernel.org>
parent 1b929c02af
commit e5f0dfa78a
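Before the hunks, it helps to see the crux of the change: the driver now picks the transfer mode per transfer, keeping FIFO (PIO) mode for anything that fits in the FIFO and switching to SE DMA otherwise. A minimal sketch of that heuristic, lifted from the setup_se_xfer() hunk below; the standalone helper is illustrative only, the patch open-codes this inline:

/*
 * Illustrative sketch only: the patch open-codes this selection inside
 * setup_se_xfer(). Field and constant names are the driver's own.
 */
static unsigned int spi_geni_pick_xfer_mode(const struct spi_geni_master *mas,
					    u32 len)
{
	/* Number of SPI words the hardware FIFO can hold at the current bpw */
	u32 fifo_size = mas->tx_fifo_depth * mas->fifo_width_bits /
				mas->cur_bits_per_word;

	/* Transfers that fit in the FIFO stay in PIO mode; larger ones use SE DMA */
	return (len <= fifo_size) ? GENI_SE_FIFO : GENI_SE_DMA;
}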
@@ -87,6 +87,8 @@ struct spi_geni_master {
 	struct completion cs_done;
 	struct completion cancel_done;
 	struct completion abort_done;
+	struct completion tx_reset_done;
+	struct completion rx_reset_done;
 	unsigned int oversampling;
 	spinlock_t lock;
 	int irq;
@@ -95,6 +97,8 @@ struct spi_geni_master {
 	struct dma_chan *tx;
 	struct dma_chan *rx;
 	int cur_xfer_mode;
+	dma_addr_t tx_se_dma;
+	dma_addr_t rx_se_dma;
 };
 
 static int get_spi_clk_cfg(unsigned int speed_hz,
@@ -129,23 +133,27 @@ static int get_spi_clk_cfg(unsigned int speed_hz,
 	return ret;
 }
 
-static void handle_fifo_timeout(struct spi_master *spi,
+static void handle_se_timeout(struct spi_master *spi,
 				struct spi_message *msg)
 {
 	struct spi_geni_master *mas = spi_master_get_devdata(spi);
 	unsigned long time_left;
 	struct geni_se *se = &mas->se;
+	const struct spi_transfer *xfer;
 
 	spin_lock_irq(&mas->lock);
 	reinit_completion(&mas->cancel_done);
-	writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
+	if (mas->cur_xfer_mode == GENI_SE_FIFO)
+		writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
+
+	xfer = mas->cur_xfer;
 	mas->cur_xfer = NULL;
 	geni_se_cancel_m_cmd(se);
 	spin_unlock_irq(&mas->lock);
 
 	time_left = wait_for_completion_timeout(&mas->cancel_done, HZ);
 	if (time_left)
-		return;
+		goto unmap_if_dma;
 
 	spin_lock_irq(&mas->lock);
 	reinit_completion(&mas->abort_done);
@@ -162,6 +170,39 @@ static void handle_fifo_timeout(struct spi_master *spi,
 		 */
 		mas->abort_failed = true;
 	}
+
+unmap_if_dma:
+	if (mas->cur_xfer_mode == GENI_SE_DMA) {
+		if (xfer) {
+			if (xfer->tx_buf && mas->tx_se_dma) {
+				spin_lock_irq(&mas->lock);
+				reinit_completion(&mas->tx_reset_done);
+				writel(1, se->base + SE_DMA_TX_FSM_RST);
+				spin_unlock_irq(&mas->lock);
+				time_left = wait_for_completion_timeout(&mas->tx_reset_done, HZ);
+				if (!time_left)
+					dev_err(mas->dev, "DMA TX RESET failed\n");
+				geni_se_tx_dma_unprep(se, mas->tx_se_dma, xfer->len);
+			}
+			if (xfer->rx_buf && mas->rx_se_dma) {
+				spin_lock_irq(&mas->lock);
+				reinit_completion(&mas->rx_reset_done);
+				writel(1, se->base + SE_DMA_RX_FSM_RST);
+				spin_unlock_irq(&mas->lock);
+				time_left = wait_for_completion_timeout(&mas->rx_reset_done, HZ);
+				if (!time_left)
+					dev_err(mas->dev, "DMA RX RESET failed\n");
+				geni_se_rx_dma_unprep(se, mas->rx_se_dma, xfer->len);
+			}
+		} else {
+			/*
+			 * This can happen if a timeout happened and we had to wait
+			 * for lock in this function because isr was holding the lock
+			 * and handling transfer completion at that time.
+			 */
+			dev_warn(mas->dev, "Cancel/Abort on completed SPI transfer\n");
+		}
+	}
 }
 
 static void handle_gpi_timeout(struct spi_master *spi, struct spi_message *msg)
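The hunk above open-codes the same reset-and-wait sequence twice, once per DMA direction. As a reading aid, here is that pattern as a hypothetical helper; only the registers, completions, and geni_se_* calls are real, the helper itself is not in the patch:

/* Hypothetical helper illustrating the repeated DMA-FSM reset pattern. */
static void spi_geni_dma_fsm_reset(struct spi_geni_master *mas,
				   struct completion *done, u32 rst_offset,
				   const char *dir)
{
	struct geni_se *se = &mas->se;

	spin_lock_irq(&mas->lock);
	reinit_completion(done);
	writel(1, se->base + rst_offset);	/* kick the DMA FSM reset */
	spin_unlock_irq(&mas->lock);

	/* The ISR completes 'done' on TX_RESET_DONE/RX_RESET_DONE status */
	if (!wait_for_completion_timeout(done, HZ))
		dev_err(mas->dev, "DMA %s RESET failed\n", dir);
}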
@@ -178,7 +219,8 @@ static void spi_geni_handle_err(struct spi_master *spi, struct spi_message *msg)
 
 	switch (mas->cur_xfer_mode) {
 	case GENI_SE_FIFO:
-		handle_fifo_timeout(spi, msg);
+	case GENI_SE_DMA:
+		handle_se_timeout(spi, msg);
 		break;
 	case GENI_GPI_DMA:
 		handle_gpi_timeout(spi, msg);
@@ -250,6 +292,8 @@ static void spi_geni_set_cs(struct spi_device *slv, bool set_flag)
 	}
 
 	mas->cs_flag = set_flag;
+	/* set xfer_mode to FIFO to complete cs_done in isr */
+	mas->cur_xfer_mode = GENI_SE_FIFO;
 	reinit_completion(&mas->cs_done);
 	if (set_flag)
 		geni_se_setup_m_cmd(se, SPI_CS_ASSERT, 0);
@@ -260,7 +304,7 @@ static void spi_geni_set_cs(struct spi_device *slv, bool set_flag)
 	time_left = wait_for_completion_timeout(&mas->cs_done, HZ);
 	if (!time_left) {
 		dev_warn(mas->dev, "Timeout setting chip select\n");
-		handle_fifo_timeout(spi, NULL);
+		handle_se_timeout(spi, NULL);
 	}
 
 exit:
@@ -482,8 +526,12 @@ static bool geni_can_dma(struct spi_controller *ctlr,
 {
 	struct spi_geni_master *mas = spi_master_get_devdata(slv->master);
 
-	/* check if dma is supported */
-	return mas->cur_xfer_mode != GENI_SE_FIFO;
+	/*
+	 * Return true if transfer needs to be mapped prior to
+	 * calling transfer_one which is the case only for GPI_DMA.
+	 * For SE_DMA mode, map/unmap is done in geni_se_*x_dma_prep.
+	 */
+	return mas->cur_xfer_mode == GENI_GPI_DMA;
 }
 
 static int spi_geni_prepare_message(struct spi_master *spi,
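For context on the rewritten comment: the SPI core calls can_dma() per transfer and, only when it returns true, DMA-maps the buffers into xfer->tx_sg/rx_sg before transfer_one() runs. Roughly, and heavily simplified from the core's mapping path (not verbatim):

/* Rough sketch of the SPI core's use of can_dma(); simplified, not verbatim. */
static int spi_map_msg_sketch(struct spi_controller *ctlr, struct spi_message *msg)
{
	struct spi_transfer *xfer;

	if (!ctlr->can_dma)
		return 0;

	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
			continue;
		/*
		 * The core DMA-maps xfer->tx_buf/rx_buf into xfer->tx_sg/rx_sg
		 * here. By returning false for SE DMA, this patch keeps the
		 * mapping in the driver, via geni_se_rx_dma_prep() and
		 * geni_se_tx_dma_prep().
		 */
	}
	return 0;
}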
@@ -494,6 +542,7 @@ static int spi_geni_prepare_message(struct spi_master *spi,
 
 	switch (mas->cur_xfer_mode) {
 	case GENI_SE_FIFO:
+	case GENI_SE_DMA:
 		if (spi_geni_is_abort_still_pending(mas))
 			return -EBUSY;
 		ret = setup_fifo_params(spi_msg->spi, spi);
@@ -597,7 +646,7 @@ static int spi_geni_init(struct spi_geni_master *mas)
 			break;
 		}
 		/*
-		 * in case of failure to get dma channel, we can still do the
+		 * in case of failure to get gpi dma channel, we can still do the
 		 * FIFO mode, so fallthrough
 		 */
 		dev_warn(mas->dev, "FIFO mode disabled, but couldn't get DMA, fall back to FIFO mode\n");
@@ -716,12 +765,12 @@ static void geni_spi_handle_rx(struct spi_geni_master *mas)
 	mas->rx_rem_bytes -= rx_bytes;
 }
 
-static void setup_fifo_xfer(struct spi_transfer *xfer,
+static int setup_se_xfer(struct spi_transfer *xfer,
 				struct spi_geni_master *mas,
 				u16 mode, struct spi_master *spi)
 {
 	u32 m_cmd = 0;
-	u32 len;
+	u32 len, fifo_size;
 	struct geni_se *se = &mas->se;
 	int ret;
 
@@ -748,7 +797,7 @@ static void setup_fifo_xfer(struct spi_transfer *xfer,
 	/* Speed and bits per word can be overridden per transfer */
 	ret = geni_spi_set_clock_and_bw(mas, xfer->speed_hz);
 	if (ret)
-		return;
+		return ret;
 
 	mas->tx_rem_bytes = 0;
 	mas->rx_rem_bytes = 0;
@@ -772,17 +821,50 @@
 		mas->rx_rem_bytes = xfer->len;
 	}
 
+	/* Select transfer mode based on transfer length */
+	fifo_size = mas->tx_fifo_depth * mas->fifo_width_bits / mas->cur_bits_per_word;
+	mas->cur_xfer_mode = (len <= fifo_size) ? GENI_SE_FIFO : GENI_SE_DMA;
+	geni_se_select_mode(se, mas->cur_xfer_mode);
+
 	/*
 	 * Lock around right before we start the transfer since our
 	 * interrupt could come in at any time now.
 	 */
 	spin_lock_irq(&mas->lock);
 	geni_se_setup_m_cmd(se, m_cmd, FRAGMENTATION);
-	if (m_cmd & SPI_TX_ONLY) {
+
+	if (mas->cur_xfer_mode == GENI_SE_DMA) {
+		if (m_cmd & SPI_RX_ONLY) {
+			ret = geni_se_rx_dma_prep(se, xfer->rx_buf,
+				xfer->len, &mas->rx_se_dma);
+			if (ret) {
+				dev_err(mas->dev, "Failed to setup Rx dma %d\n", ret);
+				mas->rx_se_dma = 0;
+				goto unlock_and_return;
+			}
+		}
+		if (m_cmd & SPI_TX_ONLY) {
+			ret = geni_se_tx_dma_prep(se, (void *)xfer->tx_buf,
+				xfer->len, &mas->tx_se_dma);
+			if (ret) {
+				dev_err(mas->dev, "Failed to setup Tx dma %d\n", ret);
+				mas->tx_se_dma = 0;
+				if (m_cmd & SPI_RX_ONLY) {
+					/* Unmap rx buffer if duplex transfer */
+					geni_se_rx_dma_unprep(se, mas->rx_se_dma, xfer->len);
+					mas->rx_se_dma = 0;
+				}
+				goto unlock_and_return;
+			}
+		}
+	} else if (m_cmd & SPI_TX_ONLY) {
 		if (geni_spi_handle_tx(mas))
 			writel(mas->tx_wm, se->base + SE_GENI_TX_WATERMARK_REG);
 	}
+
+unlock_and_return:
 	spin_unlock_irq(&mas->lock);
+	return ret;
 }
 
 static int spi_geni_transfer_one(struct spi_master *spi,
@@ -790,6 +872,7 @@ static int spi_geni_transfer_one(struct spi_master *spi,
 		struct spi_transfer *xfer)
 {
 	struct spi_geni_master *mas = spi_master_get_devdata(spi);
+	int ret;
 
 	if (spi_geni_is_abort_still_pending(mas))
 		return -EBUSY;
@@ -798,9 +881,12 @@ static int spi_geni_transfer_one(struct spi_master *spi,
 	if (!xfer->len)
 		return 0;
 
-	if (mas->cur_xfer_mode == GENI_SE_FIFO) {
-		setup_fifo_xfer(xfer, mas, slv->mode, spi);
-		return 1;
+	if (mas->cur_xfer_mode == GENI_SE_FIFO || mas->cur_xfer_mode == GENI_SE_DMA) {
+		ret = setup_se_xfer(xfer, mas, slv->mode, spi);
+		/* SPI framework expects +ve ret code to wait for transfer complete */
+		if (!ret)
+			ret = 1;
+		return ret;
 	}
 	return setup_gsi_xfer(xfer, mas, slv, spi);
 }
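The "+ve ret code" comment refers to the transfer_one() contract: a negative value is an error, 0 means the transfer already completed, and a positive value tells the SPI core to wait until the driver calls spi_finalize_current_transfer() (here, from the ISR). A sketch of the caller's side, simplified from the SPI core's message loop (not verbatim):

/* Simplified view of how the SPI core consumes transfer_one()'s return value. */
static int spi_do_one_transfer_sketch(struct spi_controller *ctlr,
				      struct spi_message *msg,
				      struct spi_transfer *xfer)
{
	int ret = ctlr->transfer_one(ctlr, msg->spi, xfer);

	if (ret < 0)
		return ret;	/* hard error: abort the message */
	if (ret > 0)		/* in flight: wait for the driver to finalize */
		wait_for_completion(&ctlr->xfer_completion);
	return 0;		/* ret == 0: completed synchronously */
}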
@@ -823,39 +909,70 @@ static irqreturn_t geni_spi_isr(int irq, void *data)
 
 	spin_lock(&mas->lock);
 
-	if ((m_irq & M_RX_FIFO_WATERMARK_EN) || (m_irq & M_RX_FIFO_LAST_EN))
-		geni_spi_handle_rx(mas);
+	if (mas->cur_xfer_mode == GENI_SE_FIFO) {
+		if ((m_irq & M_RX_FIFO_WATERMARK_EN) || (m_irq & M_RX_FIFO_LAST_EN))
+			geni_spi_handle_rx(mas);
 
-	if (m_irq & M_TX_FIFO_WATERMARK_EN)
-		geni_spi_handle_tx(mas);
+		if (m_irq & M_TX_FIFO_WATERMARK_EN)
+			geni_spi_handle_tx(mas);
 
-	if (m_irq & M_CMD_DONE_EN) {
-		if (mas->cur_xfer) {
+		if (m_irq & M_CMD_DONE_EN) {
+			if (mas->cur_xfer) {
+				spi_finalize_current_transfer(spi);
+				mas->cur_xfer = NULL;
+				/*
+				 * If this happens, then a CMD_DONE came before all the
+				 * Tx buffer bytes were sent out. This is unusual, log
+				 * this condition and disable the WM interrupt to
+				 * prevent the system from stalling due an interrupt
+				 * storm.
+				 *
+				 * If this happens when all Rx bytes haven't been
+				 * received, log the condition. The only known time
+				 * this can happen is if bits_per_word != 8 and some
+				 * registers that expect xfer lengths in num spi_words
+				 * weren't written correctly.
+				 */
+				if (mas->tx_rem_bytes) {
+					writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
+					dev_err(mas->dev, "Premature done. tx_rem = %d bpw%d\n",
+						mas->tx_rem_bytes, mas->cur_bits_per_word);
+				}
+				if (mas->rx_rem_bytes)
+					dev_err(mas->dev, "Premature done. rx_rem = %d bpw%d\n",
+						mas->rx_rem_bytes, mas->cur_bits_per_word);
+			} else {
+				complete(&mas->cs_done);
+			}
+		}
+	} else if (mas->cur_xfer_mode == GENI_SE_DMA) {
+		const struct spi_transfer *xfer = mas->cur_xfer;
+		u32 dma_tx_status = readl_relaxed(se->base + SE_DMA_TX_IRQ_STAT);
+		u32 dma_rx_status = readl_relaxed(se->base + SE_DMA_RX_IRQ_STAT);
+
+		if (dma_tx_status)
+			writel(dma_tx_status, se->base + SE_DMA_TX_IRQ_CLR);
+		if (dma_rx_status)
+			writel(dma_rx_status, se->base + SE_DMA_RX_IRQ_CLR);
+		if (dma_tx_status & TX_DMA_DONE)
+			mas->tx_rem_bytes = 0;
+		if (dma_rx_status & RX_DMA_DONE)
+			mas->rx_rem_bytes = 0;
+		if (dma_tx_status & TX_RESET_DONE)
+			complete(&mas->tx_reset_done);
+		if (dma_rx_status & RX_RESET_DONE)
+			complete(&mas->rx_reset_done);
+		if (!mas->tx_rem_bytes && !mas->rx_rem_bytes && xfer) {
+			if (xfer->tx_buf && mas->tx_se_dma) {
+				geni_se_tx_dma_unprep(se, mas->tx_se_dma, xfer->len);
+				mas->tx_se_dma = 0;
+			}
+			if (xfer->rx_buf && mas->rx_se_dma) {
+				geni_se_rx_dma_unprep(se, mas->rx_se_dma, xfer->len);
+				mas->rx_se_dma = 0;
+			}
 			spi_finalize_current_transfer(spi);
 			mas->cur_xfer = NULL;
-			/*
-			 * If this happens, then a CMD_DONE came before all the
-			 * Tx buffer bytes were sent out. This is unusual, log
-			 * this condition and disable the WM interrupt to
-			 * prevent the system from stalling due an interrupt
-			 * storm.
-			 *
-			 * If this happens when all Rx bytes haven't been
-			 * received, log the condition. The only known time
-			 * this can happen is if bits_per_word != 8 and some
-			 * registers that expect xfer lengths in num spi_words
-			 * weren't written correctly.
-			 */
-			if (mas->tx_rem_bytes) {
-				writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
-				dev_err(mas->dev, "Premature done. tx_rem = %d bpw%d\n",
-					mas->tx_rem_bytes, mas->cur_bits_per_word);
-			}
-			if (mas->rx_rem_bytes)
-				dev_err(mas->dev, "Premature done. rx_rem = %d bpw%d\n",
-					mas->rx_rem_bytes, mas->cur_bits_per_word);
-		} else {
-			complete(&mas->cs_done);
 		}
 	}
 
@@ -949,6 +1066,8 @@ static int spi_geni_probe(struct platform_device *pdev)
 	init_completion(&mas->cs_done);
 	init_completion(&mas->cancel_done);
 	init_completion(&mas->abort_done);
+	init_completion(&mas->tx_reset_done);
+	init_completion(&mas->rx_reset_done);
 	spin_lock_init(&mas->lock);
 	pm_runtime_use_autosuspend(&pdev->dev);
 	pm_runtime_set_autosuspend_delay(&pdev->dev, 250);