This commit adds support for basic MHI PM operations such as mhi_async_power_up, mhi_sync_power_up, and mhi_power_down. These routines place the MHI bus into the respective power domain states and call the state transition APIs when necessary. The MHI controller driver is expected to call these PM routines for MHI powerup and powerdown.

This is based on the patch submitted by Sujeev Dias:
https://lkml.org/lkml/2018/7/9/989

Signed-off-by: Sujeev Dias <sdias@codeaurora.org>
Signed-off-by: Siddartha Mohanadoss <smohanad@codeaurora.org>
[mani: splitted the pm patch and cleaned up for upstream]
Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
Reviewed-by: Jeffrey Hugo <jhugo@codeaurora.org>
Tested-by: Jeffrey Hugo <jhugo@codeaurora.org>
Link: https://lore.kernel.org/r/20200220095854.4804-8-manivannan.sadhasivam@linaro.org
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
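For context, a controller driver is expected to drive these PM entry points from its own probe/remove path. The following is a minimal, illustrative sketch only and is not part of this commit: my_mhi_probe(), my_mhi_remove() and the config argument are hypothetical placeholders for a bus-specific controller driver, while mhi_register_controller(), mhi_unregister_controller(), mhi_sync_power_up() and mhi_power_down() are the MHI core APIs introduced by this patch series.

/*
 * Illustrative sketch (not part of this patch): a controller driver powering
 * the MHI device up at probe time and down again at remove time.
 */
static int my_mhi_probe(struct mhi_controller *mhi_cntrl,
                        struct mhi_controller_config *config)
{
        int ret;

        ret = mhi_register_controller(mhi_cntrl, config);
        if (ret)
                return ret;

        /* Blocks until the device is up or the power up times out */
        ret = mhi_sync_power_up(mhi_cntrl);
        if (ret) {
                mhi_unregister_controller(mhi_cntrl);
                return ret;
        }

        return 0;
}

static void my_mhi_remove(struct mhi_controller *mhi_cntrl)
{
        /* "true" requests a graceful shutdown of the device */
        mhi_power_down(mhi_cntrl, true);
        mhi_unregister_controller(mhi_cntrl);
}

The register access and event handling helpers added by the series follow below.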
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
 *
 */

#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mhi.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include "internal.h"

int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl,
                              void __iomem *base, u32 offset, u32 *out)
{
        u32 tmp = readl(base + offset);

        /* If there is any unexpected value, query the link status */
        if (PCI_INVALID_READ(tmp) &&
            mhi_cntrl->link_status(mhi_cntrl))
                return -EIO;

        *out = tmp;

        return 0;
}

int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl,
                                    void __iomem *base, u32 offset,
                                    u32 mask, u32 shift, u32 *out)
{
        u32 tmp;
        int ret;

        ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp);
        if (ret)
                return ret;

        *out = (tmp & mask) >> shift;

        return 0;
}

void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base,
                   u32 offset, u32 val)
{
        writel(val, base + offset);
}

void mhi_write_reg_field(struct mhi_controller *mhi_cntrl, void __iomem *base,
                         u32 offset, u32 mask, u32 shift, u32 val)
{
        int ret;
        u32 tmp;

        ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp);
        if (ret)
                return;

        tmp &= ~mask;
        tmp |= (val << shift);
        mhi_write_reg(mhi_cntrl, base, offset, tmp);
}

void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr,
                  dma_addr_t db_val)
{
        mhi_write_reg(mhi_cntrl, db_addr, 4, upper_32_bits(db_val));
        mhi_write_reg(mhi_cntrl, db_addr, 0, lower_32_bits(db_val));
}

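/*
 * Burst mode doorbell: ring the doorbell only while db_mode is armed and
 * disarm it afterwards, so repeated rings are skipped until the PM path
 * re-arms db_mode (typically when the device is back in the M0 state).
 */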
void mhi_db_brstmode(struct mhi_controller *mhi_cntrl,
                     struct db_cfg *db_cfg,
                     void __iomem *db_addr,
                     dma_addr_t db_val)
{
        if (db_cfg->db_mode) {
                db_cfg->db_val = db_val;
                mhi_write_db(mhi_cntrl, db_addr, db_val);
                db_cfg->db_mode = 0;
        }
}

void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl,
                             struct db_cfg *db_cfg,
                             void __iomem *db_addr,
                             dma_addr_t db_val)
{
        db_cfg->db_val = db_val;
        mhi_write_db(mhi_cntrl, db_addr, db_val);
}

void mhi_ring_er_db(struct mhi_event *mhi_event)
{
        struct mhi_ring *ring = &mhi_event->ring;

        mhi_event->db_cfg.process_db(mhi_event->mhi_cntrl, &mhi_event->db_cfg,
                                     ring->db_addr, *ring->ctxt_wp);
}

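/* The doorbell value is the device-visible address of the ring write pointer */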
void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd)
{
        dma_addr_t db;
        struct mhi_ring *ring = &mhi_cmd->ring;

        db = ring->iommu_base + (ring->wp - ring->base);
        *ring->ctxt_wp = db;
        mhi_write_db(mhi_cntrl, ring->db_addr, db);
}

void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl,
                      struct mhi_chan *mhi_chan)
{
        struct mhi_ring *ring = &mhi_chan->tre_ring;
        dma_addr_t db;

        db = ring->iommu_base + (ring->wp - ring->base);
        *ring->ctxt_wp = db;
        mhi_chan->db_cfg.process_db(mhi_cntrl, &mhi_chan->db_cfg,
                                    ring->db_addr, db);
}

enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl)
{
        u32 exec;
        int ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_EXECENV, &exec);

        return (ret) ? MHI_EE_MAX : exec;
}

enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl)
{
        u32 state;
        int ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS,
                                     MHISTATUS_MHISTATE_MASK,
                                     MHISTATUS_MHISTATE_SHIFT, &state);
        return ret ? MHI_STATE_MAX : state;
}

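/* Convert a device-visible (IOMMU) ring address into a host virtual address */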
static void *mhi_to_virtual(struct mhi_ring *ring, dma_addr_t addr)
{
        return (addr - ring->iommu_base) + ring->base;
}

int mhi_destroy_device(struct device *dev, void *data)
{
        struct mhi_device *mhi_dev;
        struct mhi_controller *mhi_cntrl;

        if (dev->bus != &mhi_bus_type)
                return 0;

        mhi_dev = to_mhi_device(dev);
        mhi_cntrl = mhi_dev->mhi_cntrl;

        /* Only destroy virtual devices that are attached to the bus */
        if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
                return 0;

        dev_dbg(&mhi_cntrl->mhi_dev->dev, "destroy device for chan:%s\n",
                mhi_dev->chan_name);

        /* Notify the client and remove the device from MHI bus */
        device_del(dev);
        put_device(dev);

        return 0;
}

static void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason)
{
        struct mhi_driver *mhi_drv;

        if (!mhi_dev->dev.driver)
                return;

        mhi_drv = to_mhi_driver(mhi_dev->dev.driver);

        if (mhi_drv->status_cb)
                mhi_drv->status_cb(mhi_dev, cb_reason);
}

/* Bind MHI channels to MHI devices */
void mhi_create_devices(struct mhi_controller *mhi_cntrl)
{
        struct mhi_chan *mhi_chan;
        struct mhi_device *mhi_dev;
        struct device *dev = &mhi_cntrl->mhi_dev->dev;
        int i, ret;

        mhi_chan = mhi_cntrl->mhi_chan;
        for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
                if (!mhi_chan->configured || mhi_chan->mhi_dev ||
                    !(mhi_chan->ee_mask & BIT(mhi_cntrl->ee)))
                        continue;
                mhi_dev = mhi_alloc_device(mhi_cntrl);
                if (!mhi_dev)
                        return;

                mhi_dev->dev_type = MHI_DEVICE_XFER;
                switch (mhi_chan->dir) {
                case DMA_TO_DEVICE:
                        mhi_dev->ul_chan = mhi_chan;
                        mhi_dev->ul_chan_id = mhi_chan->chan;
                        break;
                case DMA_FROM_DEVICE:
                        /* We use dl_chan as offload channels */
                        mhi_dev->dl_chan = mhi_chan;
                        mhi_dev->dl_chan_id = mhi_chan->chan;
                        break;
                default:
                        dev_err(dev, "Direction not supported\n");
                        put_device(&mhi_dev->dev);
                        return;
                }

                get_device(&mhi_dev->dev);
                mhi_chan->mhi_dev = mhi_dev;

                /* Check next channel if it matches */
                if ((i + 1) < mhi_cntrl->max_chan && mhi_chan[1].configured) {
                        if (!strcmp(mhi_chan[1].name, mhi_chan->name)) {
                                i++;
                                mhi_chan++;
                                if (mhi_chan->dir == DMA_TO_DEVICE) {
                                        mhi_dev->ul_chan = mhi_chan;
                                        mhi_dev->ul_chan_id = mhi_chan->chan;
                                } else {
                                        mhi_dev->dl_chan = mhi_chan;
                                        mhi_dev->dl_chan_id = mhi_chan->chan;
                                }
                                get_device(&mhi_dev->dev);
                                mhi_chan->mhi_dev = mhi_dev;
                        }
                }

                /* Channel name is same for both UL and DL */
                mhi_dev->chan_name = mhi_chan->name;
                dev_set_name(&mhi_dev->dev, "%04x_%s", mhi_chan->chan,
                             mhi_dev->chan_name);

                /* Init wakeup source if available */
                if (mhi_dev->dl_chan && mhi_dev->dl_chan->wake_capable)
                        device_init_wakeup(&mhi_dev->dev, true);

                ret = device_add(&mhi_dev->dev);
                if (ret)
                        put_device(&mhi_dev->dev);
        }
}

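/*
 * Per-event-ring IRQ handler: if the ring has pending events, either notify
 * the client (for client-managed rings) or schedule the event processing
 * tasklet.
 */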
irqreturn_t mhi_irq_handler(int irq_number, void *dev)
{
        struct mhi_event *mhi_event = dev;
        struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl;
        struct mhi_event_ctxt *er_ctxt =
                &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index];
        struct mhi_ring *ev_ring = &mhi_event->ring;
        void *dev_rp = mhi_to_virtual(ev_ring, er_ctxt->rp);

        /* Only proceed if event ring has pending events */
        if (ev_ring->rp == dev_rp)
                return IRQ_HANDLED;

        /* For client managed event ring, notify pending data */
        if (mhi_event->cl_manage) {
                struct mhi_chan *mhi_chan = mhi_event->mhi_chan;
                struct mhi_device *mhi_dev = mhi_chan->mhi_dev;

                if (mhi_dev)
                        mhi_notify(mhi_dev, MHI_CB_PENDING_DATA);
        } else {
                tasklet_schedule(&mhi_event->task);
        }

        return IRQ_HANDLED;
}

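/*
 * Threaded handler for the controller interrupt: sample the MHI state and
 * execution environment under the PM lock, flag SYS_ERR for the syserr
 * worker, and notify the controller directly if the device entered RDDM or
 * hit a fatal error while still in PBL.
 */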
irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *dev)
{
        struct mhi_controller *mhi_cntrl = dev;
        enum mhi_state state = MHI_STATE_MAX;
        enum mhi_pm_state pm_state = 0;
        enum mhi_ee_type ee = 0;

        write_lock_irq(&mhi_cntrl->pm_lock);
        if (MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) {
                state = mhi_get_mhi_state(mhi_cntrl);
                ee = mhi_cntrl->ee;
                mhi_cntrl->ee = mhi_get_exec_env(mhi_cntrl);
        }

        if (state == MHI_STATE_SYS_ERR) {
                dev_dbg(&mhi_cntrl->mhi_dev->dev, "System error detected\n");
                pm_state = mhi_tryset_pm_state(mhi_cntrl,
                                               MHI_PM_SYS_ERR_DETECT);
        }
        write_unlock_irq(&mhi_cntrl->pm_lock);

        /* If device in RDDM don't bother processing SYS error */
        if (mhi_cntrl->ee == MHI_EE_RDDM) {
                if (mhi_cntrl->ee != ee) {
                        mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM);
                        wake_up_all(&mhi_cntrl->state_event);
                }
                goto exit_intvec;
        }

        if (pm_state == MHI_PM_SYS_ERR_DETECT) {
                wake_up_all(&mhi_cntrl->state_event);

                /* For fatal errors, we let controller decide next step */
                if (MHI_IN_PBL(ee))
                        mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FATAL_ERROR);
                else
                        schedule_work(&mhi_cntrl->syserr_worker);
        }

exit_intvec:

        return IRQ_HANDLED;
}

irqreturn_t mhi_intvec_handler(int irq_number, void *dev)
{
        struct mhi_controller *mhi_cntrl = dev;

        /* Wake up events waiting for state change */
        wake_up_all(&mhi_cntrl->state_event);

        return IRQ_WAKE_THREAD;
}