Merge branch 'pci/controller/qcom-edma'

- Pass the Qcom Endpoint 4K alignment requirement for outbound windows to
  the EPF core so EPF drivers can use it (Manivannan Sadhasivam)

- Use alignment restriction from EPF core in Qcom EPF MHI driver
  (Manivannan Sadhasivam); see the offset sketch after this list

- Add Qcom Endpoint eDMA support by enabling the eDMA IRQ (Manivannan
  Sadhasivam)

- Add Qcom MHI eDMA support (Manivannan Sadhasivam); see the dmaengine
  sketch after this list

- Add Qcom Snapdragon SM8450 support to the EPF MHI driver (Manivannan
  Sadhasivam)

- Use iATU for EPF MHI transfers smaller than 4K to avoid eDMA setup
  latency (Manivannan Sadhasivam); see the dispatch sketch after this list

- Add pci_epc_mem_init() kernel-doc (Manivannan Sadhasivam)
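
The alignment handoff ties the first two patches together: the controller
driver advertises its iATU constraint through pci_epc_features.align, and
the EPF driver reduces a host PCI address to an offset inside that window.
A minimal sketch of the pattern (mirroring get_align_offset() in the diff
below; assumes align is a power of two):

  /* Offset of a PCI address within an alignment window; "align" must
   * be a power of two, e.g. SZ_4K on the Qcom endpoint. */
  static size_t get_align_offset(u64 addr, size_t align)
  {
          return addr & (align - 1);
  }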
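
The eDMA path is the standard dmaengine slave sequence run against the DWC
eDMA channels: configure the channel with the remote PCI address, map the
local buffer, prepare and submit a slave transfer, then wait for the
completion callback. Condensed from pci_epf_mhi_edma_read() in the diff
below (locking and error handling trimmed):

  struct dma_slave_config config = { .direction = DMA_DEV_TO_MEM,
                                     .src_addr = from };
  DECLARE_COMPLETION_ONSTACK(complete);

  dmaengine_slave_config(chan, &config);
  dst_addr = dma_map_single(dma_dev, to, size, DMA_FROM_DEVICE);
  desc = dmaengine_prep_slave_single(chan, dst_addr, size, DMA_DEV_TO_MEM,
                                     DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
  desc->callback = pci_epf_mhi_dma_callback;   /* completes "complete" */
  desc->callback_param = &complete;
  dmaengine_submit(desc);
  dma_async_issue_pending(chan);
  wait_for_completion_timeout(&complete, msecs_to_jiffies(1000));
  dma_unmap_single(dma_dev, dst_addr, size, DMA_FROM_DEVICE);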
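
The small-transfer optimization is a size check at the top of each eDMA
callback: anything under 4K is served through the existing iATU mapping,
where a CPU copy is cheaper than programming an eDMA descriptor:

  /* Shape of the dispatch, as in pci_epf_mhi_edma_read() below */
  if (size < SZ_4K)
          return pci_epf_mhi_iatu_read(mhi_cntrl, from, to, size);
  /* otherwise fall through to the dmaengine transfer */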

* pci/controller/qcom-edma:
  PCI: endpoint: Add kernel-doc for pci_epc_mem_init() API
  PCI: epf-mhi: Use iATU for small transfers
  PCI: epf-mhi: Add support for SM8450
  PCI: epf-mhi: Add eDMA support
  PCI: qcom-ep: Add eDMA support
  PCI: epf-mhi: Make use of the alignment restriction from EPF core
  PCI: qcom-ep: Pass alignment restriction to the EPF core
Bjorn Helgaas 2023-08-29 11:03:54 -05:00
commit e8ce465fd4
3 changed files with 284 additions and 17 deletions

drivers/pci/controller/dwc/pcie-qcom-ep.c

@@ -74,6 +74,7 @@
#define PARF_INT_ALL_PLS_ERR BIT(15)
#define PARF_INT_ALL_PME_LEGACY BIT(16)
#define PARF_INT_ALL_PLS_PME BIT(17)
#define PARF_INT_ALL_EDMA BIT(22)
/* PARF_BDF_TO_SID_CFG register fields */
#define PARF_BDF_TO_SID_BYPASS BIT(0)
@@ -395,7 +396,7 @@ static int qcom_pcie_perst_deassert(struct dw_pcie *pci)
writel_relaxed(0, pcie_ep->parf + PARF_INT_ALL_MASK);
val = PARF_INT_ALL_LINK_DOWN | PARF_INT_ALL_BME |
PARF_INT_ALL_PM_TURNOFF | PARF_INT_ALL_DSTATE_CHANGE |
-PARF_INT_ALL_LINK_UP;
+PARF_INT_ALL_LINK_UP | PARF_INT_ALL_EDMA;
writel_relaxed(val, pcie_ep->parf + PARF_INT_ALL_MASK);
ret = dw_pcie_ep_init_complete(&pcie_ep->pci.ep);
@@ -706,6 +707,7 @@ static const struct pci_epc_features qcom_pcie_epc_features = {
.core_init_notifier = true,
.msi_capable = true,
.msix_capable = false,
.align = SZ_4K,
};
static const struct pci_epc_features *
@@ -743,6 +745,7 @@ static int qcom_pcie_ep_probe(struct platform_device *pdev)
pcie_ep->pci.dev = dev;
pcie_ep->pci.ops = &pci_ops;
pcie_ep->pci.ep.ops = &pci_ep_ops;
pcie_ep->pci.edma.nr_irqs = 1;
platform_set_drvdata(pdev, pcie_ep);
ret = qcom_pcie_ep_get_resources(pdev, pcie_ep);

drivers/pci/endpoint/functions/pci-epf-mhi.c

@@ -6,8 +6,10 @@
* Author: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
*/
#include <linux/dmaengine.h>
#include <linux/mhi_ep.h>
#include <linux/module.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pci-epc.h>
#include <linux/pci-epf.h>
@@ -16,6 +18,9 @@
#define to_epf_mhi(cntrl) container_of(cntrl, struct pci_epf_mhi, cntrl)
/* Platform specific flags */
#define MHI_EPF_USE_DMA BIT(0)
struct pci_epf_mhi_ep_info {
const struct mhi_ep_cntrl_config *config;
struct pci_epf_header *epf_header;
@@ -23,6 +28,7 @@ struct pci_epf_mhi_ep_info {
u32 epf_flags;
u32 msi_count;
u32 mru;
u32 flags;
};
#define MHI_EP_CHANNEL_CONFIG(ch_num, ch_name, direction) \
@@ -91,17 +97,42 @@ static const struct pci_epf_mhi_ep_info sdx55_info = {
.mru = 0x8000,
};
static struct pci_epf_header sm8450_header = {
.vendorid = PCI_VENDOR_ID_QCOM,
.deviceid = 0x0306,
.baseclass_code = PCI_CLASS_OTHERS,
.interrupt_pin = PCI_INTERRUPT_INTA,
};
static const struct pci_epf_mhi_ep_info sm8450_info = {
.config = &mhi_v1_config,
.epf_header = &sm8450_header,
.bar_num = BAR_0,
.epf_flags = PCI_BASE_ADDRESS_MEM_TYPE_32,
.msi_count = 32,
.mru = 0x8000,
.flags = MHI_EPF_USE_DMA,
};
struct pci_epf_mhi {
const struct pci_epc_features *epc_features;
const struct pci_epf_mhi_ep_info *info;
struct mhi_ep_cntrl mhi_cntrl;
struct pci_epf *epf;
struct mutex lock;
void __iomem *mmio;
resource_size_t mmio_phys;
struct dma_chan *dma_chan_tx;
struct dma_chan *dma_chan_rx;
u32 mmio_size;
int irq;
};
static size_t get_align_offset(struct pci_epf_mhi *epf_mhi, u64 addr)
{
return addr & (epf_mhi->epc_features->align - 1);
}
static int __pci_epf_mhi_alloc_map(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr,
phys_addr_t *paddr, void __iomem **vaddr,
size_t offset, size_t size)
@@ -133,8 +164,7 @@ static int pci_epf_mhi_alloc_map(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr,
size_t size)
{
struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
-struct pci_epc *epc = epf_mhi->epf->epc;
-size_t offset = pci_addr & (epc->mem->window.page_size - 1);
+size_t offset = get_align_offset(epf_mhi, pci_addr);
return __pci_epf_mhi_alloc_map(mhi_cntrl, pci_addr, paddr, vaddr,
offset, size);
@@ -159,9 +189,7 @@ static void pci_epf_mhi_unmap_free(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr,
size_t size)
{
struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
-struct pci_epf *epf = epf_mhi->epf;
-struct pci_epc *epc = epf->epc;
-size_t offset = pci_addr & (epc->mem->window.page_size - 1);
+size_t offset = get_align_offset(epf_mhi, pci_addr);
__pci_epf_mhi_unmap_free(mhi_cntrl, pci_addr, paddr, vaddr, offset,
size);
@@ -181,11 +209,11 @@ static void pci_epf_mhi_raise_irq(struct mhi_ep_cntrl *mhi_cntrl, u32 vector)
vector + 1);
}
-static int pci_epf_mhi_read_from_host(struct mhi_ep_cntrl *mhi_cntrl, u64 from,
-void *to, size_t size)
+static int pci_epf_mhi_iatu_read(struct mhi_ep_cntrl *mhi_cntrl, u64 from,
+void *to, size_t size)
{
struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
-size_t offset = from % SZ_4K;
+size_t offset = get_align_offset(epf_mhi, from);
void __iomem *tre_buf;
phys_addr_t tre_phys;
int ret;
@@ -209,11 +237,11 @@ static int pci_epf_mhi_read_from_host(struct mhi_ep_cntrl *mhi_cntrl, u64 from,
return 0;
}
-static int pci_epf_mhi_write_to_host(struct mhi_ep_cntrl *mhi_cntrl,
-void *from, u64 to, size_t size)
+static int pci_epf_mhi_iatu_write(struct mhi_ep_cntrl *mhi_cntrl,
+void *from, u64 to, size_t size)
{
struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
-size_t offset = to % SZ_4K;
+size_t offset = get_align_offset(epf_mhi, to);
void __iomem *tre_buf;
phys_addr_t tre_phys;
int ret;
@@ -237,6 +265,206 @@ static int pci_epf_mhi_write_to_host(struct mhi_ep_cntrl *mhi_cntrl,
return 0;
}
static void pci_epf_mhi_dma_callback(void *param)
{
complete(param);
}
static int pci_epf_mhi_edma_read(struct mhi_ep_cntrl *mhi_cntrl, u64 from,
void *to, size_t size)
{
struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
struct device *dma_dev = epf_mhi->epf->epc->dev.parent;
struct dma_chan *chan = epf_mhi->dma_chan_rx;
struct device *dev = &epf_mhi->epf->dev;
DECLARE_COMPLETION_ONSTACK(complete);
struct dma_async_tx_descriptor *desc;
struct dma_slave_config config = {};
dma_cookie_t cookie;
dma_addr_t dst_addr;
int ret;
if (size < SZ_4K)
return pci_epf_mhi_iatu_read(mhi_cntrl, from, to, size);
mutex_lock(&epf_mhi->lock);
config.direction = DMA_DEV_TO_MEM;
config.src_addr = from;
ret = dmaengine_slave_config(chan, &config);
if (ret) {
dev_err(dev, "Failed to configure DMA channel\n");
goto err_unlock;
}
dst_addr = dma_map_single(dma_dev, to, size, DMA_FROM_DEVICE);
ret = dma_mapping_error(dma_dev, dst_addr);
if (ret) {
dev_err(dev, "Failed to map remote memory\n");
goto err_unlock;
}
desc = dmaengine_prep_slave_single(chan, dst_addr, size, DMA_DEV_TO_MEM,
DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
if (!desc) {
dev_err(dev, "Failed to prepare DMA\n");
ret = -EIO;
goto err_unmap;
}
desc->callback = pci_epf_mhi_dma_callback;
desc->callback_param = &complete;
cookie = dmaengine_submit(desc);
ret = dma_submit_error(cookie);
if (ret) {
dev_err(dev, "Failed to do DMA submit\n");
goto err_unmap;
}
dma_async_issue_pending(chan);
ret = wait_for_completion_timeout(&complete, msecs_to_jiffies(1000));
if (!ret) {
dev_err(dev, "DMA transfer timeout\n");
dmaengine_terminate_sync(chan);
ret = -ETIMEDOUT;
}
err_unmap:
dma_unmap_single(dma_dev, dst_addr, size, DMA_FROM_DEVICE);
err_unlock:
mutex_unlock(&epf_mhi->lock);
return ret;
}
static int pci_epf_mhi_edma_write(struct mhi_ep_cntrl *mhi_cntrl, void *from,
u64 to, size_t size)
{
struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl);
struct device *dma_dev = epf_mhi->epf->epc->dev.parent;
struct dma_chan *chan = epf_mhi->dma_chan_tx;
struct device *dev = &epf_mhi->epf->dev;
DECLARE_COMPLETION_ONSTACK(complete);
struct dma_async_tx_descriptor *desc;
struct dma_slave_config config = {};
dma_cookie_t cookie;
dma_addr_t src_addr;
int ret;
if (size < SZ_4K)
return pci_epf_mhi_iatu_write(mhi_cntrl, from, to, size);
mutex_lock(&epf_mhi->lock);
config.direction = DMA_MEM_TO_DEV;
config.dst_addr = to;
ret = dmaengine_slave_config(chan, &config);
if (ret) {
dev_err(dev, "Failed to configure DMA channel\n");
goto err_unlock;
}
src_addr = dma_map_single(dma_dev, from, size, DMA_TO_DEVICE);
ret = dma_mapping_error(dma_dev, src_addr);
if (ret) {
dev_err(dev, "Failed to map remote memory\n");
goto err_unlock;
}
desc = dmaengine_prep_slave_single(chan, src_addr, size, DMA_MEM_TO_DEV,
DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
if (!desc) {
dev_err(dev, "Failed to prepare DMA\n");
ret = -EIO;
goto err_unmap;
}
desc->callback = pci_epf_mhi_dma_callback;
desc->callback_param = &complete;
cookie = dmaengine_submit(desc);
ret = dma_submit_error(cookie);
if (ret) {
dev_err(dev, "Failed to do DMA submit\n");
goto err_unmap;
}
dma_async_issue_pending(chan);
ret = wait_for_completion_timeout(&complete, msecs_to_jiffies(1000));
if (!ret) {
dev_err(dev, "DMA transfer timeout\n");
dmaengine_terminate_sync(chan);
ret = -ETIMEDOUT;
}
err_unmap:
dma_unmap_single(dma_dev, src_addr, size, DMA_TO_DEVICE);
err_unlock:
mutex_unlock(&epf_mhi->lock);
return ret;
}
struct epf_dma_filter {
struct device *dev;
u32 dma_mask;
};
static bool pci_epf_mhi_filter(struct dma_chan *chan, void *node)
{
struct epf_dma_filter *filter = node;
struct dma_slave_caps caps;
memset(&caps, 0, sizeof(caps));
dma_get_slave_caps(chan, &caps);
return chan->device->dev == filter->dev && filter->dma_mask &
caps.directions;
}
static int pci_epf_mhi_dma_init(struct pci_epf_mhi *epf_mhi)
{
struct device *dma_dev = epf_mhi->epf->epc->dev.parent;
struct device *dev = &epf_mhi->epf->dev;
struct epf_dma_filter filter;
dma_cap_mask_t mask;
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
filter.dev = dma_dev;
filter.dma_mask = BIT(DMA_MEM_TO_DEV);
epf_mhi->dma_chan_tx = dma_request_channel(mask, pci_epf_mhi_filter,
&filter);
if (IS_ERR_OR_NULL(epf_mhi->dma_chan_tx)) {
dev_err(dev, "Failed to request tx channel\n");
return -ENODEV;
}
filter.dma_mask = BIT(DMA_DEV_TO_MEM);
epf_mhi->dma_chan_rx = dma_request_channel(mask, pci_epf_mhi_filter,
&filter);
if (IS_ERR_OR_NULL(epf_mhi->dma_chan_rx)) {
dev_err(dev, "Failed to request rx channel\n");
dma_release_channel(epf_mhi->dma_chan_tx);
epf_mhi->dma_chan_tx = NULL;
return -ENODEV;
}
return 0;
}
static void pci_epf_mhi_dma_deinit(struct pci_epf_mhi *epf_mhi)
{
dma_release_channel(epf_mhi->dma_chan_tx);
dma_release_channel(epf_mhi->dma_chan_rx);
epf_mhi->dma_chan_tx = NULL;
epf_mhi->dma_chan_rx = NULL;
}
static int pci_epf_mhi_core_init(struct pci_epf *epf)
{
struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
@@ -270,6 +498,10 @@ static int pci_epf_mhi_core_init(struct pci_epf *epf)
return ret;
}
epf_mhi->epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no);
if (!epf_mhi->epc_features)
return -ENODATA;
return 0;
}
@@ -282,6 +514,14 @@ static int pci_epf_mhi_link_up(struct pci_epf *epf)
struct device *dev = &epf->dev;
int ret;
if (info->flags & MHI_EPF_USE_DMA) {
ret = pci_epf_mhi_dma_init(epf_mhi);
if (ret) {
dev_err(dev, "Failed to initialize DMA: %d\n", ret);
return ret;
}
}
mhi_cntrl->mmio = epf_mhi->mmio;
mhi_cntrl->irq = epf_mhi->irq;
mhi_cntrl->mru = info->mru;
@@ -291,13 +531,20 @@ static int pci_epf_mhi_link_up(struct pci_epf *epf)
mhi_cntrl->raise_irq = pci_epf_mhi_raise_irq;
mhi_cntrl->alloc_map = pci_epf_mhi_alloc_map;
mhi_cntrl->unmap_free = pci_epf_mhi_unmap_free;
-mhi_cntrl->read_from_host = pci_epf_mhi_read_from_host;
-mhi_cntrl->write_to_host = pci_epf_mhi_write_to_host;
+if (info->flags & MHI_EPF_USE_DMA) {
+mhi_cntrl->read_from_host = pci_epf_mhi_edma_read;
+mhi_cntrl->write_to_host = pci_epf_mhi_edma_write;
+} else {
+mhi_cntrl->read_from_host = pci_epf_mhi_iatu_read;
+mhi_cntrl->write_to_host = pci_epf_mhi_iatu_write;
+}
/* Register the MHI EP controller */
ret = mhi_ep_register_controller(mhi_cntrl, info->config);
if (ret) {
dev_err(dev, "Failed to register MHI EP controller: %d\n", ret);
+if (info->flags & MHI_EPF_USE_DMA)
+pci_epf_mhi_dma_deinit(epf_mhi);
return ret;
}
@@ -307,10 +554,13 @@ static int pci_epf_mhi_link_up(struct pci_epf *epf)
static int pci_epf_mhi_link_down(struct pci_epf *epf)
{
struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
const struct pci_epf_mhi_ep_info *info = epf_mhi->info;
struct mhi_ep_cntrl *mhi_cntrl = &epf_mhi->mhi_cntrl;
if (mhi_cntrl->mhi_dev) {
mhi_ep_power_down(mhi_cntrl);
if (info->flags & MHI_EPF_USE_DMA)
pci_epf_mhi_dma_deinit(epf_mhi);
mhi_ep_unregister_controller(mhi_cntrl);
}
@@ -320,6 +570,7 @@ static int pci_epf_mhi_link_down(struct pci_epf *epf)
static int pci_epf_mhi_bme(struct pci_epf *epf)
{
struct pci_epf_mhi *epf_mhi = epf_get_drvdata(epf);
const struct pci_epf_mhi_ep_info *info = epf_mhi->info;
struct mhi_ep_cntrl *mhi_cntrl = &epf_mhi->mhi_cntrl;
struct device *dev = &epf->dev;
int ret;
@@ -332,6 +583,8 @@ static int pci_epf_mhi_bme(struct pci_epf *epf)
ret = mhi_ep_power_up(mhi_cntrl);
if (ret) {
dev_err(dev, "Failed to power up MHI EP: %d\n", ret);
if (info->flags & MHI_EPF_USE_DMA)
pci_epf_mhi_dma_deinit(epf_mhi);
mhi_ep_unregister_controller(mhi_cntrl);
}
}
@@ -382,6 +635,8 @@ static void pci_epf_mhi_unbind(struct pci_epf *epf)
*/
if (mhi_cntrl->mhi_dev) {
mhi_ep_power_down(mhi_cntrl);
if (info->flags & MHI_EPF_USE_DMA)
pci_epf_mhi_dma_deinit(epf_mhi);
mhi_ep_unregister_controller(mhi_cntrl);
}
@@ -422,9 +677,8 @@ static int pci_epf_mhi_probe(struct pci_epf *epf,
}
static const struct pci_epf_device_id pci_epf_mhi_ids[] = {
-{
-.name = "sdx55", .driver_data = (kernel_ulong_t)&sdx55_info,
-},
+{ .name = "sdx55", .driver_data = (kernel_ulong_t)&sdx55_info },
+{ .name = "sm8450", .driver_data = (kernel_ulong_t)&sm8450_info },
{},
};

drivers/pci/endpoint/pci-epc-mem.c

@@ -115,6 +115,16 @@ err_mem:
}
EXPORT_SYMBOL_GPL(pci_epc_multi_mem_init);
/**
* pci_epc_mem_init() - Initialize the pci_epc_mem structure
* @epc: the EPC device that invoked pci_epc_mem_init
* @base: Physical address of the window region
* @size: Total size of the window region
* @page_size: Page size of the window region
*
* Invoke to initialize a single pci_epc_mem structure used by the
* endpoint functions to allocate memory for mapping the PCI host memory
*/
int pci_epc_mem_init(struct pci_epc *epc, phys_addr_t base,
size_t size, size_t page_size)
{
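
For context, a typical caller of pci_epc_mem_init() is an EPC driver
setting up a single outbound window. An illustrative call (the resource
names here are placeholders, not taken from this patch):

  /* "mem_res" is a placeholder for the driver's window resource */
  ret = pci_epc_mem_init(epc, mem_res->start, resource_size(mem_res),
                         PAGE_SIZE);
  if (ret < 0)
          dev_err(dev, "Failed to initialize address space\n");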