Revert "cxl: Add preliminary workaround for CX4 interrupt limitation"

Remove abandoned CAPI support for the Mellanox CX4.

This reverts commit cbce0917e2.

Signed-off-by: Alastair D'Silva <alastair@d-silva.org>
Acked-by: Andrew Donnellan <andrew.donnellan@au1.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Alastair D'Silva authored on 2018-06-28 12:05:02 +02:00; committed by Michael Ellerman
parent 0cfd7335d1
commit 17d2903938
6 changed files with 0 additions and 64 deletions

drivers/misc/cxl/api.c

@@ -181,21 +181,6 @@ static irq_hw_number_t cxl_find_afu_irq(struct cxl_context *ctx, int num)
 	return 0;
 }
 
-int _cxl_next_msi_hwirq(struct pci_dev *pdev, struct cxl_context **ctx, int *afu_irq)
-{
-	if (*ctx == NULL || *afu_irq == 0) {
-		*afu_irq = 1;
-		*ctx = cxl_get_context(pdev);
-	} else {
-		(*afu_irq)++;
-		if (*afu_irq > cxl_get_max_irqs_per_process(pdev)) {
-			*ctx = list_next_entry(*ctx, extra_irq_contexts);
-			*afu_irq = 1;
-		}
-	}
-	return cxl_find_afu_irq(*ctx, *afu_irq);
-}
-
 /* Exported via cxl_base */
 int cxl_set_priv(struct cxl_context *ctx, void *priv)
 {
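For orientation, the removed _cxl_next_msi_hwirq() is a stateful cursor: afu_irq counts up within the current context, and once it exceeds the per-process limit the iterator hops to the next context on extra_irq_contexts and restarts at 1. Below is a standalone model of just that stepping rule (plain userspace C, not from the kernel; NCTX and LIMIT are assumed stand-ins for the context chain length and cxl_get_max_irqs_per_process()):

#include <stdio.h>

/* Standalone model of the removed iterator's stepping rule: afu_irq
 * counts 1..LIMIT within a context, then we hop to the next context
 * and restart at 1, mirroring _cxl_next_msi_hwirq() above. */
#define NCTX  2   /* number of chained contexts (assumed) */
#define LIMIT 4   /* stand-in for cxl_get_max_irqs_per_process() (assumed) */

int main(void)
{
	int ctx = -1, afu_irq = 0;

	for (int i = 0; i < NCTX * LIMIT; i++) {
		if (ctx < 0 || afu_irq == 0) {
			ctx = 0;            /* first call: (ctx 0, afu_irq 1) */
			afu_irq = 1;
		} else if (++afu_irq > LIMIT) {
			ctx++;              /* limit hit: next context, restart */
			afu_irq = 1;
		}
		printf("msi %d -> context %d, afu_irq %d\n", i, ctx, afu_irq);
	}
	return 0;
}

With NCTX = 2 and LIMIT = 4 it prints eight MSI slots, the first four on context 0 and the next four on context 1, which is the CX4 workaround's way of spreading one device's MSI-X vectors over several contexts.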

drivers/misc/cxl/base.c

@@ -141,23 +141,6 @@ void cxl_pci_disable_device(struct pci_dev *dev)
 }
 EXPORT_SYMBOL_GPL(cxl_pci_disable_device);
 
-int cxl_next_msi_hwirq(struct pci_dev *pdev, struct cxl_context **ctx, int *afu_irq)
-{
-	int ret;
-	struct cxl_calls *calls;
-
-	calls = cxl_calls_get();
-	if (!calls)
-		return -EBUSY;
-
-	ret = calls->cxl_next_msi_hwirq(pdev, ctx, afu_irq);
-
-	cxl_calls_put(calls);
-
-	return ret;
-}
-EXPORT_SYMBOL_GPL(cxl_next_msi_hwirq);
-
 static int __init cxl_base_init(void)
 {
 	struct device_node *np;
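The cxl_next_msi_hwirq() wrapper above shows the cxl_calls indirection: built-in code cannot link against a module directly, so it calls through a struct of function pointers that the cxl module registers, taking a module reference for the duration of the call. A simplified sketch of the pattern, with invented names (my_calls, my_do_thing); the real cxl_calls_get()/cxl_calls_put() may be implemented differently:

#include <linux/errno.h>
#include <linux/module.h>

/* Sketch only: an ops struct the module fills in at load time. */
struct my_calls {
	int (*do_thing)(int arg);
	struct module *owner;
};

static struct my_calls *registered_calls;	/* set when the module loads */

static struct my_calls *my_calls_get(void)
{
	struct my_calls *calls = registered_calls;

	/* Pin the providing module so it cannot unload mid-call. */
	if (calls && !try_module_get(calls->owner))
		calls = NULL;
	return calls;
}

static void my_calls_put(struct my_calls *calls)
{
	module_put(calls->owner);
}

/* Built-in entry point mirroring the shape of cxl_next_msi_hwirq(). */
int my_do_thing(int arg)
{
	struct my_calls *calls = my_calls_get();
	int ret;

	if (!calls)
		return -EBUSY;	/* provider not loaded */
	ret = calls->do_thing(arg);
	my_calls_put(calls);
	return ret;
}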

drivers/misc/cxl/context.c

@@ -74,7 +74,6 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master)
 	ctx->pending_afu_err = false;
 
 	INIT_LIST_HEAD(&ctx->irq_names);
-	INIT_LIST_HEAD(&ctx->extra_irq_contexts);
 
 	/*
 	 * When we have to destroy all contexts in cxl_context_detach_all() we

drivers/misc/cxl/cxl.h

@@ -623,14 +623,6 @@ struct cxl_context {
 	struct rcu_head rcu;
 
-	/*
-	 * Only used when more interrupts are allocated via
-	 * pci_enable_msix_range than are supported in the default context, to
-	 * use additional contexts to overcome the limitation. i.e. Mellanox
-	 * CX4 only:
-	 */
-	struct list_head extra_irq_contexts;
-
 	struct mm_struct *mm;
 
 	u16 tidr;
@@ -878,13 +870,11 @@ ssize_t cxl_pci_afu_read_err_buffer(struct cxl_afu *afu, char *buf,
 /* Internal functions wrapped in cxl_base to allow PHB to call them */
 bool _cxl_pci_associate_default_context(struct pci_dev *dev, struct cxl_afu *afu);
 void _cxl_pci_disable_device(struct pci_dev *dev);
-int _cxl_next_msi_hwirq(struct pci_dev *pdev, struct cxl_context **ctx, int *afu_irq);
 
 struct cxl_calls {
 	void (*cxl_slbia)(struct mm_struct *mm);
 	bool (*cxl_pci_associate_default_context)(struct pci_dev *dev, struct cxl_afu *afu);
 	void (*cxl_pci_disable_device)(struct pci_dev *dev);
-	int (*cxl_next_msi_hwirq)(struct pci_dev *pdev, struct cxl_context **ctx, int *afu_irq);
 	struct module *owner;
 };
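The extra_irq_contexts member removed above is an intrusive list link: each context embeds its own list_head, and the iterator in api.c hops between peers with list_next_entry(). A minimal illustration of the idiom (struct name invented; the member name follows the removed code):

#include <linux/list.h>

/* Illustration only: a context-like struct carrying its own list link. */
struct ctx_like {
	struct list_head extra_irq_contexts;	/* chains peer contexts */
	/* ... per-context state ... */
};

/* Step to the next chained context, as _cxl_next_msi_hwirq() did. */
static struct ctx_like *next_ctx(struct ctx_like *ctx)
{
	return list_next_entry(ctx, extra_irq_contexts);
}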

drivers/misc/cxl/main.c

@@ -106,7 +106,6 @@ static struct cxl_calls cxl_calls = {
 	.cxl_slbia = cxl_slbia_core,
 	.cxl_pci_associate_default_context = _cxl_pci_associate_default_context,
 	.cxl_pci_disable_device = _cxl_pci_disable_device,
-	.cxl_next_msi_hwirq = _cxl_next_msi_hwirq,
 	.owner = THIS_MODULE,
 };

include/misc/cxl.h

@@ -183,26 +183,6 @@ void cxl_psa_unmap(void __iomem *addr);
 /* Get the process element for this context */
 int cxl_process_element(struct cxl_context *ctx);
 
-/*
- * Limit the number of interrupts that a single context can allocate via
- * cxl_start_work. If using the api with a real phb, this may be used to
- * request that additional default contexts be created when allocating
- * interrupts via pci_enable_msix_range. These will be set to the same running
- * state as the default context, and if that is running it will reuse the
- * parameters previously passed to cxl_start_context for the default context.
- */
-int cxl_set_max_irqs_per_process(struct pci_dev *dev, int irqs);
-int cxl_get_max_irqs_per_process(struct pci_dev *dev);
-
-/*
- * Use to simultaneously iterate over hardware interrupt numbers, contexts and
- * afu interrupt numbers allocated for the device via pci_enable_msix_range and
- * is a useful convenience function when working with hardware that has
- * limitations on the number of interrupts per process. *ctx and *afu_irq
- * should be NULL and 0 to start the iteration.
- */
-int cxl_next_msi_hwirq(struct pci_dev *pdev, struct cxl_context **ctx, int *afu_irq);
-
 /*
  * These calls allow drivers to create their own file descriptors and make them
  * identical to the cxl file descriptor user API. An example use case:
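Given the contract documented in the removed comment above (*ctx NULL and *afu_irq 0 to start), a caller would have iterated roughly as follows; walk_msi_hwirqs() and nvec are hypothetical, not taken from any in-tree user:

#include <linux/pci.h>
#include <misc/cxl.h>

/* Hypothetical sketch: visit every hwirq allocated for pdev via
 * pci_enable_msix_range(), letting cxl_next_msi_hwirq() advance the
 * (context, afu_irq) cursor as documented above. */
static void walk_msi_hwirqs(struct pci_dev *pdev, int nvec)
{
	struct cxl_context *ctx = NULL;	/* NULL starts the iteration */
	int afu_irq = 0;		/* 0 starts the iteration */
	int i, hwirq;

	for (i = 0; i < nvec; i++) {
		hwirq = cxl_next_msi_hwirq(pdev, &ctx, &afu_irq);
		if (hwirq <= 0)
			break;	/* 0: no such irq; negative: cxl unavailable */
		/* wire MSI vector i to hwirq here */
	}
}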