Merge branch 'msix'
Alexander Gordeev says:

====================
net: Use pci_enable_msix_range() instead of pci_enable_msix()

As a result of the deprecation of the MSI-X/MSI enablement functions
pci_enable_msix() and pci_enable_msi_block(), all drivers using these
two interfaces need to be updated to use the new pci_enable_msi_range()
and pci_enable_msix_range() interfaces.

Cc: e1000-devel@lists.sourceforge.net
Cc: linux-driver@qlogic.com
Cc: linux-net-drivers@solarflare.com
Cc: linux-pci@vger.kernel.org
Cc: linux-rdma@vger.kernel.org
Cc: netdev@vger.kernel.org
Cc: pv-drivers@vmware.com
Cc: wil6210@qca.qualcomm.com
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 9fa9e49829
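Every per-driver hunk below follows the same conversion. As a rough sketch (the helper names and structure here are illustrative, not taken from any driver in this series), the deprecated pci_enable_msix() returned 0 on success, a negative errno on failure, or a positive count of available vectors that the caller had to retry with; pci_enable_msix_range() instead takes the acceptable minimum and maximum directly and returns either the number of vectors actually allocated or a negative errno:

#include <linux/errno.h>
#include <linux/pci.h>

/*
 * Old-style allocation with the deprecated pci_enable_msix(): callers
 * had to loop, retrying with the positive "available vectors" hint.
 */
static int example_enable_msix_old(struct pci_dev *pdev,
				   struct msix_entry *entries,
				   int min_vecs, int max_vecs)
{
	int want = max_vecs;

	while (want >= min_vecs) {
		int err = pci_enable_msix(pdev, entries, want);

		if (err == 0)
			return want;	/* got all 'want' vectors */
		if (err < 0)
			return err;	/* hard failure */
		want = err;		/* err > 0: retry with fewer */
	}

	return -ENOSPC;
}

/*
 * New-style allocation: one call, bounded by [min_vecs, max_vecs].
 * Returns the number of vectors allocated or a negative errno.
 */
static int example_enable_msix_new(struct pci_dev *pdev,
				   struct msix_entry *entries,
				   int min_vecs, int max_vecs)
{
	return pci_enable_msix_range(pdev, entries, min_vecs, max_vecs);
}

The bnx2 hunk immediately below is a direct instance of this rewrite: the open-coded retry loop collapses into a single pci_enable_msix_range() call bounded by BNX2_MIN_MSIX_VEC.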
@@ -6206,7 +6206,7 @@ bnx2_free_irq(struct bnx2 *bp)
 static void
 bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
 {
-	int i, total_vecs, rc;
+	int i, total_vecs;
 	struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
 	struct net_device *dev = bp->dev;
 	const int len = sizeof(bp->irq_tbl[0].name);
@@ -6229,16 +6229,9 @@ bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
 #ifdef BCM_CNIC
 	total_vecs++;
 #endif
-	rc = -ENOSPC;
-	while (total_vecs >= BNX2_MIN_MSIX_VEC) {
-		rc = pci_enable_msix(bp->pdev, msix_ent, total_vecs);
-		if (rc <= 0)
-			break;
-		if (rc > 0)
-			total_vecs = rc;
-	}
-
-	if (rc != 0)
+	total_vecs = pci_enable_msix_range(bp->pdev, msix_ent,
+					   BNX2_MIN_MSIX_VEC, total_vecs);
+	if (total_vecs < 0)
 		return;
 
 	msix_vecs = total_vecs;
@ -1638,36 +1638,16 @@ int bnx2x_enable_msix(struct bnx2x *bp)
|
||||
DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
|
||||
msix_vec);
|
||||
|
||||
rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], msix_vec);
|
||||
|
||||
rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0],
|
||||
BNX2X_MIN_MSIX_VEC_CNT(bp), msix_vec);
|
||||
/*
|
||||
* reconfigure number of tx/rx queues according to available
|
||||
* MSI-X vectors
|
||||
*/
|
||||
if (rc >= BNX2X_MIN_MSIX_VEC_CNT(bp)) {
|
||||
/* how less vectors we will have? */
|
||||
int diff = msix_vec - rc;
|
||||
|
||||
BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
|
||||
|
||||
rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
|
||||
|
||||
if (rc) {
|
||||
BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
|
||||
goto no_msix;
|
||||
}
|
||||
/*
|
||||
* decrease number of queues by number of unallocated entries
|
||||
*/
|
||||
bp->num_ethernet_queues -= diff;
|
||||
bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
|
||||
|
||||
BNX2X_DEV_INFO("New queue configuration set: %d\n",
|
||||
bp->num_queues);
|
||||
} else if (rc > 0) {
|
||||
if (rc == -ENOSPC) {
|
||||
/* Get by with single vector */
|
||||
rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 1);
|
||||
if (rc) {
|
||||
rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1, 1);
|
||||
if (rc < 0) {
|
||||
BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
|
||||
rc);
|
||||
goto no_msix;
|
||||
@ -1680,8 +1660,22 @@ int bnx2x_enable_msix(struct bnx2x *bp)
|
||||
bp->num_ethernet_queues = 1;
|
||||
bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
|
||||
} else if (rc < 0) {
|
||||
BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
|
||||
BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
|
||||
goto no_msix;
|
||||
} else if (rc < msix_vec) {
|
||||
/* how less vectors we will have? */
|
||||
int diff = msix_vec - rc;
|
||||
|
||||
BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
|
||||
|
||||
/*
|
||||
* decrease number of queues by number of unallocated entries
|
||||
*/
|
||||
bp->num_ethernet_queues -= diff;
|
||||
bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
|
||||
|
||||
BNX2X_DEV_INFO("New queue configuration set: %d\n",
|
||||
bp->num_queues);
|
||||
}
|
||||
|
||||
bp->flags |= USING_MSIX_FLAG;
|
||||
|
@@ -11362,12 +11362,10 @@ static bool tg3_enable_msix(struct tg3 *tp)
 		msix_ent[i].vector = 0;
 	}
 
-	rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
+	rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
 	if (rc < 0) {
 		return false;
-	} else if (rc != 0) {
-		if (pci_enable_msix(tp->pdev, msix_ent, rc))
-			return false;
+	} else if (rc < tp->irq_cnt) {
 		netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
 			      tp->irq_cnt, rc);
 		tp->irq_cnt = rc;
@ -2666,9 +2666,11 @@ bnad_enable_msix(struct bnad *bnad)
|
||||
for (i = 0; i < bnad->msix_num; i++)
|
||||
bnad->msix_table[i].entry = i;
|
||||
|
||||
ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, bnad->msix_num);
|
||||
if (ret > 0) {
|
||||
/* Not enough MSI-X vectors. */
|
||||
ret = pci_enable_msix_range(bnad->pcidev, bnad->msix_table,
|
||||
1, bnad->msix_num);
|
||||
if (ret < 0) {
|
||||
goto intx_mode;
|
||||
} else if (ret < bnad->msix_num) {
|
||||
pr_warn("BNA: %d MSI-X vectors allocated < %d requested\n",
|
||||
ret, bnad->msix_num);
|
||||
|
||||
@ -2681,18 +2683,11 @@ bnad_enable_msix(struct bnad *bnad)
|
||||
bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP +
|
||||
BNAD_MAILBOX_MSIX_VECTORS;
|
||||
|
||||
if (bnad->msix_num > ret)
|
||||
if (bnad->msix_num > ret) {
|
||||
pci_disable_msix(bnad->pcidev);
|
||||
goto intx_mode;
|
||||
|
||||
/* Try once more with adjusted numbers */
|
||||
/* If this fails, fall back to INTx */
|
||||
ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
|
||||
bnad->msix_num);
|
||||
if (ret)
|
||||
goto intx_mode;
|
||||
|
||||
} else if (ret < 0)
|
||||
goto intx_mode;
|
||||
}
|
||||
}
|
||||
|
||||
pci_intx(bnad->pcidev, 0);
|
||||
|
||||
|
@ -3088,30 +3088,22 @@ static int cxgb_enable_msix(struct adapter *adap)
|
||||
{
|
||||
struct msix_entry entries[SGE_QSETS + 1];
|
||||
int vectors;
|
||||
int i, err;
|
||||
int i;
|
||||
|
||||
vectors = ARRAY_SIZE(entries);
|
||||
for (i = 0; i < vectors; ++i)
|
||||
entries[i].entry = i;
|
||||
|
||||
while ((err = pci_enable_msix(adap->pdev, entries, vectors)) > 0)
|
||||
vectors = err;
|
||||
vectors = pci_enable_msix_range(adap->pdev, entries,
|
||||
adap->params.nports + 1, vectors);
|
||||
if (vectors < 0)
|
||||
return vectors;
|
||||
|
||||
if (err < 0)
|
||||
pci_disable_msix(adap->pdev);
|
||||
for (i = 0; i < vectors; ++i)
|
||||
adap->msix_info[i].vec = entries[i].vector;
|
||||
adap->msix_nvectors = vectors;
|
||||
|
||||
if (!err && vectors < (adap->params.nports + 1)) {
|
||||
pci_disable_msix(adap->pdev);
|
||||
err = -1;
|
||||
}
|
||||
|
||||
if (!err) {
|
||||
for (i = 0; i < vectors; ++i)
|
||||
adap->msix_info[i].vec = entries[i].vector;
|
||||
adap->msix_nvectors = vectors;
|
||||
}
|
||||
|
||||
return err;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void print_port_info(struct adapter *adap, const struct adapter_info *ai)
|
||||
|
@ -5737,7 +5737,7 @@ static void reduce_ethqs(struct adapter *adap, int n)
|
||||
static int enable_msix(struct adapter *adap)
|
||||
{
|
||||
int ofld_need = 0;
|
||||
int i, err, want, need;
|
||||
int i, want, need;
|
||||
struct sge *s = &adap->sge;
|
||||
unsigned int nchan = adap->params.nports;
|
||||
struct msix_entry entries[MAX_INGQ + 1];
|
||||
@ -5753,32 +5753,30 @@ static int enable_msix(struct adapter *adap)
|
||||
}
|
||||
need = adap->params.nports + EXTRA_VECS + ofld_need;
|
||||
|
||||
while ((err = pci_enable_msix(adap->pdev, entries, want)) >= need)
|
||||
want = err;
|
||||
want = pci_enable_msix_range(adap->pdev, entries, need, want);
|
||||
if (want < 0)
|
||||
return want;
|
||||
|
||||
if (!err) {
|
||||
/*
|
||||
* Distribute available vectors to the various queue groups.
|
||||
* Every group gets its minimum requirement and NIC gets top
|
||||
* priority for leftovers.
|
||||
*/
|
||||
i = want - EXTRA_VECS - ofld_need;
|
||||
if (i < s->max_ethqsets) {
|
||||
s->max_ethqsets = i;
|
||||
if (i < s->ethqsets)
|
||||
reduce_ethqs(adap, i);
|
||||
}
|
||||
if (is_offload(adap)) {
|
||||
i = want - EXTRA_VECS - s->max_ethqsets;
|
||||
i -= ofld_need - nchan;
|
||||
s->ofldqsets = (i / nchan) * nchan; /* round down */
|
||||
}
|
||||
for (i = 0; i < want; ++i)
|
||||
adap->msix_info[i].vec = entries[i].vector;
|
||||
} else if (err > 0)
|
||||
dev_info(adap->pdev_dev,
|
||||
"only %d MSI-X vectors left, not using MSI-X\n", err);
|
||||
return err;
|
||||
/*
|
||||
* Distribute available vectors to the various queue groups.
|
||||
* Every group gets its minimum requirement and NIC gets top
|
||||
* priority for leftovers.
|
||||
*/
|
||||
i = want - EXTRA_VECS - ofld_need;
|
||||
if (i < s->max_ethqsets) {
|
||||
s->max_ethqsets = i;
|
||||
if (i < s->ethqsets)
|
||||
reduce_ethqs(adap, i);
|
||||
}
|
||||
if (is_offload(adap)) {
|
||||
i = want - EXTRA_VECS - s->max_ethqsets;
|
||||
i -= ofld_need - nchan;
|
||||
s->ofldqsets = (i / nchan) * nchan; /* round down */
|
||||
}
|
||||
for (i = 0; i < want; ++i)
|
||||
adap->msix_info[i].vec = entries[i].vector;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
#undef EXTRA_VECS
|
||||
|
@ -2444,7 +2444,7 @@ static void reduce_ethqs(struct adapter *adapter, int n)
|
||||
*/
|
||||
static int enable_msix(struct adapter *adapter)
|
||||
{
|
||||
int i, err, want, need;
|
||||
int i, want, need, nqsets;
|
||||
struct msix_entry entries[MSIX_ENTRIES];
|
||||
struct sge *s = &adapter->sge;
|
||||
|
||||
@ -2460,26 +2460,23 @@ static int enable_msix(struct adapter *adapter)
|
||||
*/
|
||||
want = s->max_ethqsets + MSIX_EXTRAS;
|
||||
need = adapter->params.nports + MSIX_EXTRAS;
|
||||
while ((err = pci_enable_msix(adapter->pdev, entries, want)) >= need)
|
||||
want = err;
|
||||
|
||||
if (err == 0) {
|
||||
int nqsets = want - MSIX_EXTRAS;
|
||||
if (nqsets < s->max_ethqsets) {
|
||||
dev_warn(adapter->pdev_dev, "only enough MSI-X vectors"
|
||||
" for %d Queue Sets\n", nqsets);
|
||||
s->max_ethqsets = nqsets;
|
||||
if (nqsets < s->ethqsets)
|
||||
reduce_ethqs(adapter, nqsets);
|
||||
}
|
||||
for (i = 0; i < want; ++i)
|
||||
adapter->msix_info[i].vec = entries[i].vector;
|
||||
} else if (err > 0) {
|
||||
pci_disable_msix(adapter->pdev);
|
||||
dev_info(adapter->pdev_dev, "only %d MSI-X vectors left,"
|
||||
" not using MSI-X\n", err);
|
||||
want = pci_enable_msix_range(adapter->pdev, entries, need, want);
|
||||
if (want < 0)
|
||||
return want;
|
||||
|
||||
nqsets = want - MSIX_EXTRAS;
|
||||
if (nqsets < s->max_ethqsets) {
|
||||
dev_warn(adapter->pdev_dev, "only enough MSI-X vectors"
|
||||
" for %d Queue Sets\n", nqsets);
|
||||
s->max_ethqsets = nqsets;
|
||||
if (nqsets < s->ethqsets)
|
||||
reduce_ethqs(adapter, nqsets);
|
||||
}
|
||||
return err;
|
||||
for (i = 0; i < want; ++i)
|
||||
adapter->msix_info[i].vec = entries[i].vector;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct net_device_ops cxgb4vf_netdev_ops = {
|
||||
|
@@ -1796,7 +1796,8 @@ static int enic_set_intr_mode(struct enic *enic)
 	    enic->cq_count >= n + m &&
 	    enic->intr_count >= n + m + 2) {
 
-		if (!pci_enable_msix(enic->pdev, enic->msix_entry, n + m + 2)) {
+		if (pci_enable_msix_range(enic->pdev, enic->msix_entry,
+					  n + m + 2, n + m + 2) > 0) {
 
 			enic->rq_count = n;
 			enic->wq_count = m;
@@ -1815,7 +1816,8 @@ static int enic_set_intr_mode(struct enic *enic)
 	    enic->wq_count >= m &&
 	    enic->cq_count >= 1 + m &&
 	    enic->intr_count >= 1 + m + 2) {
-		if (!pci_enable_msix(enic->pdev, enic->msix_entry, 1 + m + 2)) {
+		if (pci_enable_msix_range(enic->pdev, enic->msix_entry,
+					  1 + m + 2, 1 + m + 2) > 0) {
 
 			enic->rq_count = 1;
 			enic->wq_count = m;
@ -2507,7 +2507,7 @@ static void be_msix_disable(struct be_adapter *adapter)
|
||||
|
||||
static int be_msix_enable(struct be_adapter *adapter)
|
||||
{
|
||||
int i, status, num_vec;
|
||||
int i, num_vec;
|
||||
struct device *dev = &adapter->pdev->dev;
|
||||
|
||||
/* If RoCE is supported, program the max number of NIC vectors that
|
||||
@ -2523,24 +2523,11 @@ static int be_msix_enable(struct be_adapter *adapter)
|
||||
for (i = 0; i < num_vec; i++)
|
||||
adapter->msix_entries[i].entry = i;
|
||||
|
||||
status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
|
||||
if (status == 0) {
|
||||
goto done;
|
||||
} else if (status >= MIN_MSIX_VECTORS) {
|
||||
num_vec = status;
|
||||
status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
|
||||
num_vec);
|
||||
if (!status)
|
||||
goto done;
|
||||
}
|
||||
num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
|
||||
MIN_MSIX_VECTORS, num_vec);
|
||||
if (num_vec < 0)
|
||||
goto fail;
|
||||
|
||||
dev_warn(dev, "MSIx enable failed\n");
|
||||
|
||||
/* INTx is not supported in VFs, so fail probe if enable_msix fails */
|
||||
if (!be_physfn(adapter))
|
||||
return status;
|
||||
return 0;
|
||||
done:
|
||||
if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
|
||||
adapter->num_msix_roce_vec = num_vec / 2;
|
||||
dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
|
||||
@ -2552,6 +2539,14 @@ done:
|
||||
dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
|
||||
adapter->num_msix_vec);
|
||||
return 0;
|
||||
|
||||
fail:
|
||||
dev_warn(dev, "MSIx enable failed\n");
|
||||
|
||||
/* INTx is not supported in VFs, so fail probe if enable_msix fails */
|
||||
if (!be_physfn(adapter))
|
||||
return num_vec;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int be_msix_vec_get(struct be_adapter *adapter,
|
||||
|
@@ -2038,13 +2038,16 @@ void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
 					      msix_entry),
 				      GFP_KERNEL);
 		if (adapter->msix_entries) {
+			struct e1000_adapter *a = adapter;
+
 			for (i = 0; i < adapter->num_vectors; i++)
 				adapter->msix_entries[i].entry = i;
 
-			err = pci_enable_msix(adapter->pdev,
-					      adapter->msix_entries,
-					      adapter->num_vectors);
-			if (err == 0)
+			err = pci_enable_msix_range(a->pdev,
+						    a->msix_entries,
+						    a->num_vectors,
+						    a->num_vectors);
+			if (err > 0)
 				return;
 		}
 		/* MSI-X failed, so fall through and try MSI */
@ -5856,37 +5856,16 @@ err_out:
|
||||
**/
|
||||
static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
|
||||
{
|
||||
int err = 0;
|
||||
|
||||
pf->num_msix_entries = 0;
|
||||
while (vectors >= I40E_MIN_MSIX) {
|
||||
err = pci_enable_msix(pf->pdev, pf->msix_entries, vectors);
|
||||
if (err == 0) {
|
||||
/* good to go */
|
||||
pf->num_msix_entries = vectors;
|
||||
break;
|
||||
} else if (err < 0) {
|
||||
/* total failure */
|
||||
dev_info(&pf->pdev->dev,
|
||||
"MSI-X vector reservation failed: %d\n", err);
|
||||
vectors = 0;
|
||||
break;
|
||||
} else {
|
||||
/* err > 0 is the hint for retry */
|
||||
dev_info(&pf->pdev->dev,
|
||||
"MSI-X vectors wanted %d, retrying with %d\n",
|
||||
vectors, err);
|
||||
vectors = err;
|
||||
}
|
||||
}
|
||||
|
||||
if (vectors > 0 && vectors < I40E_MIN_MSIX) {
|
||||
vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries,
|
||||
I40E_MIN_MSIX, vectors);
|
||||
if (vectors < 0) {
|
||||
dev_info(&pf->pdev->dev,
|
||||
"Couldn't get enough vectors, only %d available\n",
|
||||
vectors);
|
||||
"MSI-X vector reservation failed: %d\n", vectors);
|
||||
vectors = 0;
|
||||
}
|
||||
|
||||
pf->num_msix_entries = vectors;
|
||||
|
||||
return vectors;
|
||||
}
|
||||
|
||||
|
@@ -1111,10 +1111,11 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix)
 	for (i = 0; i < numvecs; i++)
 		adapter->msix_entries[i].entry = i;
 
-	err = pci_enable_msix(adapter->pdev,
-			      adapter->msix_entries,
-			      numvecs);
-	if (err == 0)
+	err = pci_enable_msix_range(adapter->pdev,
+				    adapter->msix_entries,
+				    numvecs,
+				    numvecs);
+	if (err > 0)
 		return;
 
 	igb_reset_interrupt_capability(adapter);
@@ -1043,11 +1043,11 @@ static void igbvf_set_interrupt_capability(struct igbvf_adapter *adapter)
 		for (i = 0; i < 3; i++)
 			adapter->msix_entries[i].entry = i;
 
-		err = pci_enable_msix(adapter->pdev,
-				      adapter->msix_entries, 3);
+		err = pci_enable_msix_range(adapter->pdev,
+					    adapter->msix_entries, 3, 3);
 	}
 
-	if (err) {
+	if (err < 0) {
 		/* MSI-X failed */
 		dev_err(&adapter->pdev->dev,
 			"Failed to initialize MSI-X interrupts.\n");
@ -698,7 +698,7 @@ static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
|
||||
static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
|
||||
int vectors)
|
||||
{
|
||||
int err, vector_threshold;
|
||||
int vector_threshold;
|
||||
|
||||
/* We'll want at least 2 (vector_threshold):
|
||||
* 1) TxQ[0] + RxQ[0] handler
|
||||
@ -712,18 +712,10 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
|
||||
* Right now, we simply care about how many we'll get; we'll
|
||||
* set them up later while requesting irq's.
|
||||
*/
|
||||
while (vectors >= vector_threshold) {
|
||||
err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
|
||||
vectors);
|
||||
if (!err) /* Success in acquiring all requested vectors. */
|
||||
break;
|
||||
else if (err < 0)
|
||||
vectors = 0; /* Nasty failure, quit now */
|
||||
else /* err == number of vectors we should try again with */
|
||||
vectors = err;
|
||||
}
|
||||
vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
|
||||
vector_threshold, vectors);
|
||||
|
||||
if (vectors < vector_threshold) {
|
||||
if (vectors < 0) {
|
||||
/* Can't allocate enough MSI-X interrupts? Oh well.
|
||||
* This just means we'll go with either a single MSI
|
||||
* vector or fall back to legacy interrupts.
|
||||
|
@ -1817,7 +1817,6 @@ void ixgbevf_reset(struct ixgbevf_adapter *adapter)
|
||||
static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
|
||||
int vectors)
|
||||
{
|
||||
int err = 0;
|
||||
int vector_threshold;
|
||||
|
||||
/* We'll want at least 2 (vector_threshold):
|
||||
@ -1831,33 +1830,24 @@ static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
|
||||
* Right now, we simply care about how many we'll get; we'll
|
||||
* set them up later while requesting irq's.
|
||||
*/
|
||||
while (vectors >= vector_threshold) {
|
||||
err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
|
||||
vectors);
|
||||
if (!err || err < 0) /* Success or a nasty failure. */
|
||||
break;
|
||||
else /* err == number of vectors we should try again with */
|
||||
vectors = err;
|
||||
}
|
||||
vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
|
||||
vector_threshold, vectors);
|
||||
|
||||
if (vectors < vector_threshold)
|
||||
err = -ENOMEM;
|
||||
|
||||
if (err) {
|
||||
if (vectors < 0) {
|
||||
dev_err(&adapter->pdev->dev,
|
||||
"Unable to allocate MSI-X interrupts\n");
|
||||
kfree(adapter->msix_entries);
|
||||
adapter->msix_entries = NULL;
|
||||
} else {
|
||||
/*
|
||||
* Adjust for only the vectors we'll use, which is minimum
|
||||
* of max_msix_q_vectors + NON_Q_VECTORS, or the number of
|
||||
* vectors we were allocated.
|
||||
*/
|
||||
adapter->num_msix_vectors = vectors;
|
||||
return vectors;
|
||||
}
|
||||
|
||||
return err;
|
||||
/* Adjust for only the vectors we'll use, which is minimum
|
||||
* of max_msix_q_vectors + NON_Q_VECTORS, or the number of
|
||||
* vectors we were allocated.
|
||||
*/
|
||||
adapter->num_msix_vectors = vectors;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -1976,7 +1976,6 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
|
||||
int nreq = min_t(int, dev->caps.num_ports *
|
||||
min_t(int, netif_get_num_default_rss_queues() + 1,
|
||||
MAX_MSIX_P_PORT) + MSIX_LEGACY_SZ, MAX_MSIX);
|
||||
int err;
|
||||
int i;
|
||||
|
||||
if (msi_x) {
|
||||
@ -1990,23 +1989,13 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
|
||||
for (i = 0; i < nreq; ++i)
|
||||
entries[i].entry = i;
|
||||
|
||||
retry:
|
||||
err = pci_enable_msix(dev->pdev, entries, nreq);
|
||||
if (err) {
|
||||
/* Try again if at least 2 vectors are available */
|
||||
if (err > 1) {
|
||||
mlx4_info(dev, "Requested %d vectors, "
|
||||
"but only %d MSI-X vectors available, "
|
||||
"trying again\n", nreq, err);
|
||||
nreq = err;
|
||||
goto retry;
|
||||
}
|
||||
nreq = pci_enable_msix_range(dev->pdev, entries, 2, nreq);
|
||||
|
||||
if (nreq < 0) {
|
||||
kfree(entries);
|
||||
goto no_msi;
|
||||
}
|
||||
|
||||
if (nreq <
|
||||
MSIX_LEGACY_SZ + dev->caps.num_ports * MIN_MSIX_P_PORT) {
|
||||
} else if (nreq < MSIX_LEGACY_SZ +
|
||||
dev->caps.num_ports * MIN_MSIX_P_PORT) {
|
||||
/*Working in legacy mode , all EQ's shared*/
|
||||
dev->caps.comp_pool = 0;
|
||||
dev->caps.num_comp_vectors = nreq - 1;
|
||||
|
@@ -116,7 +116,6 @@ static int mlx5_enable_msix(struct mlx5_core_dev *dev)
 	struct mlx5_eq_table *table = &dev->priv.eq_table;
 	int num_eqs = 1 << dev->caps.log_max_eq;
 	int nvec;
-	int err;
 	int i;
 
 	nvec = dev->caps.num_ports * num_online_cpus() + MLX5_EQ_VEC_COMP_BASE;
@@ -131,17 +130,12 @@ static int mlx5_enable_msix(struct mlx5_core_dev *dev)
 	for (i = 0; i < nvec; i++)
 		table->msix_arr[i].entry = i;
 
-retry:
-	table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE;
-	err = pci_enable_msix(dev->pdev, table->msix_arr, nvec);
-	if (err <= 0) {
-		return err;
-	} else if (err > 2) {
-		nvec = err;
-		goto retry;
-	}
+	nvec = pci_enable_msix_range(dev->pdev, table->msix_arr,
+				     MLX5_EQ_VEC_COMP_BASE, nvec);
+	if (nvec < 0)
+		return nvec;
 
-	mlx5_core_dbg(dev, "received %d MSI vectors out of %d requested\n", err, nvec);
+	table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE;
 
 	return 0;
 }
@ -2329,16 +2329,14 @@ static int myri10ge_request_irq(struct myri10ge_priv *mgp)
|
||||
status = 0;
|
||||
if (myri10ge_msi) {
|
||||
if (mgp->num_slices > 1) {
|
||||
status =
|
||||
pci_enable_msix(pdev, mgp->msix_vectors,
|
||||
mgp->num_slices);
|
||||
if (status == 0) {
|
||||
mgp->msix_enabled = 1;
|
||||
} else {
|
||||
status = pci_enable_msix_range(pdev, mgp->msix_vectors,
|
||||
mgp->num_slices, mgp->num_slices);
|
||||
if (status < 0) {
|
||||
dev_err(&pdev->dev,
|
||||
"Error %d setting up MSI-X\n", status);
|
||||
return status;
|
||||
}
|
||||
mgp->msix_enabled = 1;
|
||||
}
|
||||
if (mgp->msix_enabled == 0) {
|
||||
status = pci_enable_msi(pdev);
|
||||
@ -3895,32 +3893,34 @@ static void myri10ge_probe_slices(struct myri10ge_priv *mgp)
|
||||
mgp->msix_vectors = kcalloc(mgp->num_slices, sizeof(*mgp->msix_vectors),
|
||||
GFP_KERNEL);
|
||||
if (mgp->msix_vectors == NULL)
|
||||
goto disable_msix;
|
||||
goto no_msix;
|
||||
for (i = 0; i < mgp->num_slices; i++) {
|
||||
mgp->msix_vectors[i].entry = i;
|
||||
}
|
||||
|
||||
while (mgp->num_slices > 1) {
|
||||
/* make sure it is a power of two */
|
||||
while (!is_power_of_2(mgp->num_slices))
|
||||
mgp->num_slices--;
|
||||
mgp->num_slices = rounddown_pow_of_two(mgp->num_slices);
|
||||
if (mgp->num_slices == 1)
|
||||
goto disable_msix;
|
||||
status = pci_enable_msix(pdev, mgp->msix_vectors,
|
||||
mgp->num_slices);
|
||||
if (status == 0) {
|
||||
pci_disable_msix(pdev);
|
||||
goto no_msix;
|
||||
status = pci_enable_msix_range(pdev,
|
||||
mgp->msix_vectors,
|
||||
mgp->num_slices,
|
||||
mgp->num_slices);
|
||||
if (status < 0)
|
||||
goto no_msix;
|
||||
|
||||
pci_disable_msix(pdev);
|
||||
|
||||
if (status == mgp->num_slices) {
|
||||
if (old_allocated)
|
||||
kfree(old_fw);
|
||||
return;
|
||||
}
|
||||
if (status > 0)
|
||||
} else {
|
||||
mgp->num_slices = status;
|
||||
else
|
||||
goto disable_msix;
|
||||
}
|
||||
}
|
||||
|
||||
disable_msix:
|
||||
no_msix:
|
||||
if (mgp->msix_vectors != NULL) {
|
||||
kfree(mgp->msix_vectors);
|
||||
mgp->msix_vectors = NULL;
|
||||
|
@@ -3792,9 +3792,10 @@ static int s2io_enable_msi_x(struct s2io_nic *nic)
 	writeq(rx_mat, &bar0->rx_mat);
 	readq(&bar0->rx_mat);
 
-	ret = pci_enable_msix(nic->pdev, nic->entries, nic->num_entries);
+	ret = pci_enable_msix_range(nic->pdev, nic->entries,
+				    nic->num_entries, nic->num_entries);
 	/* We fail init if error or we get less vectors than min required */
-	if (ret) {
+	if (ret < 0) {
 		DBG_PRINT(ERR_DBG, "Enabling MSI-X failed\n");
 		kfree(nic->entries);
 		swstats->mem_freed += nic->num_entries *
@ -2349,12 +2349,18 @@ start:
|
||||
vdev->vxge_entries[j].entry = VXGE_ALARM_MSIX_ID;
|
||||
vdev->vxge_entries[j].in_use = 0;
|
||||
|
||||
ret = pci_enable_msix(vdev->pdev, vdev->entries, vdev->intr_cnt);
|
||||
if (ret > 0) {
|
||||
ret = pci_enable_msix_range(vdev->pdev,
|
||||
vdev->entries, 3, vdev->intr_cnt);
|
||||
if (ret < 0) {
|
||||
ret = -ENODEV;
|
||||
goto enable_msix_failed;
|
||||
} else if (ret < vdev->intr_cnt) {
|
||||
pci_disable_msix(vdev->pdev);
|
||||
|
||||
vxge_debug_init(VXGE_ERR,
|
||||
"%s: MSI-X enable failed for %d vectors, ret: %d",
|
||||
VXGE_DRIVER_NAME, vdev->intr_cnt, ret);
|
||||
if ((max_config_vpath != VXGE_USE_DEFAULT) || (ret < 3)) {
|
||||
if (max_config_vpath != VXGE_USE_DEFAULT) {
|
||||
ret = -ENODEV;
|
||||
goto enable_msix_failed;
|
||||
}
|
||||
@ -2368,9 +2374,6 @@ start:
|
||||
vxge_close_vpaths(vdev, temp);
|
||||
vdev->no_of_vpath = temp;
|
||||
goto start;
|
||||
} else if (ret < 0) {
|
||||
ret = -ENODEV;
|
||||
goto enable_msix_failed;
|
||||
}
|
||||
return 0;
|
||||
|
||||
|
@ -3930,7 +3930,7 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
|
||||
{
|
||||
struct fe_priv *np = get_nvpriv(dev);
|
||||
u8 __iomem *base = get_hwbase(dev);
|
||||
int ret = 1;
|
||||
int ret;
|
||||
int i;
|
||||
irqreturn_t (*handler)(int foo, void *data);
|
||||
|
||||
@ -3946,14 +3946,18 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
|
||||
if (np->msi_flags & NV_MSI_X_CAPABLE) {
|
||||
for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++)
|
||||
np->msi_x_entry[i].entry = i;
|
||||
ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK));
|
||||
if (ret == 0) {
|
||||
ret = pci_enable_msix_range(np->pci_dev,
|
||||
np->msi_x_entry,
|
||||
np->msi_flags & NV_MSI_X_VECTORS_MASK,
|
||||
np->msi_flags & NV_MSI_X_VECTORS_MASK);
|
||||
if (ret > 0) {
|
||||
np->msi_flags |= NV_MSI_X_ENABLED;
|
||||
if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) {
|
||||
/* Request irq for rx handling */
|
||||
sprintf(np->name_rx, "%s-rx", dev->name);
|
||||
if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector,
|
||||
nv_nic_irq_rx, IRQF_SHARED, np->name_rx, dev) != 0) {
|
||||
ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector,
|
||||
nv_nic_irq_rx, IRQF_SHARED, np->name_rx, dev);
|
||||
if (ret) {
|
||||
netdev_info(dev,
|
||||
"request_irq failed for rx %d\n",
|
||||
ret);
|
||||
@ -3963,8 +3967,9 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
|
||||
}
|
||||
/* Request irq for tx handling */
|
||||
sprintf(np->name_tx, "%s-tx", dev->name);
|
||||
if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector,
|
||||
nv_nic_irq_tx, IRQF_SHARED, np->name_tx, dev) != 0) {
|
||||
ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector,
|
||||
nv_nic_irq_tx, IRQF_SHARED, np->name_tx, dev);
|
||||
if (ret) {
|
||||
netdev_info(dev,
|
||||
"request_irq failed for tx %d\n",
|
||||
ret);
|
||||
@ -3974,8 +3979,9 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
|
||||
}
|
||||
/* Request irq for link and timer handling */
|
||||
sprintf(np->name_other, "%s-other", dev->name);
|
||||
if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector,
|
||||
nv_nic_irq_other, IRQF_SHARED, np->name_other, dev) != 0) {
|
||||
ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector,
|
||||
nv_nic_irq_other, IRQF_SHARED, np->name_other, dev);
|
||||
if (ret) {
|
||||
netdev_info(dev,
|
||||
"request_irq failed for link %d\n",
|
||||
ret);
|
||||
@ -3991,7 +3997,9 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
|
||||
set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
|
||||
} else {
|
||||
/* Request irq for all interrupts */
|
||||
if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, handler, IRQF_SHARED, dev->name, dev) != 0) {
|
||||
ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector,
|
||||
handler, IRQF_SHARED, dev->name, dev);
|
||||
if (ret) {
|
||||
netdev_info(dev,
|
||||
"request_irq failed %d\n",
|
||||
ret);
|
||||
@ -4005,13 +4013,15 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
|
||||
writel(0, base + NvRegMSIXMap1);
|
||||
}
|
||||
netdev_info(dev, "MSI-X enabled\n");
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
|
||||
if (np->msi_flags & NV_MSI_CAPABLE) {
|
||||
ret = pci_enable_msi(np->pci_dev);
|
||||
if (ret == 0) {
|
||||
np->msi_flags |= NV_MSI_ENABLED;
|
||||
if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) {
|
||||
ret = request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev);
|
||||
if (ret) {
|
||||
netdev_info(dev, "request_irq failed %d\n",
|
||||
ret);
|
||||
pci_disable_msi(np->pci_dev);
|
||||
@ -4025,13 +4035,12 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
|
||||
/* enable msi vector 0 */
|
||||
writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
|
||||
netdev_info(dev, "MSI enabled\n");
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
if (ret != 0) {
|
||||
if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0)
|
||||
goto out_err;
|
||||
|
||||
}
|
||||
if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0)
|
||||
goto out_err;
|
||||
|
||||
return 0;
|
||||
out_free_tx:
|
||||
|
@@ -643,8 +643,9 @@ static int netxen_setup_msi_interrupts(struct netxen_adapter *adapter,
 
 	if (adapter->msix_supported) {
 		netxen_init_msix_entries(adapter, num_msix);
-		err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
-		if (err == 0) {
+		err = pci_enable_msix_range(pdev, adapter->msix_entries,
+					    num_msix, num_msix);
+		if (err > 0) {
 			adapter->flags |= NETXEN_NIC_MSIX_ENABLED;
 			netxen_set_msix_bit(pdev, 1);
 
@ -684,7 +684,7 @@ restore:
|
||||
int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
|
||||
{
|
||||
struct pci_dev *pdev = adapter->pdev;
|
||||
int err = -1, vector;
|
||||
int err, vector;
|
||||
|
||||
if (!adapter->msix_entries) {
|
||||
adapter->msix_entries = kcalloc(num_msix,
|
||||
@ -701,13 +701,17 @@ enable_msix:
|
||||
for (vector = 0; vector < num_msix; vector++)
|
||||
adapter->msix_entries[vector].entry = vector;
|
||||
|
||||
err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
|
||||
if (err == 0) {
|
||||
err = pci_enable_msix_range(pdev,
|
||||
adapter->msix_entries, 1, num_msix);
|
||||
|
||||
if (err == num_msix) {
|
||||
adapter->flags |= QLCNIC_MSIX_ENABLED;
|
||||
adapter->ahw->num_msix = num_msix;
|
||||
dev_info(&pdev->dev, "using msi-x interrupts\n");
|
||||
return err;
|
||||
return 0;
|
||||
} else if (err > 0) {
|
||||
pci_disable_msix(pdev);
|
||||
|
||||
dev_info(&pdev->dev,
|
||||
"Unable to allocate %d MSI-X vectors, Available vectors %d\n",
|
||||
num_msix, err);
|
||||
@ -715,12 +719,12 @@ enable_msix:
|
||||
if (qlcnic_82xx_check(adapter)) {
|
||||
num_msix = rounddown_pow_of_two(err);
|
||||
if (err < QLCNIC_82XX_MINIMUM_VECTOR)
|
||||
return -EIO;
|
||||
return -ENOSPC;
|
||||
} else {
|
||||
num_msix = rounddown_pow_of_two(err - 1);
|
||||
num_msix += 1;
|
||||
if (err < QLCNIC_83XX_MINIMUM_VECTOR)
|
||||
return -EIO;
|
||||
return -ENOSPC;
|
||||
}
|
||||
|
||||
if (qlcnic_82xx_check(adapter) &&
|
||||
@ -747,7 +751,7 @@ enable_msix:
|
||||
}
|
||||
}
|
||||
|
||||
return err;
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
static int qlcnic_82xx_calculate_msix_vector(struct qlcnic_adapter *adapter)
|
||||
|
@ -3331,24 +3331,16 @@ static void ql_enable_msix(struct ql_adapter *qdev)
|
||||
for (i = 0; i < qdev->intr_count; i++)
|
||||
qdev->msi_x_entry[i].entry = i;
|
||||
|
||||
/* Loop to get our vectors. We start with
|
||||
* what we want and settle for what we get.
|
||||
*/
|
||||
do {
|
||||
err = pci_enable_msix(qdev->pdev,
|
||||
qdev->msi_x_entry, qdev->intr_count);
|
||||
if (err > 0)
|
||||
qdev->intr_count = err;
|
||||
} while (err > 0);
|
||||
|
||||
err = pci_enable_msix_range(qdev->pdev, qdev->msi_x_entry,
|
||||
1, qdev->intr_count);
|
||||
if (err < 0) {
|
||||
kfree(qdev->msi_x_entry);
|
||||
qdev->msi_x_entry = NULL;
|
||||
netif_warn(qdev, ifup, qdev->ndev,
|
||||
"MSI-X Enable failed, trying MSI.\n");
|
||||
qdev->intr_count = 1;
|
||||
qlge_irq_type = MSI_IRQ;
|
||||
} else if (err == 0) {
|
||||
} else {
|
||||
qdev->intr_count = err;
|
||||
set_bit(QL_MSIX_ENABLED, &qdev->flags);
|
||||
netif_info(qdev, ifup, qdev->ndev,
|
||||
"MSI-X Enabled, got %d vectors.\n",
|
||||
|
@ -1344,20 +1344,23 @@ static int efx_probe_interrupts(struct efx_nic *efx)
|
||||
|
||||
for (i = 0; i < n_channels; i++)
|
||||
xentries[i].entry = i;
|
||||
rc = pci_enable_msix(efx->pci_dev, xentries, n_channels);
|
||||
if (rc > 0) {
|
||||
rc = pci_enable_msix_range(efx->pci_dev,
|
||||
xentries, 1, n_channels);
|
||||
if (rc < 0) {
|
||||
/* Fall back to single channel MSI */
|
||||
efx->interrupt_mode = EFX_INT_MODE_MSI;
|
||||
netif_err(efx, drv, efx->net_dev,
|
||||
"could not enable MSI-X\n");
|
||||
} else if (rc < n_channels) {
|
||||
netif_err(efx, drv, efx->net_dev,
|
||||
"WARNING: Insufficient MSI-X vectors"
|
||||
" available (%d < %u).\n", rc, n_channels);
|
||||
netif_err(efx, drv, efx->net_dev,
|
||||
"WARNING: Performance may be reduced.\n");
|
||||
EFX_BUG_ON_PARANOID(rc >= n_channels);
|
||||
n_channels = rc;
|
||||
rc = pci_enable_msix(efx->pci_dev, xentries,
|
||||
n_channels);
|
||||
}
|
||||
|
||||
if (rc == 0) {
|
||||
if (rc > 0) {
|
||||
efx->n_channels = n_channels;
|
||||
if (n_channels > extra_channels)
|
||||
n_channels -= extra_channels;
|
||||
@ -1373,11 +1376,6 @@ static int efx_probe_interrupts(struct efx_nic *efx)
|
||||
for (i = 0; i < efx->n_channels; i++)
|
||||
efx_get_channel(efx, i)->irq =
|
||||
xentries[i].vector;
|
||||
} else {
|
||||
/* Fall back to single channel MSI */
|
||||
efx->interrupt_mode = EFX_INT_MODE_MSI;
|
||||
netif_err(efx, drv, efx->net_dev,
|
||||
"could not enable MSI-X\n");
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -9041,7 +9041,7 @@ static void niu_try_msix(struct niu *np, u8 *ldg_num_map)
|
||||
struct msix_entry msi_vec[NIU_NUM_LDG];
|
||||
struct niu_parent *parent = np->parent;
|
||||
struct pci_dev *pdev = np->pdev;
|
||||
int i, num_irqs, err;
|
||||
int i, num_irqs;
|
||||
u8 first_ldg;
|
||||
|
||||
first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
|
||||
@ -9053,21 +9053,16 @@ static void niu_try_msix(struct niu *np, u8 *ldg_num_map)
|
||||
(np->port == 0 ? 3 : 1));
|
||||
BUG_ON(num_irqs > (NIU_NUM_LDG / parent->num_ports));
|
||||
|
||||
retry:
|
||||
for (i = 0; i < num_irqs; i++) {
|
||||
msi_vec[i].vector = 0;
|
||||
msi_vec[i].entry = i;
|
||||
}
|
||||
|
||||
err = pci_enable_msix(pdev, msi_vec, num_irqs);
|
||||
if (err < 0) {
|
||||
num_irqs = pci_enable_msix_range(pdev, msi_vec, 1, num_irqs);
|
||||
if (num_irqs < 0) {
|
||||
np->flags &= ~NIU_FLAGS_MSIX;
|
||||
return;
|
||||
}
|
||||
if (err > 0) {
|
||||
num_irqs = err;
|
||||
goto retry;
|
||||
}
|
||||
|
||||
np->flags |= NIU_FLAGS_MSIX;
|
||||
for (i = 0; i < num_irqs; i++)
|
||||
|
@ -2729,47 +2729,35 @@ vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
|
||||
/*
|
||||
* Enable MSIx vectors.
|
||||
* Returns :
|
||||
* 0 on successful enabling of required vectors,
|
||||
* VMXNET3_LINUX_MIN_MSIX_VECT when only minimum number of vectors required
|
||||
* could be enabled.
|
||||
* number of vectors which can be enabled otherwise (this number is smaller
|
||||
* were enabled.
|
||||
* number of vectors which were enabled otherwise (this number is greater
|
||||
* than VMXNET3_LINUX_MIN_MSIX_VECT)
|
||||
*/
|
||||
|
||||
static int
|
||||
vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter,
|
||||
int vectors)
|
||||
vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter, int nvec)
|
||||
{
|
||||
int err = 0, vector_threshold;
|
||||
vector_threshold = VMXNET3_LINUX_MIN_MSIX_VECT;
|
||||
int ret = pci_enable_msix_range(adapter->pdev,
|
||||
adapter->intr.msix_entries, nvec, nvec);
|
||||
|
||||
while (vectors >= vector_threshold) {
|
||||
err = pci_enable_msix(adapter->pdev, adapter->intr.msix_entries,
|
||||
vectors);
|
||||
if (!err) {
|
||||
adapter->intr.num_intrs = vectors;
|
||||
return 0;
|
||||
} else if (err < 0) {
|
||||
dev_err(&adapter->netdev->dev,
|
||||
"Failed to enable MSI-X, error: %d\n", err);
|
||||
vectors = 0;
|
||||
} else if (err < vector_threshold) {
|
||||
break;
|
||||
} else {
|
||||
/* If fails to enable required number of MSI-x vectors
|
||||
* try enabling minimum number of vectors required.
|
||||
*/
|
||||
dev_err(&adapter->netdev->dev,
|
||||
"Failed to enable %d MSI-X, trying %d instead\n",
|
||||
vectors, vector_threshold);
|
||||
vectors = vector_threshold;
|
||||
}
|
||||
if (ret == -ENOSPC && nvec > VMXNET3_LINUX_MIN_MSIX_VECT) {
|
||||
dev_err(&adapter->netdev->dev,
|
||||
"Failed to enable %d MSI-X, trying %d\n",
|
||||
nvec, VMXNET3_LINUX_MIN_MSIX_VECT);
|
||||
|
||||
ret = pci_enable_msix_range(adapter->pdev,
|
||||
adapter->intr.msix_entries,
|
||||
VMXNET3_LINUX_MIN_MSIX_VECT,
|
||||
VMXNET3_LINUX_MIN_MSIX_VECT);
|
||||
}
|
||||
|
||||
dev_info(&adapter->pdev->dev,
|
||||
"Number of MSI-X interrupts which can be allocated "
|
||||
"is lower than min threshold required.\n");
|
||||
return err;
|
||||
if (ret < 0) {
|
||||
dev_err(&adapter->netdev->dev,
|
||||
"Failed to enable MSI-X, error: %d\n", ret);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
@ -2796,56 +2784,50 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
|
||||
|
||||
#ifdef CONFIG_PCI_MSI
|
||||
if (adapter->intr.type == VMXNET3_IT_MSIX) {
|
||||
int vector, err = 0;
|
||||
int i, nvec;
|
||||
|
||||
adapter->intr.num_intrs = (adapter->share_intr ==
|
||||
VMXNET3_INTR_TXSHARE) ? 1 :
|
||||
adapter->num_tx_queues;
|
||||
adapter->intr.num_intrs += (adapter->share_intr ==
|
||||
VMXNET3_INTR_BUDDYSHARE) ? 0 :
|
||||
adapter->num_rx_queues;
|
||||
adapter->intr.num_intrs += 1; /* for link event */
|
||||
nvec = adapter->share_intr == VMXNET3_INTR_TXSHARE ?
|
||||
1 : adapter->num_tx_queues;
|
||||
nvec += adapter->share_intr == VMXNET3_INTR_BUDDYSHARE ?
|
||||
0 : adapter->num_rx_queues;
|
||||
nvec += 1; /* for link event */
|
||||
nvec = nvec > VMXNET3_LINUX_MIN_MSIX_VECT ?
|
||||
nvec : VMXNET3_LINUX_MIN_MSIX_VECT;
|
||||
|
||||
adapter->intr.num_intrs = (adapter->intr.num_intrs >
|
||||
VMXNET3_LINUX_MIN_MSIX_VECT
|
||||
? adapter->intr.num_intrs :
|
||||
VMXNET3_LINUX_MIN_MSIX_VECT);
|
||||
for (i = 0; i < nvec; i++)
|
||||
adapter->intr.msix_entries[i].entry = i;
|
||||
|
||||
for (vector = 0; vector < adapter->intr.num_intrs; vector++)
|
||||
adapter->intr.msix_entries[vector].entry = vector;
|
||||
nvec = vmxnet3_acquire_msix_vectors(adapter, nvec);
|
||||
if (nvec < 0)
|
||||
goto msix_err;
|
||||
|
||||
err = vmxnet3_acquire_msix_vectors(adapter,
|
||||
adapter->intr.num_intrs);
|
||||
/* If we cannot allocate one MSIx vector per queue
|
||||
* then limit the number of rx queues to 1
|
||||
*/
|
||||
if (err == VMXNET3_LINUX_MIN_MSIX_VECT) {
|
||||
if (nvec == VMXNET3_LINUX_MIN_MSIX_VECT) {
|
||||
if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE
|
||||
|| adapter->num_rx_queues != 1) {
|
||||
adapter->share_intr = VMXNET3_INTR_TXSHARE;
|
||||
netdev_err(adapter->netdev,
|
||||
"Number of rx queues : 1\n");
|
||||
adapter->num_rx_queues = 1;
|
||||
adapter->intr.num_intrs =
|
||||
VMXNET3_LINUX_MIN_MSIX_VECT;
|
||||
}
|
||||
return;
|
||||
}
|
||||
if (!err)
|
||||
return;
|
||||
|
||||
adapter->intr.num_intrs = nvec;
|
||||
return;
|
||||
|
||||
msix_err:
|
||||
/* If we cannot allocate MSIx vectors use only one rx queue */
|
||||
dev_info(&adapter->pdev->dev,
|
||||
"Failed to enable MSI-X, error %d. "
|
||||
"Limiting #rx queues to 1, try MSI.\n", err);
|
||||
"Limiting #rx queues to 1, try MSI.\n", nvec);
|
||||
|
||||
adapter->intr.type = VMXNET3_IT_MSI;
|
||||
}
|
||||
|
||||
if (adapter->intr.type == VMXNET3_IT_MSI) {
|
||||
int err;
|
||||
err = pci_enable_msi(adapter->pdev);
|
||||
if (!err) {
|
||||
if (!pci_enable_msi(adapter->pdev)) {
|
||||
adapter->num_rx_queues = 1;
|
||||
adapter->intr.num_intrs = 1;
|
||||
return;
|
||||
|
@ -41,30 +41,28 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil)
|
||||
switch (use_msi) {
|
||||
case 3:
|
||||
case 1:
|
||||
wil_dbg_misc(wil, "Setup %d MSI interrupts\n", use_msi);
|
||||
break;
|
||||
case 0:
|
||||
wil_dbg_misc(wil, "MSI interrupts disabled, use INTx\n");
|
||||
break;
|
||||
default:
|
||||
wil_err(wil, "Invalid use_msi=%d, default to 1\n",
|
||||
use_msi);
|
||||
wil_err(wil, "Invalid use_msi=%d, default to 1\n", use_msi);
|
||||
use_msi = 1;
|
||||
}
|
||||
wil->n_msi = use_msi;
|
||||
if (wil->n_msi) {
|
||||
wil_dbg_misc(wil, "Setup %d MSI interrupts\n", use_msi);
|
||||
rc = pci_enable_msi_block(pdev, wil->n_msi);
|
||||
if (rc && (wil->n_msi == 3)) {
|
||||
wil_err(wil, "3 MSI mode failed, try 1 MSI\n");
|
||||
wil->n_msi = 1;
|
||||
rc = pci_enable_msi_block(pdev, wil->n_msi);
|
||||
}
|
||||
if (rc) {
|
||||
wil_err(wil, "pci_enable_msi failed, use INTx\n");
|
||||
wil->n_msi = 0;
|
||||
}
|
||||
} else {
|
||||
wil_dbg_misc(wil, "MSI interrupts disabled, use INTx\n");
|
||||
|
||||
if (use_msi == 3 && pci_enable_msi_range(pdev, 3, 3) < 0) {
|
||||
wil_err(wil, "3 MSI mode failed, try 1 MSI\n");
|
||||
use_msi = 1;
|
||||
}
|
||||
|
||||
if (use_msi == 1 && pci_enable_msi(pdev)) {
|
||||
wil_err(wil, "pci_enable_msi failed, use INTx\n");
|
||||
use_msi = 0;
|
||||
}
|
||||
|
||||
wil->n_msi = use_msi;
|
||||
|
||||
rc = wil6210_init_irq(wil, pdev->irq);
|
||||
if (rc)
|
||||
goto stop_master;
|
||||
|
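The wil6210 hunk above converts the plain-MSI path in the same spirit: pci_enable_msi_block() is replaced by pci_enable_msi_range() for the multi-vector case and pci_enable_msi() for the single-vector case. A minimal sketch of that pattern follows (the surrounding helper is hypothetical, not part of this series):

#include <linux/pci.h>

/* Illustrative only: request up to 'want' MSI vectors, fall back to one. */
static int example_enable_msi(struct pci_dev *pdev, int want)
{
	if (want > 1) {
		int rc = pci_enable_msi_range(pdev, want, want);

		if (rc > 0)
			return rc;	/* got exactly 'want' vectors */
	}

	/* Fall back to a single MSI vector. */
	if (pci_enable_msi(pdev))
		return 0;		/* MSI unavailable, caller uses INTx */

	return 1;
}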