Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next

David S. Miller 2012-03-19 17:24:27 -04:00
commit f24fd89ab5
6 changed files with 1295 additions and 1115 deletions

drivers/net/ethernet/intel/ixgbe/Makefile

@@ -34,7 +34,7 @@ obj-$(CONFIG_IXGBE) += ixgbe.o
ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
ixgbe_mbx.o ixgbe_x540.o
ixgbe_mbx.o ixgbe_x540.o ixgbe_lib.o
ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \
ixgbe_dcb_82599.o ixgbe_dcb_nl.o

drivers/net/ethernet/intel/ixgbe/ixgbe.h

@@ -101,8 +101,6 @@
#define IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT 29
#define IXGBE_TX_FLAGS_VLAN_SHIFT 16
#define IXGBE_MAX_RSC_INT_RATE 162760
#define IXGBE_MAX_VF_MC_ENTRIES 30
#define IXGBE_MAX_VF_FUNCTIONS 64
#define IXGBE_MAX_VFTA_ENTRIES 128
@@ -152,6 +150,7 @@ struct ixgbe_tx_buffer {
struct sk_buff *skb;
unsigned int bytecount;
unsigned short gso_segs;
__be16 protocol;
DEFINE_DMA_UNMAP_ADDR(dma);
DEFINE_DMA_UNMAP_LEN(len);
u32 tx_flags;
@@ -207,15 +206,18 @@ enum ixgbe_ring_state_t {
clear_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
struct ixgbe_ring {
struct ixgbe_ring *next; /* pointer to next ring in q_vector */
void *desc; /* descriptor ring memory */
struct device *dev; /* device for DMA mapping */
struct ixgbe_q_vector *q_vector; /* backpointer to host q_vector */
struct net_device *netdev; /* netdev ring belongs to */
struct device *dev; /* device for DMA mapping */
void *desc; /* descriptor ring memory */
union {
struct ixgbe_tx_buffer *tx_buffer_info;
struct ixgbe_rx_buffer *rx_buffer_info;
};
unsigned long state;
u8 __iomem *tail;
dma_addr_t dma; /* phys. address of descriptor ring */
unsigned int size; /* length in bytes */
u16 count; /* amount of descriptors */
@@ -225,17 +227,17 @@ struct ixgbe_ring {
* associated with this ring, which is
* different for DCB and RSS modes
*/
u16 next_to_use;
u16 next_to_clean;
union {
u16 next_to_alloc;
struct {
u8 atr_sample_rate;
u8 atr_count;
};
u16 next_to_alloc;
};
u16 next_to_use;
u16 next_to_clean;
u8 dcb_tc;
struct ixgbe_queue_stats stats;
struct u64_stats_sync syncp;
@@ -243,9 +245,6 @@ struct ixgbe_ring {
struct ixgbe_tx_queue_stats tx_stats;
struct ixgbe_rx_queue_stats rx_stats;
};
unsigned int size; /* length in bytes */
dma_addr_t dma; /* phys. address of descriptor ring */
struct ixgbe_q_vector *q_vector; /* back-pointer to host q_vector */
} ____cacheline_internodealigned_in_smp;
enum ixgbe_ring_f_enum {
@@ -437,7 +436,8 @@ struct ixgbe_adapter {
#define IXGBE_FLAG2_SFP_NEEDS_RESET (u32)(1 << 5)
#define IXGBE_FLAG2_RESET_REQUESTED (u32)(1 << 6)
#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT (u32)(1 << 7)
#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP (u32)(1 << 8)
#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP (u32)(1 << 9)
/* Tx fast path data */
int num_tx_queues;
@@ -581,7 +581,9 @@ extern int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg,
extern char ixgbe_driver_name[];
extern const char ixgbe_driver_version[];
#ifdef IXGBE_FCOE
extern char ixgbe_default_device_descr[];
#endif /* IXGBE_FCOE */
extern void ixgbe_up(struct ixgbe_adapter *adapter);
extern void ixgbe_down(struct ixgbe_adapter *adapter);
@@ -606,6 +608,7 @@ extern void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *,
struct ixgbe_tx_buffer *);
extern void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
extern void ixgbe_write_eitr(struct ixgbe_q_vector *);
extern int ixgbe_poll(struct napi_struct *napi, int budget);
extern int ethtool_ioctl(struct ifreq *ifr);
extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl);
@@ -625,14 +628,16 @@ extern s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
extern void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
union ixgbe_atr_input *mask);
extern void ixgbe_set_rx_mode(struct net_device *netdev);
#ifdef CONFIG_IXGBE_DCB
extern int ixgbe_setup_tc(struct net_device *dev, u8 tc);
#endif
extern void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32);
extern void ixgbe_do_reset(struct net_device *netdev);
#ifdef IXGBE_FCOE
extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
extern int ixgbe_fso(struct ixgbe_ring *tx_ring,
struct ixgbe_tx_buffer *first,
u32 tx_flags, u8 *hdr_len);
u8 *hdr_len);
extern void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter);
extern int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
union ixgbe_adv_rx_desc *rx_desc,

drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c

@@ -2137,31 +2137,29 @@ static int ixgbe_get_coalesce(struct net_device *netdev,
* this function must be called before setting the new value of
* rx_itr_setting
*/
static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter,
struct ethtool_coalesce *ec)
static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
/* nothing to do if LRO or RSC are not enabled */
if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) ||
!(netdev->features & NETIF_F_LRO))
return false;
/* if interrupt rate is too high then disable RSC */
if (ec->rx_coalesce_usecs != 1 &&
ec->rx_coalesce_usecs <= (IXGBE_MIN_RSC_ITR >> 2)) {
if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
e_info(probe, "rx-usecs set too low, disabling RSC\n");
adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
return true;
}
} else {
/* check the feature flag value and enable RSC if necessary */
if ((netdev->features & NETIF_F_LRO) &&
!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
e_info(probe, "rx-usecs set to %d, re-enabling RSC\n",
ec->rx_coalesce_usecs);
if (adapter->rx_itr_setting == 1 ||
adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
e_info(probe, "rx-usecs value high enough "
"to re-enable RSC\n");
return true;
}
/* if interrupt rate is too high then disable RSC */
} else if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
e_info(probe, "rx-usecs set too low, disabling RSC\n");
return true;
}
return false;
}
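A worked example of the new threshold check (assuming the IXGBE_MIN_RSC_ITR value of 24 defined in ixgbe.h): ethtool -C eth0 rx-usecs 5 stores rx_itr_setting = 5 << 2 = 20, which is neither 1 nor greater than 24, so RSC is switched off and the function returns true to request a reset; rx-usecs 10 stores 40 and re-enables it.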
@@ -2185,9 +2183,6 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
(ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)))
return -EINVAL;
/* check the old value and enable RSC if necessary */
need_reset = ixgbe_update_rsc(adapter, ec);
if (ec->rx_coalesce_usecs > 1)
adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;
else
@@ -2208,6 +2203,9 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
else
tx_itr_param = adapter->tx_itr_setting;
/* check the old value and enable RSC if necessary */
need_reset = ixgbe_update_rsc(adapter);
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
else
@@ -2328,6 +2326,48 @@ static int ixgbe_get_ethtool_fdir_all(struct ixgbe_adapter *adapter,
return 0;
}
static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter,
struct ethtool_rxnfc *cmd)
{
cmd->data = 0;
/* if RSS is disabled then report no hashing */
if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
return 0;
/* Report default options for RSS on ixgbe */
switch (cmd->flow_type) {
case TCP_V4_FLOW:
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
case UDP_V4_FLOW:
if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
case SCTP_V4_FLOW:
case AH_ESP_V4_FLOW:
case AH_V4_FLOW:
case ESP_V4_FLOW:
case IPV4_FLOW:
cmd->data |= RXH_IP_SRC | RXH_IP_DST;
break;
case TCP_V6_FLOW:
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
case UDP_V6_FLOW:
if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
case SCTP_V6_FLOW:
case AH_ESP_V6_FLOW:
case AH_V6_FLOW:
case ESP_V6_FLOW:
case IPV6_FLOW:
cmd->data |= RXH_IP_SRC | RXH_IP_DST;
break;
default:
return -EINVAL;
}
return 0;
}
static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
u32 *rule_locs)
{
@@ -2349,6 +2389,9 @@ static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
case ETHTOOL_GRXCLSRLALL:
ret = ixgbe_get_ethtool_fdir_all(adapter, cmd, rule_locs);
break;
case ETHTOOL_GRXFH:
ret = ixgbe_get_rss_hash_opts(adapter, cmd);
break;
default:
break;
}
@@ -2583,6 +2626,111 @@ static int ixgbe_del_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
return err;
}
#define UDP_RSS_FLAGS (IXGBE_FLAG2_RSS_FIELD_IPV4_UDP | \
IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter,
struct ethtool_rxnfc *nfc)
{
u32 flags2 = adapter->flags2;
/*
* RSS does not support anything other than hashing
* to queues on src and dst IPs and ports
*/
if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
RXH_L4_B_0_1 | RXH_L4_B_2_3))
return -EINVAL;
switch (nfc->flow_type) {
case TCP_V4_FLOW:
case TCP_V6_FLOW:
if (!(nfc->data & RXH_IP_SRC) ||
!(nfc->data & RXH_IP_DST) ||
!(nfc->data & RXH_L4_B_0_1) ||
!(nfc->data & RXH_L4_B_2_3))
return -EINVAL;
break;
case UDP_V4_FLOW:
if (!(nfc->data & RXH_IP_SRC) ||
!(nfc->data & RXH_IP_DST))
return -EINVAL;
switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
case 0:
flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV4_UDP;
break;
case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV4_UDP;
break;
default:
return -EINVAL;
}
break;
case UDP_V6_FLOW:
if (!(nfc->data & RXH_IP_SRC) ||
!(nfc->data & RXH_IP_DST))
return -EINVAL;
switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
case 0:
flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV6_UDP;
break;
case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV6_UDP;
break;
default:
return -EINVAL;
}
break;
case AH_ESP_V4_FLOW:
case AH_V4_FLOW:
case ESP_V4_FLOW:
case SCTP_V4_FLOW:
case AH_ESP_V6_FLOW:
case AH_V6_FLOW:
case ESP_V6_FLOW:
case SCTP_V6_FLOW:
if (!(nfc->data & RXH_IP_SRC) ||
!(nfc->data & RXH_IP_DST) ||
(nfc->data & RXH_L4_B_0_1) ||
(nfc->data & RXH_L4_B_2_3))
return -EINVAL;
break;
default:
return -EINVAL;
}
/* if we changed something we need to update flags */
if (flags2 != adapter->flags2) {
struct ixgbe_hw *hw = &adapter->hw;
u32 mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
if ((flags2 & UDP_RSS_FLAGS) &&
!(adapter->flags2 & UDP_RSS_FLAGS))
e_warn(drv, "enabling UDP RSS: fragmented packets"
" may arrive out of order to the stack above\n");
adapter->flags2 = flags2;
/* Perform hash on these packet types */
mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4
| IXGBE_MRQC_RSS_FIELD_IPV4_TCP
| IXGBE_MRQC_RSS_FIELD_IPV6
| IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
mrqc &= ~(IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
IXGBE_MRQC_RSS_FIELD_IPV6_UDP);
if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
}
return 0;
}
static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
@@ -2595,6 +2743,9 @@ static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
case ETHTOOL_SRXCLSRLDEL:
ret = ixgbe_del_ethtool_fdir_entry(adapter, cmd);
break;
case ETHTOOL_SRXFH:
ret = ixgbe_set_rss_hash_opt(adapter, cmd);
break;
default:
break;
}
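Taken together, ixgbe_get_rss_hash_opts() and ixgbe_set_rss_hash_opt() expose RSS hash-field control through the standard ethtool ioctl. A minimal user-space sketch of that path (the interface name eth0 is a placeholder, error handling trimmed) that reads the current UDP/IPv4 hash fields and then enables 4-tuple hashing:

/* query, then enable, 4-tuple RSS hashing for UDP/IPv4 via the
 * ETHTOOL_GRXFH/ETHTOOL_SRXFH commands serviced by ixgbe_get_rxnfc()
 * and ixgbe_set_rxnfc() above */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_rxnfc nfc = {
		.cmd = ETHTOOL_GRXFH,
		.flow_type = UDP_V4_FLOW,
	};
	struct ifreq ifr = { 0 };
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* placeholder */
	ifr.ifr_data = (void *)&nfc;

	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("udp4 hash fields: 0x%llx\n",
		       (unsigned long long)nfc.data);

	nfc.cmd = ETHTOOL_SRXFH;	/* src/dst IP plus src/dst port */
	nfc.data = RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3;
	return ioctl(fd, SIOCETHTOOL, &ifr) ? 1 : 0;
}

The same toggle is reachable from the ethtool utility as "ethtool -N eth0 rx-flow-hash udp4 sdfn" (and "ethtool -n eth0 rx-flow-hash udp4" to read it back), which trips the fragmented-UDP warning in ixgbe_set_rss_hash_opt() the first time UDP hashing is enabled.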

drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c

@@ -448,16 +448,15 @@ ddp_out:
* ixgbe_fso - ixgbe FCoE Sequence Offload (FSO)
* @tx_ring: tx desc ring
* @first: first tx_buffer structure containing skb, tx_flags, and protocol
* @tx_flags: tx flags
* @hdr_len: hdr_len to be returned
*
* This sets up large send offload for FCoE
*
* Returns : 0 indicates no FSO, > 0 for FSO, < 0 for error
* Returns : 0 indicates success, < 0 for error
*/
int ixgbe_fso(struct ixgbe_ring *tx_ring,
struct ixgbe_tx_buffer *first,
u32 tx_flags, u8 *hdr_len)
u8 *hdr_len)
{
struct sk_buff *skb = first->skb;
struct fc_frame_header *fh;
@@ -539,8 +538,12 @@ int ixgbe_fso(struct ixgbe_ring *tx_ring,
first->gso_segs = DIV_ROUND_UP(skb->len - *hdr_len,
skb_shinfo(skb)->gso_size);
first->bytecount += (first->gso_segs - 1) * *hdr_len;
first->tx_flags |= IXGBE_TX_FLAGS_FSO;
}
/* set flag indicating FCOE to ixgbe_tx_map call */
first->tx_flags |= IXGBE_TX_FLAGS_FCOE;
/* mss_l4len_id: use 1 for FSO as TSO, no need for L4LEN */
mss_l4len_idx = skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;
@@ -550,13 +553,13 @@ int ixgbe_fso(struct ixgbe_ring *tx_ring,
sizeof(struct fc_frame_header);
vlan_macip_lens |= (skb_transport_offset(skb) - 4)
<< IXGBE_ADVTXD_MACLEN_SHIFT;
vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
/* write context desc */
ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fcoe_sof_eof,
IXGBE_ADVTXT_TUCMD_FCOE, mss_l4len_idx);
return skb_is_gso(skb);
return 0;
}
static void ixgbe_fcoe_ddp_pools_free(struct ixgbe_fcoe *fcoe)

drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c

@@ -0,0 +1,929 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
#include "ixgbe.h"
#include "ixgbe_sriov.h"
/**
* ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
* @adapter: board private structure to initialize
*
* Cache the descriptor ring offsets for RSS to the assigned rings.
*
**/
static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
{
int i;
if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
return false;
for (i = 0; i < adapter->num_rx_queues; i++)
adapter->rx_ring[i]->reg_idx = i;
for (i = 0; i < adapter->num_tx_queues; i++)
adapter->tx_ring[i]->reg_idx = i;
return true;
}
#ifdef CONFIG_IXGBE_DCB
/* ixgbe_get_first_reg_idx - Return first register index associated with ring */
static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
unsigned int *tx, unsigned int *rx)
{
struct net_device *dev = adapter->netdev;
struct ixgbe_hw *hw = &adapter->hw;
u8 num_tcs = netdev_get_num_tc(dev);
*tx = 0;
*rx = 0;
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
*tx = tc << 2;
*rx = tc << 3;
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
if (num_tcs > 4) {
if (tc < 3) {
*tx = tc << 5;
*rx = tc << 4;
} else if (tc < 5) {
*tx = ((tc + 2) << 4);
*rx = tc << 4;
} else if (tc < num_tcs) {
*tx = ((tc + 8) << 3);
*rx = tc << 4;
}
} else {
*rx = tc << 5;
switch (tc) {
case 0:
*tx = 0;
break;
case 1:
*tx = 64;
break;
case 2:
*tx = 96;
break;
case 3:
*tx = 112;
break;
default:
break;
}
}
break;
default:
break;
}
}
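Reading the arithmetic above straight off the switch statement: on 82599/X540 with eight traffic classes, TC 1 maps to Tx offset 1 << 5 = 32 and Rx offset 1 << 4 = 16, TC 4 to Tx (4 + 2) << 4 = 96 and Rx 64, and TC 6 to Tx (6 + 8) << 3 = 112 and Rx 96; in the four-or-fewer-TC layout the Rx blocks instead start every 32 queues (tc << 5) with the fixed Tx offsets 0, 64, 96 and 112.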
/**
* ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
* @adapter: board private structure to initialize
*
* Cache the descriptor ring offsets for DCB to the assigned rings.
*
**/
static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
{
struct net_device *dev = adapter->netdev;
int i, j, k;
u8 num_tcs = netdev_get_num_tc(dev);
if (!num_tcs)
return false;
for (i = 0, k = 0; i < num_tcs; i++) {
unsigned int tx_s, rx_s;
u16 count = dev->tc_to_txq[i].count;
ixgbe_get_first_reg_idx(adapter, i, &tx_s, &rx_s);
for (j = 0; j < count; j++, k++) {
adapter->tx_ring[k]->reg_idx = tx_s + j;
adapter->rx_ring[k]->reg_idx = rx_s + j;
adapter->tx_ring[k]->dcb_tc = i;
adapter->rx_ring[k]->dcb_tc = i;
}
}
return true;
}
#endif
/**
* ixgbe_cache_ring_fdir - Descriptor ring to register mapping for Flow Director
* @adapter: board private structure to initialize
*
* Cache the descriptor ring offsets for Flow Director to the assigned rings.
*
**/
static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
{
int i;
bool ret = false;
if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
(adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) {
for (i = 0; i < adapter->num_rx_queues; i++)
adapter->rx_ring[i]->reg_idx = i;
for (i = 0; i < adapter->num_tx_queues; i++)
adapter->tx_ring[i]->reg_idx = i;
ret = true;
}
return ret;
}
#ifdef IXGBE_FCOE
/**
* ixgbe_cache_ring_fcoe - Descriptor ring to register mapping for the FCoE
* @adapter: board private structure to initialize
*
* Cache the descriptor ring offsets for FCoE mode to the assigned rings.
*
*/
static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
{
struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
int i;
u8 fcoe_rx_i = 0, fcoe_tx_i = 0;
if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
return false;
if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
ixgbe_cache_ring_fdir(adapter);
else
ixgbe_cache_ring_rss(adapter);
fcoe_rx_i = f->mask;
fcoe_tx_i = f->mask;
}
for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) {
adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i;
adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i;
}
return true;
}
#endif /* IXGBE_FCOE */
/**
* ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov
* @adapter: board private structure to initialize
*
* SR-IOV doesn't use any descriptor rings but changes the default if
* no other mapping is used.
*
*/
static inline bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
{
adapter->rx_ring[0]->reg_idx = adapter->num_vfs * 2;
adapter->tx_ring[0]->reg_idx = adapter->num_vfs * 2;
if (adapter->num_vfs)
return true;
else
return false;
}
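For instance, with 16 VFs enabled the PF's queue pair 0 lands at register index 16 * 2 = 32, just past the two queue registers consumed by each VF; with num_vfs == 0 the function returns false and one of the other mappings takes over.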
/**
* ixgbe_cache_ring_register - Descriptor ring to register mapping
* @adapter: board private structure to initialize
*
* Once we know the feature-set enabled for the device, we'll cache
* the register offset the descriptor ring is assigned to.
*
* Note, the order the various feature calls is important. It must start with
* the "most" features enabled at the same time, then trickle down to the
* least amount of features turned on at once.
**/
static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
{
/* start with default case */
adapter->rx_ring[0]->reg_idx = 0;
adapter->tx_ring[0]->reg_idx = 0;
if (ixgbe_cache_ring_sriov(adapter))
return;
#ifdef CONFIG_IXGBE_DCB
if (ixgbe_cache_ring_dcb(adapter))
return;
#endif
#ifdef IXGBE_FCOE
if (ixgbe_cache_ring_fcoe(adapter))
return;
#endif /* IXGBE_FCOE */
if (ixgbe_cache_ring_fdir(adapter))
return;
if (ixgbe_cache_ring_rss(adapter))
return;
}
/**
* ixgbe_set_sriov_queues: Allocate queues for IOV use
* @adapter: board private structure to initialize
*
* IOV doesn't actually use anything, so just NAK the
* request for now and let the other queue routines
* figure out what to do.
*/
static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
{
return false;
}
/**
* ixgbe_set_rss_queues: Allocate queues for RSS
* @adapter: board private structure to initialize
*
* This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try
* to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
*
**/
static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
{
bool ret = false;
struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_RSS];
if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
f->mask = 0xF;
adapter->num_rx_queues = f->indices;
adapter->num_tx_queues = f->indices;
ret = true;
}
return ret;
}
/**
* ixgbe_set_fdir_queues: Allocate queues for Flow Director
* @adapter: board private structure to initialize
*
* Flow Director is an advanced Rx filter, attempting to get Rx flows back
* to the original CPU that initiated the Tx session. This runs in addition
* to RSS, so if a packet doesn't match an FDIR filter, we can still spread the
* Rx load across CPUs using RSS.
*
**/
static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter)
{
bool ret = false;
struct ixgbe_ring_feature *f_fdir = &adapter->ring_feature[RING_F_FDIR];
f_fdir->indices = min_t(int, num_online_cpus(), f_fdir->indices);
f_fdir->mask = 0;
/*
* Use RSS in addition to Flow Director to ensure the best
* distribution of flows across cores, even when an FDIR flow
* isn't matched.
*/
if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
(adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) {
adapter->num_tx_queues = f_fdir->indices;
adapter->num_rx_queues = f_fdir->indices;
ret = true;
} else {
adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
}
return ret;
}
#ifdef IXGBE_FCOE
/**
* ixgbe_set_fcoe_queues: Allocate queues for Fiber Channel over Ethernet (FCoE)
* @adapter: board private structure to initialize
*
* FCoE RX FCRETA can use up to 8 rx queues for up to 8 different exchanges.
* The ring feature mask is not used as a mask for FCoE, as it can take any 8
* rx queues out of the max number of rx queues, instead, it is used as the
* index of the first rx queue used by FCoE.
*
**/
static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
{
struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
return false;
f->indices = min_t(int, num_online_cpus(), f->indices);
adapter->num_rx_queues = 1;
adapter->num_tx_queues = 1;
if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
e_info(probe, "FCoE enabled with RSS\n");
if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
ixgbe_set_fdir_queues(adapter);
else
ixgbe_set_rss_queues(adapter);
}
/* adding FCoE rx rings to the end */
f->mask = adapter->num_rx_queues;
adapter->num_rx_queues += f->indices;
adapter->num_tx_queues += f->indices;
return true;
}
#endif /* IXGBE_FCOE */
/* Artificial max queue cap per traffic class in DCB mode */
#define DCB_QUEUE_CAP 8
#ifdef CONFIG_IXGBE_DCB
static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
{
int per_tc_q, q, i, offset = 0;
struct net_device *dev = adapter->netdev;
int tcs = netdev_get_num_tc(dev);
if (!tcs)
return false;
/* Map queue offset and counts onto allocated tx queues */
per_tc_q = min_t(unsigned int, dev->num_tx_queues / tcs, DCB_QUEUE_CAP);
q = min_t(int, num_online_cpus(), per_tc_q);
for (i = 0; i < tcs; i++) {
netdev_set_tc_queue(dev, i, q, offset);
offset += q;
}
adapter->num_tx_queues = q * tcs;
adapter->num_rx_queues = q * tcs;
#ifdef IXGBE_FCOE
/* FCoE enabled queues require special configuration indexed
* by feature specific indices and mask. Here we map FCoE
* indices onto the DCB queue pairs allowing FCoE to own
* configuration later.
*/
if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
u8 prio_tc[MAX_USER_PRIORITY] = {0};
int tc;
struct ixgbe_ring_feature *f =
&adapter->ring_feature[RING_F_FCOE];
ixgbe_dcb_unpack_map(&adapter->dcb_cfg, DCB_TX_CONFIG, prio_tc);
tc = prio_tc[adapter->fcoe.up];
f->indices = dev->tc_to_txq[tc].count;
f->mask = dev->tc_to_txq[tc].offset;
}
#endif
return true;
}
#endif
/**
* ixgbe_set_num_queues: Allocate queues for device, feature dependent
* @adapter: board private structure to initialize
*
* This is the top level queue allocation routine. The order here is very
* important, starting with the "most" number of features turned on at once,
* and ending with the smallest set of features. This way large combinations
* can be allocated if they're turned on, and smaller combinations are the
* fallthrough conditions.
*
**/
static int ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
{
/* Start with base case */
adapter->num_rx_queues = 1;
adapter->num_tx_queues = 1;
adapter->num_rx_pools = adapter->num_rx_queues;
adapter->num_rx_queues_per_pool = 1;
if (ixgbe_set_sriov_queues(adapter))
goto done;
#ifdef CONFIG_IXGBE_DCB
if (ixgbe_set_dcb_queues(adapter))
goto done;
#endif
#ifdef IXGBE_FCOE
if (ixgbe_set_fcoe_queues(adapter))
goto done;
#endif /* IXGBE_FCOE */
if (ixgbe_set_fdir_queues(adapter))
goto done;
if (ixgbe_set_rss_queues(adapter))
goto done;
/* fallback to base case */
adapter->num_rx_queues = 1;
adapter->num_tx_queues = 1;
done:
if ((adapter->netdev->reg_state == NETREG_UNREGISTERED) ||
(adapter->netdev->reg_state == NETREG_UNREGISTERING))
return 0;
/* Notify the stack of the (possibly) reduced queue counts. */
netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
return netif_set_real_num_rx_queues(adapter->netdev,
adapter->num_rx_queues);
}
static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
int vectors)
{
int err, vector_threshold;
/* We'll want at least 2 (vector_threshold):
* 1) TxQ[0] + RxQ[0] handler
* 2) Other (Link Status Change, etc.)
*/
vector_threshold = MIN_MSIX_COUNT;
/*
* The more we get, the more we will assign to Tx/Rx Cleanup
* for the separate queues...where Rx Cleanup >= Tx Cleanup.
* Right now, we simply care about how many we'll get; we'll
* set them up later while requesting irq's.
*/
while (vectors >= vector_threshold) {
err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
vectors);
if (!err) /* Success in acquiring all requested vectors. */
break;
else if (err < 0)
vectors = 0; /* Nasty failure, quit now */
else /* err == number of vectors we should try again with */
vectors = err;
}
if (vectors < vector_threshold) {
/* Can't allocate enough MSI-X interrupts? Oh well.
* This just means we'll go with either a single MSI
* vector or fall back to legacy interrupts.
*/
netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
"Unable to allocate MSI-X interrupts\n");
adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
kfree(adapter->msix_entries);
adapter->msix_entries = NULL;
} else {
adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */
/*
* Adjust for only the vectors we'll use, which is minimum
* of max_msix_q_vectors + NON_Q_VECTORS, or the number of
* vectors we were allocated.
*/
adapter->num_msix_vectors = min(vectors,
adapter->max_msix_q_vectors + NON_Q_VECTORS);
}
}
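Concretely: if, say, 16 entries are requested and pci_enable_msix() returns 9, only nine vectors are available and the loop retries with nine; a return of 0 means all requested vectors were granted, while a negative errno abandons MSI-X entirely and leaves the fallback to MSI or legacy interrupts to ixgbe_set_interrupt_capability() below.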
static void ixgbe_add_ring(struct ixgbe_ring *ring,
struct ixgbe_ring_container *head)
{
ring->next = head->ring;
head->ring = ring;
head->count++;
}
/**
* ixgbe_alloc_q_vector - Allocate memory for a single interrupt vector
* @adapter: board private structure to initialize
* @v_idx: index of vector in adapter struct
*
* We allocate one q_vector. If allocation fails we return -ENOMEM.
**/
static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, int v_idx,
int txr_count, int txr_idx,
int rxr_count, int rxr_idx)
{
struct ixgbe_q_vector *q_vector;
struct ixgbe_ring *ring;
int node = -1;
int cpu = -1;
int ring_count, size;
ring_count = txr_count + rxr_count;
size = sizeof(struct ixgbe_q_vector) +
(sizeof(struct ixgbe_ring) * ring_count);
/* customize cpu for Flow Director mapping */
if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
if (cpu_online(v_idx)) {
cpu = v_idx;
node = cpu_to_node(cpu);
}
}
/* allocate q_vector and rings */
q_vector = kzalloc_node(size, GFP_KERNEL, node);
if (!q_vector)
q_vector = kzalloc(size, GFP_KERNEL);
if (!q_vector)
return -ENOMEM;
/* setup affinity mask and node */
if (cpu != -1)
cpumask_set_cpu(cpu, &q_vector->affinity_mask);
else
cpumask_copy(&q_vector->affinity_mask, cpu_online_mask);
q_vector->numa_node = node;
/* initialize NAPI */
netif_napi_add(adapter->netdev, &q_vector->napi,
ixgbe_poll, 64);
/* tie q_vector and adapter together */
adapter->q_vector[v_idx] = q_vector;
q_vector->adapter = adapter;
q_vector->v_idx = v_idx;
/* initialize work limits */
q_vector->tx.work_limit = adapter->tx_work_limit;
/* initialize pointer to rings */
ring = q_vector->ring;
while (txr_count) {
/* assign generic ring traits */
ring->dev = &adapter->pdev->dev;
ring->netdev = adapter->netdev;
/* configure backlink on ring */
ring->q_vector = q_vector;
/* update q_vector Tx values */
ixgbe_add_ring(ring, &q_vector->tx);
/* apply Tx specific ring traits */
ring->count = adapter->tx_ring_count;
ring->queue_index = txr_idx;
/* assign ring to adapter */
adapter->tx_ring[txr_idx] = ring;
/* update count and index */
txr_count--;
txr_idx++;
/* push pointer to next ring */
ring++;
}
while (rxr_count) {
/* assign generic ring traits */
ring->dev = &adapter->pdev->dev;
ring->netdev = adapter->netdev;
/* configure backlink on ring */
ring->q_vector = q_vector;
/* update q_vector Rx values */
ixgbe_add_ring(ring, &q_vector->rx);
/*
* 82599 errata, UDP frames with a 0 checksum
* can be marked as checksum errors.
*/
if (adapter->hw.mac.type == ixgbe_mac_82599EB)
set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state);
/* apply Rx specific ring traits */
ring->count = adapter->rx_ring_count;
ring->queue_index = rxr_idx;
/* assign ring to adapter */
adapter->rx_ring[rxr_idx] = ring;
/* update count and index */
rxr_count--;
rxr_idx++;
/* push pointer to next ring */
ring++;
}
return 0;
}
/**
* ixgbe_free_q_vector - Free memory allocated for specific interrupt vector
* @adapter: board private structure to initialize
* @v_idx: Index of vector to be freed
*
* This function frees the memory allocated to the q_vector. In addition if
* NAPI is enabled it will delete any references to the NAPI struct prior
* to freeing the q_vector.
**/
static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)
{
struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx];
struct ixgbe_ring *ring;
ixgbe_for_each_ring(ring, q_vector->tx)
adapter->tx_ring[ring->queue_index] = NULL;
ixgbe_for_each_ring(ring, q_vector->rx)
adapter->rx_ring[ring->queue_index] = NULL;
adapter->q_vector[v_idx] = NULL;
netif_napi_del(&q_vector->napi);
/*
* ixgbe_get_stats64() might access the rings on this vector,
* we must wait a grace period before freeing it.
*/
kfree_rcu(q_vector, rcu);
}
/**
* ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors
* @adapter: board private structure to initialize
*
* We allocate one q_vector per queue interrupt. If allocation fails we
* return -ENOMEM.
**/
static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
{
int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
int rxr_remaining = adapter->num_rx_queues;
int txr_remaining = adapter->num_tx_queues;
int rxr_idx = 0, txr_idx = 0, v_idx = 0;
int err;
/* only one q_vector if MSI-X is disabled. */
if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
q_vectors = 1;
if (q_vectors >= (rxr_remaining + txr_remaining)) {
for (; rxr_remaining; v_idx++, q_vectors--) {
int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors);
err = ixgbe_alloc_q_vector(adapter, v_idx,
0, 0, rqpv, rxr_idx);
if (err)
goto err_out;
/* update counts and index */
rxr_remaining -= rqpv;
rxr_idx += rqpv;
}
}
for (; q_vectors; v_idx++, q_vectors--) {
int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors);
int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors);
err = ixgbe_alloc_q_vector(adapter, v_idx,
tqpv, txr_idx,
rqpv, rxr_idx);
if (err)
goto err_out;
/* update counts and index */
rxr_remaining -= rqpv;
rxr_idx += rqpv;
txr_remaining -= tqpv;
txr_idx += tqpv;
}
return 0;
err_out:
while (v_idx) {
v_idx--;
ixgbe_free_q_vector(adapter, v_idx);
}
return -ENOMEM;
}
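As a worked example of the round-robin above: 4 Rx and 4 Tx queues spread over 3 vectors (3 < 8, so the Rx-only loop is skipped) gives vector 0 DIV_ROUND_UP(4, 3) = 2 Rx and 2 Tx rings, and vectors 1 and 2 one ring of each type, accounting for all eight rings.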
/**
* ixgbe_free_q_vectors - Free memory allocated for interrupt vectors
* @adapter: board private structure to initialize
*
* This function frees the memory allocated to the q_vectors. In addition if
* NAPI is enabled it will delete any references to the NAPI struct prior
* to freeing the q_vector.
**/
static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
{
int v_idx, q_vectors;
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
else
q_vectors = 1;
for (v_idx = 0; v_idx < q_vectors; v_idx++)
ixgbe_free_q_vector(adapter, v_idx);
}
static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
{
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
pci_disable_msix(adapter->pdev);
kfree(adapter->msix_entries);
adapter->msix_entries = NULL;
} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
pci_disable_msi(adapter->pdev);
}
}
/**
* ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
* @adapter: board private structure to initialize
*
* Attempt to configure the interrupts using the best available
* capabilities of the hardware and the kernel.
**/
static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
int err = 0;
int vector, v_budget;
/*
* It's easy to be greedy for MSI-X vectors, but it really
* doesn't do us much good if we have a lot more vectors
* than CPU's. So let's be conservative and only ask for
* (roughly) the same number of vectors as there are CPU's.
* The default is to use pairs of vectors.
*/
v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
v_budget = min_t(int, v_budget, num_online_cpus());
v_budget += NON_Q_VECTORS;
/*
* At the same time, hardware can only support a maximum of
* hw.mac->max_msix_vectors vectors. With features
* such as RSS and VMDq, we can easily surpass the number of Rx and Tx
* descriptor queues supported by our device. Thus, we cap it off in
* those rare cases where the cpu count also exceeds our vector limit.
*/
v_budget = min_t(int, v_budget, hw->mac.max_msix_vectors);
/* A failure in MSI-X entry allocation isn't fatal, but it does
* mean we disable MSI-X capabilities of the adapter. */
adapter->msix_entries = kcalloc(v_budget,
sizeof(struct msix_entry), GFP_KERNEL);
if (adapter->msix_entries) {
for (vector = 0; vector < v_budget; vector++)
adapter->msix_entries[vector].entry = vector;
ixgbe_acquire_msix_vectors(adapter, v_budget);
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
goto out;
}
adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
e_err(probe,
"ATR is not supported while multiple "
"queues are disabled. Disabling Flow Director\n");
}
adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
adapter->atr_sample_rate = 0;
if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
ixgbe_disable_sriov(adapter);
err = ixgbe_set_num_queues(adapter);
if (err)
return err;
err = pci_enable_msi(adapter->pdev);
if (!err) {
adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
} else {
netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
"Unable to allocate MSI interrupt, "
"falling back to legacy. Error: %d\n", err);
/* reset err */
err = 0;
}
out:
return err;
}
/**
* ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
* @adapter: board private structure to initialize
*
* We determine which interrupt scheme to use based on...
* - Kernel support (MSI, MSI-X)
* - which can be user-defined (via MODULE_PARAM)
* - Hardware queue count (num_*_queues)
* - defined by miscellaneous hardware support/features (RSS, etc.)
**/
int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
{
int err;
/* Number of supported queues */
err = ixgbe_set_num_queues(adapter);
if (err)
return err;
err = ixgbe_set_interrupt_capability(adapter);
if (err) {
e_dev_err("Unable to setup interrupt capabilities\n");
goto err_set_interrupt;
}
err = ixgbe_alloc_q_vectors(adapter);
if (err) {
e_dev_err("Unable to allocate memory for queue vectors\n");
goto err_alloc_q_vectors;
}
ixgbe_cache_ring_register(adapter);
e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n",
(adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
adapter->num_rx_queues, adapter->num_tx_queues);
set_bit(__IXGBE_DOWN, &adapter->state);
return 0;
err_alloc_q_vectors:
ixgbe_reset_interrupt_capability(adapter);
err_set_interrupt:
return err;
}
/**
* ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
* @adapter: board private structure to clear interrupt scheme on
*
* We go through and clear interrupt specific resources and reset the structure
* to pre-load conditions
**/
void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
{
adapter->num_tx_queues = 0;
adapter->num_rx_queues = 0;
ixgbe_free_q_vectors(adapter);
ixgbe_reset_interrupt_capability(adapter);
}
void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens,
u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx)
{
struct ixgbe_adv_tx_context_desc *context_desc;
u16 i = tx_ring->next_to_use;
context_desc = IXGBE_TX_CTXTDESC(tx_ring, i);
i++;
tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
/* set bits to identify this as an advanced context descriptor */
type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof);
context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
}

drivers/net/ethernet/intel/ixgbe/ixgbe_main.c (file diff suppressed because it is too large)