ice: Implement transmit and NAPI support

This patch implements ice_start_xmit (the handler for ndo_start_xmit) and
related functions. ice_start_xmit ultimately calls ice_tx_map, where the
Tx descriptor is built and posted to the hardware by bumping the ring tail.
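
As a concrete illustration of that last step, here is a hedged sketch of posting one ready-to-send buffer, using the macros this patch adds (see the ice.h and ice_lan_tx_rx.h hunks below). The buf_addr field and the ring's tail register pointer are not shown in this excerpt and are assumed from the standard Intel LAN ring layout; the real ice_tx_map() additionally handles multi-fragment skbs, DMA mapping, and queue stops.

/* Illustrative sketch only, not the driver's ice_tx_map() */
static void ice_tx_post_one(struct ice_ring *tx_ring, dma_addr_t dma,
			    unsigned int size)
{
	struct ice_tx_desc *tx_desc = ICE_TX_DESC(tx_ring, tx_ring->next_to_use);
	u64 td_cmd = ICE_TX_DESC_CMD_EOP | ICE_TX_DESC_CMD_RS;

	/* QW0: buffer address; QW1: DTYPE | command bits | buffer size */
	tx_desc->buf_addr = cpu_to_le64(dma);
	tx_desc->cmd_type_offset_bsz =
		cpu_to_le64(ICE_TX_DESC_DTYPE_DATA |
			    (td_cmd << ICE_TXD_QW1_CMD_S) |
			    ((u64)size << ICE_TXD_QW1_TX_BUF_SZ_S));

	/* force the descriptor write to memory before telling HW to fetch */
	wmb();

	tx_ring->next_to_use = (tx_ring->next_to_use + 1) % tx_ring->count;
	writel(tx_ring->next_to_use, tx_ring->tail);
}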

This patch also implements ice_napi_poll, which is invoked when there's an
interrupt on the VSI's queues. The interrupt can be due to either a
completed Tx or an Rx event. In either case resources are reclaimed, and
for an Rx event the skb is additionally fetched and passed up to the
network stack.
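
The handler's overall shape follows the standard NAPI contract: reclaim work within the budget, and only re-enable the interrupt once a poll round completes under budget. A condensed sketch follows; the ice_clean_tx_irq()/ice_clean_rx_irq() helpers and the q_vector ring lists live in the ice_txrx.c diff that is suppressed below, so treat the details here as illustrative.

/* Illustrative sketch of the ice_napi_poll() structure */
int ice_napi_poll(struct napi_struct *napi, int budget)
{
	struct ice_q_vector *q_vector =
		container_of(napi, struct ice_q_vector, napi);
	bool clean_complete = true;
	struct ice_ring *ring;
	int work_done = 0;

	/* reclaim completed Tx descriptors on this vector's Tx rings */
	ice_for_each_ring(ring, q_vector->tx)
		if (!ice_clean_tx_irq(ring, budget))
			clean_complete = false;

	/* receive packets (the real code splits budget across Rx rings) */
	ice_for_each_ring(ring, q_vector->rx) {
		int cleaned = ice_clean_rx_irq(ring, budget);

		work_done += cleaned;
		if (cleaned >= budget)	/* budget used up: not done yet */
			clean_complete = false;
	}

	/* work remains: stay in polling mode */
	if (!clean_complete)
		return budget;

	/* all rings clean: exit polling and re-arm this vector's interrupt */
	napi_complete_done(napi, work_done);
	/* ... re-enable the MSI-X vector here ... */
	return work_done;
}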

Signed-off-by: Anirudh Venkataramanan <anirudh.venkataramanan@intel.com>
Tested-by: Tony Brelinski <tonyx.brelinski@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
commit 2b245cb294
parent cdedef59de
Author:    Anirudh Venkataramanan, 2018-03-20 07:58:14 -07:00
Committer: Jeff Kirsher
5 changed files with 1171 additions and 2 deletions

drivers/net/ethernet/intel/ice/ice.h

@@ -60,6 +60,7 @@
	(((val) << ICE_AQ_VSI_UP_TABLE_UP##i##_S) & \
	  ICE_AQ_VSI_UP_TABLE_UP##i##_M)
#define ICE_TX_DESC(R, i) (&(((struct ice_tx_desc *)((R)->desc))[i]))
#define ICE_RX_DESC(R, i) (&(((union ice_32b_rx_flex_desc *)((R)->desc))[i]))
#define ice_for_each_txq(vsi, i) \
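
The one-line addition in this hunk is ICE_RX_DESC, the Rx counterpart of the existing ICE_TX_DESC: both cast a ring's raw descriptor memory to the proper descriptor type and index into it. Illustrative usage (ring and index names assumed):

	struct ice_tx_desc *tx_desc = ICE_TX_DESC(tx_ring, tx_ring->next_to_use);
	union ice_32b_rx_flex_desc *rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);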

drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h

@@ -131,6 +131,33 @@ enum ice_rx_flg64_bits {
	ICE_RXFLG_RSVD = 63
};
/* for ice_32byte_rx_flex_desc.ptype_flexi_flags0 member */
#define ICE_RX_FLEX_DESC_PTYPE_M (0x3FF) /* 10-bits */
/* for ice_32byte_rx_flex_desc.pkt_length member */
#define ICE_RX_FLX_DESC_PKT_LEN_M (0x3FFF) /* 14-bits */
enum ice_rx_flex_desc_status_error_0_bits {
	/* Note: These are predefined bit offsets */
	ICE_RX_FLEX_DESC_STATUS0_DD_S = 0,
	ICE_RX_FLEX_DESC_STATUS0_EOF_S,
	ICE_RX_FLEX_DESC_STATUS0_HBO_S,
	ICE_RX_FLEX_DESC_STATUS0_L3L4P_S,
	ICE_RX_FLEX_DESC_STATUS0_XSUM_IPE_S,
	ICE_RX_FLEX_DESC_STATUS0_XSUM_L4E_S,
	ICE_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S,
	ICE_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S,
	ICE_RX_FLEX_DESC_STATUS0_LPBK_S,
	ICE_RX_FLEX_DESC_STATUS0_IPV6EXADD_S,
	ICE_RX_FLEX_DESC_STATUS0_RXE_S,
	ICE_RX_FLEX_DESC_STATUS0_CRCP_S,
	ICE_RX_FLEX_DESC_STATUS0_RSS_VALID_S,
	ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S,
	ICE_RX_FLEX_DESC_STATUS0_XTRMD0_VALID_S,
	ICE_RX_FLEX_DESC_STATUS0_XTRMD1_VALID_S,
	ICE_RX_FLEX_DESC_STATUS0_LAST /* this entry must be last!!! */
};
#define ICE_RXQ_CTX_SIZE_DWORDS 8
#define ICE_RXQ_CTX_SZ (ICE_RXQ_CTX_SIZE_DWORDS * sizeof(u32))
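
Rx cleanup consumes these bit positions from the descriptor write-back. A hedged sketch follows; the wb.status_error0 field belongs to the 32-byte flex descriptor's write-back layout, which this excerpt does not show.

/* Illustrative: is this descriptor written back, and does it end a frame? */
static bool ice_rx_desc_done(union ice_32b_rx_flex_desc *rx_desc, bool *eof)
{
	u16 stat_err0 = le16_to_cpu(rx_desc->wb.status_error0);

	/* DD: hardware has completed write-back of this descriptor */
	if (!(stat_err0 & BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S)))
		return false;

	/* EOF: this buffer completes the frame (no Rx buffer chaining) */
	*eof = !!(stat_err0 & BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S));
	return true;
}

The frame length comes from the same write-back, masked with ICE_RX_FLX_DESC_PKT_LEN_M above.
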
@@ -201,6 +228,25 @@ struct ice_tx_desc {
	__le64 cmd_type_offset_bsz;
};
enum ice_tx_desc_dtype_value {
	ICE_TX_DESC_DTYPE_DATA = 0x0,
	ICE_TX_DESC_DTYPE_CTX = 0x1,
	/* DESC_DONE - HW has completed write-back of descriptor */
	ICE_TX_DESC_DTYPE_DESC_DONE = 0xF,
};

#define ICE_TXD_QW1_CMD_S 4
#define ICE_TXD_QW1_CMD_M (0xFFFUL << ICE_TXD_QW1_CMD_S)

enum ice_tx_desc_cmd_bits {
	ICE_TX_DESC_CMD_EOP = 0x0001,
	ICE_TX_DESC_CMD_RS = 0x0002,
};
#define ICE_TXD_QW1_OFFSET_S 16
#define ICE_TXD_QW1_TX_BUF_SZ_S 34
#define ICE_TXD_QW1_L2TAG1_S 48
#define ICE_LAN_TXQ_MAX_QGRPS 127
#define ICE_LAN_TXQ_MAX_QDIS 1023
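
The DESC_DONE value is what makes Tx reclaim work: because ICE_TX_DESC_CMD_RS requests a write-back, software can detect completion by checking whether hardware has rewritten the descriptor's DTYPE field to 0xF. A hedged sketch; the QW1 DTYPE shift/mask are defined in this header but not shown in this excerpt.

#define ICE_TXD_QW1_DTYPE_S	0
#define ICE_TXD_QW1_DTYPE_M	(0xFUL << ICE_TXD_QW1_DTYPE_S)

/* Illustrative: has HW marked this descriptor as completed? */
static bool ice_tx_desc_done(struct ice_tx_desc *tx_desc)
{
	u64 qw1 = le64_to_cpu(tx_desc->cmd_type_offset_bsz);

	return ((qw1 & ICE_TXD_QW1_DTYPE_M) >> ICE_TXD_QW1_DTYPE_S) ==
		ICE_TX_DESC_DTYPE_DESC_DONE;
}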

drivers/net/ethernet/intel/ice/ice_main.c

@@ -1258,6 +1258,23 @@ err_txrings:
	return -ENOMEM;
}
/**
 * ice_msix_clean_rings - MSIX mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 */
static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data)
{
	struct ice_q_vector *q_vector = (struct ice_q_vector *)data;

	if (!q_vector->tx.ring && !q_vector->rx.ring)
		return IRQ_HANDLED;

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}
/**
 * ice_vsi_alloc - Allocates the next available struct vsi in the PF
 * @pf: board private structure
@@ -1298,6 +1315,8 @@ static struct ice_vsi *ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type type)
		if (ice_vsi_alloc_arrays(vsi, true))
			goto err_rings;

		/* Setup default MSIX irq handler for VSI */
		vsi->irq_handler = ice_msix_clean_rings;
		break;
	default:
		dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type);
@@ -1741,6 +1760,9 @@ static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx)
	if (cpu_online(v_idx))
		cpumask_set_cpu(v_idx, &q_vector->affinity_mask);

	if (vsi->netdev)
		netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll,
			       NAPI_POLL_WEIGHT);

	/* tie q_vector and vsi together */
	vsi->q_vectors[v_idx] = q_vector;
@@ -2914,6 +2936,21 @@ static int ice_vsi_stop_tx_rx_rings(struct ice_vsi *vsi)
	return 0;
}
/**
 * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI
 * @vsi: the VSI being configured
 */
static void ice_napi_enable_all(struct ice_vsi *vsi)
{
	int q_idx;

	if (!vsi->netdev)
		return;

	for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
		napi_enable(&vsi->q_vectors[q_idx]->napi);
}
/**
 * ice_up_complete - Finish the last steps of bringing up a connection
 * @vsi: The VSI being configured
@@ -2939,6 +2976,7 @@ static int ice_up_complete(struct ice_vsi *vsi)
		return err;

	clear_bit(__ICE_DOWN, vsi->state);
	ice_napi_enable_all(vsi);
	ice_vsi_ena_irq(vsi);

	if (vsi->port_info &&
@@ -2954,6 +2992,21 @@ static int ice_up_complete(struct ice_vsi *vsi)
	return err;
}
/**
 * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI
 * @vsi: VSI having NAPI disabled
 */
static void ice_napi_disable_all(struct ice_vsi *vsi)
{
	int q_idx;

	if (!vsi->netdev)
		return;

	for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++)
		napi_disable(&vsi->q_vectors[q_idx]->napi);
}
/**
 * ice_down - Shutdown the connection
 * @vsi: The VSI being stopped
@@ -2972,6 +3025,7 @@ static int ice_down(struct ice_vsi *vsi)
	ice_vsi_dis_irq(vsi);
	err = ice_vsi_stop_tx_rx_rings(vsi);
	ice_napi_disable_all(vsi);

	ice_for_each_txq(vsi, i)
		ice_clean_tx_ring(vsi->tx_rings[i]);
@@ -3251,4 +3305,5 @@ static int ice_stop(struct net_device *netdev)
static const struct net_device_ops ice_netdev_ops = {
	.ndo_open = ice_open,
	.ndo_stop = ice_stop,
	.ndo_start_xmit = ice_start_xmit,
};

drivers/net/ethernet/intel/ice/ice_txrx.c (file diff suppressed because it is too large)

drivers/net/ethernet/intel/ice/ice_txrx.h

@@ -7,8 +7,23 @@
#define ICE_DFLT_IRQ_WORK 256
#define ICE_RXBUF_2048 2048
#define ICE_MAX_CHAINED_RX_BUFS 5
#define ICE_MAX_BUF_TXD 8
#define ICE_MIN_TX_LEN 17
/* The size limit for a transmit buffer in a descriptor is (16K - 1).
 * In order to align with the read requests we will align the value to
 * the nearest 4K which represents our maximum read request size.
 */
#define ICE_MAX_READ_REQ_SIZE 4096
#define ICE_MAX_DATA_PER_TXD (16 * 1024 - 1)
#define ICE_MAX_DATA_PER_TXD_ALIGNED \
	(~(ICE_MAX_READ_REQ_SIZE - 1) & ICE_MAX_DATA_PER_TXD)
#define ICE_RX_BUF_WRITE 16 /* Must be power of 2 */
#define ICE_MAX_TXQ_PER_TXQG 128
/* Tx Descriptors needed, worst case */
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
#define ICE_DESC_UNUSED(R) \
	((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
	(R)->next_to_clean - (R)->next_to_use - 1)
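
Two of these macros reward a worked example. ICE_MAX_DATA_PER_TXD_ALIGNED rounds the 16383-byte per-descriptor limit down to a multiple of the 4096-byte maximum read request: ~(4096 - 1) & 16383 = 12288, so, for instance, a 60000-byte TSO payload needs DIV_ROUND_UP(60000, 12288) = 5 data descriptors. ICE_DESC_UNUSED counts free ring slots, keeping one slot in reserve so next_to_use can never catch next_to_clean (values illustrative):

/* 256-entry ring, next_to_clean = 10, next_to_use = 250:
 *	(256 + 10) - 250 - 1 = 15 descriptors free
 * after cleaning wraps past the end (next_to_clean = 250, next_to_use = 10):
 *	(0 + 250) - 10 - 1 = 239 descriptors free
 */
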
@@ -30,6 +45,24 @@ struct ice_rx_buf {
	unsigned int page_offset;
};
struct ice_q_stats {
	u64 pkts;
	u64 bytes;
};

struct ice_txq_stats {
	u64 restart_q;
	u64 tx_busy;
	u64 tx_linearize;
};

struct ice_rxq_stats {
	u64 non_eop_descs;
	u64 alloc_page_failed;
	u64 alloc_buf_failed;
	u64 page_reuse_count;
};
/* this enum matches hardware bits and is meant to be used by DYN_CTLN
 * registers and QINT registers or more generally anywhere in the manual
 * mentioning ITR_INDX, ITR_NONE cannot be used as an index 'n' into any
@@ -94,6 +127,15 @@ struct ice_ring {
	u16 next_to_clean;
	bool ring_active;	/* is ring online or not */

	/* stats structs */
	struct ice_q_stats stats;
	struct u64_stats_sync syncp;
	union {
		struct ice_txq_stats tx_stats;
		struct ice_rxq_stats rx_stats;
	};

	unsigned int size;	/* length of descriptor ring in bytes */
	dma_addr_t dma;		/* physical address of ring */
	struct rcu_head rcu;	/* to avoid race on free */
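
The stats/syncp pairing is the kernel's standard u64_stats_sync pattern: writers bump 64-bit counters locklessly inside a begin/end pair, and readers retry until they observe a consistent snapshot (this matters on 32-bit machines, where a u64 read is not atomic). A hedged sketch of both sides; total_pkts/total_bytes are illustrative locals:

	/* writer, e.g. at the end of a Tx/Rx cleanup pass */
	u64_stats_update_begin(&ring->syncp);
	ring->stats.pkts += total_pkts;
	ring->stats.bytes += total_bytes;
	u64_stats_update_end(&ring->syncp);

	/* reader, e.g. when aggregating counters for ndo_get_stats64 */
	unsigned int start;
	u64 pkts, bytes;

	do {
		start = u64_stats_fetch_begin(&ring->syncp);
		pkts = ring->stats.pkts;
		bytes = ring->stats.bytes;
	} while (u64_stats_fetch_retry(&ring->syncp, start));
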
@@ -121,10 +163,13 @@ struct ice_ring_container {
	for (pos = (head).ring; pos; pos = pos->next)
bool ice_alloc_rx_bufs(struct ice_ring *rxr, u16 cleaned_count);
netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev);
void ice_clean_tx_ring(struct ice_ring *tx_ring);
void ice_clean_rx_ring(struct ice_ring *rx_ring);
int ice_setup_tx_ring(struct ice_ring *tx_ring);
int ice_setup_rx_ring(struct ice_ring *rx_ring);
void ice_free_tx_ring(struct ice_ring *tx_ring);
void ice_free_rx_ring(struct ice_ring *rx_ring);
int ice_napi_poll(struct napi_struct *napi, int budget);
#endif /* _ICE_TXRX_H_ */