86e85bf698
XDP-redirect is broken in the sfc driver. XDP_REDIRECT requires
tailroom for skb_shared_info when creating an SKB based on the
redirected xdp_frame (both in cpumap and veth).

The fix requires some initial explaining. The driver uses RX page-split
when possible. It reserves the top 64 bytes of the RX page for storing
the dma_addr (struct efx_rx_page_state). It also has the XDP recommended
headroom of XDP_PACKET_HEADROOM (256 bytes). As it doesn't reserve any
tailroom, it can still fit two standard MTU (1500) frames into one page.

The size of struct skb_shared_info is 320 bytes. Thus drivers like ixgbe
and i40e reduce their XDP headroom to 192 bytes, which allows them to
fit two frames of up to 1536 bytes into a 4K page (192+1536+320=2048).

The fix is to reduce this driver's headroom to 128 bytes and add 320
bytes of tailroom. This accounts for the reserved top 64 bytes of the
page and still fits two frames in a page for normal MTUs.

We must never go below 128 bytes of headroom for XDP, as one cacheline
is used for the xdp_frame area and the next cacheline is reserved for
the metadata area.

Fixes: eb9a36be7f ("sfc: perform XDP processing on received packets")
Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Acked-by: Edward Cree <ecree@solarflare.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
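
To make the layout arithmetic above concrete, here is a minimal standalone
sketch; the constants mirror the commit message rather than the driver
headers, and only EFX_XDP_HEADROOM is a name that also appears in the
driver code below:

	/* Illustrative check: the reserved page state plus two RX buffers of
	 * (headroom + frame + tailroom) must still fit in a 4 KiB page.
	 */
	#include <assert.h>

	int main(void)
	{
		const int page_state = 64;   /* struct efx_rx_page_state at top of page */
		const int headroom   = 128;  /* new EFX_XDP_HEADROOM */
		const int frame      = 1536; /* rounded-up frame for a 1500-byte MTU */
		const int tailroom   = 320;  /* SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) on x86_64 */

		assert(page_state + 2 * (headroom + frame + tailroom) <= 4096);
		return 0;
	}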
// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2013 Solarflare Communications Inc.
 */

#include <linux/socket.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/prefetch.h>
#include <linux/moduleparam.h>
#include <linux/iommu.h>
#include <net/ip.h>
#include <net/checksum.h>
#include <net/xdp.h>
#include <linux/bpf_trace.h>
#include "net_driver.h"
#include "efx.h"
#include "rx_common.h"
#include "filter.h"
#include "nic.h"
#include "selftest.h"
#include "workarounds.h"

/* Preferred number of descriptors to fill at once */
#define EFX_RX_PREFERRED_BATCH 8U

/* Maximum rx prefix used by any architecture. */
#define EFX_MAX_RX_PREFIX_SIZE 16

/* Size of buffer allocated for skb header area. */
#define EFX_SKB_HEADERS 128u

/* Each packet can consume up to ceil(max_frame_len / buffer_size) buffers */
#define EFX_RX_MAX_FRAGS DIV_ROUND_UP(EFX_MAX_FRAME_LEN(EFX_MAX_MTU), \
				      EFX_RX_USR_BUF_SIZE)

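/* Sync the DMA-mapped RX buffer for CPU access before the received data
 * is read.
 */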
static inline void efx_sync_rx_buffer(struct efx_nic *efx,
				      struct efx_rx_buffer *rx_buf,
				      unsigned int len)
{
	dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr, len,
				DMA_FROM_DEVICE);
}

static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
				     struct efx_rx_buffer *rx_buf,
				     int len)
{
	struct efx_nic *efx = rx_queue->efx;
	unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;

	if (likely(len <= max_len))
		return;

	/* The packet must be discarded, but this is only a fatal error
	 * if the caller indicated it was
	 */
	rx_buf->flags |= EFX_RX_PKT_DISCARD;

	if (net_ratelimit())
		netif_err(efx, rx_err, efx->net_dev,
			  "RX queue %d overlength RX event (%#x > %#x)\n",
			  efx_rx_queue_index(rx_queue), len, max_len);

	efx_rx_queue_channel(rx_queue)->n_rx_overlength++;
}

/* Allocate and construct an SKB around page fragments */
static struct sk_buff *efx_rx_mk_skb(struct efx_channel *channel,
				     struct efx_rx_buffer *rx_buf,
				     unsigned int n_frags,
				     u8 *eh, int hdr_len)
{
	struct efx_nic *efx = channel->efx;
	struct sk_buff *skb;

	/* Allocate an SKB to store the headers */
	skb = netdev_alloc_skb(efx->net_dev,
			       efx->rx_ip_align + efx->rx_prefix_size +
			       hdr_len);
	if (unlikely(skb == NULL)) {
		atomic_inc(&efx->n_rx_noskb_drops);
		return NULL;
	}

	EFX_WARN_ON_ONCE_PARANOID(rx_buf->len < hdr_len);

	memcpy(skb->data + efx->rx_ip_align, eh - efx->rx_prefix_size,
	       efx->rx_prefix_size + hdr_len);
	skb_reserve(skb, efx->rx_ip_align + efx->rx_prefix_size);
	__skb_put(skb, hdr_len);

	/* Append the remaining page(s) onto the frag list */
	if (rx_buf->len > hdr_len) {
		rx_buf->page_offset += hdr_len;
		rx_buf->len -= hdr_len;

		for (;;) {
			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
					   rx_buf->page, rx_buf->page_offset,
					   rx_buf->len);
			rx_buf->page = NULL;
			skb->len += rx_buf->len;
			skb->data_len += rx_buf->len;
			if (skb_shinfo(skb)->nr_frags == n_frags)
				break;

			rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
		}
	} else {
		__free_pages(rx_buf->page, efx->rx_buffer_order);
		rx_buf->page = NULL;
		n_frags = 0;
	}

	skb->truesize += n_frags * efx->rx_buffer_truesize;

	/* Move past the ethernet header */
	skb->protocol = eth_type_trans(skb, efx->net_dev);

	skb_mark_napi_id(skb, &channel->napi_str);

	return skb;
}

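/* Handle a received packet. First half: validates the completion and syncs
 * the DMA mapping(s); payload processing is deferred to __efx_rx_packet().
 */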
void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
		   unsigned int n_frags, unsigned int len, u16 flags)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_rx_buffer *rx_buf;

	rx_queue->rx_packets++;

	rx_buf = efx_rx_buffer(rx_queue, index);
	rx_buf->flags |= flags;

	/* Validate the number of fragments and completed length */
	if (n_frags == 1) {
		if (!(flags & EFX_RX_PKT_PREFIX_LEN))
			efx_rx_packet__check_len(rx_queue, rx_buf, len);
	} else if (unlikely(n_frags > EFX_RX_MAX_FRAGS) ||
		   unlikely(len <= (n_frags - 1) * efx->rx_dma_len) ||
		   unlikely(len > n_frags * efx->rx_dma_len) ||
		   unlikely(!efx->rx_scatter)) {
		/* If this isn't an explicit discard request, either
		 * the hardware or the driver is broken.
		 */
		WARN_ON(!(len == 0 && rx_buf->flags & EFX_RX_PKT_DISCARD));
		rx_buf->flags |= EFX_RX_PKT_DISCARD;
	}

	netif_vdbg(efx, rx_status, efx->net_dev,
		   "RX queue %d received ids %x-%x len %d %s%s\n",
		   efx_rx_queue_index(rx_queue), index,
		   (index + n_frags - 1) & rx_queue->ptr_mask, len,
		   (rx_buf->flags & EFX_RX_PKT_CSUMMED) ? " [SUMMED]" : "",
		   (rx_buf->flags & EFX_RX_PKT_DISCARD) ? " [DISCARD]" : "");

	/* Discard packet, if instructed to do so. Process the
	 * previous receive first.
	 */
	if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) {
		efx_rx_flush_packet(channel);
		efx_discard_rx_packet(channel, rx_buf, n_frags);
		return;
	}

	if (n_frags == 1 && !(flags & EFX_RX_PKT_PREFIX_LEN))
		rx_buf->len = len;

	/* Release and/or sync the DMA mapping - assumes all RX buffers
	 * consumed in-order per RX queue.
	 */
	efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);

	/* Prefetch nice and early so data will (hopefully) be in cache by
	 * the time we look at it.
	 */
	prefetch(efx_rx_buf_va(rx_buf));

	rx_buf->page_offset += efx->rx_prefix_size;
	rx_buf->len -= efx->rx_prefix_size;

	if (n_frags > 1) {
		/* Release/sync DMA mapping for additional fragments.
		 * Fix length for last fragment.
		 */
		unsigned int tail_frags = n_frags - 1;

		for (;;) {
			rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
			if (--tail_frags == 0)
				break;
			efx_sync_rx_buffer(efx, rx_buf, efx->rx_dma_len);
		}
		rx_buf->len = len - (n_frags - 1) * efx->rx_dma_len;
		efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);
	}

	/* All fragments have been DMA-synced, so recycle pages. */
	rx_buf = efx_rx_buffer(rx_queue, index);
	efx_recycle_rx_pages(channel, rx_buf, n_frags);

	/* Pipeline receives so that we give time for packet headers to be
	 * prefetched into cache.
	 */
	efx_rx_flush_packet(channel);
	channel->rx_pkt_n_frags = n_frags;
	channel->rx_pkt_index = index;
}

static void efx_rx_deliver(struct efx_channel *channel, u8 *eh,
			   struct efx_rx_buffer *rx_buf,
			   unsigned int n_frags)
{
	struct sk_buff *skb;
	u16 hdr_len = min_t(u16, rx_buf->len, EFX_SKB_HEADERS);

	skb = efx_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len);
	if (unlikely(skb == NULL)) {
		struct efx_rx_queue *rx_queue;

		rx_queue = efx_channel_get_rx_queue(channel);
		efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
		return;
	}
	skb_record_rx_queue(skb, channel->rx_queue.core_index);

	/* Set the SKB flags */
	skb_checksum_none_assert(skb);
	if (likely(rx_buf->flags & EFX_RX_PKT_CSUMMED)) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb->csum_level = !!(rx_buf->flags & EFX_RX_PKT_CSUM_LEVEL);
	}

	efx_rx_skb_attach_timestamp(channel, skb);

	if (channel->type->receive_skb)
		if (channel->type->receive_skb(channel, skb))
			return;

	/* Pass the packet up */
	if (channel->rx_list != NULL)
		/* Add to list, will pass up later */
		list_add_tail(&skb->list, channel->rx_list);
	else
		/* No list, so pass it up now */
		netif_receive_skb(skb);
}

/** efx_do_xdp: perform XDP processing on a received packet
 *
 * Returns true if packet should still be delivered.
 */
static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel,
		       struct efx_rx_buffer *rx_buf, u8 **ehp)
{
	u8 rx_prefix[EFX_MAX_RX_PREFIX_SIZE];
	struct efx_rx_queue *rx_queue;
	struct bpf_prog *xdp_prog;
	struct xdp_frame *xdpf;
	struct xdp_buff xdp;
	u32 xdp_act;
	s16 offset;
	int err;

	rcu_read_lock();
	xdp_prog = rcu_dereference(efx->xdp_prog);
	if (!xdp_prog) {
		rcu_read_unlock();
		return true;
	}

	rx_queue = efx_channel_get_rx_queue(channel);

	if (unlikely(channel->rx_pkt_n_frags > 1)) {
		/* We can't do XDP on fragmented packets - drop. */
		rcu_read_unlock();
		efx_free_rx_buffers(rx_queue, rx_buf,
				    channel->rx_pkt_n_frags);
		if (net_ratelimit())
			netif_err(efx, rx_err, efx->net_dev,
				  "XDP is not possible with multiple receive fragments (%d)\n",
				  channel->rx_pkt_n_frags);
		channel->n_rx_xdp_bad_drops++;
		return false;
	}

	dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr,
				rx_buf->len, DMA_FROM_DEVICE);

	/* Save the rx prefix. */
	EFX_WARN_ON_PARANOID(efx->rx_prefix_size > EFX_MAX_RX_PREFIX_SIZE);
	memcpy(rx_prefix, *ehp - efx->rx_prefix_size,
	       efx->rx_prefix_size);

	xdp.data = *ehp;
	xdp.data_hard_start = xdp.data - EFX_XDP_HEADROOM;

	/* No support yet for XDP metadata */
	xdp_set_data_meta_invalid(&xdp);
	xdp.data_end = xdp.data + rx_buf->len;
	xdp.rxq = &rx_queue->xdp_rxq_info;

	xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);
	rcu_read_unlock();

	offset = (u8 *)xdp.data - *ehp;

	switch (xdp_act) {
	case XDP_PASS:
		/* Fix up rx prefix. */
		if (offset) {
			*ehp += offset;
			rx_buf->page_offset += offset;
			rx_buf->len -= offset;
			memcpy(*ehp - efx->rx_prefix_size, rx_prefix,
			       efx->rx_prefix_size);
		}
		break;

	case XDP_TX:
		/* Buffer ownership passes to tx on success. */
		xdpf = convert_to_xdp_frame(&xdp);
		err = efx_xdp_tx_buffers(efx, 1, &xdpf, true);
		if (unlikely(err != 1)) {
			efx_free_rx_buffers(rx_queue, rx_buf, 1);
			if (net_ratelimit())
				netif_err(efx, rx_err, efx->net_dev,
					  "XDP TX failed (%d)\n", err);
			channel->n_rx_xdp_bad_drops++;
			trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act);
		} else {
			channel->n_rx_xdp_tx++;
		}
		break;

	case XDP_REDIRECT:
		err = xdp_do_redirect(efx->net_dev, &xdp, xdp_prog);
		if (unlikely(err)) {
			efx_free_rx_buffers(rx_queue, rx_buf, 1);
			if (net_ratelimit())
				netif_err(efx, rx_err, efx->net_dev,
					  "XDP redirect failed (%d)\n", err);
			channel->n_rx_xdp_bad_drops++;
			trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act);
		} else {
			channel->n_rx_xdp_redirect++;
		}
		break;

	default:
		bpf_warn_invalid_xdp_action(xdp_act);
		efx_free_rx_buffers(rx_queue, rx_buf, 1);
		channel->n_rx_xdp_bad_drops++;
		trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act);
		break;

	case XDP_ABORTED:
		trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act);
		/* Fall through */
	case XDP_DROP:
		efx_free_rx_buffers(rx_queue, rx_buf, 1);
		channel->n_rx_xdp_drops++;
		break;
	}

	return xdp_act == XDP_PASS;
}

/* Handle a received packet. Second half: Touches packet payload. */
void __efx_rx_packet(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	struct efx_rx_buffer *rx_buf =
		efx_rx_buffer(&channel->rx_queue, channel->rx_pkt_index);
	u8 *eh = efx_rx_buf_va(rx_buf);

	/* Read length from the prefix if necessary. This already
	 * excludes the length of the prefix itself.
	 */
	if (rx_buf->flags & EFX_RX_PKT_PREFIX_LEN)
		rx_buf->len = le16_to_cpup((__le16 *)
					   (eh + efx->rx_packet_len_offset));

	/* If we're in loopback test, then pass the packet directly to the
	 * loopback layer, and free the rx_buf here
	 */
	if (unlikely(efx->loopback_selftest)) {
		struct efx_rx_queue *rx_queue;

		efx_loopback_rx_packet(efx, eh, rx_buf->len);
		rx_queue = efx_channel_get_rx_queue(channel);
		efx_free_rx_buffers(rx_queue, rx_buf,
				    channel->rx_pkt_n_frags);
		goto out;
	}

	if (!efx_do_xdp(efx, channel, rx_buf, &eh))
		goto out;

	if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
		rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;

	if ((rx_buf->flags & EFX_RX_PKT_TCP) && !channel->type->receive_skb)
		efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh);
	else
		efx_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags);
out:
	channel->rx_pkt_n_frags = 0;
}

#ifdef CONFIG_RFS_ACCEL

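/* Work item scheduled by efx_filter_rfs(): insert the requested ARFS filter
 * and record its flow ID so that it can be expired later.
 */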
static void efx_filter_rfs_work(struct work_struct *data)
{
	struct efx_async_filter_insertion *req = container_of(data, struct efx_async_filter_insertion,
							      work);
	struct efx_nic *efx = netdev_priv(req->net_dev);
	struct efx_channel *channel = efx_get_channel(efx, req->rxq_index);
	int slot_idx = req - efx->rps_slot;
	struct efx_arfs_rule *rule;
	u16 arfs_id = 0;
	int rc;

	rc = efx->type->filter_insert(efx, &req->spec, true);
	if (rc >= 0)
		/* Discard 'priority' part of EF10+ filter ID (mcdi_filters) */
		rc %= efx->type->max_rx_ip_filters;
	if (efx->rps_hash_table) {
		spin_lock_bh(&efx->rps_hash_lock);
		rule = efx_rps_hash_find(efx, &req->spec);
		/* The rule might have already gone, if someone else's request
		 * for the same spec was already worked and then expired before
		 * we got around to our work. In that case we have nothing
		 * tying us to an arfs_id, meaning that as soon as the filter
		 * is considered for expiry it will be removed.
		 */
		if (rule) {
			if (rc < 0)
				rule->filter_id = EFX_ARFS_FILTER_ID_ERROR;
			else
				rule->filter_id = rc;
			arfs_id = rule->arfs_id;
		}
		spin_unlock_bh(&efx->rps_hash_lock);
	}
	if (rc >= 0) {
		/* Remember this so we can check whether to expire the filter
		 * later.
		 */
		mutex_lock(&efx->rps_mutex);
		if (channel->rps_flow_id[rc] == RPS_FLOW_ID_INVALID)
			channel->rfs_filter_count++;
		channel->rps_flow_id[rc] = req->flow_id;
		mutex_unlock(&efx->rps_mutex);

		if (req->spec.ether_type == htons(ETH_P_IP))
			netif_info(efx, rx_status, efx->net_dev,
				   "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d id %u]\n",
				   (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
				   req->spec.rem_host, ntohs(req->spec.rem_port),
				   req->spec.loc_host, ntohs(req->spec.loc_port),
				   req->rxq_index, req->flow_id, rc, arfs_id);
		else
			netif_info(efx, rx_status, efx->net_dev,
				   "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d id %u]\n",
				   (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
				   req->spec.rem_host, ntohs(req->spec.rem_port),
				   req->spec.loc_host, ntohs(req->spec.loc_port),
				   req->rxq_index, req->flow_id, rc, arfs_id);
		channel->n_rfs_succeeded++;
	} else {
		if (req->spec.ether_type == htons(ETH_P_IP))
			netif_dbg(efx, rx_status, efx->net_dev,
				  "failed to steer %s %pI4:%u:%pI4:%u to queue %u [flow %u rc %d id %u]\n",
				  (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
				  req->spec.rem_host, ntohs(req->spec.rem_port),
				  req->spec.loc_host, ntohs(req->spec.loc_port),
				  req->rxq_index, req->flow_id, rc, arfs_id);
		else
			netif_dbg(efx, rx_status, efx->net_dev,
				  "failed to steer %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u rc %d id %u]\n",
				  (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP",
				  req->spec.rem_host, ntohs(req->spec.rem_port),
				  req->spec.loc_host, ntohs(req->spec.loc_port),
				  req->rxq_index, req->flow_id, rc, arfs_id);
		channel->n_rfs_failed++;
		/* We're overloading the NIC's filter tables, so let's do a
		 * chunk of extra expiry work.
		 */
		__efx_filter_rfs_expire(channel, min(channel->rfs_filter_count,
						     100u));
	}

	/* Release references */
	clear_bit(slot_idx, &efx->rps_slot_map);
	dev_put(req->net_dev);
}

int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
		   u16 rxq_index, u32 flow_id)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	struct efx_async_filter_insertion *req;
	struct efx_arfs_rule *rule;
	struct flow_keys fk;
	int slot_idx;
	bool new;
	int rc;

	/* find a free slot */
	for (slot_idx = 0; slot_idx < EFX_RPS_MAX_IN_FLIGHT; slot_idx++)
		if (!test_and_set_bit(slot_idx, &efx->rps_slot_map))
			break;
	if (slot_idx >= EFX_RPS_MAX_IN_FLIGHT)
		return -EBUSY;

	if (flow_id == RPS_FLOW_ID_INVALID) {
		rc = -EINVAL;
		goto out_clear;
	}

	if (!skb_flow_dissect_flow_keys(skb, &fk, 0)) {
		rc = -EPROTONOSUPPORT;
		goto out_clear;
	}

	if (fk.basic.n_proto != htons(ETH_P_IP) && fk.basic.n_proto != htons(ETH_P_IPV6)) {
		rc = -EPROTONOSUPPORT;
		goto out_clear;
	}
	if (fk.control.flags & FLOW_DIS_IS_FRAGMENT) {
		rc = -EPROTONOSUPPORT;
		goto out_clear;
	}

	req = efx->rps_slot + slot_idx;
	efx_filter_init_rx(&req->spec, EFX_FILTER_PRI_HINT,
			   efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
			   rxq_index);
	req->spec.match_flags =
		EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
		EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
		EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
	req->spec.ether_type = fk.basic.n_proto;
	req->spec.ip_proto = fk.basic.ip_proto;

	if (fk.basic.n_proto == htons(ETH_P_IP)) {
		req->spec.rem_host[0] = fk.addrs.v4addrs.src;
		req->spec.loc_host[0] = fk.addrs.v4addrs.dst;
	} else {
		memcpy(req->spec.rem_host, &fk.addrs.v6addrs.src,
		       sizeof(struct in6_addr));
		memcpy(req->spec.loc_host, &fk.addrs.v6addrs.dst,
		       sizeof(struct in6_addr));
	}

	req->spec.rem_port = fk.ports.src;
	req->spec.loc_port = fk.ports.dst;

	if (efx->rps_hash_table) {
		/* Add it to ARFS hash table */
		spin_lock(&efx->rps_hash_lock);
		rule = efx_rps_hash_add(efx, &req->spec, &new);
		if (!rule) {
			rc = -ENOMEM;
			goto out_unlock;
		}
		if (new)
			rule->arfs_id = efx->rps_next_id++ % RPS_NO_FILTER;
		rc = rule->arfs_id;
		/* Skip if existing or pending filter already does the right thing */
		if (!new && rule->rxq_index == rxq_index &&
		    rule->filter_id >= EFX_ARFS_FILTER_ID_PENDING)
			goto out_unlock;
		rule->rxq_index = rxq_index;
		rule->filter_id = EFX_ARFS_FILTER_ID_PENDING;
		spin_unlock(&efx->rps_hash_lock);
	} else {
		/* Without an ARFS hash table, we just use arfs_id 0 for all
		 * filters. This means if multiple flows hash to the same
		 * flow_id, all but the most recently touched will be eligible
		 * for expiry.
		 */
		rc = 0;
	}

	/* Queue the request */
	dev_hold(req->net_dev = net_dev);
	INIT_WORK(&req->work, efx_filter_rfs_work);
	req->rxq_index = rxq_index;
	req->flow_id = flow_id;
	schedule_work(&req->work);
	return rc;
out_unlock:
	spin_unlock(&efx->rps_hash_lock);
out_clear:
	clear_bit(slot_idx, &efx->rps_slot_map);
	return rc;
}

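/* Scan up to @quota entries of this channel's RFS filter table, expiring
 * filters that the NIC reports as idle. Returns false if the scan was
 * skipped because the rps_mutex was contended.
 */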
bool __efx_filter_rfs_expire(struct efx_channel *channel, unsigned int quota)
{
	bool (*expire_one)(struct efx_nic *efx, u32 flow_id, unsigned int index);
	struct efx_nic *efx = channel->efx;
	unsigned int index, size, start;
	u32 flow_id;

	if (!mutex_trylock(&efx->rps_mutex))
		return false;
	expire_one = efx->type->filter_rfs_expire_one;
	index = channel->rfs_expire_index;
	start = index;
	size = efx->type->max_rx_ip_filters;
	while (quota) {
		flow_id = channel->rps_flow_id[index];

		if (flow_id != RPS_FLOW_ID_INVALID) {
			quota--;
			if (expire_one(efx, flow_id, index)) {
				netif_info(efx, rx_status, efx->net_dev,
					   "expired filter %d [channel %u flow %u]\n",
					   index, channel->channel, flow_id);
				channel->rps_flow_id[index] = RPS_FLOW_ID_INVALID;
				channel->rfs_filter_count--;
			}
		}
		if (++index == size)
			index = 0;
		/* If we were called with a quota that exceeds the total number
		 * of filters in the table (which shouldn't happen, but could
		 * if two callers race), ensure that we don't loop forever -
		 * stop when we've examined every row of the table.
		 */
		if (index == start)
			break;
	}

	channel->rfs_expire_index = index;
	mutex_unlock(&efx->rps_mutex);
	return true;
}

#endif /* CONFIG_RFS_ACCEL */