net/mlx5e: Rx, Fixup skb checksum for packets with tail padding

When an Ethernet frame with an IP payload is padded, the padding octets are
not covered by the hardware checksum.

Prior to the cited commit, the skb checksum was forced to CHECKSUM_NONE
when padding was detected. After it, the kernel tries to trim the padding
bytes and subtract their checksum from skb->csum.

In this patch we fix up skb->csum for any IP packet with tail padding of
any size, whenever padding is found.
The FCS case is just one special case of this general-purpose fix, hence
the FCS-specific handling is removed.

Fixes: 88078d98d1 ("net: pskb_trim_rcsum() and CHECKSUM_COMPLETE are friends")
Cc: Eric Dumazet <edumazet@google.com>
Reviewed-by: Tariq Toukan <tariqt@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
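
For illustration only (not part of the patch): a minimal user-space sketch of the
ones'-complement arithmetic the fix relies on. The helpers csum_range() and fold32()
are hypothetical stand-ins that imitate the byte-parity handling of the kernel's
csum_partial()/csum_block_add(). It shows that a hardware checksum which stops at the
end of the IP datagram cannot stand in for CHECKSUM_COMPLETE over the full padded
frame, and that adding the padding bytes' checksum at the right offset, as
tail_padding_csum() does in the diff below, makes the two agree.

/* Hedged sketch, not from the patch: user-space demo of the checksum fixup
 * idea. csum_range() and fold32() are hypothetical helpers; build with any
 * C99 compiler and run, no kernel headers needed.
 */
#include <stdint.h>
#include <stdio.h>

/* Add bytes pkt[offset..offset+len) into a 32-bit ones'-complement
 * accumulator. Even positions land in the high byte of a 16-bit word and odd
 * positions in the low byte, so partial sums taken at any offset combine
 * correctly, which is why the patch passes 'offset' to csum_block_add().
 */
static uint32_t csum_range(const uint8_t *pkt, size_t offset, size_t len,
			   uint32_t sum)
{
	for (size_t i = 0; i < len; i++) {
		uint32_t v = pkt[offset + i];

		sum += ((offset + i) & 1) ? v : v << 8;
	}
	return sum;
}

/* Fold the accumulator down to the final 16-bit ones'-complement checksum. */
static uint16_t fold32(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

int main(void)
{
	uint8_t frame[60];		/* short frame padded up to 60 octets */
	size_t pkt_len = 46;		/* end of the IP datagram             */
	size_t skb_len = sizeof(frame);	/* skb->len: includes the padding     */

	for (size_t i = 0; i < skb_len; i++)
		frame[i] = (uint8_t)(i * 7 + 3);	/* non-zero padding */

	/* What the HW reports: the padding octets are not covered. */
	uint32_t hw_csum = csum_range(frame, 0, pkt_len, 0);
	/* What CHECKSUM_COMPLETE must mean to the stack: all skb_len octets. */
	uint32_t full_csum = csum_range(frame, 0, skb_len, 0);
	/* The fixup: add the padding bytes' checksum at offset pkt_len. */
	uint32_t fixed_csum = csum_range(frame, pkt_len, skb_len - pkt_len,
					 hw_csum);

	printf("hw only: %04x  full frame: %04x  fixed up: %04x\n",
	       (unsigned)fold32(hw_csum), (unsigned)fold32(full_csum),
	       (unsigned)fold32(fixed_csum));
	/* fixed up == full frame; hw only differs since padding is non-zero */
	return 0;
}

The same offset-parity argument is why the fast path in the diff can checksum just the
tail (at most MAX_PADDING bytes copied onto the stack) and fold it in with
csum_block_add(), instead of re-checksumming the whole skb.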


@@ -712,17 +712,6 @@ static inline void mlx5e_enable_ecn(struct mlx5e_rq *rq, struct sk_buff *skb)
	rq->stats->ecn_mark += !!rc;
}

static u32 mlx5e_get_fcs(const struct sk_buff *skb)
{
	const void *fcs_bytes;
	u32 _fcs_bytes;

	fcs_bytes = skb_header_pointer(skb, skb->len - ETH_FCS_LEN,
				       ETH_FCS_LEN, &_fcs_bytes);

	return __get_unaligned_cpu32(fcs_bytes);
}

static u8 get_ip_proto(struct sk_buff *skb, int network_depth, __be16 proto)
{
	void *ip_p = skb->data + network_depth;
@@ -733,6 +722,68 @@ static u8 get_ip_proto(struct sk_buff *skb, int network_depth, __be16 proto)
#define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN)

#define MAX_PADDING 8

static void
tail_padding_csum_slow(struct sk_buff *skb, int offset, int len,
		       struct mlx5e_rq_stats *stats)
{
	stats->csum_complete_tail_slow++;
	skb->csum = csum_block_add(skb->csum,
				   skb_checksum(skb, offset, len, 0),
				   offset);
}

static void
tail_padding_csum(struct sk_buff *skb, int offset,
		  struct mlx5e_rq_stats *stats)
{
	u8 tail_padding[MAX_PADDING];
	int len = skb->len - offset;
	void *tail;

	if (unlikely(len > MAX_PADDING)) {
		tail_padding_csum_slow(skb, offset, len, stats);
		return;
	}

	tail = skb_header_pointer(skb, offset, len, tail_padding);
	if (unlikely(!tail)) {
		tail_padding_csum_slow(skb, offset, len, stats);
		return;
	}

	stats->csum_complete_tail++;
	skb->csum = csum_block_add(skb->csum, csum_partial(tail, len, 0), offset);
}

static void
mlx5e_skb_padding_csum(struct sk_buff *skb, int network_depth, __be16 proto,
		       struct mlx5e_rq_stats *stats)
{
	struct ipv6hdr *ip6;
	struct iphdr *ip4;
	int pkt_len;

	switch (proto) {
	case htons(ETH_P_IP):
		ip4 = (struct iphdr *)(skb->data + network_depth);
		pkt_len = network_depth + ntohs(ip4->tot_len);
		break;
	case htons(ETH_P_IPV6):
		ip6 = (struct ipv6hdr *)(skb->data + network_depth);
		pkt_len = network_depth + sizeof(*ip6) + ntohs(ip6->payload_len);
		break;
	default:
		return;
	}

	if (likely(pkt_len >= skb->len))
		return;

	tail_padding_csum(skb, pkt_len, stats);
}

static inline void mlx5e_handle_csum(struct net_device *netdev,
				     struct mlx5_cqe64 *cqe,
				     struct mlx5e_rq *rq,
@@ -781,10 +832,8 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
			skb->csum = csum_partial(skb->data + ETH_HLEN,
						 network_depth - ETH_HLEN,
						 skb->csum);

		if (unlikely(netdev->features & NETIF_F_RXFCS))
			skb->csum = csum_block_add(skb->csum,
						   (__force __wsum)mlx5e_get_fcs(skb),
						   skb->len - ETH_FCS_LEN);

		mlx5e_skb_padding_csum(skb, network_depth, proto, stats);
		stats->csum_complete++;
		return;
	}
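
Again for illustration (not part of the patch): the stack side of the story. Once the
driver has folded the padding into skb->csum, the trim that the cited commit performs,
roughly what pskb_trim_rcsum() does by subtracting the checksum of the trimmed bytes,
lands back on the checksum of the IP datagram alone. csum_range() and fold32() repeat
the hypothetical helpers from the sketch above; wsum_add()/wsum_sub() imitate the
end-around-carry behaviour of the kernel's csum_add()/csum_sub().

/* Hedged follow-up sketch, not from the patch: round trip with the stack. */
#include <stdint.h>
#include <stdio.h>

static uint32_t csum_range(const uint8_t *pkt, size_t offset, size_t len,
			   uint32_t sum)
{
	for (size_t i = 0; i < len; i++) {
		uint32_t v = pkt[offset + i];

		sum += ((offset + i) & 1) ? v : v << 8;
	}
	return sum;
}

static uint16_t fold32(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

static uint32_t wsum_add(uint32_t a, uint32_t b)
{
	uint32_t s = a + b;

	return s + (s < a);		/* end-around carry */
}

static uint32_t wsum_sub(uint32_t a, uint32_t b)
{
	return wsum_add(a, ~b);		/* ones'-complement subtraction */
}

int main(void)
{
	uint8_t frame[60];
	size_t pkt_len = 46, skb_len = sizeof(frame);

	for (size_t i = 0; i < skb_len; i++)
		frame[i] = (uint8_t)(i * 7 + 3);

	uint32_t pad_csum = csum_range(frame, pkt_len, skb_len - pkt_len, 0);
	/* Driver side: HW csum over the datagram, plus the padding fixup. */
	uint32_t fixed = wsum_add(csum_range(frame, 0, pkt_len, 0), pad_csum);
	/* Stack side: trim the padding and subtract its checksum again. */
	uint32_t trimmed = wsum_sub(fixed, pad_csum);

	printf("datagram: %04x  after trim: %04x\n",
	       (unsigned)fold32(csum_range(frame, 0, pkt_len, 0)),
	       (unsigned)fold32(trimmed));
	return 0;
}

This is also why the old NETIF_F_RXFCS special case becomes redundant: kept FCS octets
sit past the end of the IP datagram just like padding, so the general fixup covers them.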


@@ -59,6 +59,8 @@ static const struct counter_desc sw_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail_slow) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_redirect) },
@@ -151,6 +153,8 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
		s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets;
		s->rx_csum_none += rq_stats->csum_none;
		s->rx_csum_complete += rq_stats->csum_complete;
		s->rx_csum_complete_tail += rq_stats->csum_complete_tail;
		s->rx_csum_complete_tail_slow += rq_stats->csum_complete_tail_slow;
		s->rx_csum_unnecessary += rq_stats->csum_unnecessary;
		s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
		s->rx_xdp_drop += rq_stats->xdp_drop;
@@ -1190,6 +1194,8 @@ static const struct counter_desc rq_stats_desc[] = {
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail_slow) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },


@@ -71,6 +71,8 @@ struct mlx5e_sw_stats {
	u64 rx_csum_unnecessary;
	u64 rx_csum_none;
	u64 rx_csum_complete;
	u64 rx_csum_complete_tail;
	u64 rx_csum_complete_tail_slow;
	u64 rx_csum_unnecessary_inner;
	u64 rx_xdp_drop;
	u64 rx_xdp_redirect;
@@ -181,6 +183,8 @@ struct mlx5e_rq_stats {
	u64 packets;
	u64 bytes;
	u64 csum_complete;
	u64 csum_complete_tail;
	u64 csum_complete_tail_slow;
	u64 csum_unnecessary;
	u64 csum_unnecessary_inner;
	u64 csum_none;