Merge branch '40GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-queue
Jeff Kirsher says:

====================
Intel Wired LAN Driver Updates 2018-01-03

This series contains fixes for i40e and i40evf.

Amritha removes UDP support for big-buffer cloud filters, since it is not
supported and having UDP enabled is a bug.

Alex fixes a bug in __i40e_chk_linearize() which did not take into account
large (16K or larger) fragments that are split over two descriptors, which
could result in a transmit hang.

Jake fixes an issue where a device's own MAC address could be removed from
the unicast address list, so force a check on every address sync to ensure
the removal does not happen.

Jiri Pirko fixes the return value when a filter configuration is not
supported: return "not supported" rather than "invalid", so that the core
can react correctly.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 820d1d5eba
@@ -1573,11 +1573,18 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
 	else
 		netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);
 
+	/* Copy the address first, so that we avoid a possible race with
+	 * .set_rx_mode(). If we copy after changing the address in the filter
+	 * list, we might open ourselves to a narrow race window where
+	 * .set_rx_mode could delete our dev_addr filter and prevent traffic
+	 * from passing.
+	 */
+	ether_addr_copy(netdev->dev_addr, addr->sa_data);
+
 	spin_lock_bh(&vsi->mac_filter_hash_lock);
 	i40e_del_mac_filter(vsi, netdev->dev_addr);
 	i40e_add_mac_filter(vsi, addr->sa_data);
 	spin_unlock_bh(&vsi->mac_filter_hash_lock);
-	ether_addr_copy(netdev->dev_addr, addr->sa_data);
 	if (vsi->type == I40E_VSI_MAIN) {
 		i40e_status ret;
 
@@ -1923,6 +1930,14 @@ static int i40e_addr_unsync(struct net_device *netdev, const u8 *addr)
 	struct i40e_netdev_priv *np = netdev_priv(netdev);
 	struct i40e_vsi *vsi = np->vsi;
 
+	/* Under some circumstances, we might receive a request to delete
+	 * our own device address from our uc list. Because we store the
+	 * device address in the VSI's MAC/VLAN filter list, we need to ignore
+	 * such requests and not delete our device address from this list.
+	 */
+	if (ether_addr_equal(addr, netdev->dev_addr))
+		return 0;
+
 	i40e_del_mac_filter(vsi, addr);
 
 	return 0;
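
For context, i40e_set_rx_mode() registers this callback with __dev_uc_sync()/__dev_mc_sync(), so it runs whenever the stack drops an address from its own unicast or multicast list; returning 0 without touching the filter is enough to keep the VSI's dev_addr filter alive. Below is a minimal userspace sketch of the guard, using a hypothetical toy_del_filter() in place of the driver's MAC/VLAN hash:

/* Sketch only: models the "never unsync our own dev_addr" rule above. */
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

static const unsigned char dev_addr[ETH_ALEN] = { 0x02, 0x00, 0x00, 0xaa, 0xbb, 0xcc };

/* Hypothetical stand-in for i40e_del_mac_filter(). */
static void toy_del_filter(const unsigned char *addr)
{
	printf("filter removed for %02x:%02x:%02x:%02x:%02x:%02x\n",
	       addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
}

/* Models i40e_addr_unsync(): ignore requests to drop the device's own MAC. */
static int toy_addr_unsync(const unsigned char *addr)
{
	if (!memcmp(addr, dev_addr, ETH_ALEN))
		return 0;		/* our own address: keep the filter */

	toy_del_filter(addr);
	return 0;
}

int main(void)
{
	const unsigned char other[ETH_ALEN] = { 0x02, 0x00, 0x00, 0x11, 0x22, 0x33 };

	toy_addr_unsync(dev_addr);	/* silently ignored, traffic keeps flowing */
	toy_addr_unsync(other);		/* removed as before */
	return 0;
}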
@@ -6038,8 +6053,8 @@ static int i40e_validate_and_set_switch_mode(struct i40e_vsi *vsi)
 	/* Set Bit 7 to be valid */
 	mode = I40E_AQ_SET_SWITCH_BIT7_VALID;
 
-	/* Set L4type to both TCP and UDP support */
-	mode |= I40E_AQ_SET_SWITCH_L4_TYPE_BOTH;
+	/* Set L4type for TCP support */
+	mode |= I40E_AQ_SET_SWITCH_L4_TYPE_TCP;
 
 	/* Set cloud filter mode */
 	mode |= I40E_AQ_SET_SWITCH_MODE_NON_TUNNEL;
@@ -6969,18 +6984,18 @@ static int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
 	     is_valid_ether_addr(filter->src_mac)) ||
 	    (is_multicast_ether_addr(filter->dst_mac) &&
 	     is_multicast_ether_addr(filter->src_mac)))
-		return -EINVAL;
+		return -EOPNOTSUPP;
 
-	/* Make sure port is specified, otherwise bail out, for channel
-	 * specific cloud filter needs 'L4 port' to be non-zero
+	/* Big buffer cloud filter needs 'L4 port' to be non-zero. Also, UDP
+	 * ports are not supported via big buffer now.
 	 */
-	if (!filter->dst_port)
-		return -EINVAL;
+	if (!filter->dst_port || filter->ip_proto == IPPROTO_UDP)
+		return -EOPNOTSUPP;
 
 	/* adding filter using src_port/src_ip is not supported at this stage */
 	if (filter->src_port || filter->src_ipv4 ||
 	    !ipv6_addr_any(&filter->ip.v6.src_ip6))
-		return -EINVAL;
+		return -EOPNOTSUPP;
 
 	/* copy element needed to add cloud filter from filter */
 	i40e_set_cld_element(filter, &cld_filter.element);
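
Taken together, the checks above limit the big-buffer path to TCP flows that carry a destination L4 port and no source-side match. A minimal sketch folding those conditions into one predicate; struct toy_cloud_filter and big_buf_filter_supported() are illustrative stand-ins for the driver's types, and the MAC sanity checks are omitted:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <netinet/in.h>	/* IPPROTO_TCP, IPPROTO_UDP */

/* Stripped-down stand-in for the fields the checks above look at. */
struct toy_cloud_filter {
	uint16_t dst_port;	/* L4 destination port */
	uint16_t src_port;
	uint32_t src_ipv4;
	bool	 has_src_ipv6;
	uint8_t	 ip_proto;	/* IPPROTO_TCP / IPPROTO_UDP */
};

/* True when a filter qualifies for the big-buffer path after this series:
 * TCP only, destination port required, source matching unsupported.
 */
static bool big_buf_filter_supported(const struct toy_cloud_filter *f)
{
	if (!f->dst_port || f->ip_proto == IPPROTO_UDP)
		return false;
	if (f->src_port || f->src_ipv4 || f->has_src_ipv6)
		return false;
	return true;
}

int main(void)
{
	struct toy_cloud_filter udp = { .dst_port = 80, .ip_proto = IPPROTO_UDP };
	struct toy_cloud_filter tcp = { .dst_port = 80, .ip_proto = IPPROTO_TCP };

	printf("udp/80: %s\n", big_buf_filter_supported(&udp) ? "offload" : "-EOPNOTSUPP");
	printf("tcp/80: %s\n", big_buf_filter_supported(&tcp) ? "offload" : "-EOPNOTSUPP");
	return 0;
}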
@@ -6991,7 +7006,7 @@ static int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
 	    is_multicast_ether_addr(filter->src_mac)) {
 		/* MAC + IP : unsupported mode */
 		if (filter->dst_ipv4)
-			return -EINVAL;
+			return -EOPNOTSUPP;
 
 		/* since we validated that L4 port must be valid before
 		 * we get here, start with respective "flags" value
@@ -7356,7 +7371,7 @@ static int i40e_configure_clsflower(struct i40e_vsi *vsi,
 
 	if (tc < 0) {
 		dev_err(&vsi->back->pdev->dev, "Invalid traffic class\n");
-		return -EINVAL;
+		return -EOPNOTSUPP;
 	}
 
 	if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) ||
@@ -3047,10 +3047,30 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
 	/* Walk through fragments adding latest fragment, testing it, and
 	 * then removing stale fragments from the sum.
 	 */
-	stale = &skb_shinfo(skb)->frags[0];
-	for (;;) {
+	for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
+		int stale_size = skb_frag_size(stale);
+
 		sum += skb_frag_size(frag++);
 
+		/* The stale fragment may present us with a smaller
+		 * descriptor than the actual fragment size. To account
+		 * for that we need to remove all the data on the front and
+		 * figure out what the remainder would be in the last
+		 * descriptor associated with the fragment.
+		 */
+		if (stale_size > I40E_MAX_DATA_PER_TXD) {
+			int align_pad = -(stale->page_offset) &
+					(I40E_MAX_READ_REQ_SIZE - 1);
+
+			sum -= align_pad;
+			stale_size -= align_pad;
+
+			do {
+				sum -= I40E_MAX_DATA_PER_TXD_ALIGNED;
+				stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED;
+			} while (stale_size > I40E_MAX_DATA_PER_TXD);
+		}
+
 		/* if sum is negative we failed to make sufficient progress */
 		if (sum < 0)
 			return true;
@@ -3058,7 +3078,7 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
 		if (!nr_frags--)
 			break;
 
-		sum -= skb_frag_size(stale++);
+		sum -= stale_size;
 	}
 
 	return false;
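
The loop above keeps a running sum over a sliding window of fragments so the driver can decide whether the skb must be linearized before it exceeds the hardware's per-packet descriptor limit. The fix subtracts only the trailing descriptor's worth of a large stale fragment, because its leading aligned chunks were already consumed by earlier descriptors. Below is a standalone sketch of that arithmetic; the constant values are assumptions taken from my reading of i40e_txrx.h, and trailing_desc_bytes() is an illustrative helper, not driver code:

#include <stdio.h>

/* Assumed values (see i40e_txrx.h); treat as illustrative, not authoritative. */
#define I40E_MAX_READ_REQ_SIZE		4096
#define I40E_MAX_DATA_PER_TXD		(16 * 1024 - 1)
#define I40E_MAX_DATA_PER_TXD_ALIGNED \
	(I40E_MAX_DATA_PER_TXD & ~(I40E_MAX_READ_REQ_SIZE - 1))

/* Mirrors the accounting in the hunk above: for a fragment of frag_size
 * bytes starting at page_offset, return the size of its trailing
 * descriptor, i.e. the only portion the fixed loop still subtracts from
 * the running sum once the fragment goes stale.
 */
static int trailing_desc_bytes(int frag_size, int page_offset)
{
	int stale_size = frag_size;

	if (stale_size > I40E_MAX_DATA_PER_TXD) {
		int align_pad = -page_offset & (I40E_MAX_READ_REQ_SIZE - 1);

		stale_size -= align_pad;
		do {
			stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED;
		} while (stale_size > I40E_MAX_DATA_PER_TXD);
	}

	return stale_size;
}

int main(void)
{
	/* 16K does not fit in one descriptor (max 16K - 1), so it is sent as
	 * a 12K aligned chunk plus a 4K trailing descriptor; only the 4K
	 * part should leave the sum when the fragment goes stale.
	 */
	printf("16K frag -> trailing descriptor of %d bytes\n",
	       trailing_desc_bytes(16 * 1024, 0));
	/* 32K spans three descriptors (12K + 12K + 8K). */
	printf("32K frag -> trailing descriptor of %d bytes\n",
	       trailing_desc_bytes(32 * 1024, 0));
	return 0;
}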
@@ -2012,10 +2012,30 @@ bool __i40evf_chk_linearize(struct sk_buff *skb)
 	/* Walk through fragments adding latest fragment, testing it, and
 	 * then removing stale fragments from the sum.
 	 */
-	stale = &skb_shinfo(skb)->frags[0];
-	for (;;) {
+	for (stale = &skb_shinfo(skb)->frags[0];; stale++) {
+		int stale_size = skb_frag_size(stale);
+
 		sum += skb_frag_size(frag++);
 
+		/* The stale fragment may present us with a smaller
+		 * descriptor than the actual fragment size. To account
+		 * for that we need to remove all the data on the front and
+		 * figure out what the remainder would be in the last
+		 * descriptor associated with the fragment.
+		 */
+		if (stale_size > I40E_MAX_DATA_PER_TXD) {
+			int align_pad = -(stale->page_offset) &
+					(I40E_MAX_READ_REQ_SIZE - 1);
+
+			sum -= align_pad;
+			stale_size -= align_pad;
+
+			do {
+				sum -= I40E_MAX_DATA_PER_TXD_ALIGNED;
+				stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED;
+			} while (stale_size > I40E_MAX_DATA_PER_TXD);
+		}
+
 		/* if sum is negative we failed to make sufficient progress */
 		if (sum < 0)
 			return true;
@@ -2023,7 +2043,7 @@ bool __i40evf_chk_linearize(struct sk_buff *skb)
 		if (!nr_frags--)
 			break;
 
-		sum -= skb_frag_size(stale++);
+		sum -= stale_size;
 	}
 
 	return false;