commit 2019fc96af
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from David Miller:

 1) Fix interrupt name truncation in mv88e6xxx dsa driver, from Andrew Lunn.

 2) Process generic XDP even if SKB is cloned, from Toke Høiland-Jørgensen.

 3) Fix leak of kernel memory to userspace in smc, from Eric Dumazet.

 4) Add some missing netlink attribute validation to matchall and
    flower, from Davide Caratti.

 5) Send icmp responses properly when NAT has been applied to the frame
    before we get to the tunnel emitting the icmp, from Jason Donenfeld.

 6) Make sure there is enough SKB headroom when adding dsa tags for qca
    and ar9331. From Per Forlin.

* git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (62 commits)
  netdevice.h: fix all kernel-doc and Sphinx warnings
  net: dsa: tag_ar9331: Make sure there is headroom for tag
  net: dsa: tag_qca: Make sure there is headroom for tag
  net, ip6_tunnel: enhance tunnel locate with link check
  net/smc: no peer ID in CLC decline for SMCD
  net/smc: transfer fasync_list in case of fallback
  net: hns3: fix a copying IPv6 address error in hclge_fd_get_flow_tuples()
  net: hns3: fix VF bandwidth does not take effect in some case
  net: hns3: add management table after IMP reset
  mac80211: fix wrong 160/80+80 MHz setting
  cfg80211: add missing policy for NL80211_ATTR_STATUS_CODE
  xfrm: interface: use icmp_ndo_send helper
  wireguard: device: use icmp_ndo_send helper
  sunvnet: use icmp_ndo_send helper
  gtp: use icmp_ndo_send helper
  icmp: introduce helper for nat'd source address in network device context
  net/sched: flower: add missing validation of TCA_FLOWER_FLAGS
  net/sched: matchall: add missing validation of TCA_MATCHALL_FLAGS
  net/flow_dissector: remove unexist field description
  page_pool: refill page when alloc.count of pool is zero
  ...
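Item 6 and the tag_qca/tag_ar9331 entries in the shortlog apply the same pattern: guarantee SKB headroom before pushing a DSA tag in front of the Ethernet header. A minimal sketch of that pattern follows; the tag length constant and the function name are illustrative, not the drivers' real identifiers.

  #include <linux/skbuff.h>

  #define MY_TAG_LEN 2	/* illustrative tag size, not a real driver constant */

  static struct sk_buff *my_tag_xmit(struct sk_buff *skb)
  {
  	/* skb_cow_head() reallocates the header portion if fewer than
  	 * MY_TAG_LEN bytes of headroom are available, so the skb_push()
  	 * below can never write before the start of the buffer.
  	 */
  	if (skb_cow_head(skb, MY_TAG_LEN) < 0)
  		return NULL;

  	skb_push(skb, MY_TAG_LEN);
  	return skb;
  }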
@@ -236,7 +236,7 @@ struct mv88e6xxx_port {
 	bool mirror_ingress;
 	bool mirror_egress;
 	unsigned int serdes_irq;
-	char serdes_irq_name[32];
+	char serdes_irq_name[64];
 };
 
 struct mv88e6xxx_chip {
@@ -293,16 +293,16 @@ struct mv88e6xxx_chip {
 	struct mv88e6xxx_irq g1_irq;
 	struct mv88e6xxx_irq g2_irq;
 	int irq;
-	char irq_name[32];
+	char irq_name[64];
 	int device_irq;
-	char device_irq_name[32];
+	char device_irq_name[64];
 	int watchdog_irq;
-	char watchdog_irq_name[32];
+	char watchdog_irq_name[64];
 
 	int atu_prob_irq;
-	char atu_prob_irq_name[32];
+	char atu_prob_irq_name[64];
 	int vtu_prob_irq;
-	char vtu_prob_irq_name[32];
+	char vtu_prob_irq_name[64];
 	struct kthread_worker *kworker;
 	struct kthread_delayed_work irq_poll_work;
 
@@ -200,6 +200,11 @@ static void comp_ctxt_release(struct ena_com_admin_queue *queue,
 static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
 					  u16 command_id, bool capture)
 {
+	if (unlikely(!queue->comp_ctx)) {
+		pr_err("Completion context is NULL\n");
+		return NULL;
+	}
+
 	if (unlikely(command_id >= queue->q_depth)) {
 		pr_err("command id is larger than the queue size. cmd_id: %u queue size %d\n",
 		       command_id, queue->q_depth);
@@ -1041,9 +1046,41 @@ static int ena_com_get_feature(struct ena_com_dev *ena_dev,
 				      feature_ver);
 }
 
+int ena_com_get_current_hash_function(struct ena_com_dev *ena_dev)
+{
+	return ena_dev->rss.hash_func;
+}
+
+static void ena_com_hash_key_fill_default_key(struct ena_com_dev *ena_dev)
+{
+	struct ena_admin_feature_rss_flow_hash_control *hash_key =
+		(ena_dev->rss).hash_key;
+
+	netdev_rss_key_fill(&hash_key->key, sizeof(hash_key->key));
+	/* The key is stored in the device in u32 array
+	 * as well as the API requires the key to be passed in this
+	 * format. Thus the size of our array should be divided by 4
+	 */
+	hash_key->keys_num = sizeof(hash_key->key) / sizeof(u32);
+}
+
 static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
 {
 	struct ena_rss *rss = &ena_dev->rss;
+	struct ena_admin_feature_rss_flow_hash_control *hash_key;
+	struct ena_admin_get_feat_resp get_resp;
+	int rc;
+
+	hash_key = (ena_dev->rss).hash_key;
+
+	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
+				    ENA_ADMIN_RSS_HASH_FUNCTION,
+				    ena_dev->rss.hash_key_dma_addr,
+				    sizeof(ena_dev->rss.hash_key), 0);
+	if (unlikely(rc)) {
+		hash_key = NULL;
+		return -EOPNOTSUPP;
+	}
 
 	rss->hash_key =
 		dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
@@ -1254,30 +1291,6 @@ static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
 	return 0;
 }
 
-static int ena_com_ind_tbl_convert_from_device(struct ena_com_dev *ena_dev)
-{
-	u16 dev_idx_to_host_tbl[ENA_TOTAL_NUM_QUEUES] = { (u16)-1 };
-	struct ena_rss *rss = &ena_dev->rss;
-	u8 idx;
-	u16 i;
-
-	for (i = 0; i < ENA_TOTAL_NUM_QUEUES; i++)
-		dev_idx_to_host_tbl[ena_dev->io_sq_queues[i].idx] = i;
-
-	for (i = 0; i < 1 << rss->tbl_log_size; i++) {
-		if (rss->rss_ind_tbl[i].cq_idx > ENA_TOTAL_NUM_QUEUES)
-			return -EINVAL;
-		idx = (u8)rss->rss_ind_tbl[i].cq_idx;
-
-		if (dev_idx_to_host_tbl[idx] > ENA_TOTAL_NUM_QUEUES)
-			return -EINVAL;
-
-		rss->host_rss_ind_tbl[i] = dev_idx_to_host_tbl[idx];
-	}
-
-	return 0;
-}
-
 static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
 						 u16 intr_delay_resolution)
 {
@@ -2297,15 +2310,16 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
 
 	switch (func) {
 	case ENA_ADMIN_TOEPLITZ:
-		if (key_len > sizeof(hash_key->key)) {
-			pr_err("key len (%hu) is bigger than the max supported (%zu)\n",
-			       key_len, sizeof(hash_key->key));
-			return -EINVAL;
+		if (key) {
+			if (key_len != sizeof(hash_key->key)) {
+				pr_err("key len (%hu) doesn't equal the supported size (%zu)\n",
+				       key_len, sizeof(hash_key->key));
+				return -EINVAL;
+			}
+			memcpy(hash_key->key, key, key_len);
+			rss->hash_init_val = init_val;
+			hash_key->keys_num = key_len >> 2;
 		}
-
-		memcpy(hash_key->key, key, key_len);
-		rss->hash_init_val = init_val;
-		hash_key->keys_num = key_len >> 2;
 		break;
 	case ENA_ADMIN_CRC32:
 		rss->hash_init_val = init_val;
@@ -2342,7 +2356,11 @@ int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
 	if (unlikely(rc))
 		return rc;
 
-	rss->hash_func = get_resp.u.flow_hash_func.selected_func;
+	/* ffs() returns 1 in case the lsb is set */
+	rss->hash_func = ffs(get_resp.u.flow_hash_func.selected_func);
+	if (rss->hash_func)
+		rss->hash_func--;
+
 	if (func)
 		*func = rss->hash_func;
 
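For context on the hunk above: the device reports the selected hash function as a bitmask with a single bit set, while the driver stores the ena_admin_hash_functions enum value, so ffs() (1-based find-first-set) converts between the two. A small illustrative helper, not part of the driver:

  #include <linux/bitops.h>

  /* Illustrative only: map a one-hot function mask to its enum index.
   * ffs() returns the 1-based position of the lowest set bit, or 0 when
   * no bit is set, hence the decrement guarded by the non-zero check.
   */
  static u32 hash_mask_to_index(u32 selected_func_mask)
  {
  	u32 func = ffs(selected_func_mask);

  	return func ? func - 1 : 0;
  }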
@@ -2606,10 +2624,6 @@ int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
 	if (!ind_tbl)
 		return 0;
 
-	rc = ena_com_ind_tbl_convert_from_device(ena_dev);
-	if (unlikely(rc))
-		return rc;
-
 	for (i = 0; i < (1 << rss->tbl_log_size); i++)
 		ind_tbl[i] = rss->host_rss_ind_tbl[i];
 
@@ -2626,9 +2640,15 @@ int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
 	if (unlikely(rc))
 		goto err_indr_tbl;
 
+	/* The following function might return unsupported in case the
+	 * device doesn't support setting the key / hash function. We can safely
+	 * ignore this error and have indirection table support only.
+	 */
 	rc = ena_com_hash_key_allocate(ena_dev);
-	if (unlikely(rc))
+	if (unlikely(rc) && rc != -EOPNOTSUPP)
 		goto err_hash_key;
+	else if (rc != -EOPNOTSUPP)
+		ena_com_hash_key_fill_default_key(ena_dev);
 
 	rc = ena_com_hash_ctrl_init(ena_dev);
 	if (unlikely(rc))
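The rss_init hunk above treats -EOPNOTSUPP from the hash-key allocation as non-fatal, so devices without key support still get indirection-table-only RSS. A sketch of that probe-and-fall-back shape, with stand-in functions rather than the real ena_com calls:

  #include <linux/errno.h>

  static int alloc_hash_key(void)		/* stand-in; may return -EOPNOTSUPP */
  {
  	return -EOPNOTSUPP;
  }

  static void fill_default_hash_key(void)	/* stand-in for the default-key fill */
  {
  }

  static int rss_init_sketch(void)
  {
  	int rc = alloc_hash_key();

  	if (rc && rc != -EOPNOTSUPP)
  		return rc;			/* real failure: propagate */
  	if (rc != -EOPNOTSUPP)
  		fill_default_hash_key();	/* key supported: seed a default */

  	return 0;	/* unsupported key is fine; table-only RSS */
  }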
@@ -44,6 +44,7 @@
 #include <linux/spinlock.h>
 #include <linux/types.h>
 #include <linux/wait.h>
+#include <linux/netdevice.h>
 
 #include "ena_common_defs.h"
 #include "ena_admin_defs.h"
@@ -655,6 +656,14 @@ int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 log_size);
  */
 void ena_com_rss_destroy(struct ena_com_dev *ena_dev);
 
+/* ena_com_get_current_hash_function - Get RSS hash function
+ * @ena_dev: ENA communication layer struct
+ *
+ * Return the current hash function.
+ * @return: 0 or one of the ena_admin_hash_functions values.
+ */
+int ena_com_get_current_hash_function(struct ena_com_dev *ena_dev);
+
 /* ena_com_fill_hash_function - Fill RSS hash function
  * @ena_dev: ENA communication layer struct
  * @func: The hash function (Toeplitz or crc)
@@ -636,6 +636,28 @@ static u32 ena_get_rxfh_key_size(struct net_device *netdev)
 	return ENA_HASH_KEY_SIZE;
 }
 
+static int ena_indirection_table_get(struct ena_adapter *adapter, u32 *indir)
+{
+	struct ena_com_dev *ena_dev = adapter->ena_dev;
+	int i, rc;
+
+	if (!indir)
+		return 0;
+
+	rc = ena_com_indirect_table_get(ena_dev, indir);
+	if (rc)
+		return rc;
+
+	/* Our internal representation of the indices is: even indices
+	 * for Tx and uneven indices for Rx. We need to convert the Rx
+	 * indices to be consecutive
+	 */
+	for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++)
+		indir[i] = ENA_IO_RXQ_IDX_TO_COMBINED_IDX(indir[i]);
+
+	return rc;
+}
+
 static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
 			u8 *hfunc)
 {
@@ -644,11 +666,25 @@ static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
 	u8 func;
 	int rc;
 
-	rc = ena_com_indirect_table_get(adapter->ena_dev, indir);
+	rc = ena_indirection_table_get(adapter, indir);
 	if (rc)
 		return rc;
 
+	/* We call this function in order to check if the device
+	 * supports getting/setting the hash function.
+	 */
 	rc = ena_com_get_hash_function(adapter->ena_dev, &ena_func, key);
-	if (rc)
-		return rc;
+	if (rc) {
+		if (rc == -EOPNOTSUPP) {
+			key = NULL;
+			hfunc = NULL;
+			rc = 0;
+		}
+
+		return rc;
+	}
 
@@ -657,7 +693,7 @@ static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
 		func = ETH_RSS_HASH_TOP;
 		break;
 	case ENA_ADMIN_CRC32:
-		func = ETH_RSS_HASH_XOR;
+		func = ETH_RSS_HASH_CRC32;
 		break;
 	default:
 		netif_err(adapter, drv, netdev,
@@ -700,10 +736,13 @@ static int ena_set_rxfh(struct net_device *netdev, const u32 *indir,
 	}
 
 	switch (hfunc) {
+	case ETH_RSS_HASH_NO_CHANGE:
+		func = ena_com_get_current_hash_function(ena_dev);
+		break;
 	case ETH_RSS_HASH_TOP:
 		func = ENA_ADMIN_TOEPLITZ;
 		break;
-	case ETH_RSS_HASH_XOR:
+	case ETH_RSS_HASH_CRC32:
 		func = ENA_ADMIN_CRC32;
 		break;
 	default:
@@ -814,6 +853,7 @@ static const struct ethtool_ops ena_ethtool_ops = {
 	.set_channels		= ena_set_channels,
 	.get_tunable		= ena_get_tunable,
 	.set_tunable		= ena_set_tunable,
+	.get_ts_info		= ethtool_op_get_ts_info,
 };
 
 void ena_set_ethtool_ops(struct net_device *netdev)
@@ -3706,8 +3706,8 @@ static void check_for_missing_keep_alive(struct ena_adapter *adapter)
 	if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT)
 		return;
 
-	keep_alive_expired = round_jiffies(adapter->last_keep_alive_jiffies +
-					   adapter->keep_alive_timeout);
+	keep_alive_expired = adapter->last_keep_alive_jiffies +
+			     adapter->keep_alive_timeout;
 	if (unlikely(time_is_before_jiffies(keep_alive_expired))) {
 		netif_err(adapter, drv, adapter->netdev,
 			  "Keep alive watchdog timeout.\n");
@@ -3809,7 +3809,7 @@ static void ena_timer_service(struct timer_list *t)
 	}
 
 	/* Reset the timer */
-	mod_timer(&adapter->timer_service, jiffies + HZ);
+	mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
 }
 
 static int ena_calc_max_io_queue_num(struct pci_dev *pdev,
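The two hunks above move round_jiffies() from the deadline computation to the timer re-arm. round_jiffies() exists to batch wakeups, so rounding the keep-alive expiry could shift the deadline by up to a second and trigger spurious watchdog timeouts; rounding only the periodic re-arm is harmless. A sketch of the corrected shape (the helper name and message are illustrative):

  #include <linux/jiffies.h>
  #include <linux/timer.h>

  static void keep_alive_check_sketch(struct timer_list *t,
  				    unsigned long last_alive,
  				    unsigned long timeout)
  {
  	/* the deadline itself is exact: no rounding here */
  	unsigned long expires = last_alive + timeout;

  	if (time_is_before_jiffies(expires))
  		pr_err("keep alive watchdog timeout\n");

  	/* only the re-arm is rounded, so it can batch with other timers */
  	mod_timer(t, round_jiffies(jiffies + HZ));
  }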
@@ -130,6 +130,8 @@
 
 #define ENA_IO_TXQ_IDX(q)			(2 * (q))
 #define ENA_IO_RXQ_IDX(q)			(2 * (q) + 1)
+#define ENA_IO_TXQ_IDX_TO_COMBINED_IDX(q)	((q) / 2)
+#define ENA_IO_RXQ_IDX_TO_COMBINED_IDX(q)	(((q) - 1) / 2)
 
 #define ENA_MGMNT_IRQ_IDX			0
 #define ENA_IO_IRQ_FIRST_IDX			1
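The new *_TO_COMBINED_IDX macros invert the driver's interleaved numbering (Tx queues on even indices, Rx on odd). For example, combined queue 3 maps to TXQ 6 and RXQ 7, and 6/2 == (7-1)/2 == 3. A self-contained userspace check of the round-trip:

  #include <assert.h>
  #include <stdio.h>

  /* copies of the macros from the hunk above */
  #define ENA_IO_TXQ_IDX(q)			(2 * (q))
  #define ENA_IO_RXQ_IDX(q)			(2 * (q) + 1)
  #define ENA_IO_TXQ_IDX_TO_COMBINED_IDX(q)	((q) / 2)
  #define ENA_IO_RXQ_IDX_TO_COMBINED_IDX(q)	(((q) - 1) / 2)

  int main(void)
  {
  	for (int q = 0; q < 8; q++) {
  		assert(ENA_IO_TXQ_IDX_TO_COMBINED_IDX(ENA_IO_TXQ_IDX(q)) == q);
  		assert(ENA_IO_RXQ_IDX_TO_COMBINED_IDX(ENA_IO_RXQ_IDX(q)) == q);
  	}
  	printf("queue index round-trips OK\n");
  	return 0;
  }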
@@ -2013,10 +2013,10 @@ static int enic_stop(struct net_device *netdev)
 		napi_disable(&enic->napi[i]);
 
 	netif_carrier_off(netdev);
-	netif_tx_disable(netdev);
 	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
 		for (i = 0; i < enic->wq_count; i++)
 			napi_disable(&enic->napi[enic_cq_wq(enic, i)]);
+	netif_tx_disable(netdev);
 
 	if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
 		enic_dev_del_station_addr(enic);
@@ -6113,6 +6113,9 @@ static int hclge_get_all_rules(struct hnae3_handle *handle,
 static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
 				     struct hclge_fd_rule_tuples *tuples)
 {
+#define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
+#define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
+
 	tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
 	tuples->ip_proto = fkeys->basic.ip_proto;
 	tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
@@ -6121,12 +6124,12 @@ static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
 		tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
 		tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
 	} else {
-		memcpy(tuples->src_ip,
-		       fkeys->addrs.v6addrs.src.in6_u.u6_addr32,
-		       sizeof(tuples->src_ip));
-		memcpy(tuples->dst_ip,
-		       fkeys->addrs.v6addrs.dst.in6_u.u6_addr32,
-		       sizeof(tuples->dst_ip));
+		int i;
+
+		for (i = 0; i < IPV6_SIZE; i++) {
+			tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
+			tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
+		}
 	}
 }
 
@@ -9834,6 +9837,13 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
 		return ret;
 	}
 
+	ret = init_mgr_tbl(hdev);
+	if (ret) {
+		dev_err(&pdev->dev,
+			"failed to reinit manager table, ret = %d\n", ret);
+		return ret;
+	}
+
 	ret = hclge_init_fd_config(hdev);
 	if (ret) {
 		dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
@@ -566,7 +566,7 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
 	 */
 	kinfo->num_tc = vport->vport_id ? 1 :
 			min_t(u16, vport->alloc_tqps, hdev->tm_info.num_tc);
-	vport->qs_offset = (vport->vport_id ? hdev->tm_info.num_tc : 0) +
+	vport->qs_offset = (vport->vport_id ? HNAE3_MAX_TC : 0) +
 			   (vport->vport_id ? (vport->vport_id - 1) : 0);
 
 	max_rss_size = min_t(u16, hdev->rss_size_max,
@@ -2362,7 +2362,7 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg)
 		goto error_param;
 	}
 
-	if (i40e_vc_validate_vqs_bitmaps(vqs)) {
+	if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
 		aq_ret = I40E_ERR_PARAM;
 		goto error_param;
 	}
@@ -2424,7 +2424,7 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg)
 		goto error_param;
 	}
 
-	if (i40e_vc_validate_vqs_bitmaps(vqs)) {
+	if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
 		aq_ret = I40E_ERR_PARAM;
 		goto error_param;
 	}
@@ -1660,6 +1660,7 @@ struct ice_aqc_get_pkg_info_resp {
 	__le32 count;
 	struct ice_aqc_get_pkg_info pkg_info[1];
 };
+
 /**
  * struct ice_aq_desc - Admin Queue (AQ) descriptor
  * @flags: ICE_AQ_FLAG_* flags
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
dev_info(&vsi->back->pdev->dev, "Registered XDP mem model MEM_TYPE_ZERO_COPY on Rx ring %d\n",
|
||||
dev_info(ice_pf_to_dev(vsi->back), "Registered XDP mem model MEM_TYPE_ZERO_COPY on Rx ring %d\n",
|
||||
ring->q_index);
|
||||
} else {
|
||||
ring->zca.free = NULL;
|
||||
@ -405,8 +405,7 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
|
||||
/* Absolute queue number out of 2K needs to be passed */
|
||||
err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
|
||||
if (err) {
|
||||
dev_err(&vsi->back->pdev->dev,
|
||||
"Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
|
||||
dev_err(ice_pf_to_dev(vsi->back), "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
|
||||
pf_q, err);
|
||||
return -EIO;
|
||||
}
|
||||
@ -428,8 +427,7 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
|
||||
ice_alloc_rx_bufs_slow_zc(ring, ICE_DESC_UNUSED(ring)) :
|
||||
ice_alloc_rx_bufs(ring, ICE_DESC_UNUSED(ring));
|
||||
if (err)
|
||||
dev_info(&vsi->back->pdev->dev,
|
||||
"Failed allocate some buffers on %sRx ring %d (pf_q %d)\n",
|
||||
dev_info(ice_pf_to_dev(vsi->back), "Failed allocate some buffers on %sRx ring %d (pf_q %d)\n",
|
||||
ring->xsk_umem ? "UMEM enabled " : "",
|
||||
ring->q_index, pf_q);
|
||||
|
||||
@ -490,8 +488,7 @@ int ice_vsi_ctrl_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx)
|
||||
/* wait for the change to finish */
|
||||
ret = ice_pf_rxq_wait(pf, pf_q, ena);
|
||||
if (ret)
|
||||
dev_err(ice_pf_to_dev(pf),
|
||||
"VSI idx %d Rx ring %d %sable timeout\n",
|
||||
dev_err(ice_pf_to_dev(pf), "VSI idx %d Rx ring %d %sable timeout\n",
|
||||
vsi->idx, pf_q, (ena ? "en" : "dis"));
|
||||
|
||||
return ret;
|
||||
@ -506,20 +503,15 @@ int ice_vsi_ctrl_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx)
|
||||
*/
|
||||
int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
|
||||
{
|
||||
struct ice_pf *pf = vsi->back;
|
||||
int v_idx = 0, num_q_vectors;
|
||||
struct device *dev;
|
||||
int err;
|
||||
struct device *dev = ice_pf_to_dev(vsi->back);
|
||||
int v_idx, err;
|
||||
|
||||
dev = ice_pf_to_dev(pf);
|
||||
if (vsi->q_vectors[0]) {
|
||||
dev_dbg(dev, "VSI %d has existing q_vectors\n", vsi->vsi_num);
|
||||
return -EEXIST;
|
||||
}
|
||||
|
||||
num_q_vectors = vsi->num_q_vectors;
|
||||
|
||||
for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
|
||||
for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) {
|
||||
err = ice_vsi_alloc_q_vector(vsi, v_idx);
|
||||
if (err)
|
||||
goto err_out;
|
||||
@ -648,8 +640,7 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring,
|
||||
status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc, ring->q_handle,
|
||||
1, qg_buf, buf_len, NULL);
|
||||
if (status) {
|
||||
dev_err(ice_pf_to_dev(pf),
|
||||
"Failed to set LAN Tx queue context, error: %d\n",
|
||||
dev_err(ice_pf_to_dev(pf), "Failed to set LAN Tx queue context, error: %d\n",
|
||||
status);
|
||||
return -ENODEV;
|
||||
}
|
||||
@ -815,14 +806,12 @@ ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
|
||||
* queues at the hardware level anyway.
|
||||
*/
|
||||
if (status == ICE_ERR_RESET_ONGOING) {
|
||||
dev_dbg(&vsi->back->pdev->dev,
|
||||
"Reset in progress. LAN Tx queues already disabled\n");
|
||||
dev_dbg(ice_pf_to_dev(vsi->back), "Reset in progress. LAN Tx queues already disabled\n");
|
||||
} else if (status == ICE_ERR_DOES_NOT_EXIST) {
|
||||
dev_dbg(&vsi->back->pdev->dev,
|
||||
"LAN Tx queues do not exist, nothing to disable\n");
|
||||
dev_dbg(ice_pf_to_dev(vsi->back), "LAN Tx queues do not exist, nothing to disable\n");
|
||||
} else if (status) {
|
||||
dev_err(&vsi->back->pdev->dev,
|
||||
"Failed to disable LAN Tx queues, error: %d\n", status);
|
||||
dev_err(ice_pf_to_dev(vsi->back), "Failed to disable LAN Tx queues, error: %d\n",
|
||||
status);
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
|
@@ -24,20 +24,6 @@ static enum ice_status ice_set_mac_type(struct ice_hw *hw)
 	return 0;
 }
 
-/**
- * ice_dev_onetime_setup - Temporary HW/FW workarounds
- * @hw: pointer to the HW structure
- *
- * This function provides temporary workarounds for certain issues
- * that are expected to be fixed in the HW/FW.
- */
-void ice_dev_onetime_setup(struct ice_hw *hw)
-{
-#define MBX_PF_VT_PFALLOC	0x00231E80
-	/* set VFs per PF */
-	wr32(hw, MBX_PF_VT_PFALLOC, rd32(hw, PF_VT_PFALLOC_HIF));
-}
-
 /**
  * ice_clear_pf_cfg - Clear PF configuration
  * @hw: pointer to the hardware structure
@@ -602,10 +588,10 @@ void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf)
 }
 
 /**
- * ice_get_itr_intrl_gran - determine int/intrl granularity
+ * ice_get_itr_intrl_gran
  * @hw: pointer to the HW struct
  *
- * Determines the ITR/intrl granularities based on the maximum aggregate
+ * Determines the ITR/INTRL granularities based on the maximum aggregate
 * bandwidth according to the device's configuration during power-on.
 */
 static void ice_get_itr_intrl_gran(struct ice_hw *hw)
@@ -763,8 +749,6 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
 	if (status)
 		goto err_unroll_sched;
 
-	ice_dev_onetime_setup(hw);
-
 	/* Get MAC information */
 	/* A single port can report up to two (LAN and WoL) addresses */
 	mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
@@ -834,7 +818,7 @@ void ice_deinit_hw(struct ice_hw *hw)
 */
enum ice_status ice_check_reset(struct ice_hw *hw)
{
-	u32 cnt, reg = 0, grst_delay;
+	u32 cnt, reg = 0, grst_delay, uld_mask;
 
 	/* Poll for Device Active state in case a recent CORER, GLOBR,
 	 * or EMPR has occurred. The grst delay value is in 100ms units.
@@ -856,13 +840,20 @@ enum ice_status ice_check_reset(struct ice_hw *hw)
 		return ICE_ERR_RESET_FAILED;
 	}
 
-#define ICE_RESET_DONE_MASK	(GLNVM_ULD_CORER_DONE_M | \
-				 GLNVM_ULD_GLOBR_DONE_M)
+#define ICE_RESET_DONE_MASK	(GLNVM_ULD_PCIER_DONE_M |\
+				 GLNVM_ULD_PCIER_DONE_1_M |\
+				 GLNVM_ULD_CORER_DONE_M |\
+				 GLNVM_ULD_GLOBR_DONE_M |\
+				 GLNVM_ULD_POR_DONE_M |\
+				 GLNVM_ULD_POR_DONE_1_M |\
+				 GLNVM_ULD_PCIER_DONE_2_M)
+
+	uld_mask = ICE_RESET_DONE_MASK;
 
 	/* Device is Active; check Global Reset processes are done */
 	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
-		reg = rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK;
-		if (reg == ICE_RESET_DONE_MASK) {
+		reg = rd32(hw, GLNVM_ULD) & uld_mask;
+		if (reg == uld_mask) {
 			ice_debug(hw, ICE_DBG_INIT,
 				  "Global reset processes done. %d\n", cnt);
 			break;
@@ -54,8 +54,6 @@ enum ice_status ice_get_caps(struct ice_hw *hw);
 
 void ice_set_safe_mode_caps(struct ice_hw *hw);
 
-void ice_dev_onetime_setup(struct ice_hw *hw);
-
 enum ice_status
 ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
 		  u32 rxq_index);
@@ -1323,13 +1323,13 @@ enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi)
 }
 
 /**
- * ice_aq_query_port_ets - query port ets configuration
+ * ice_aq_query_port_ets - query port ETS configuration
  * @pi: port information structure
  * @buf: pointer to buffer
  * @buf_size: buffer size in bytes
  * @cd: pointer to command details structure or NULL
 *
- * query current port ets configuration
+ * query current port ETS configuration
 */
static enum ice_status
ice_aq_query_port_ets(struct ice_port_info *pi,
@@ -1416,13 +1416,13 @@ ice_update_port_tc_tree_cfg(struct ice_port_info *pi,
 }
 
 /**
- * ice_query_port_ets - query port ets configuration
+ * ice_query_port_ets - query port ETS configuration
  * @pi: port information structure
  * @buf: pointer to buffer
  * @buf_size: buffer size in bytes
  * @cd: pointer to command details structure or NULL
 *
- * query current port ets configuration and update the
+ * query current port ETS configuration and update the
 * SW DB with the TC changes
 */
enum ice_status
@@ -315,9 +315,9 @@ ice_dcb_need_recfg(struct ice_pf *pf, struct ice_dcbx_cfg *old_cfg,
  */
 void ice_dcb_rebuild(struct ice_pf *pf)
 {
-	struct ice_dcbx_cfg *local_dcbx_cfg, *desired_dcbx_cfg, *prev_cfg;
 	struct ice_aqc_port_ets_elem buf = { 0 };
 	struct device *dev = ice_pf_to_dev(pf);
+	struct ice_dcbx_cfg *err_cfg;
 	enum ice_status ret;
 
 	ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
@@ -330,53 +330,25 @@ void ice_dcb_rebuild(struct ice_pf *pf)
 	if (!test_bit(ICE_FLAG_DCB_ENA, pf->flags))
 		return;
 
-	local_dcbx_cfg = &pf->hw.port_info->local_dcbx_cfg;
-	desired_dcbx_cfg = &pf->hw.port_info->desired_dcbx_cfg;
+	mutex_lock(&pf->tc_mutex);
 
-	/* Save current willing state and force FW to unwilling */
-	local_dcbx_cfg->etscfg.willing = 0x0;
-	local_dcbx_cfg->pfc.willing = 0x0;
-	local_dcbx_cfg->app_mode = ICE_DCBX_APPS_NON_WILLING;
+	if (!pf->hw.port_info->is_sw_lldp)
+		ice_cfg_etsrec_defaults(pf->hw.port_info);
 
-	ice_cfg_etsrec_defaults(pf->hw.port_info);
 	ret = ice_set_dcb_cfg(pf->hw.port_info);
 	if (ret) {
-		dev_err(dev, "Failed to set DCB to unwilling\n");
+		dev_err(dev, "Failed to set DCB config in rebuild\n");
 		goto dcb_error;
 	}
 
-	/* Retrieve DCB config and ensure same as current in SW */
-	prev_cfg = kmemdup(local_dcbx_cfg, sizeof(*prev_cfg), GFP_KERNEL);
-	if (!prev_cfg)
-		goto dcb_error;
-
-	ice_init_dcb(&pf->hw, true);
-	if (pf->hw.port_info->dcbx_status == ICE_DCBX_STATUS_DIS)
-		pf->hw.port_info->is_sw_lldp = true;
-	else
-		pf->hw.port_info->is_sw_lldp = false;
-
-	if (ice_dcb_need_recfg(pf, prev_cfg, local_dcbx_cfg)) {
-		/* difference in cfg detected - disable DCB till next MIB */
-		dev_err(dev, "Set local MIB not accurate\n");
-		kfree(prev_cfg);
-		goto dcb_error;
+	if (!pf->hw.port_info->is_sw_lldp) {
+		ret = ice_cfg_lldp_mib_change(&pf->hw, true);
+		if (ret && !pf->hw.port_info->is_sw_lldp) {
+			dev_err(dev, "Failed to register for MIB changes\n");
+			goto dcb_error;
+		}
 	}
 
-	/* fetched config congruent to previous configuration */
-	kfree(prev_cfg);
-
-	/* Set the local desired config */
-	if (local_dcbx_cfg->dcbx_mode == ICE_DCBX_MODE_CEE)
-		memcpy(local_dcbx_cfg, desired_dcbx_cfg,
-		       sizeof(*local_dcbx_cfg));
-
-	ice_cfg_etsrec_defaults(pf->hw.port_info);
-	ret = ice_set_dcb_cfg(pf->hw.port_info);
-	if (ret) {
-		dev_err(dev, "Failed to set desired config\n");
-		goto dcb_error;
-	}
 	dev_info(dev, "DCB restored after reset\n");
 	ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
 	if (ret) {
@@ -384,26 +356,32 @@ void ice_dcb_rebuild(struct ice_pf *pf)
 		goto dcb_error;
 	}
 
+	mutex_unlock(&pf->tc_mutex);
+
 	return;
 
 dcb_error:
 	dev_err(dev, "Disabling DCB until new settings occur\n");
-	prev_cfg = kzalloc(sizeof(*prev_cfg), GFP_KERNEL);
-	if (!prev_cfg)
+	err_cfg = kzalloc(sizeof(*err_cfg), GFP_KERNEL);
+	if (!err_cfg) {
+		mutex_unlock(&pf->tc_mutex);
 		return;
+	}
 
-	prev_cfg->etscfg.willing = true;
-	prev_cfg->etscfg.tcbwtable[0] = ICE_TC_MAX_BW;
-	prev_cfg->etscfg.tsatable[0] = ICE_IEEE_TSA_ETS;
-	memcpy(&prev_cfg->etsrec, &prev_cfg->etscfg, sizeof(prev_cfg->etsrec));
+	err_cfg->etscfg.willing = true;
+	err_cfg->etscfg.tcbwtable[0] = ICE_TC_MAX_BW;
+	err_cfg->etscfg.tsatable[0] = ICE_IEEE_TSA_ETS;
+	memcpy(&err_cfg->etsrec, &err_cfg->etscfg, sizeof(err_cfg->etsrec));
 	/* Coverity warns the return code of ice_pf_dcb_cfg() is not checked
 	 * here as is done for other calls to that function. That check is
 	 * not necessary since this is in this function's error cleanup path.
 	 * Suppress the Coverity warning with the following comment...
 	 */
 	/* coverity[check_return] */
-	ice_pf_dcb_cfg(pf, prev_cfg, false);
-	kfree(prev_cfg);
+	ice_pf_dcb_cfg(pf, err_cfg, false);
+	kfree(err_cfg);
+
+	mutex_unlock(&pf->tc_mutex);
 }
 
/**
@@ -434,9 +412,9 @@ static int ice_dcb_init_cfg(struct ice_pf *pf, bool locked)
 }
 
 /**
- * ice_dcb_sw_default_config - Apply a default DCB config
+ * ice_dcb_sw_dflt_cfg - Apply a default DCB config
  * @pf: PF to apply config to
- * @ets_willing: configure ets willing
+ * @ets_willing: configure ETS willing
  * @locked: was this function called with RTNL held
  */
 static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool ets_willing, bool locked)
@@ -599,8 +577,7 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked)
 		goto dcb_init_err;
 	}
 
-	dev_info(dev,
-		 "DCB is enabled in the hardware, max number of TCs supported on this port are %d\n",
+	dev_info(dev, "DCB is enabled in the hardware, max number of TCs supported on this port are %d\n",
 		 pf->hw.func_caps.common_cap.maxtc);
 	if (err) {
 		struct ice_vsi *pf_vsi;
@@ -610,8 +587,8 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked)
 		clear_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags);
 		err = ice_dcb_sw_dflt_cfg(pf, true, locked);
 		if (err) {
-			dev_err(dev,
-				"Failed to set local DCB config %d\n", err);
+			dev_err(dev, "Failed to set local DCB config %d\n",
+				err);
 			err = -EIO;
 			goto dcb_init_err;
 		}
@@ -777,6 +754,8 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
 		}
 	}
 
+	mutex_lock(&pf->tc_mutex);
+
 	/* store the old configuration */
 	tmp_dcbx_cfg = pf->hw.port_info->local_dcbx_cfg;
 
@@ -787,20 +766,20 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
 	ret = ice_get_dcb_cfg(pf->hw.port_info);
 	if (ret) {
 		dev_err(dev, "Failed to get DCB config\n");
-		return;
+		goto out;
 	}
 
 	/* No change detected in DCBX configs */
 	if (!memcmp(&tmp_dcbx_cfg, &pi->local_dcbx_cfg, sizeof(tmp_dcbx_cfg))) {
 		dev_dbg(dev, "No change detected in DCBX configuration.\n");
-		return;
+		goto out;
 	}
 
 	need_reconfig = ice_dcb_need_recfg(pf, &tmp_dcbx_cfg,
 					   &pi->local_dcbx_cfg);
 	ice_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &pi->local_dcbx_cfg);
 	if (!need_reconfig)
-		return;
+		goto out;
 
 	/* Enable DCB tagging only when more than one TC */
 	if (ice_dcb_get_num_tc(&pi->local_dcbx_cfg) > 1) {
@@ -814,7 +793,7 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
 	pf_vsi = ice_get_main_vsi(pf);
 	if (!pf_vsi) {
 		dev_dbg(dev, "PF VSI doesn't exist\n");
-		return;
+		goto out;
 	}
 
 	rtnl_lock();
@@ -823,13 +802,15 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
 	ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
 	if (ret) {
 		dev_err(dev, "Query Port ETS failed\n");
-		rtnl_unlock();
-		return;
+		goto unlock_rtnl;
 	}
 
 	/* changes in configuration update VSI */
 	ice_pf_dcb_recfg(pf);
 
 	ice_ena_vsi(pf_vsi, true);
+unlock_rtnl:
 	rtnl_unlock();
+out:
+	mutex_unlock(&pf->tc_mutex);
 }
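The rebuild rework above serializes the whole path under pf->tc_mutex, which means every exit - success, allocation failure, and the dcb_error cleanup - must release the lock exactly once. A minimal sketch of that control flow, with stand-in functions rather than the real DCB calls:

  #include <linux/mutex.h>

  static int configure(void) { return 0; }	/* stand-in for the DCB config calls */
  static void apply_safe_defaults(void) { }	/* stand-in for the dcb_error path */

  static void rebuild_sketch(struct mutex *lock)
  {
  	mutex_lock(lock);

  	if (configure() == 0)
  		goto unlock;		/* success */

  	apply_safe_defaults();		/* error cleanup, still under the lock */

  unlock:
  	mutex_unlock(lock);
  }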
@@ -297,8 +297,7 @@ ice_dcbnl_get_pfc_cfg(struct net_device *netdev, int prio, u8 *setting)
 		return;
 
 	*setting = (pi->local_dcbx_cfg.pfc.pfcena >> prio) & 0x1;
-	dev_dbg(ice_pf_to_dev(pf),
-		"Get PFC Config up=%d, setting=%d, pfcenable=0x%x\n",
+	dev_dbg(ice_pf_to_dev(pf), "Get PFC Config up=%d, setting=%d, pfcenable=0x%x\n",
 		prio, *setting, pi->local_dcbx_cfg.pfc.pfcena);
 }
 
@@ -418,8 +417,8 @@ ice_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int prio,
 		return;
 
 	*pgid = pi->local_dcbx_cfg.etscfg.prio_table[prio];
-	dev_dbg(ice_pf_to_dev(pf),
-		"Get PG config prio=%d tc=%d\n", prio, *pgid);
+	dev_dbg(ice_pf_to_dev(pf), "Get PG config prio=%d tc=%d\n", prio,
+		*pgid);
 }
 
 /**
@@ -713,13 +712,13 @@ static int ice_dcbnl_delapp(struct net_device *netdev, struct dcb_app *app)
 		return -EINVAL;
 
 	mutex_lock(&pf->tc_mutex);
-	ret = dcb_ieee_delapp(netdev, app);
-	if (ret)
-		goto delapp_out;
-
 	old_cfg = &pf->hw.port_info->local_dcbx_cfg;
 
-	if (old_cfg->numapps == 1)
+	if (old_cfg->numapps <= 1)
 		goto delapp_out;
 
+	ret = dcb_ieee_delapp(netdev, app);
+	if (ret)
+		goto delapp_out;
+
 	new_cfg = &pf->hw.port_info->desired_dcbx_cfg;
@@ -882,8 +881,7 @@ ice_dcbnl_vsi_del_app(struct ice_vsi *vsi,
 	sapp.protocol = app->prot_id;
 	sapp.priority = app->priority;
 	err = ice_dcbnl_delapp(vsi->netdev, &sapp);
-	dev_dbg(&vsi->back->pdev->dev,
-		"Deleting app for VSI idx=%d err=%d sel=%d proto=0x%x, prio=%d\n",
+	dev_dbg(ice_pf_to_dev(vsi->back), "Deleting app for VSI idx=%d err=%d sel=%d proto=0x%x, prio=%d\n",
 		vsi->idx, err, app->selector, app->prot_id, app->priority);
 }
 
@@ -166,13 +166,24 @@ static void
 ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
 {
 	struct ice_netdev_priv *np = netdev_priv(netdev);
+	u8 oem_ver, oem_patch, nvm_ver_hi, nvm_ver_lo;
 	struct ice_vsi *vsi = np->vsi;
 	struct ice_pf *pf = vsi->back;
+	struct ice_hw *hw = &pf->hw;
+	u16 oem_build;
 
 	strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
 	strlcpy(drvinfo->version, ice_drv_ver, sizeof(drvinfo->version));
-	strlcpy(drvinfo->fw_version, ice_nvm_version_str(&pf->hw),
-		sizeof(drvinfo->fw_version));
+
+	/* Display NVM version (from which the firmware version can be
+	 * determined) which contains more pertinent information.
+	 */
+	ice_get_nvm_version(hw, &oem_ver, &oem_build, &oem_patch,
+			    &nvm_ver_hi, &nvm_ver_lo);
+	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+		 "%x.%02x 0x%x %d.%d.%d", nvm_ver_hi, nvm_ver_lo,
+		 hw->nvm.eetrack, oem_ver, oem_build, oem_patch);
 
 	strlcpy(drvinfo->bus_info, pci_name(pf->pdev),
 		sizeof(drvinfo->bus_info));
 	drvinfo->n_priv_flags = ICE_PRIV_FLAG_ARRAY_SIZE;
@@ -363,8 +374,7 @@ static int ice_reg_pattern_test(struct ice_hw *hw, u32 reg, u32 mask)
 		val = rd32(hw, reg);
 		if (val == pattern)
 			continue;
-		dev_err(dev,
-			"%s: reg pattern test failed - reg 0x%08x pat 0x%08x val 0x%08x\n"
+		dev_err(dev, "%s: reg pattern test failed - reg 0x%08x pat 0x%08x val 0x%08x\n"
 			, __func__, reg, pattern, val);
 		return 1;
 	}
@@ -372,8 +382,7 @@ static int ice_reg_pattern_test(struct ice_hw *hw, u32 reg, u32 mask)
 	wr32(hw, reg, orig_val);
 	val = rd32(hw, reg);
 	if (val != orig_val) {
-		dev_err(dev,
-			"%s: reg restore test failed - reg 0x%08x orig 0x%08x val 0x%08x\n"
+		dev_err(dev, "%s: reg restore test failed - reg 0x%08x orig 0x%08x val 0x%08x\n"
			, __func__, reg, orig_val, val);
 		return 1;
 	}
@@ -791,8 +800,7 @@ ice_self_test(struct net_device *netdev, struct ethtool_test *eth_test,
 		set_bit(__ICE_TESTING, pf->state);
 
 		if (ice_active_vfs(pf)) {
-			dev_warn(dev,
-				 "Please take active VFs and Netqueues offline and restart the adapter before running NIC diagnostics\n");
+			dev_warn(dev, "Please take active VFs and Netqueues offline and restart the adapter before running NIC diagnostics\n");
 			data[ICE_ETH_TEST_REG] = 1;
 			data[ICE_ETH_TEST_EEPROM] = 1;
 			data[ICE_ETH_TEST_INTR] = 1;
@@ -1047,7 +1055,7 @@ ice_set_fecparam(struct net_device *netdev, struct ethtool_fecparam *fecparam)
 		fec = ICE_FEC_NONE;
 		break;
 	default:
-		dev_warn(&vsi->back->pdev->dev, "Unsupported FEC mode: %d\n",
+		dev_warn(ice_pf_to_dev(vsi->back), "Unsupported FEC mode: %d\n",
 			 fecparam->fec);
 		return -EINVAL;
 	}
@@ -1200,8 +1208,7 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
 			 * events to respond to.
 			 */
 			if (status)
-				dev_info(dev,
-					 "Failed to unreg for LLDP events\n");
+				dev_info(dev, "Failed to unreg for LLDP events\n");
 
 			/* The AQ call to stop the FW LLDP agent will generate
 			 * an error if the agent is already stopped.
@@ -1256,8 +1263,7 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
 			/* Register for MIB change events */
 			status = ice_cfg_lldp_mib_change(&pf->hw, true);
 			if (status)
-				dev_dbg(dev,
-					"Fail to enable MIB change events\n");
+				dev_dbg(dev, "Fail to enable MIB change events\n");
 		}
 	}
 	if (test_bit(ICE_FLAG_LEGACY_RX, change_flags)) {
@@ -1710,291 +1716,13 @@ ice_get_settings_link_up(struct ethtool_link_ksettings *ks,
 {
 	struct ice_netdev_priv *np = netdev_priv(netdev);
-	struct ice_port_info *pi = np->vsi->port_info;
-	struct ethtool_link_ksettings cap_ksettings;
 	struct ice_link_status *link_info;
 	struct ice_vsi *vsi = np->vsi;
-	bool unrecog_phy_high = false;
-	bool unrecog_phy_low = false;
 
 	link_info = &vsi->port_info->phy.link_info;
 
-	/* Initialize supported and advertised settings based on PHY settings */
-	switch (link_info->phy_type_low) {
-	case ICE_PHY_TYPE_LOW_100BASE_TX:
-		ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
-		ethtool_link_ksettings_add_link_mode(ks, supported,
-						     100baseT_Full);
-		ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
-		ethtool_link_ksettings_add_link_mode(ks, advertising,
-						     100baseT_Full);
-		break;
-	case ICE_PHY_TYPE_LOW_100M_SGMII:
-		ethtool_link_ksettings_add_link_mode(ks, supported,
-						     100baseT_Full);
-		break;
-	case ICE_PHY_TYPE_LOW_1000BASE_T:
-		ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
-		ethtool_link_ksettings_add_link_mode(ks, supported,
-						     1000baseT_Full);
-		ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
-		ethtool_link_ksettings_add_link_mode(ks, advertising,
-						     1000baseT_Full);
-		break;
-	case ICE_PHY_TYPE_LOW_1G_SGMII:
-		ethtool_link_ksettings_add_link_mode(ks, supported,
-						     1000baseT_Full);
-		break;
-	case ICE_PHY_TYPE_LOW_1000BASE_SX:
-	case ICE_PHY_TYPE_LOW_1000BASE_LX:
-		ethtool_link_ksettings_add_link_mode(ks, supported,
-						     1000baseX_Full);
-		break;
-	case ICE_PHY_TYPE_LOW_1000BASE_KX:
-		ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
-		ethtool_link_ksettings_add_link_mode(ks, supported,
-						     1000baseKX_Full);
-		ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
-		ethtool_link_ksettings_add_link_mode(ks, advertising,
-						     1000baseKX_Full);
-		break;
-	case ICE_PHY_TYPE_LOW_2500BASE_T:
-		ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
-		ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
-		ethtool_link_ksettings_add_link_mode(ks, supported,
-						     2500baseT_Full);
-		ethtool_link_ksettings_add_link_mode(ks, advertising,
-						     2500baseT_Full);
-		break;
-	case ICE_PHY_TYPE_LOW_2500BASE_X:
-		ethtool_link_ksettings_add_link_mode(ks, supported,
-						     2500baseX_Full);
-		break;
-	case ICE_PHY_TYPE_LOW_2500BASE_KX:
-		ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
-		ethtool_link_ksettings_add_link_mode(ks, supported,
-						     2500baseX_Full);
-		ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
-		ethtool_link_ksettings_add_link_mode(ks, advertising,
-						     2500baseX_Full);
-		break;
-	case ICE_PHY_TYPE_LOW_5GBASE_T:
-	case ICE_PHY_TYPE_LOW_5GBASE_KR:
-		ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
-		ethtool_link_ksettings_add_link_mode(ks, supported,
-						     5000baseT_Full);
-		ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
-		ethtool_link_ksettings_add_link_mode(ks, advertising,
-						     5000baseT_Full);
-		break;
-	case ICE_PHY_TYPE_LOW_10GBASE_T:
-		ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
-		ethtool_link_ksettings_add_link_mode(ks, supported,
-						     10000baseT_Full);
-		ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
-		ethtool_link_ksettings_add_link_mode(ks, advertising,
-						     10000baseT_Full);
-		break;
-	case ICE_PHY_TYPE_LOW_10G_SFI_DA:
-	case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
-	case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
-		ethtool_link_ksettings_add_link_mode(ks, supported,
-						     10000baseT_Full);
-		break;
-	case ICE_PHY_TYPE_LOW_10GBASE_SR:
-		ethtool_link_ksettings_add_link_mode(ks, supported,
-						     10000baseSR_Full);
-		break;
-	case ICE_PHY_TYPE_LOW_10GBASE_LR:
-		ethtool_link_ksettings_add_link_mode(ks, supported,
-						     10000baseLR_Full);
-		break;
-	case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
-		ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
-		ethtool_link_ksettings_add_link_mode(ks, supported,
-						     10000baseKR_Full);
-		ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
-		ethtool_link_ksettings_add_link_mode(ks, advertising,
-						     10000baseKR_Full);
-		break;
-	case ICE_PHY_TYPE_LOW_25GBASE_T:
-	case ICE_PHY_TYPE_LOW_25GBASE_CR:
-	case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
-	case ICE_PHY_TYPE_LOW_25GBASE_CR1:
-		ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
-		ethtool_link_ksettings_add_link_mode(ks, supported,
-						     25000baseCR_Full);
-		ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
-		ethtool_link_ksettings_add_link_mode(ks, advertising,
-						     25000baseCR_Full);
-		break;
-	case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
-	case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
-		ethtool_link_ksettings_add_link_mode(ks, supported,
-						     25000baseCR_Full);
-		break;
-	case ICE_PHY_TYPE_LOW_25GBASE_SR:
-	case ICE_PHY_TYPE_LOW_25GBASE_LR:
-		ethtool_link_ksettings_add_link_mode(ks, supported,
-						     25000baseSR_Full);
-		break;
-	case ICE_PHY_TYPE_LOW_25GBASE_KR:
-	case ICE_PHY_TYPE_LOW_25GBASE_KR1:
-	case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
-		ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
-		ethtool_link_ksettings_add_link_mode(ks, supported,
-						     25000baseKR_Full);
-		ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
-		ethtool_link_ksettings_add_link_mode(ks, advertising,
-						     25000baseKR_Full);
-		break;
-	case ICE_PHY_TYPE_LOW_40GBASE_CR4:
-		ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
-		ethtool_link_ksettings_add_link_mode(ks, supported,
-						     40000baseCR4_Full);
-		ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
-		ethtool_link_ksettings_add_link_mode(ks, advertising,
-						     40000baseCR4_Full);
-		break;
-	case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
-	case ICE_PHY_TYPE_LOW_40G_XLAUI:
-		ethtool_link_ksettings_add_link_mode(ks, supported,
-						     40000baseCR4_Full);
-		break;
-	case ICE_PHY_TYPE_LOW_40GBASE_SR4:
-		ethtool_link_ksettings_add_link_mode(ks, supported,
-						     40000baseSR4_Full);
-		break;
-	case ICE_PHY_TYPE_LOW_40GBASE_LR4:
-		ethtool_link_ksettings_add_link_mode(ks, supported,
-						     40000baseLR4_Full);
-		break;
-	case ICE_PHY_TYPE_LOW_40GBASE_KR4:
-		ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
-		ethtool_link_ksettings_add_link_mode(ks, supported,
-						     40000baseKR4_Full);
-		ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
-		ethtool_link_ksettings_add_link_mode(ks, advertising,
-						     40000baseKR4_Full);
-		break;
-	case ICE_PHY_TYPE_LOW_50GBASE_CR2:
-	case ICE_PHY_TYPE_LOW_50GBASE_CP:
-		ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
-		ethtool_link_ksettings_add_link_mode(ks, supported,
-						     50000baseCR2_Full);
-		ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
-		ethtool_link_ksettings_add_link_mode(ks, advertising,
-						     50000baseCR2_Full);
-		break;
-	case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
-	case ICE_PHY_TYPE_LOW_50G_LAUI2:
-	case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
-	case ICE_PHY_TYPE_LOW_50G_AUI2:
-	case ICE_PHY_TYPE_LOW_50GBASE_SR:
-	case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
-	case ICE_PHY_TYPE_LOW_50G_AUI1:
-		ethtool_link_ksettings_add_link_mode(ks, supported,
-						     50000baseCR2_Full);
-		break;
-	case ICE_PHY_TYPE_LOW_50GBASE_KR2:
-	case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
-		ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
-		ethtool_link_ksettings_add_link_mode(ks, supported,
-						     50000baseKR2_Full);
-		ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
-		ethtool_link_ksettings_add_link_mode(ks, advertising,
-						     50000baseKR2_Full);
-		break;
-	case ICE_PHY_TYPE_LOW_50GBASE_SR2:
-	case ICE_PHY_TYPE_LOW_50GBASE_LR2:
-	case ICE_PHY_TYPE_LOW_50GBASE_FR:
-	case ICE_PHY_TYPE_LOW_50GBASE_LR:
-		ethtool_link_ksettings_add_link_mode(ks, supported,
-						     50000baseSR2_Full);
-		break;
-	case ICE_PHY_TYPE_LOW_100GBASE_CR4:
-		ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
-		ethtool_link_ksettings_add_link_mode(ks, supported,
-						     100000baseCR4_Full);
-		ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
-		ethtool_link_ksettings_add_link_mode(ks, advertising,
-						     100000baseCR4_Full);
-		break;
-	case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
-	case ICE_PHY_TYPE_LOW_100G_CAUI4:
-	case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
-	case ICE_PHY_TYPE_LOW_100G_AUI4:
-	case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
-		ethtool_link_ksettings_add_link_mode(ks, supported,
-						     100000baseCR4_Full);
-		break;
-	case ICE_PHY_TYPE_LOW_100GBASE_CP2:
-		ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
-		ethtool_link_ksettings_add_link_mode(ks, supported,
-						     100000baseCR4_Full);
-		ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
-		ethtool_link_ksettings_add_link_mode(ks, advertising,
-						     100000baseCR4_Full);
-		break;
-	case ICE_PHY_TYPE_LOW_100GBASE_SR4:
-	case ICE_PHY_TYPE_LOW_100GBASE_SR2:
-		ethtool_link_ksettings_add_link_mode(ks, supported,
-						     100000baseSR4_Full);
-		break;
-	case ICE_PHY_TYPE_LOW_100GBASE_LR4:
-	case ICE_PHY_TYPE_LOW_100GBASE_DR:
-		ethtool_link_ksettings_add_link_mode(ks, supported,
-						     100000baseLR4_ER4_Full);
-		break;
-	case ICE_PHY_TYPE_LOW_100GBASE_KR4:
-	case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
-		ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
-		ethtool_link_ksettings_add_link_mode(ks, supported,
-						     100000baseKR4_Full);
-		ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
-		ethtool_link_ksettings_add_link_mode(ks, advertising,
-						     100000baseKR4_Full);
-		break;
-	default:
-		unrecog_phy_low = true;
-	}
-
-	switch (link_info->phy_type_high) {
-	case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
-		ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
-		ethtool_link_ksettings_add_link_mode(ks, supported,
-						     100000baseKR4_Full);
-		ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
-		ethtool_link_ksettings_add_link_mode(ks, advertising,
-						     100000baseKR4_Full);
-		break;
-	case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
-	case ICE_PHY_TYPE_HIGH_100G_CAUI2:
-	case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
-	case ICE_PHY_TYPE_HIGH_100G_AUI2:
-		ethtool_link_ksettings_add_link_mode(ks, supported,
-						     100000baseCR4_Full);
-		break;
-	default:
-		unrecog_phy_high = true;
-	}
-
-	if (unrecog_phy_low && unrecog_phy_high) {
-		/* if we got here and link is up something bad is afoot */
-		netdev_info(netdev,
-			    "WARNING: Unrecognized PHY_Low (0x%llx).\n",
-			    (u64)link_info->phy_type_low);
-		netdev_info(netdev,
-			    "WARNING: Unrecognized PHY_High (0x%llx).\n",
-			    (u64)link_info->phy_type_high);
-	}
-
-	/* Now that we've worked out everything that could be supported by the
-	 * current PHY type, get what is supported by the NVM and intersect
-	 * them to get what is truly supported
-	 */
-	memset(&cap_ksettings, 0, sizeof(cap_ksettings));
-	ice_phy_type_to_ethtool(netdev, &cap_ksettings);
-	ethtool_intersect_link_masks(ks, &cap_ksettings);
+	/* Get supported and advertised settings from PHY ability with media */
+	ice_phy_type_to_ethtool(netdev, ks);
 
 	switch (link_info->link_speed) {
 	case ICE_AQ_LINK_SPEED_100GB:
@@ -2028,8 +1756,7 @@ ice_get_settings_link_up(struct ethtool_link_ksettings *ks,
 		ks->base.speed = SPEED_100;
 		break;
 	default:
-		netdev_info(netdev,
-			    "WARNING: Unrecognized link_speed (0x%x).\n",
+		netdev_info(netdev, "WARNING: Unrecognized link_speed (0x%x).\n",
 			    link_info->link_speed);
 		break;
 	}
@@ -2845,13 +2572,11 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
 
 	new_tx_cnt = ALIGN(ring->tx_pending, ICE_REQ_DESC_MULTIPLE);
 	if (new_tx_cnt != ring->tx_pending)
-		netdev_info(netdev,
-			    "Requested Tx descriptor count rounded up to %d\n",
+		netdev_info(netdev, "Requested Tx descriptor count rounded up to %d\n",
 			    new_tx_cnt);
 	new_rx_cnt = ALIGN(ring->rx_pending, ICE_REQ_DESC_MULTIPLE);
 	if (new_rx_cnt != ring->rx_pending)
-		netdev_info(netdev,
-			    "Requested Rx descriptor count rounded up to %d\n",
+		netdev_info(netdev, "Requested Rx descriptor count rounded up to %d\n",
 			    new_rx_cnt);
 
 	/* if nothing to do return success */
@@ -3718,8 +3443,7 @@ ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec,
 		if (ec->rx_coalesce_usecs_high > ICE_MAX_INTRL ||
 		    (ec->rx_coalesce_usecs_high &&
 		     ec->rx_coalesce_usecs_high < pf->hw.intrl_gran)) {
-			netdev_info(vsi->netdev,
-				    "Invalid value, %s-usecs-high valid values are 0 (disabled), %d-%d\n",
+			netdev_info(vsi->netdev, "Invalid value, %s-usecs-high valid values are 0 (disabled), %d-%d\n",
 				    c_type_str, pf->hw.intrl_gran,
 				    ICE_MAX_INTRL);
 			return -EINVAL;
@@ -3737,8 +3461,7 @@ ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec,
 		break;
 	case ICE_TX_CONTAINER:
 		if (ec->tx_coalesce_usecs_high) {
-			netdev_info(vsi->netdev,
-				    "setting %s-usecs-high is not supported\n",
+			netdev_info(vsi->netdev, "setting %s-usecs-high is not supported\n",
 				    c_type_str);
 			return -EINVAL;
 		}
@@ -3755,23 +3478,20 @@ ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec,
 
 	itr_setting = rc->itr_setting & ~ICE_ITR_DYNAMIC;
 	if (coalesce_usecs != itr_setting && use_adaptive_coalesce) {
-		netdev_info(vsi->netdev,
-			    "%s interrupt throttling cannot be changed if adaptive-%s is enabled\n",
+		netdev_info(vsi->netdev, "%s interrupt throttling cannot be changed if adaptive-%s is enabled\n",
 			    c_type_str, c_type_str);
 		return -EINVAL;
 	}
 
 	if (coalesce_usecs > ICE_ITR_MAX) {
-		netdev_info(vsi->netdev,
-			    "Invalid value, %s-usecs range is 0-%d\n",
+		netdev_info(vsi->netdev, "Invalid value, %s-usecs range is 0-%d\n",
 			    c_type_str, ICE_ITR_MAX);
 		return -EINVAL;
 	}
 
 	/* hardware only supports an ITR granularity of 2us */
 	if (coalesce_usecs % 2 != 0) {
-		netdev_info(vsi->netdev,
-			    "Invalid value, %s-usecs must be even\n",
+		netdev_info(vsi->netdev, "Invalid value, %s-usecs must be even\n",
 			    c_type_str);
 		return -EINVAL;
 	}
@@ -4012,8 +3732,7 @@ ice_get_module_info(struct net_device *netdev,
 		}
 		break;
 	default:
-		netdev_warn(netdev,
-			    "SFF Module Type not recognized.\n");
+		netdev_warn(netdev, "SFF Module Type not recognized.\n");
 		return -EINVAL;
 	}
 	return 0;
@@ -4081,11 +3800,11 @@ ice_get_module_eeprom(struct net_device *netdev,
 static const struct ethtool_ops ice_ethtool_ops = {
 	.get_link_ksettings	= ice_get_link_ksettings,
 	.set_link_ksettings	= ice_set_link_ksettings,
-	.get_drvinfo            = ice_get_drvinfo,
-	.get_regs_len           = ice_get_regs_len,
-	.get_regs               = ice_get_regs,
-	.get_msglevel           = ice_get_msglevel,
-	.set_msglevel           = ice_set_msglevel,
+	.get_drvinfo		= ice_get_drvinfo,
+	.get_regs_len		= ice_get_regs_len,
+	.get_regs		= ice_get_regs,
+	.get_msglevel		= ice_get_msglevel,
+	.set_msglevel		= ice_set_msglevel,
 	.self_test		= ice_self_test,
 	.get_link		= ethtool_op_get_link,
 	.get_eeprom_len		= ice_get_eeprom_len,
@@ -4112,8 +3831,8 @@ static const struct ethtool_ops ice_ethtool_ops = {
 	.get_channels		= ice_get_channels,
 	.set_channels		= ice_set_channels,
 	.get_ts_info		= ethtool_op_get_ts_info,
-	.get_per_queue_coalesce = ice_get_per_q_coalesce,
-	.set_per_queue_coalesce = ice_set_per_q_coalesce,
+	.get_per_queue_coalesce	= ice_get_per_q_coalesce,
+	.set_per_queue_coalesce	= ice_set_per_q_coalesce,
 	.get_fecparam		= ice_get_fecparam,
 	.set_fecparam		= ice_set_fecparam,
 	.get_module_info	= ice_get_module_info,
@@ -267,8 +267,14 @@
 #define GLNVM_GENS_SR_SIZE_S		5
 #define GLNVM_GENS_SR_SIZE_M		ICE_M(0x7, 5)
 #define GLNVM_ULD			0x000B6008
+#define GLNVM_ULD_PCIER_DONE_M		BIT(0)
+#define GLNVM_ULD_PCIER_DONE_1_M	BIT(1)
 #define GLNVM_ULD_CORER_DONE_M		BIT(3)
 #define GLNVM_ULD_GLOBR_DONE_M		BIT(4)
+#define GLNVM_ULD_POR_DONE_M		BIT(5)
+#define GLNVM_ULD_POR_DONE_1_M		BIT(8)
+#define GLNVM_ULD_PCIER_DONE_2_M	BIT(9)
+#define GLNVM_ULD_PE_DONE_M		BIT(10)
 #define GLPCI_CNF2			0x000BE004
 #define GLPCI_CNF2_CACHELINE_SIZE_M	BIT(1)
 #define PF_FUNC_RID			0x0009E880
@@ -331,7 +337,6 @@
 #define GLV_TEPC(_VSI)			(0x00312000 + ((_VSI) * 4))
 #define GLV_UPRCL(_i)			(0x003B2000 + ((_i) * 8))
 #define GLV_UPTCL(_i)			(0x0030A000 + ((_i) * 8))
-#define PF_VT_PFALLOC_HIF		0x0009DD80
 #define VSIQF_HKEY_MAX_INDEX		12
 #define VSIQF_HLUT_MAX_INDEX		15
 #define VFINT_DYN_CTLN(_i)		(0x00003800 + ((_i) * 4))
@@ -117,8 +117,7 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
vsi->num_tx_desc = ICE_DFLT_NUM_TX_DESC;
break;
default:
dev_dbg(&vsi->back->pdev->dev,
"Not setting number of Tx/Rx descriptors for VSI type %d\n",
dev_dbg(ice_pf_to_dev(vsi->back), "Not setting number of Tx/Rx descriptors for VSI type %d\n",
vsi->type);
break;
}
@@ -724,7 +723,7 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
vsi->num_txq = tx_count;

if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) {
dev_dbg(&vsi->back->pdev->dev, "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n");
dev_dbg(ice_pf_to_dev(vsi->back), "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n");
/* since there is a chance that num_rxq could have been changed
* in the above for loop, make num_txq equal to num_rxq.
*/
@@ -929,8 +928,7 @@ static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
vsi->base_vector = ice_get_res(pf, pf->irq_tracker, num_q_vectors,
vsi->idx);
if (vsi->base_vector < 0) {
dev_err(dev,
"Failed to get tracking for %d vectors for VSI %d, err=%d\n",
dev_err(dev, "Failed to get tracking for %d vectors for VSI %d, err=%d\n",
num_q_vectors, vsi->vsi_num, vsi->base_vector);
return -ENOENT;
}
@@ -1232,8 +1230,9 @@ static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi)
*
* Returns 0 on success or ENOMEM on failure.
*/
int ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list,
const u8 *macaddr)
int
ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list,
const u8 *macaddr)
{
struct ice_fltr_list_entry *tmp;
struct ice_pf *pf = vsi->back;
@@ -1392,12 +1391,10 @@ int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid)

status = ice_remove_vlan(&pf->hw, &tmp_add_list);
if (status == ICE_ERR_DOES_NOT_EXIST) {
dev_dbg(dev,
"Failed to remove VLAN %d on VSI %i, it does not exist, status: %d\n",
dev_dbg(dev, "Failed to remove VLAN %d on VSI %i, it does not exist, status: %d\n",
vid, vsi->vsi_num, status);
} else if (status) {
dev_err(dev,
"Error removing VLAN %d on vsi %i error: %d\n",
dev_err(dev, "Error removing VLAN %d on vsi %i error: %d\n",
vid, vsi->vsi_num, status);
err = -EIO;
}
@@ -1453,8 +1450,7 @@ setup_rings:

err = ice_setup_rx_ctx(vsi->rx_rings[i]);
if (err) {
dev_err(&vsi->back->pdev->dev,
"ice_setup_rx_ctx failed for RxQ %d, err %d\n",
dev_err(ice_pf_to_dev(vsi->back), "ice_setup_rx_ctx failed for RxQ %d, err %d\n",
i, err);
return err;
}
@@ -1623,7 +1619,7 @@ int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)

status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (status) {
dev_err(&vsi->back->pdev->dev, "update VSI for VLAN insert failed, err %d aq_err %d\n",
dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN insert failed, err %d aq_err %d\n",
status, hw->adminq.sq_last_status);
ret = -EIO;
goto out;
@@ -1669,7 +1665,7 @@ int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)

status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (status) {
dev_err(&vsi->back->pdev->dev, "update VSI for VLAN strip failed, ena = %d err %d aq_err %d\n",
dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN strip failed, ena = %d err %d aq_err %d\n",
ena, status, hw->adminq.sq_last_status);
ret = -EIO;
goto out;
@@ -1834,8 +1830,7 @@ ice_vsi_set_q_vectors_reg_idx(struct ice_vsi *vsi)
struct ice_q_vector *q_vector = vsi->q_vectors[i];

if (!q_vector) {
dev_err(&vsi->back->pdev->dev,
"Failed to set reg_idx on q_vector %d VSI %d\n",
dev_err(ice_pf_to_dev(vsi->back), "Failed to set reg_idx on q_vector %d VSI %d\n",
i, vsi->vsi_num);
goto clear_reg_idx;
}
@@ -1898,8 +1893,7 @@ ice_vsi_add_rem_eth_mac(struct ice_vsi *vsi, bool add_rule)
status = ice_remove_eth_mac(&pf->hw, &tmp_add_list);

if (status)
dev_err(dev,
"Failure Adding or Removing Ethertype on VSI %i error: %d\n",
dev_err(dev, "Failure Adding or Removing Ethertype on VSI %i error: %d\n",
vsi->vsi_num, status);

ice_free_fltr_list(dev, &tmp_add_list);
@@ -2384,8 +2378,7 @@ ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id)
return -EINVAL;

if (!needed || needed > res->num_entries || id >= ICE_RES_VALID_BIT) {
dev_err(ice_pf_to_dev(pf),
"param err: needed=%d, num_entries = %d id=0x%04x\n",
dev_err(ice_pf_to_dev(pf), "param err: needed=%d, num_entries = %d id=0x%04x\n",
needed, res->num_entries, id);
return -EINVAL;
}
@@ -2686,7 +2679,6 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
ice_vsi_put_qs(vsi);
ice_vsi_clear_rings(vsi);
ice_vsi_free_arrays(vsi);
ice_dev_onetime_setup(&pf->hw);
if (vsi->type == ICE_VSI_VF)
ice_vsi_set_num_qs(vsi, vf->vf_id);
else
@@ -2765,8 +2757,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
max_txqs);
if (status) {
dev_err(ice_pf_to_dev(pf),
"VSI %d failed lan queue config, error %d\n",
dev_err(ice_pf_to_dev(pf), "VSI %d failed lan queue config, error %d\n",
vsi->vsi_num, status);
if (init_vsi) {
ret = -EIO;
@@ -2834,8 +2825,8 @@ static void ice_vsi_update_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx)
int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
struct ice_vsi_ctx *ctx;
struct ice_pf *pf = vsi->back;
struct ice_vsi_ctx *ctx;
enum ice_status status;
struct device *dev;
int i, ret = 0;
@@ -2891,25 +2882,6 @@ out:
}
#endif /* CONFIG_DCB */

/**
* ice_nvm_version_str - format the NVM version strings
* @hw: ptr to the hardware info
*/
char *ice_nvm_version_str(struct ice_hw *hw)
{
u8 oem_ver, oem_patch, ver_hi, ver_lo;
static char buf[ICE_NVM_VER_LEN];
u16 oem_build;

ice_get_nvm_version(hw, &oem_ver, &oem_build, &oem_patch, &ver_hi,
&ver_lo);

snprintf(buf, sizeof(buf), "%x.%02x 0x%x %d.%d.%d", ver_hi, ver_lo,
hw->nvm.eetrack, oem_ver, oem_build, oem_patch);

return buf;
}

/**
* ice_update_ring_stats - Update ring statistics
* @ring: ring to update
@@ -2981,7 +2953,7 @@ ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set)
status = ice_remove_mac(&vsi->back->hw, &tmp_add_list);

cfg_mac_fltr_exit:
ice_free_fltr_list(&vsi->back->pdev->dev, &tmp_add_list);
ice_free_fltr_list(ice_pf_to_dev(vsi->back), &tmp_add_list);
return status;
}

@@ -3043,16 +3015,14 @@ int ice_set_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi)

/* another VSI is already the default VSI for this switch */
if (ice_is_dflt_vsi_in_use(sw)) {
dev_err(dev,
"Default forwarding VSI %d already in use, disable it and try again\n",
dev_err(dev, "Default forwarding VSI %d already in use, disable it and try again\n",
sw->dflt_vsi->vsi_num);
return -EEXIST;
}

status = ice_cfg_dflt_vsi(&vsi->back->hw, vsi->idx, true, ICE_FLTR_RX);
if (status) {
dev_err(dev,
"Failed to set VSI %d as the default forwarding VSI, error %d\n",
dev_err(dev, "Failed to set VSI %d as the default forwarding VSI, error %d\n",
vsi->vsi_num, status);
return -EIO;
}
@@ -3091,8 +3061,7 @@ int ice_clear_dflt_vsi(struct ice_sw *sw)
status = ice_cfg_dflt_vsi(&dflt_vsi->back->hw, dflt_vsi->idx, false,
ICE_FLTR_RX);
if (status) {
dev_err(dev,
"Failed to clear the default forwarding VSI %d, error %d\n",
dev_err(dev, "Failed to clear the default forwarding VSI %d, error %d\n",
dflt_vsi->vsi_num, status);
return -EIO;
}
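Most hunks above (and in the ice_main.c and ice_virtchnl_pf.c diffs that follow) swap the open-coded &vsi->back->pdev->dev for ice_pf_to_dev(vsi->back). The helper is presumably a thin accessor in the driver's main header, roughly:

/* presumed shape of the accessor, for reference only */
static inline struct device *ice_pf_to_dev(struct ice_pf *pf)
{
	return &pf->pdev->dev;
}

Centralizing the lookup keeps call sites short and leaves one place to change if the PF-to-device mapping ever moves.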
@@ -97,8 +97,6 @@ void ice_vsi_cfg_frame_size(struct ice_vsi *vsi);

u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran);

char *ice_nvm_version_str(struct ice_hw *hw);

enum ice_status
ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set);

@@ -162,8 +162,7 @@ unregister:
* had an error
*/
if (status && vsi->netdev->reg_state == NETREG_REGISTERED) {
dev_err(ice_pf_to_dev(pf),
"Could not add MAC filters error %d. Unregistering device\n",
dev_err(ice_pf_to_dev(pf), "Could not add MAC filters error %d. Unregistering device\n",
status);
unregister_netdev(vsi->netdev);
free_netdev(vsi->netdev);
@@ -269,7 +268,7 @@ static int ice_cfg_promisc(struct ice_vsi *vsi, u8 promisc_m, bool set_promisc)
*/
static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
{
struct device *dev = &vsi->back->pdev->dev;
struct device *dev = ice_pf_to_dev(vsi->back);
struct net_device *netdev = vsi->netdev;
bool promisc_forced_on = false;
struct ice_pf *pf = vsi->back;
@@ -335,8 +334,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
!test_and_set_bit(__ICE_FLTR_OVERFLOW_PROMISC,
vsi->state)) {
promisc_forced_on = true;
netdev_warn(netdev,
"Reached MAC filter limit, forcing promisc mode on VSI %d\n",
netdev_warn(netdev, "Reached MAC filter limit, forcing promisc mode on VSI %d\n",
vsi->vsi_num);
} else {
err = -EIO;
@@ -382,8 +380,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
if (!ice_is_dflt_vsi_in_use(pf->first_sw)) {
err = ice_set_dflt_vsi(pf->first_sw, vsi);
if (err && err != -EEXIST) {
netdev_err(netdev,
"Error %d setting default VSI %i Rx rule\n",
netdev_err(netdev, "Error %d setting default VSI %i Rx rule\n",
err, vsi->vsi_num);
vsi->current_netdev_flags &=
~IFF_PROMISC;
@@ -395,8 +392,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
if (ice_is_vsi_dflt_vsi(pf->first_sw, vsi)) {
err = ice_clear_dflt_vsi(pf->first_sw);
if (err) {
netdev_err(netdev,
"Error %d clearing default VSI %i Rx rule\n",
netdev_err(netdev, "Error %d clearing default VSI %i Rx rule\n",
err, vsi->vsi_num);
vsi->current_netdev_flags |=
IFF_PROMISC;
@@ -752,7 +748,7 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
kfree(caps);

done:
netdev_info(vsi->netdev, "NIC Link is up %sbps, Requested FEC: %s, FEC: %s, Autoneg: %s, Flow Control: %s\n",
netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
speed, fec_req, fec, an, fc);
ice_print_topo_conflict(vsi);
}
@@ -815,8 +811,7 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
*/
result = ice_update_link_info(pi);
if (result)
dev_dbg(dev,
"Failed to update link status and re-enable link events for port %d\n",
dev_dbg(dev, "Failed to update link status and re-enable link events for port %d\n",
pi->lport);

/* if the old link up/down and speed is the same as the new */
@@ -834,13 +829,13 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,

result = ice_aq_set_link_restart_an(pi, false, NULL);
if (result) {
dev_dbg(dev,
"Failed to set link down, VSI %d error %d\n",
dev_dbg(dev, "Failed to set link down, VSI %d error %d\n",
vsi->vsi_num, result);
return result;
}
}

ice_dcb_rebuild(pf);
ice_vsi_link_event(vsi, link_up);
ice_print_link_msg(vsi, link_up);

@@ -892,15 +887,13 @@ static int ice_init_link_events(struct ice_port_info *pi)
ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL));

if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) {
dev_dbg(ice_hw_to_dev(pi->hw),
"Failed to set link event mask for port %d\n",
dev_dbg(ice_hw_to_dev(pi->hw), "Failed to set link event mask for port %d\n",
pi->lport);
return -EIO;
}

if (ice_aq_get_link_info(pi, true, NULL, NULL)) {
dev_dbg(ice_hw_to_dev(pi->hw),
"Failed to enable link events for port %d\n",
dev_dbg(ice_hw_to_dev(pi->hw), "Failed to enable link events for port %d\n",
pi->lport);
return -EIO;
}
@@ -929,8 +922,8 @@ ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
!!(link_data->link_info & ICE_AQ_LINK_UP),
le16_to_cpu(link_data->link_speed));
if (status)
dev_dbg(ice_pf_to_dev(pf),
"Could not process link event, error %d\n", status);
dev_dbg(ice_pf_to_dev(pf), "Could not process link event, error %d\n",
status);

return status;
}
@@ -979,13 +972,11 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
dev_dbg(dev, "%s Receive Queue VF Error detected\n",
qtype);
if (val & PF_FW_ARQLEN_ARQOVFL_M) {
dev_dbg(dev,
"%s Receive Queue Overflow Error detected\n",
dev_dbg(dev, "%s Receive Queue Overflow Error detected\n",
qtype);
}
if (val & PF_FW_ARQLEN_ARQCRIT_M)
dev_dbg(dev,
"%s Receive Queue Critical Error detected\n",
dev_dbg(dev, "%s Receive Queue Critical Error detected\n",
qtype);
val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
PF_FW_ARQLEN_ARQCRIT_M);
@@ -998,8 +989,8 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
PF_FW_ATQLEN_ATQCRIT_M)) {
oldval = val;
if (val & PF_FW_ATQLEN_ATQVFE_M)
dev_dbg(dev,
"%s Send Queue VF Error detected\n", qtype);
dev_dbg(dev, "%s Send Queue VF Error detected\n",
qtype);
if (val & PF_FW_ATQLEN_ATQOVFL_M) {
dev_dbg(dev, "%s Send Queue Overflow Error detected\n",
qtype);
@@ -1048,8 +1039,7 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
ice_dcb_process_lldp_set_mib_change(pf, &event);
break;
default:
dev_dbg(dev,
"%s Receive Queue unknown event 0x%04x ignored\n",
dev_dbg(dev, "%s Receive Queue unknown event 0x%04x ignored\n",
qtype, opcode);
break;
}
@@ -1238,7 +1228,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
u16 queue = ((reg & GL_MDET_TX_TCLAN_QNUM_M) >>
GL_MDET_TX_TCLAN_QNUM_S);

if (netif_msg_rx_err(pf))
if (netif_msg_tx_err(pf))
dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
event, queue, pf_num, vf_num);
wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff);
@@ -1335,8 +1325,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
vf->num_mdd_events++;
if (vf->num_mdd_events &&
vf->num_mdd_events <= ICE_MDD_EVENTS_THRESHOLD)
dev_info(dev,
"VF %d has had %llu MDD events since last boot, Admin might need to reload AVF driver with this number of events\n",
dev_info(dev, "VF %d has had %llu MDD events since last boot, Admin might need to reload AVF driver with this number of events\n",
i, vf->num_mdd_events);
}
}
@@ -1367,7 +1356,7 @@ static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
if (vsi->type != ICE_VSI_PF)
return 0;

dev = &vsi->back->pdev->dev;
dev = ice_pf_to_dev(vsi->back);

pi = vsi->port_info;

@@ -1378,8 +1367,7 @@ static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
NULL);
if (retcode) {
dev_err(dev,
"Failed to get phy capabilities, VSI %d error %d\n",
dev_err(dev, "Failed to get phy capabilities, VSI %d error %d\n",
vsi->vsi_num, retcode);
retcode = -EIO;
goto out;
@@ -1649,8 +1637,8 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
err = devm_request_irq(dev, irq_num, vsi->irq_handler, 0,
q_vector->name, q_vector);
if (err) {
netdev_err(vsi->netdev,
"MSIX request_irq failed, error: %d\n", err);
netdev_err(vsi->netdev, "MSIX request_irq failed, error: %d\n",
err);
goto free_q_irqs;
}

@@ -1685,7 +1673,7 @@ free_q_irqs:
*/
static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
{
struct device *dev = &vsi->back->pdev->dev;
struct device *dev = ice_pf_to_dev(vsi->back);
int i;

for (i = 0; i < vsi->num_xdp_txq; i++) {
@@ -2664,14 +2652,12 @@ static void ice_set_pf_caps(struct ice_pf *pf)
clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
if (func_caps->common_cap.dcb)
set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
#ifdef CONFIG_PCI_IOV
clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
if (func_caps->common_cap.sr_iov_1_1) {
set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
pf->num_vfs_supported = min_t(int, func_caps->num_allocd_vfs,
ICE_MAX_VF_COUNT);
}
#endif /* CONFIG_PCI_IOV */
clear_bit(ICE_FLAG_RSS_ENA, pf->flags);
if (func_caps->common_cap.rss_table_size)
set_bit(ICE_FLAG_RSS_ENA, pf->flags);
@@ -2764,8 +2750,7 @@ static int ice_ena_msix_range(struct ice_pf *pf)
}

if (v_actual < v_budget) {
dev_warn(dev,
"not enough OS MSI-X vectors. requested = %d, obtained = %d\n",
dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n",
v_budget, v_actual);
/* 2 vectors for LAN (traffic + OICR) */
#define ICE_MIN_LAN_VECS 2
@@ -2787,8 +2772,7 @@ msix_err:
goto exit_err;

no_hw_vecs_left_err:
dev_err(dev,
"not enough device MSI-X vectors. requested = %d, available = %d\n",
dev_err(dev, "not enough device MSI-X vectors. requested = %d, available = %d\n",
needed, v_left);
err = -ERANGE;
exit_err:
@@ -2921,16 +2905,14 @@ ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status)
!memcmp(hw->pkg_name, hw->active_pkg_name,
sizeof(hw->pkg_name))) {
if (hw->pkg_dwnld_status == ICE_AQ_RC_EEXIST)
dev_info(dev,
"DDP package already present on device: %s version %d.%d.%d.%d\n",
dev_info(dev, "DDP package already present on device: %s version %d.%d.%d.%d\n",
hw->active_pkg_name,
hw->active_pkg_ver.major,
hw->active_pkg_ver.minor,
hw->active_pkg_ver.update,
hw->active_pkg_ver.draft);
else
dev_info(dev,
"The DDP package was successfully loaded: %s version %d.%d.%d.%d\n",
dev_info(dev, "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n",
hw->active_pkg_name,
hw->active_pkg_ver.major,
hw->active_pkg_ver.minor,
@@ -2938,8 +2920,7 @@ ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status)
hw->active_pkg_ver.draft);
} else if (hw->active_pkg_ver.major != ICE_PKG_SUPP_VER_MAJ ||
hw->active_pkg_ver.minor != ICE_PKG_SUPP_VER_MNR) {
dev_err(dev,
"The device has a DDP package that is not supported by the driver. The device has package '%s' version %d.%d.x.x. The driver requires version %d.%d.x.x. Entering Safe Mode.\n",
dev_err(dev, "The device has a DDP package that is not supported by the driver. The device has package '%s' version %d.%d.x.x. The driver requires version %d.%d.x.x. Entering Safe Mode.\n",
hw->active_pkg_name,
hw->active_pkg_ver.major,
hw->active_pkg_ver.minor,
@@ -2947,8 +2928,7 @@ ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status)
*status = ICE_ERR_NOT_SUPPORTED;
} else if (hw->active_pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
hw->active_pkg_ver.minor == ICE_PKG_SUPP_VER_MNR) {
dev_info(dev,
"The driver could not load the DDP package file because a compatible DDP package is already present on the device. The device has package '%s' version %d.%d.%d.%d. The package file found by the driver: '%s' version %d.%d.%d.%d.\n",
dev_info(dev, "The driver could not load the DDP package file because a compatible DDP package is already present on the device. The device has package '%s' version %d.%d.%d.%d. The package file found by the driver: '%s' version %d.%d.%d.%d.\n",
hw->active_pkg_name,
hw->active_pkg_ver.major,
hw->active_pkg_ver.minor,
@@ -2960,54 +2940,46 @@ ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status)
hw->pkg_ver.update,
hw->pkg_ver.draft);
} else {
dev_err(dev,
"An unknown error occurred when loading the DDP package, please reboot the system. If the problem persists, update the NVM. Entering Safe Mode.\n");
dev_err(dev, "An unknown error occurred when loading the DDP package, please reboot the system. If the problem persists, update the NVM. Entering Safe Mode.\n");
*status = ICE_ERR_NOT_SUPPORTED;
}
break;
case ICE_ERR_BUF_TOO_SHORT:
/* fall-through */
case ICE_ERR_CFG:
dev_err(dev,
"The DDP package file is invalid. Entering Safe Mode.\n");
dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n");
break;
case ICE_ERR_NOT_SUPPORTED:
/* Package File version not supported */
if (hw->pkg_ver.major > ICE_PKG_SUPP_VER_MAJ ||
(hw->pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
hw->pkg_ver.minor > ICE_PKG_SUPP_VER_MNR))
dev_err(dev,
"The DDP package file version is higher than the driver supports. Please use an updated driver. Entering Safe Mode.\n");
dev_err(dev, "The DDP package file version is higher than the driver supports. Please use an updated driver. Entering Safe Mode.\n");
else if (hw->pkg_ver.major < ICE_PKG_SUPP_VER_MAJ ||
(hw->pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
hw->pkg_ver.minor < ICE_PKG_SUPP_VER_MNR))
dev_err(dev,
"The DDP package file version is lower than the driver supports. The driver requires version %d.%d.x.x. Please use an updated DDP Package file. Entering Safe Mode.\n",
dev_err(dev, "The DDP package file version is lower than the driver supports. The driver requires version %d.%d.x.x. Please use an updated DDP Package file. Entering Safe Mode.\n",
ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
break;
case ICE_ERR_AQ_ERROR:
switch (hw->pkg_dwnld_status) {
case ICE_AQ_RC_ENOSEC:
case ICE_AQ_RC_EBADSIG:
dev_err(dev,
"The DDP package could not be loaded because its signature is not valid. Please use a valid DDP Package. Entering Safe Mode.\n");
dev_err(dev, "The DDP package could not be loaded because its signature is not valid. Please use a valid DDP Package. Entering Safe Mode.\n");
return;
case ICE_AQ_RC_ESVN:
dev_err(dev,
"The DDP Package could not be loaded because its security revision is too low. Please use an updated DDP Package. Entering Safe Mode.\n");
dev_err(dev, "The DDP Package could not be loaded because its security revision is too low. Please use an updated DDP Package. Entering Safe Mode.\n");
return;
case ICE_AQ_RC_EBADMAN:
case ICE_AQ_RC_EBADBUF:
dev_err(dev,
"An error occurred on the device while loading the DDP package. The device will be reset.\n");
dev_err(dev, "An error occurred on the device while loading the DDP package. The device will be reset.\n");
return;
default:
break;
}
/* fall-through */
default:
dev_err(dev,
"An unknown error (%d) occurred when loading the DDP package. Entering Safe Mode.\n",
dev_err(dev, "An unknown error (%d) occurred when loading the DDP package. Entering Safe Mode.\n",
*status);
break;
}
@@ -3038,8 +3010,7 @@ ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf)
status = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size);
ice_log_pkg_init(hw, &status);
} else {
dev_err(dev,
"The DDP package file failed to load. Entering Safe Mode.\n");
dev_err(dev, "The DDP package file failed to load. Entering Safe Mode.\n");
}

if (status) {
@@ -3065,8 +3036,7 @@ ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf)
static void ice_verify_cacheline_size(struct ice_pf *pf)
{
if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M)
dev_warn(ice_pf_to_dev(pf),
"%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n",
dev_warn(ice_pf_to_dev(pf), "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n",
ICE_CACHE_LINE_BYTES);
}

@@ -3159,8 +3129,7 @@ static void ice_request_fw(struct ice_pf *pf)
dflt_pkg_load:
err = request_firmware(&firmware, ICE_DDP_PKG_FILE, dev);
if (err) {
dev_err(dev,
"The DDP package file was not found or could not be read. Entering Safe Mode\n");
dev_err(dev, "The DDP package file was not found or could not be read. Entering Safe Mode\n");
return;
}

@@ -3184,7 +3153,9 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
struct ice_hw *hw;
int err;

/* this driver uses devres, see Documentation/driver-api/driver-model/devres.rst */
/* this driver uses devres, see
* Documentation/driver-api/driver-model/devres.rst
*/
err = pcim_enable_device(pdev);
if (err)
return err;
@@ -3245,11 +3216,6 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
goto err_exit_unroll;
}

dev_info(dev, "firmware %d.%d.%d api %d.%d.%d nvm %s build 0x%08x\n",
hw->fw_maj_ver, hw->fw_min_ver, hw->fw_patch,
hw->api_maj_ver, hw->api_min_ver, hw->api_patch,
ice_nvm_version_str(hw), hw->fw_build);

ice_request_fw(pf);

/* if ice_request_fw fails, ICE_FLAG_ADV_FEATURES bit won't be
@@ -3257,8 +3223,7 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
* true
*/
if (ice_is_safe_mode(pf)) {
dev_err(dev,
"Package download failed. Advanced features disabled - Device now in Safe Mode\n");
dev_err(dev, "Package download failed. Advanced features disabled - Device now in Safe Mode\n");
/* we already got function/device capabilities but these don't
* reflect what the driver needs to do in safe mode. Instead of
* adding conditional logic everywhere to ignore these
@@ -3335,8 +3300,7 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
/* tell the firmware we are up */
err = ice_send_version(pf);
if (err) {
dev_err(dev,
"probe failed sending driver version %s. error: %d\n",
dev_err(dev, "probe failed sending driver version %s. error: %d\n",
ice_drv_ver, err);
goto err_alloc_sw_unroll;
}
@@ -3477,8 +3441,7 @@ static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev)

err = pci_enable_device_mem(pdev);
if (err) {
dev_err(&pdev->dev,
"Cannot re-enable PCI device after reset, error %d\n",
dev_err(&pdev->dev, "Cannot re-enable PCI device after reset, error %d\n",
err);
result = PCI_ERS_RESULT_DISCONNECT;
} else {
@@ -3497,8 +3460,7 @@ static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev)

err = pci_cleanup_aer_uncorrect_error_status(pdev);
if (err)
dev_dbg(&pdev->dev,
"pci_cleanup_aer_uncorrect_error_status failed, error %d\n",
dev_dbg(&pdev->dev, "pci_cleanup_aer_uncorrect_error_status failed, error %d\n",
err);
/* non-fatal, continue */

@@ -3517,8 +3479,8 @@ static void ice_pci_err_resume(struct pci_dev *pdev)
struct ice_pf *pf = pci_get_drvdata(pdev);

if (!pf) {
dev_err(&pdev->dev,
"%s failed, device is unrecoverable\n", __func__);
dev_err(&pdev->dev, "%s failed, device is unrecoverable\n",
__func__);
return;
}

@@ -3766,8 +3728,7 @@ ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate)

/* Validate maxrate requested is within permitted range */
if (maxrate && (maxrate > (ICE_SCHED_MAX_BW / 1000))) {
netdev_err(netdev,
"Invalid max rate %d specified for the queue %d\n",
netdev_err(netdev, "Invalid max rate %d specified for the queue %d\n",
maxrate, queue_index);
return -EINVAL;
}
@@ -3783,8 +3744,8 @@ ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate)
status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc,
q_handle, ICE_MAX_BW, maxrate * 1000);
if (status) {
netdev_err(netdev,
"Unable to set Tx max rate, error %d\n", status);
netdev_err(netdev, "Unable to set Tx max rate, error %d\n",
status);
return -EIO;
}

@@ -3876,15 +3837,13 @@ ice_set_features(struct net_device *netdev, netdev_features_t features)

/* Don't set any netdev advanced features with device in Safe Mode */
if (ice_is_safe_mode(vsi->back)) {
dev_err(&vsi->back->pdev->dev,
"Device is in Safe Mode - not enabling advanced netdev features\n");
dev_err(ice_pf_to_dev(vsi->back), "Device is in Safe Mode - not enabling advanced netdev features\n");
return ret;
}

/* Do not change setting during reset */
if (ice_is_reset_in_progress(pf->state)) {
dev_err(&vsi->back->pdev->dev,
"Device is resetting, changing advanced netdev features temporarily unavailable.\n");
dev_err(ice_pf_to_dev(vsi->back), "Device is resetting, changing advanced netdev features temporarily unavailable.\n");
return -EBUSY;
}

@@ -4372,21 +4331,18 @@ int ice_down(struct ice_vsi *vsi)

tx_err = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0);
if (tx_err)
netdev_err(vsi->netdev,
"Failed stop Tx rings, VSI %d error %d\n",
netdev_err(vsi->netdev, "Failed stop Tx rings, VSI %d error %d\n",
vsi->vsi_num, tx_err);
if (!tx_err && ice_is_xdp_ena_vsi(vsi)) {
tx_err = ice_vsi_stop_xdp_tx_rings(vsi);
if (tx_err)
netdev_err(vsi->netdev,
"Failed stop XDP rings, VSI %d error %d\n",
netdev_err(vsi->netdev, "Failed stop XDP rings, VSI %d error %d\n",
vsi->vsi_num, tx_err);
}

rx_err = ice_vsi_stop_rx_rings(vsi);
if (rx_err)
netdev_err(vsi->netdev,
"Failed stop Rx rings, VSI %d error %d\n",
netdev_err(vsi->netdev, "Failed stop Rx rings, VSI %d error %d\n",
vsi->vsi_num, rx_err);

ice_napi_disable_all(vsi);
@@ -4394,8 +4350,7 @@ int ice_down(struct ice_vsi *vsi)
if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
link_err = ice_force_phys_link_state(vsi, false);
if (link_err)
netdev_err(vsi->netdev,
"Failed to set physical link down, VSI %d error %d\n",
netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
vsi->vsi_num, link_err);
}

@@ -4406,8 +4361,7 @@ int ice_down(struct ice_vsi *vsi)
ice_clean_rx_ring(vsi->rx_rings[i]);

if (tx_err || rx_err || link_err) {
netdev_err(vsi->netdev,
"Failed to close VSI 0x%04X on switch 0x%04X\n",
netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n",
vsi->vsi_num, vsi->vsw->sw_id);
return -EIO;
}
@@ -4426,7 +4380,7 @@ int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
int i, err = 0;

if (!vsi->num_txq) {
dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Tx queues\n",
dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Tx queues\n",
vsi->vsi_num);
return -EINVAL;
}
@@ -4457,7 +4411,7 @@ int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
int i, err = 0;

if (!vsi->num_rxq) {
dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Rx queues\n",
dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Rx queues\n",
vsi->vsi_num);
return -EINVAL;
}
@@ -4554,8 +4508,7 @@ static void ice_vsi_release_all(struct ice_pf *pf)

err = ice_vsi_release(pf->vsi[i]);
if (err)
dev_dbg(ice_pf_to_dev(pf),
"Failed to release pf->vsi[%d], err %d, vsi_num = %d\n",
dev_dbg(ice_pf_to_dev(pf), "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n",
i, err, pf->vsi[i]->vsi_num);
}
}
@@ -4582,8 +4535,7 @@ static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
/* rebuild the VSI */
err = ice_vsi_rebuild(vsi, true);
if (err) {
dev_err(dev,
"rebuild VSI failed, err %d, VSI index %d, type %s\n",
dev_err(dev, "rebuild VSI failed, err %d, VSI index %d, type %s\n",
err, vsi->idx, ice_vsi_type_str(type));
return err;
}
@@ -4591,8 +4543,7 @@ static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
/* replay filters for the VSI */
status = ice_replay_vsi(&pf->hw, vsi->idx);
if (status) {
dev_err(dev,
"replay VSI failed, status %d, VSI index %d, type %s\n",
dev_err(dev, "replay VSI failed, status %d, VSI index %d, type %s\n",
status, vsi->idx, ice_vsi_type_str(type));
return -EIO;
}
@@ -4605,8 +4556,7 @@ static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
/* enable the VSI */
err = ice_ena_vsi(vsi, false);
if (err) {
dev_err(dev,
"enable VSI failed, err %d, VSI index %d, type %s\n",
dev_err(dev, "enable VSI failed, err %d, VSI index %d, type %s\n",
err, vsi->idx, ice_vsi_type_str(type));
return err;
}
@@ -4684,8 +4634,7 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
}

if (pf->first_sw->dflt_vsi_ena)
dev_info(dev,
"Clearing default VSI, re-enable after reset completes\n");
dev_info(dev, "Clearing default VSI, re-enable after reset completes\n");
/* clear the default VSI configuration if it exists */
pf->first_sw->dflt_vsi = NULL;
pf->first_sw->dflt_vsi_ena = false;
@@ -4736,8 +4685,7 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
/* tell the firmware we are up */
ret = ice_send_version(pf);
if (ret) {
dev_err(dev,
"Rebuild failed due to error sending driver version: %d\n",
dev_err(dev, "Rebuild failed due to error sending driver version: %d\n",
ret);
goto err_vsi_rebuild;
}
@@ -4993,7 +4941,7 @@ static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)

status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (status) {
dev_err(&vsi->back->pdev->dev, "update VSI for bridge mode failed, bmode = %d err %d aq_err %d\n",
dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %d aq_err %d\n",
bmode, status, hw->adminq.sq_last_status);
ret = -EIO;
goto out;
@@ -5185,8 +5133,7 @@ int ice_open(struct net_device *netdev)
if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
err = ice_force_phys_link_state(vsi, true);
if (err) {
netdev_err(netdev,
"Failed to set physical link up, error %d\n",
netdev_err(netdev, "Failed to set physical link up, error %d\n",
err);
return err;
}
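On the devres comment reformatted in the ice_probe() hunk above: managed ("pcim_") PCI helpers tie resource release to driver unbind, which is why the error and remove paths carry no explicit teardown. A minimal sketch of the pattern, for reference only:

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int err;

	err = pcim_enable_device(pdev);	/* auto-disabled when the driver unbinds */
	if (err)
		return err;
	/* no matching pci_disable_device() needed in any unwind path */
	return 0;
}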
@@ -644,7 +644,7 @@ static bool ice_page_is_reserved(struct page *page)
* Update the offset within page so that Rx buf will be ready to be reused.
* For systems with PAGE_SIZE < 8192 this function will flip the page offset
* so the second half of page assigned to Rx buffer will be used, otherwise
* the offset is moved by the @size bytes
* the offset is moved by "size" bytes
*/
static void
ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
@@ -1078,8 +1078,6 @@ construct_skb:
skb = ice_build_skb(rx_ring, rx_buf, &xdp);
else
skb = ice_construct_skb(rx_ring, rx_buf, &xdp);
} else {
skb = ice_construct_skb(rx_ring, rx_buf, &xdp);
}
/* exit if we failed to retrieve a buffer */
if (!skb) {
@@ -1621,11 +1619,11 @@ ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
{
u64 td_offset, td_tag, td_cmd;
u16 i = tx_ring->next_to_use;
skb_frag_t *frag;
unsigned int data_len, size;
struct ice_tx_desc *tx_desc;
struct ice_tx_buf *tx_buf;
struct sk_buff *skb;
skb_frag_t *frag;
dma_addr_t dma;

td_tag = off->td_l2tag1;
@@ -1738,9 +1736,8 @@ ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
ice_maybe_stop_tx(tx_ring, DESC_NEEDED);

/* notify HW of packet */
if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())
writel(i, tx_ring->tail);
}

return;

@@ -2078,7 +2075,7 @@ static bool __ice_chk_linearize(struct sk_buff *skb)
frag = &skb_shinfo(skb)->frags[0];

/* Initialize size to the negative value of gso_size minus 1. We
* use this as the worst case scenerio in which the frag ahead
* use this as the worst case scenario in which the frag ahead
* of us only provides one byte which is why we are limited to 6
* descriptors for a single transmit as the header and previous
* fragment are already consuming 2 descriptors.
@@ -33,8 +33,8 @@
* frame.
*
* Note: For cache line sizes 256 or larger this value is going to end
* up negative. In these cases we should fall back to the legacy
* receive path.
* up negative. In these cases we should fall back to the legacy
* receive path.
*/
#if (PAGE_SIZE < 8192)
#define ICE_2K_TOO_SMALL_WITH_PADDING \
@@ -10,7 +10,7 @@
*/
void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val)
{
u16 prev_ntu = rx_ring->next_to_use;
u16 prev_ntu = rx_ring->next_to_use & ~0x7;

rx_ring->next_to_use = val;

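The & ~0x7 in the hunk above rounds next_to_use down to a multiple of 8, so the Rx tail register is only bumped once per 8-descriptor stride rather than on every descriptor. Pure-arithmetic illustration:

/* illustration only: x & ~0x7 clears the low three bits */
u16 ntu = 13;
u16 aligned = ntu & ~0x7;	/* 13 -> 8; 7 -> 0; 16 -> 16 */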
@@ -517,7 +517,7 @@ struct ice_hw {
struct ice_fw_log_cfg fw_log;

/* Device max aggregate bandwidths corresponding to the GL_PWR_MODE_CTL
* register. Used for determining the ITR/intrl granularity during
* register. Used for determining the ITR/INTRL granularity during
* initialization.
*/
#define ICE_MAX_AGG_BW_200G 0x0
@@ -199,8 +199,7 @@ static void ice_dis_vf_mappings(struct ice_vf *vf)
if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG)
wr32(hw, VPLAN_RX_QBASE(vf->vf_id), 0);
else
dev_err(dev,
"Scattered mode for VF Rx queues is not yet implemented\n");
dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
}

/**
@@ -402,8 +401,7 @@ static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
if ((reg & VF_TRANS_PENDING_M) == 0)
break;

dev_err(dev,
"VF %d PCI transactions stuck\n", vf->vf_id);
dev_err(dev, "VF %d PCI transactions stuck\n", vf->vf_id);
udelay(ICE_PCI_CIAD_WAIT_DELAY_US);
}
}
@@ -462,7 +460,7 @@ static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 vid, bool enable)

status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (status) {
dev_info(&vsi->back->pdev->dev, "update VSI for port VLAN failed, err %d aq_err %d\n",
dev_info(ice_pf_to_dev(vsi->back), "update VSI for port VLAN failed, err %d aq_err %d\n",
status, hw->adminq.sq_last_status);
ret = -EIO;
goto out;
@@ -1095,7 +1093,6 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
* finished resetting.
*/
for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {

/* Check each VF in sequence */
while (v < pf->num_alloc_vfs) {
u32 reg;
@@ -1553,8 +1550,7 @@ ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
dev_info(dev, "VF %d failed opcode %d, retval: %d\n", vf->vf_id,
v_opcode, v_retval);
if (vf->num_inval_msgs > ICE_DFLT_NUM_INVAL_MSGS_ALLOWED) {
dev_err(dev,
"Number of invalid messages exceeded for VF %d\n",
dev_err(dev, "Number of invalid messages exceeded for VF %d\n",
vf->vf_id);
dev_err(dev, "Use PF Control I/F to enable the VF\n");
set_bit(ICE_VF_STATE_DIS, vf->vf_states);
@@ -1569,8 +1565,7 @@ ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval,
msg, msglen, NULL);
if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) {
dev_info(dev,
"Unable to send the message to VF %d ret %d aq_err %d\n",
dev_info(dev, "Unable to send the message to VF %d ret %d aq_err %d\n",
vf->vf_id, aq_ret, pf->hw.mailboxq.sq_last_status);
return -EIO;
}
@@ -1914,8 +1909,7 @@ int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
}

if (vf_vsi->type != ICE_VSI_VF) {
netdev_err(netdev,
"Type %d of VSI %d for VF %d is no ICE_VSI_VF\n",
netdev_err(netdev, "Type %d of VSI %d for VF %d is no ICE_VSI_VF\n",
vf_vsi->type, vf_vsi->vsi_num, vf->vf_id);
return -ENODEV;
}
@@ -1945,8 +1939,7 @@ int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)

status = ice_update_vsi(&pf->hw, vf_vsi->idx, ctx, NULL);
if (status) {
dev_err(dev,
"Failed to %sable spoofchk on VF %d VSI %d\n error %d",
dev_err(dev, "Failed to %sable spoofchk on VF %d VSI %d\n error %d",
ena ? "en" : "dis", vf->vf_id, vf_vsi->vsi_num, status);
ret = -EIO;
goto out;
@@ -2063,8 +2056,7 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
continue;

if (ice_vsi_ctrl_rx_ring(vsi, true, vf_q_id)) {
dev_err(&vsi->back->pdev->dev,
"Failed to enable Rx ring %d on VSI %d\n",
dev_err(ice_pf_to_dev(vsi->back), "Failed to enable Rx ring %d on VSI %d\n",
vf_q_id, vsi->vsi_num);
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
@@ -2166,8 +2158,7 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)

if (ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id,
ring, &txq_meta)) {
dev_err(&vsi->back->pdev->dev,
"Failed to stop Tx ring %d on VSI %d\n",
dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Tx ring %d on VSI %d\n",
vf_q_id, vsi->vsi_num);
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
@@ -2193,8 +2184,7 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
continue;

if (ice_vsi_ctrl_rx_ring(vsi, false, vf_q_id)) {
dev_err(&vsi->back->pdev->dev,
"Failed to stop Rx ring %d on VSI %d\n",
dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Rx ring %d on VSI %d\n",
vf_q_id, vsi->vsi_num);
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
@@ -2357,8 +2347,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)

if (qci->num_queue_pairs > ICE_MAX_BASE_QS_PER_VF ||
qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
dev_err(ice_pf_to_dev(pf),
"VF-%d requesting more than supported number of queues: %d\n",
dev_err(ice_pf_to_dev(pf), "VF-%d requesting more than supported number of queues: %d\n",
vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
@@ -2570,8 +2559,7 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
*/
if (set && !ice_is_vf_trusted(vf) &&
(vf->num_mac + al->num_elements) > ICE_MAX_MACADDR_PER_VF) {
dev_err(ice_pf_to_dev(pf),
"Can't add more MAC addresses, because VF-%d is not trusted, switch the VF to trusted mode in order to add more functionalities\n",
dev_err(ice_pf_to_dev(pf), "Can't add more MAC addresses, because VF-%d is not trusted, switch the VF to trusted mode in order to add more functionalities\n",
vf->vf_id);
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto handle_mac_exit;
@@ -2648,8 +2636,8 @@ static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
struct ice_pf *pf = vf->pf;
u16 max_allowed_vf_queues;
u16 tx_rx_queue_left;
u16 cur_queues;
struct device *dev;
u16 cur_queues;

dev = ice_pf_to_dev(pf);
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
@@ -2670,8 +2658,7 @@ static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
vfres->num_queue_pairs = ICE_MAX_BASE_QS_PER_VF;
} else if (req_queues > cur_queues &&
req_queues - cur_queues > tx_rx_queue_left) {
dev_warn(dev,
"VF %d requested %u more queues, but only %u left.\n",
dev_warn(dev, "VF %d requested %u more queues, but only %u left.\n",
vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
vfres->num_queue_pairs = min_t(u16, max_allowed_vf_queues,
ICE_MAX_BASE_QS_PER_VF);
@@ -2821,8 +2808,8 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
for (i = 0; i < vfl->num_elements; i++) {
if (vfl->vlan_id[i] > ICE_MAX_VLANID) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
dev_err(dev,
"invalid VF VLAN id %d\n", vfl->vlan_id[i]);
dev_err(dev, "invalid VF VLAN id %d\n",
vfl->vlan_id[i]);
goto error_param;
}
}
@@ -2836,8 +2823,7 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)

if (add_v && !ice_is_vf_trusted(vf) &&
vsi->num_vlan >= ICE_MAX_VLAN_PER_VF) {
dev_info(dev,
"VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
vf->vf_id);
/* There is no need to let VF know about being not trusted,
* so we can just return success message here
@@ -2860,8 +2846,7 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)

if (!ice_is_vf_trusted(vf) &&
vsi->num_vlan >= ICE_MAX_VLAN_PER_VF) {
dev_info(dev,
"VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
vf->vf_id);
/* There is no need to let VF know about being
* not trusted, so we can just return success
@@ -2889,8 +2874,7 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
status = ice_cfg_vlan_pruning(vsi, true, false);
if (status) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
dev_err(dev,
"Enable VLAN pruning on VLAN ID: %d failed error-%d\n",
dev_err(dev, "Enable VLAN pruning on VLAN ID: %d failed error-%d\n",
vid, status);
goto error_param;
}
@@ -2903,8 +2887,7 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
promisc_m, vid);
if (status) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
dev_err(dev,
"Enable Unicast/multicast promiscuous mode on VLAN ID:%d failed error-%d\n",
dev_err(dev, "Enable Unicast/multicast promiscuous mode on VLAN ID:%d failed error-%d\n",
vid, status);
}
}
@@ -3140,8 +3123,7 @@ error_handler:
case VIRTCHNL_OP_GET_VF_RESOURCES:
err = ice_vc_get_vf_res_msg(vf, msg);
if (ice_vf_init_vlan_stripping(vf))
dev_err(dev,
"Failed to initialize VLAN stripping for VF %d\n",
dev_err(dev, "Failed to initialize VLAN stripping for VF %d\n",
vf->vf_id);
ice_vc_notify_vf_link_state(vf);
break;
@@ -3313,8 +3295,7 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
*/
ether_addr_copy(vf->dflt_lan_addr.addr, mac);
vf->pf_set_mac = true;
netdev_info(netdev,
"MAC on VF %d set to %pM. VF driver will be reinitialized\n",
netdev_info(netdev, "MAC on VF %d set to %pM. VF driver will be reinitialized\n",
vf_id, mac);

ice_vc_reset_vf(vf);
@@ -3332,10 +3313,8 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
{
struct ice_pf *pf = ice_netdev_to_pf(netdev);
struct device *dev;
struct ice_vf *vf;

dev = ice_pf_to_dev(pf);
if (ice_validate_vf_id(pf, vf_id))
return -EINVAL;

@@ -3358,7 +3337,7 @@ int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)

vf->trusted = trusted;
ice_vc_reset_vf(vf);
dev_info(dev, "VF %u is now %strusted\n",
dev_info(ice_pf_to_dev(pf), "VF %u is now %strusted\n",
vf_id, trusted ? "" : "un");

return 0;
@@ -338,8 +338,8 @@ static int ice_xsk_umem_dma_map(struct ice_vsi *vsi, struct xdp_umem *umem)
DMA_BIDIRECTIONAL,
ICE_RX_DMA_ATTR);
if (dma_mapping_error(dev, dma)) {
dev_dbg(dev,
"XSK UMEM DMA mapping error on page num %d", i);
dev_dbg(dev, "XSK UMEM DMA mapping error on page num %d\n",
i);
goto out_unmap;
}

|
||||
break;
|
||||
case PHY_INTERFACE_MODE_MII:
|
||||
case PHY_INTERFACE_MODE_RGMII:
|
||||
case PHY_INTERFACE_MODE_RGMII_ID:
|
||||
case PHY_INTERFACE_MODE_RGMII_RXID:
|
||||
case PHY_INTERFACE_MODE_RGMII_TXID:
|
||||
priv->pinmode_val = 0;
|
||||
break;
|
||||
default:
|
||||
@ -1854,6 +1857,9 @@ static int ave_ld20_get_pinmode(struct ave_private *priv,
|
||||
priv->pinmode_val = SG_ETPINMODE_RMII(0);
|
||||
break;
|
||||
case PHY_INTERFACE_MODE_RGMII:
|
||||
case PHY_INTERFACE_MODE_RGMII_ID:
|
||||
case PHY_INTERFACE_MODE_RGMII_RXID:
|
||||
case PHY_INTERFACE_MODE_RGMII_TXID:
|
||||
priv->pinmode_val = 0;
|
||||
break;
|
||||
default:
|
||||
@ -1876,6 +1882,9 @@ static int ave_pxs3_get_pinmode(struct ave_private *priv,
|
||||
priv->pinmode_val = SG_ETPINMODE_RMII(arg);
|
||||
break;
|
||||
case PHY_INTERFACE_MODE_RGMII:
|
||||
case PHY_INTERFACE_MODE_RGMII_ID:
|
||||
case PHY_INTERFACE_MODE_RGMII_RXID:
|
||||
case PHY_INTERFACE_MODE_RGMII_TXID:
|
||||
priv->pinmode_val = 0;
|
||||
break;
|
||||
default:
|
||||
|
@@ -1350,27 +1350,12 @@ sunvnet_start_xmit_common(struct sk_buff *skb, struct net_device *dev,
if (vio_version_after_eq(&port->vio, 1, 3))
localmtu -= VLAN_HLEN;

if (skb->protocol == htons(ETH_P_IP)) {
struct flowi4 fl4;
struct rtable *rt = NULL;

memset(&fl4, 0, sizeof(fl4));
fl4.flowi4_oif = dev->ifindex;
fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
fl4.daddr = ip_hdr(skb)->daddr;
fl4.saddr = ip_hdr(skb)->saddr;

rt = ip_route_output_key(dev_net(dev), &fl4);
if (!IS_ERR(rt)) {
skb_dst_set(skb, &rt->dst);
icmp_send(skb, ICMP_DEST_UNREACH,
ICMP_FRAG_NEEDED,
htonl(localmtu));
}
}
if (skb->protocol == htons(ETH_P_IP))
icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
htonl(localmtu));
#if IS_ENABLED(CONFIG_IPV6)
else if (skb->protocol == htons(ETH_P_IPV6))
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, localmtu);
icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, localmtu);
#endif
goto out_dropped;
}
@@ -546,8 +546,8 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
mtu < ntohs(iph->tot_len)) {
netdev_dbg(dev, "packet too big, fragmentation needed\n");
memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
htonl(mtu));
icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
htonl(mtu));
goto err_rt;
}

@@ -61,7 +61,6 @@ enum qmi_wwan_flags {
 
 enum qmi_wwan_quirks {
 	QMI_WWAN_QUIRK_DTR = 1 << 0,		/* needs "set DTR" request */
-	QMI_WWAN_QUIRK_QUECTEL_DYNCFG = 1 << 1,	/* check num. endpoints */
 };
 
 struct qmimux_hdr {
@@ -916,16 +915,6 @@ static const struct driver_info qmi_wwan_info_quirk_dtr = {
 	.data           = QMI_WWAN_QUIRK_DTR,
 };
 
-static const struct driver_info qmi_wwan_info_quirk_quectel_dyncfg = {
-	.description	= "WWAN/QMI device",
-	.flags		= FLAG_WWAN | FLAG_SEND_ZLP,
-	.bind		= qmi_wwan_bind,
-	.unbind		= qmi_wwan_unbind,
-	.manage_power	= qmi_wwan_manage_power,
-	.rx_fixup       = qmi_wwan_rx_fixup,
-	.data           = QMI_WWAN_QUIRK_DTR | QMI_WWAN_QUIRK_QUECTEL_DYNCFG,
-};
-
 #define HUAWEI_VENDOR_ID	0x12D1
 
 /* map QMI/wwan function by a fixed interface number */
@@ -946,14 +935,18 @@ static const struct driver_info qmi_wwan_info_quirk_quectel_dyncfg = {
 #define QMI_GOBI_DEVICE(vend, prod) \
 	QMI_FIXED_INTF(vend, prod, 0)
 
-/* Quectel does not use fixed interface numbers on at least some of their
- * devices. We need to check the number of endpoints to ensure that we bind to
- * the correct interface.
+/* Many devices have QMI and DIAG functions which are distinguishable
+ * from other vendor specific functions by class, subclass and
+ * protocol all being 0xff. The DIAG function has exactly 2 endpoints
+ * and is silently rejected when probed.
+ *
+ * This makes it possible to match dynamically numbered QMI functions
+ * as seen on e.g. many Quectel modems.
  */
-#define QMI_QUIRK_QUECTEL_DYNCFG(vend, prod) \
+#define QMI_MATCH_FF_FF_FF(vend, prod) \
 	USB_DEVICE_AND_INTERFACE_INFO(vend, prod, USB_CLASS_VENDOR_SPEC, \
 				      USB_SUBCLASS_VENDOR_SPEC, 0xff), \
-	.driver_info = (unsigned long)&qmi_wwan_info_quirk_quectel_dyncfg
+	.driver_info = (unsigned long)&qmi_wwan_info_quirk_dtr
 
 static const struct usb_device_id products[] = {
 	/* 1. CDC ECM like devices match on the control interface */
@@ -1059,10 +1052,10 @@ static const struct usb_device_id products[] = {
 	 USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x581d, USB_CLASS_VENDOR_SPEC, 1, 7),
 	 .driver_info = (unsigned long)&qmi_wwan_info,
 	},
-	{QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0125)},	/* Quectel EC25, EC20 R2.0 Mini PCIe */
-	{QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0306)},	/* Quectel EP06/EG06/EM06 */
-	{QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0512)},	/* Quectel EG12/EM12 */
-	{QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0800)},	/* Quectel RM500Q-GL */
+	{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0125)},	/* Quectel EC25, EC20 R2.0 Mini PCIe */
+	{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0306)},	/* Quectel EP06/EG06/EM06 */
+	{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0512)},	/* Quectel EG12/EM12 */
+	{QMI_MATCH_FF_FF_FF(0x2c7c, 0x0800)},	/* Quectel RM500Q-GL */
 
 	/* 3. Combined interface devices matching on interface number */
 	{QMI_FIXED_INTF(0x0408, 0xea42, 4)},	/* Yota / Megafon M100-1 */
@@ -1363,6 +1356,7 @@ static const struct usb_device_id products[] = {
 	{QMI_FIXED_INTF(0x413c, 0x81b6, 8)},	/* Dell Wireless 5811e */
 	{QMI_FIXED_INTF(0x413c, 0x81b6, 10)},	/* Dell Wireless 5811e */
 	{QMI_FIXED_INTF(0x413c, 0x81d7, 0)},	/* Dell Wireless 5821e */
 	{QMI_FIXED_INTF(0x413c, 0x81d7, 1)},	/* Dell Wireless 5821e preproduction config */
+	{QMI_FIXED_INTF(0x413c, 0x81e0, 0)},	/* Dell Wireless 5821e with eSIM support*/
 	{QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)},	/* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
 	{QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)},	/* HP lt4120 Snapdragon X5 LTE */
@@ -1454,7 +1448,6 @@ static int qmi_wwan_probe(struct usb_interface *intf,
 {
 	struct usb_device_id *id = (struct usb_device_id *)prod;
 	struct usb_interface_descriptor *desc = &intf->cur_altsetting->desc;
-	const struct driver_info *info;
 
 	/* Workaround to enable dynamic IDs. This disables usbnet
 	 * blacklisting functionality. Which, if required, can be
@@ -1490,12 +1483,8 @@ static int qmi_wwan_probe(struct usb_interface *intf,
 	 * different. Ignore the current interface if the number of endpoints
 	 * equals the number for the diag interface (two).
 	 */
-	info = (void *)id->driver_info;
-
-	if (info->data & QMI_WWAN_QUIRK_QUECTEL_DYNCFG) {
-		if (desc->bNumEndpoints == 2)
-			return -ENODEV;
-	}
+	if (desc->bNumEndpoints == 2)
+		return -ENODEV;
 
 	return usbnet_probe(intf, id);
 }

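Taken together, the qmi_wwan hunks above replace a Quectel-specific quirk with a generic match on class/subclass/protocol all being 0xff, plus an unconditional two-endpoint rejection at probe time. A minimal sketch of how a match-table entry and the probe check fit together; the device ID and function names here are hypothetical, only QMI_MATCH_FF_FF_FF and the two-endpoint rule come from the diff:

/* Illustrative only: match any ff/ff/ff vendor-specific function on a
 * hypothetical device, then reject the two-endpoint DIAG function.
 */
static const struct usb_device_id example_ids[] = {
	{QMI_MATCH_FF_FF_FF(0x1234, 0x5678)},	/* hypothetical modem */
	{ }					/* terminating entry */
};

static int example_probe(struct usb_interface *intf,
			 const struct usb_device_id *prod)
{
	struct usb_interface_descriptor *desc = &intf->cur_altsetting->desc;

	if (desc->bNumEndpoints == 2)	/* DIAG has exactly two endpoints */
		return -ENODEV;		/* silently rejected, as above */
	return usbnet_probe(intf, prod);
}
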
@@ -203,9 +203,9 @@ err_peer:
 err:
 	++dev->stats.tx_errors;
 	if (skb->protocol == htons(ETH_P_IP))
-		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
+		icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
 	else if (skb->protocol == htons(ETH_P_IPV6))
-		icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
+		icmpv6_ndo_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
 	kfree_skb(skb);
 	return ret;
 }

@@ -31,6 +31,12 @@ static inline void icmpv6_send(struct sk_buff *skb,
 }
 #endif
 
+#if IS_ENABLED(CONFIG_NF_NAT)
+void icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info);
+#else
+#define icmpv6_ndo_send icmpv6_send
+#endif
+
 extern int	icmpv6_init(void);
 extern int	icmpv6_err_convert(u8 type, u8 code,
 				   int *err);

@@ -1616,6 +1616,7 @@ enum netdev_priv_flags {
  *	and drivers will need to set them appropriately.
  *
  *	@mpls_features:	Mask of features inheritable by MPLS
+ *	@gso_partial_features: value(s) from NETIF_F_GSO\*
  *
  *	@ifindex:	interface index
  *	@group:		The group the device belongs to
@@ -1640,8 +1641,11 @@ enum netdev_priv_flags {
  *	@netdev_ops:	Includes several pointers to callbacks,
  *			if one wants to override the ndo_*() functions
  *	@ethtool_ops:	Management operations
+ *	@l3mdev_ops:	Layer 3 master device operations
  *	@ndisc_ops:	Includes callbacks for different IPv6 neighbour
  *			discovery handling. Necessary for e.g. 6LoWPAN.
+ *	@xfrmdev_ops:	Transformation offload operations
+ *	@tlsdev_ops:	Transport Layer Security offload operations
  *	@header_ops:	Includes callbacks for creating,parsing,caching,etc
  *			of Layer 2 headers.
  *
@@ -1680,6 +1684,7 @@ enum netdev_priv_flags {
  *	@dev_port:	Used to differentiate devices that share
  *			the same function
  *	@addr_list_lock:	XXX: need comments on this one
+ *	@name_assign_type:	network interface name assignment type
  *	@uc_promisc:	Counter that indicates promiscuous mode
  *			has been enabled due to the need to listen to
  *			additional unicast addresses in a device that
@@ -1702,6 +1707,9 @@ enum netdev_priv_flags {
  *	@ip6_ptr:	IPv6 specific data
  *	@ax25_ptr:	AX.25 specific data
  *	@ieee80211_ptr:	IEEE 802.11 specific data, assign before registering
+ *	@ieee802154_ptr: IEEE 802.15.4 low-rate Wireless Personal Area Network
+ *			device struct
+ *	@mpls_ptr:	mpls_dev struct pointer
  *
  *	@dev_addr:	Hw address (before bcast,
  *			because most packets are unicast)
@@ -1710,6 +1718,8 @@ enum netdev_priv_flags {
  *	@num_rx_queues:		Number of RX queues
  *				allocated at register_netdev() time
  *	@real_num_rx_queues: 	Number of RX queues currently active in device
+ *	@xdp_prog:		XDP sockets filter program pointer
+ *	@gro_flush_timeout:	timeout for GRO layer in NAPI
  *
  *	@rx_handler:		handler for received packets
  *	@rx_handler_data: 	XXX: need comments on this one
@@ -1731,10 +1741,14 @@ enum netdev_priv_flags {
  *	@qdisc:			Root qdisc from userspace point of view
  *	@tx_queue_len:		Max frames per queue allowed
  *	@tx_global_lock: 	XXX: need comments on this one
+ *	@xdp_bulkq:		XDP device bulk queue
+ *	@xps_cpus_map:		all CPUs map for XPS device
+ *	@xps_rxqs_map:		all RXQs map for XPS device
  *
  *	@xps_maps:	XXX: need comments on this one
  *	@miniq_egress:		clsact qdisc specific data for
  *				egress processing
+ *	@qdisc_hash:		qdisc hash table
  *	@watchdog_timeo:	Represents the timeout that is used by
  *				the watchdog (see dev_watchdog())
  *	@watchdog_timer:	List of timers
@@ -3548,7 +3562,7 @@ static inline unsigned int netif_attrmask_next(int n, const unsigned long *srcp,
 }
 
 /**
- * netif_attrmask_next_and - get the next CPU/Rx queue in *src1p & *src2p
+ * netif_attrmask_next_and - get the next CPU/Rx queue in \*src1p & \*src2p
  * @n: CPU/Rx queue index
  * @src1p: the first CPUs/Rx queues mask pointer
  * @src2p: the second CPUs/Rx queues mask pointer

@@ -33,7 +33,6 @@ enum flow_dissect_ret {
 
 /**
  * struct flow_dissector_key_basic:
- * @thoff: Transport header offset
  * @n_proto: Network header protocol (eg. IPv4/IPv6)
  * @ip_proto: Transport header protocol (eg. TCP/UDP)
  */

@@ -43,6 +43,12 @@ static inline void icmp_send(struct sk_buff *skb_in, int type, int code, __be32
 	__icmp_send(skb_in, type, code, info, &IPCB(skb_in)->opt);
 }
 
+#if IS_ENABLED(CONFIG_NF_NAT)
+void icmp_ndo_send(struct sk_buff *skb_in, int type, int code, __be32 info);
+#else
+#define icmp_ndo_send icmp_send
+#endif
+
 int icmp_rcv(struct sk_buff *skb);
 int icmp_err(struct sk_buff *skb, u32 info);
 int icmp_init(void);

@@ -1004,12 +1004,11 @@ ieee80211_rate_get_vht_nss(const struct ieee80211_tx_rate *rate)
 struct ieee80211_tx_info {
 	/* common information */
 	u32 flags;
-	u8 band;
-
-	u8 hw_queue;
-
-	u16 ack_frame_id:6;
-	u16 tx_time_est:10;
+	u32 band:3,
+	    ack_frame_id:13,
+	    hw_queue:4,
+	    tx_time_est:10;
+	/* 2 free bits */
 
 	union {
 		struct {

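The ieee80211_tx_info change packs four previously separate fields into one 32-bit word: 3 (band) + 13 (ack_frame_id) + 4 (hw_queue) + 10 (tx_time_est) = 30 bits, leaving the 2 free bits the new comment notes. A standalone sketch (not the kernel struct) that checks the packing at compile time:

/* Sketch only: mirror of the new layout, to verify it fits one u32. */
#include <assert.h>
#include <stdint.h>

struct tx_info_layout {
	uint32_t band:3,
		 ack_frame_id:13,
		 hw_queue:4,
		 tx_time_est:10;	/* 30 bits used, 2 spare */
};

static_assert(sizeof(struct tx_info_layout) == sizeof(uint32_t),
	      "bitfields must fit a single 32-bit word");
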
@@ -4527,14 +4527,14 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
 	/* Reinjected packets coming from act_mirred or similar should
 	 * not get XDP generic processing.
 	 */
-	if (skb_cloned(skb) || skb_is_tc_redirected(skb))
+	if (skb_is_tc_redirected(skb))
 		return XDP_PASS;
 
 	/* XDP packets must be linear and must have sufficient headroom
 	 * of XDP_PACKET_HEADROOM bytes. This is the guarantee that also
 	 * native XDP provides, thus we need to do it here as well.
 	 */
-	if (skb_is_nonlinear(skb) ||
+	if (skb_cloned(skb) || skb_is_nonlinear(skb) ||
 	    skb_headroom(skb) < XDP_PACKET_HEADROOM) {
 		int hroom = XDP_PACKET_HEADROOM - skb_headroom(skb);
 		int troom = skb->tail + skb->data_len - skb->end;

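The net effect of the two dev.c changes: a cloned skb no longer bypasses generic XDP; it instead falls into the slow path below the headroom test, where the head is reallocated and the data copied, giving the XDP program a private, writable buffer. A condensed sketch of that slow path, assuming a local `drop` error label:

/* Sketch: un-clone and linearize before running the XDP program. */
if (skb_cloned(skb) || skb_is_nonlinear(skb) ||
    skb_headroom(skb) < XDP_PACKET_HEADROOM) {
	int hroom = max(0, XDP_PACKET_HEADROOM - (int)skb_headroom(skb));

	/* pskb_expand_head() copies the head, dropping the clone state */
	if (pskb_expand_head(skb, hroom, 0, GFP_ATOMIC) ||
	    skb_linearize(skb))
		goto drop;	/* hypothetical error label */
}
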
@@ -99,8 +99,7 @@ EXPORT_SYMBOL(page_pool_create);
 static void __page_pool_return_page(struct page_pool *pool, struct page *page);
 
 noinline
-static struct page *page_pool_refill_alloc_cache(struct page_pool *pool,
-						 bool refill)
+static struct page *page_pool_refill_alloc_cache(struct page_pool *pool)
 {
 	struct ptr_ring *r = &pool->ring;
 	struct page *page;
@@ -141,8 +140,7 @@ static struct page *page_pool_refill_alloc_cache(struct page_pool *pool,
 			page = NULL;
 			break;
 		}
-	} while (pool->alloc.count < PP_ALLOC_CACHE_REFILL &&
-		 refill);
+	} while (pool->alloc.count < PP_ALLOC_CACHE_REFILL);
 
 	/* Return last page */
 	if (likely(pool->alloc.count > 0))
@@ -155,20 +153,16 @@ static struct page *page_pool_refill_alloc_cache(struct page_pool *pool,
 /* fast path */
 static struct page *__page_pool_get_cached(struct page_pool *pool)
 {
-	bool refill = false;
 	struct page *page;
 
-	/* Test for safe-context, caller should provide this guarantee */
-	if (likely(in_serving_softirq())) {
-		if (likely(pool->alloc.count)) {
-			/* Fast-path */
-			page = pool->alloc.cache[--pool->alloc.count];
-			return page;
-		}
-		refill = true;
+	/* Caller MUST guarantee safe non-concurrent access, e.g. softirq */
+	if (likely(pool->alloc.count)) {
+		/* Fast-path */
+		page = pool->alloc.cache[--pool->alloc.count];
+	} else {
+		page = page_pool_refill_alloc_cache(pool);
 	}
 
-	page = page_pool_refill_alloc_cache(pool, refill);
 	return page;
 }

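For orientation, a hedged sketch of how a driver sits on top of this allocator: the alloc.cache array is the non-concurrent fast path shown above and the ptr_ring is the refill source. The parameter values below are illustrative, not from the patch:

/* Illustrative setup and use of a page_pool instance. */
struct page_pool_params pp_params = {
	.flags		= 0,		/* no DMA mapping in this sketch */
	.order		= 0,		/* single pages */
	.pool_size	= 256,		/* ptr_ring size */
	.nid		= NUMA_NO_NODE,
};
struct page_pool *pool = page_pool_create(&pp_params);

if (IS_ERR(pool))
	return PTR_ERR(pool);

/* Pops alloc.cache when non-empty, otherwise refills from the ring */
struct page *page = page_pool_dev_alloc_pages(pool);
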
@@ -31,7 +31,7 @@ static struct sk_buff *ar9331_tag_xmit(struct sk_buff *skb,
 	__le16 *phdr;
 	u16 hdr;
 
-	if (skb_cow_head(skb, 0) < 0)
+	if (skb_cow_head(skb, AR9331_HDR_LEN) < 0)
 		return NULL;
 
 	phdr = skb_push(skb, AR9331_HDR_LEN);

@@ -33,7 +33,7 @@ static struct sk_buff *qca_tag_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct dsa_port *dp = dsa_slave_to_port(dev);
 	u16 *phdr, hdr;
 
-	if (skb_cow_head(skb, 0) < 0)
+	if (skb_cow_head(skb, QCA_HDR_LEN) < 0)
 		return NULL;
 
 	skb_push(skb, QCA_HDR_LEN);

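Both DSA tag hunks fix the same latent bug: skb_cow_head(skb, 0) only guarantees that the existing header is writable, not that any headroom exists, so the following skb_push() could write before the start of the buffer. The general pattern, with HDR_LEN standing in for the tag length:

/* General pattern the two hunks above establish: reserve the tag length
 * before pushing it. HDR_LEN is a placeholder for the tag size.
 */
if (skb_cow_head(skb, HDR_LEN) < 0)	/* ensure HDR_LEN bytes of headroom */
	return NULL;
skb_push(skb, HDR_LEN);			/* now guaranteed not to underflow */
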
@@ -748,6 +748,39 @@ out:;
 }
 EXPORT_SYMBOL(__icmp_send);
 
+#if IS_ENABLED(CONFIG_NF_NAT)
+#include <net/netfilter/nf_conntrack.h>
+void icmp_ndo_send(struct sk_buff *skb_in, int type, int code, __be32 info)
+{
+	struct sk_buff *cloned_skb = NULL;
+	enum ip_conntrack_info ctinfo;
+	struct nf_conn *ct;
+	__be32 orig_ip;
+
+	ct = nf_ct_get(skb_in, &ctinfo);
+	if (!ct || !(ct->status & IPS_SRC_NAT)) {
+		icmp_send(skb_in, type, code, info);
+		return;
+	}
+
+	if (skb_shared(skb_in))
+		skb_in = cloned_skb = skb_clone(skb_in, GFP_ATOMIC);
+
+	if (unlikely(!skb_in || skb_network_header(skb_in) < skb_in->head ||
+	    (skb_network_header(skb_in) + sizeof(struct iphdr)) >
+	    skb_tail_pointer(skb_in) || skb_ensure_writable(skb_in,
+	    skb_network_offset(skb_in) + sizeof(struct iphdr))))
+		goto out;
+
+	orig_ip = ip_hdr(skb_in)->saddr;
+	ip_hdr(skb_in)->saddr = ct->tuplehash[0].tuple.src.u3.ip;
+	icmp_send(skb_in, type, code, info);
+	ip_hdr(skb_in)->saddr = orig_ip;
+out:
+	consume_skb(cloned_skb);
+}
+EXPORT_SYMBOL(icmp_ndo_send);
+#endif
+
 static void icmp_socket_deliver(struct sk_buff *skb, u32 info)
 {
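In short, icmp_ndo_send() checks conntrack for source NAT and, if present, temporarily restores the original source address so the ICMP error is generated for (and routed back to) the real sender. A hypothetical tunnel transmit path shows the intended call site; everything here except icmp_ndo_send() itself is illustrative:

/* Sketch: drivers in ndo_start_xmit context cannot know whether NAT has
 * rewritten the source, so they call icmp_ndo_send() instead of icmp_send().
 */
static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned int mtu = 1400;	/* placeholder path MTU */

	if (skb->len > mtu) {
		icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
			      htonl(mtu));
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}
	/* ... actual transmission elided ... */
	return NETDEV_TX_OK;
}
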
@@ -45,4 +45,38 @@ out:
 	rcu_read_unlock();
 }
 EXPORT_SYMBOL(icmpv6_send);
+
+#if IS_ENABLED(CONFIG_NF_NAT)
+#include <net/netfilter/nf_conntrack.h>
+void icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info)
+{
+	struct sk_buff *cloned_skb = NULL;
+	enum ip_conntrack_info ctinfo;
+	struct in6_addr orig_ip;
+	struct nf_conn *ct;
+
+	ct = nf_ct_get(skb_in, &ctinfo);
+	if (!ct || !(ct->status & IPS_SRC_NAT)) {
+		icmpv6_send(skb_in, type, code, info);
+		return;
+	}
+
+	if (skb_shared(skb_in))
+		skb_in = cloned_skb = skb_clone(skb_in, GFP_ATOMIC);
+
+	if (unlikely(!skb_in || skb_network_header(skb_in) < skb_in->head ||
+	    (skb_network_header(skb_in) + sizeof(struct ipv6hdr)) >
+	    skb_tail_pointer(skb_in) || skb_ensure_writable(skb_in,
+	    skb_network_offset(skb_in) + sizeof(struct ipv6hdr))))
+		goto out;
+
+	orig_ip = ipv6_hdr(skb_in)->saddr;
+	ipv6_hdr(skb_in)->saddr = ct->tuplehash[0].tuple.src.u3.in6;
+	icmpv6_send(skb_in, type, code, info);
+	ipv6_hdr(skb_in)->saddr = orig_ip;
+out:
+	consume_skb(cloned_skb);
+}
+EXPORT_SYMBOL(icmpv6_ndo_send);
+#endif
 #endif

@@ -121,6 +121,7 @@ static struct net_device_stats *ip6_get_stats(struct net_device *dev)
 
 /**
  * ip6_tnl_lookup - fetch tunnel matching the end-point addresses
+ * @link: ifindex of underlying interface
  * @remote: the address of the tunnel exit-point
  * @local: the address of the tunnel entry-point
  *
@@ -134,37 +135,56 @@ static struct net_device_stats *ip6_get_stats(struct net_device *dev)
 	for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
 
 static struct ip6_tnl *
-ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_addr *local)
+ip6_tnl_lookup(struct net *net, int link,
+	       const struct in6_addr *remote, const struct in6_addr *local)
 {
 	unsigned int hash = HASH(remote, local);
-	struct ip6_tnl *t;
+	struct ip6_tnl *t, *cand = NULL;
 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
 	struct in6_addr any;
 
 	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
-		if (ipv6_addr_equal(local, &t->parms.laddr) &&
-		    ipv6_addr_equal(remote, &t->parms.raddr) &&
-		    (t->dev->flags & IFF_UP))
+		if (!ipv6_addr_equal(local, &t->parms.laddr) ||
+		    !ipv6_addr_equal(remote, &t->parms.raddr) ||
+		    !(t->dev->flags & IFF_UP))
+			continue;
+
+		if (link == t->parms.link)
 			return t;
+		else
+			cand = t;
 	}
 
 	memset(&any, 0, sizeof(any));
 	hash = HASH(&any, local);
 	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
-		if (ipv6_addr_equal(local, &t->parms.laddr) &&
-		    ipv6_addr_any(&t->parms.raddr) &&
-		    (t->dev->flags & IFF_UP))
+		if (!ipv6_addr_equal(local, &t->parms.laddr) ||
+		    !ipv6_addr_any(&t->parms.raddr) ||
+		    !(t->dev->flags & IFF_UP))
+			continue;
+
+		if (link == t->parms.link)
 			return t;
+		else if (!cand)
+			cand = t;
 	}
 
 	hash = HASH(remote, &any);
 	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
-		if (ipv6_addr_equal(remote, &t->parms.raddr) &&
-		    ipv6_addr_any(&t->parms.laddr) &&
-		    (t->dev->flags & IFF_UP))
+		if (!ipv6_addr_equal(remote, &t->parms.raddr) ||
+		    !ipv6_addr_any(&t->parms.laddr) ||
+		    !(t->dev->flags & IFF_UP))
+			continue;
+
+		if (link == t->parms.link)
 			return t;
+		else if (!cand)
+			cand = t;
 	}
 
+	if (cand)
+		return cand;
+
 	t = rcu_dereference(ip6n->collect_md_tun);
 	if (t && t->dev->flags & IFF_UP)
 		return t;
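The rewritten lookup prefers a tunnel whose parms.link matches the caller's ifindex but keeps the first address-only match as a fallback, so the old behaviour is preserved when no link-qualified tunnel exists. The selection rule each loop now applies, condensed into a hypothetical helper:

/* Illustrative helper, not from the patch: an address match with the same
 * underlying link wins immediately; the first looser match is remembered.
 */
static struct ip6_tnl *pick(struct ip6_tnl *t, int link, struct ip6_tnl **cand)
{
	if (link == t->parms.link)
		return t;		/* exact link match: done */
	if (!*cand)
		*cand = t;		/* remember first address-only match */
	return NULL;			/* keep scanning */
}
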
@@ -351,7 +371,8 @@ static struct ip6_tnl *ip6_tnl_locate(struct net *net,
 	     (t = rtnl_dereference(*tp)) != NULL;
 	     tp = &t->next) {
 		if (ipv6_addr_equal(local, &t->parms.laddr) &&
-		    ipv6_addr_equal(remote, &t->parms.raddr)) {
+		    ipv6_addr_equal(remote, &t->parms.raddr) &&
+		    p->link == t->parms.link) {
 			if (create)
 				return ERR_PTR(-EEXIST);
 
@@ -485,7 +506,7 @@ ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
 	   processing of the error. */
 
 	rcu_read_lock();
-	t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->daddr, &ipv6h->saddr);
+	t = ip6_tnl_lookup(dev_net(skb->dev), skb->dev->ifindex, &ipv6h->daddr, &ipv6h->saddr);
 	if (!t)
 		goto out;
 
@@ -887,7 +908,7 @@ static int ipxip6_rcv(struct sk_buff *skb, u8 ipproto,
 	int ret = -1;
 
 	rcu_read_lock();
-	t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr, &ipv6h->daddr);
+	t = ip6_tnl_lookup(dev_net(skb->dev), skb->dev->ifindex, &ipv6h->saddr, &ipv6h->daddr);
 
 	if (t) {
 		u8 tproto = READ_ONCE(t->parms.proto);
@@ -1420,8 +1441,10 @@ tx_err:
 static void ip6_tnl_link_config(struct ip6_tnl *t)
 {
 	struct net_device *dev = t->dev;
+	struct net_device *tdev = NULL;
 	struct __ip6_tnl_parm *p = &t->parms;
 	struct flowi6 *fl6 = &t->fl.u.ip6;
+	unsigned int mtu;
 	int t_hlen;
 
 	memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
@@ -1457,22 +1480,25 @@ static void ip6_tnl_link_config(struct ip6_tnl *t)
 		struct rt6_info *rt = rt6_lookup(t->net,
 						 &p->raddr, &p->laddr,
 						 p->link, NULL, strict);
+		if (rt) {
+			tdev = rt->dst.dev;
+			ip6_rt_put(rt);
+		}
 
-		if (!rt)
-			return;
+		if (!tdev && p->link)
+			tdev = __dev_get_by_index(t->net, p->link);
 
-		if (rt->dst.dev) {
-			dev->hard_header_len = rt->dst.dev->hard_header_len +
-				t_hlen;
+		if (tdev) {
+			dev->hard_header_len = tdev->hard_header_len + t_hlen;
+			mtu = min_t(unsigned int, tdev->mtu, IP6_MAX_MTU);
 
-			dev->mtu = rt->dst.dev->mtu - t_hlen;
+			dev->mtu = mtu - t_hlen;
 			if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
 				dev->mtu -= 8;
 
 			if (dev->mtu < IPV6_MIN_MTU)
 				dev->mtu = IPV6_MIN_MTU;
 		}
-		ip6_rt_put(rt);
 	}
 }

@@ -3450,7 +3450,7 @@ int ieee80211_attach_ack_skb(struct ieee80211_local *local, struct sk_buff *skb,
 
 	spin_lock_irqsave(&local->ack_status_lock, spin_flags);
 	id = idr_alloc(&local->ack_status_frames, ack_skb,
-		       1, 0x40, GFP_ATOMIC);
+		       1, 0x2000, GFP_ATOMIC);
 	spin_unlock_irqrestore(&local->ack_status_lock, spin_flags);
 
 	if (id < 0) {
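The new ceiling follows from the tx_info change earlier in this pull: ack_frame_id grew from 6 to 13 bits, and the IDR range must fit that field, hence 0x40 (2^6) becomes 0x2000 (2^13). A compile-time way to tie the two together, as a sketch rather than the actual patch:

/* Sketch: keep the IDR ceiling in lock-step with the bitfield width. */
#include <linux/build_bug.h>
#define ACK_FRAME_ID_BITS	13
static_assert(0x2000 == (1 << ACK_FRAME_ID_BITS));	/* 2^13 == 0x2000 */
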
@@ -8,7 +8,7 @@
  * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
  * Copyright (C) 2015 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2019 Intel Corporation
+ * Copyright (C) 2018 - 2020 Intel Corporation
  */
 
 #include <linux/delay.h>
@@ -1311,7 +1311,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
 	if (!res) {
 		ch_switch.timestamp = timestamp;
 		ch_switch.device_timestamp = device_timestamp;
-		ch_switch.block_tx = beacon ? csa_ie.mode : 0;
+		ch_switch.block_tx = csa_ie.mode;
 		ch_switch.chandef = csa_ie.chandef;
 		ch_switch.count = csa_ie.count;
 		ch_switch.delay = csa_ie.max_switch_time;
@@ -1404,7 +1404,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
 
 	sdata->vif.csa_active = true;
 	sdata->csa_chandef = csa_ie.chandef;
-	sdata->csa_block_tx = ch_switch.block_tx;
+	sdata->csa_block_tx = csa_ie.mode;
 	ifmgd->csa_ignored_same_chan = false;
 
 	if (sdata->csa_block_tx)
@@ -1438,7 +1438,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
 	 * reset when the disconnection worker runs.
 	 */
 	sdata->vif.csa_active = true;
-	sdata->csa_block_tx = ch_switch.block_tx;
+	sdata->csa_block_tx = csa_ie.mode;
 
 	ieee80211_queue_work(&local->hw, &ifmgd->csa_connection_drop_work);
 	mutex_unlock(&local->chanctx_mtx);

@@ -2442,7 +2442,7 @@ static int ieee80211_store_ack_skb(struct ieee80211_local *local,
 
 	spin_lock_irqsave(&local->ack_status_lock, flags);
 	id = idr_alloc(&local->ack_status_frames, ack_skb,
-		       1, 0x40, GFP_ATOMIC);
+		       1, 0x2000, GFP_ATOMIC);
 	spin_unlock_irqrestore(&local->ack_status_lock, flags);
 
 	if (id >= 0) {
@@ -1063,16 +1063,22 @@ _ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
 				elem_parse_failed = true;
 			break;
 		case WLAN_EID_VHT_OPERATION:
-			if (elen >= sizeof(struct ieee80211_vht_operation))
+			if (elen >= sizeof(struct ieee80211_vht_operation)) {
 				elems->vht_operation = (void *)pos;
-			else
-				elem_parse_failed = true;
+				if (calc_crc)
+					crc = crc32_be(crc, pos - 2, elen + 2);
+				break;
+			}
+			elem_parse_failed = true;
 			break;
 		case WLAN_EID_OPMODE_NOTIF:
-			if (elen > 0)
+			if (elen > 0) {
 				elems->opmode_notif = pos;
-			else
-				elem_parse_failed = true;
+				if (calc_crc)
+					crc = crc32_be(crc, pos - 2, elen + 2);
+				break;
+			}
+			elem_parse_failed = true;
 			break;
 		case WLAN_EID_MESH_ID:
 			elems->mesh_id = pos;
@@ -2987,10 +2993,22 @@ bool ieee80211_chandef_vht_oper(struct ieee80211_hw *hw,
 	int cf0, cf1;
 	int ccfs0, ccfs1, ccfs2;
 	int ccf0, ccf1;
+	u32 vht_cap;
+	bool support_80_80 = false;
+	bool support_160 = false;
 
 	if (!oper || !htop)
 		return false;
 
+	vht_cap = hw->wiphy->bands[chandef->chan->band]->vht_cap.cap;
+	support_160 = (vht_cap & (IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK |
+				  IEEE80211_VHT_CAP_EXT_NSS_BW_MASK));
+	support_80_80 = ((vht_cap &
+			 IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ) ||
+			(vht_cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ &&
+			 vht_cap & IEEE80211_VHT_CAP_EXT_NSS_BW_MASK) ||
+			((vht_cap & IEEE80211_VHT_CAP_EXT_NSS_BW_MASK) >>
+			 IEEE80211_VHT_CAP_EXT_NSS_BW_SHIFT > 1));
 	ccfs0 = oper->center_freq_seg0_idx;
 	ccfs1 = oper->center_freq_seg1_idx;
 	ccfs2 = (le16_to_cpu(htop->operation_mode) &
@@ -3018,10 +3036,10 @@ bool ieee80211_chandef_vht_oper(struct ieee80211_hw *hw,
 		unsigned int diff;
 
 		diff = abs(ccf1 - ccf0);
-		if (diff == 8) {
+		if ((diff == 8) && support_160) {
 			new.width = NL80211_CHAN_WIDTH_160;
 			new.center_freq1 = cf1;
-		} else if (diff > 8) {
+		} else if ((diff > 8) && support_80_80) {
 			new.width = NL80211_CHAN_WIDTH_80P80;
 			new.center_freq2 = cf1;
 		}

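A worked example of the seg0/seg1 arithmetic used above: centre-frequency segment indexes count in 5 MHz channel units, so a gap of 8 units between ccf0 and ccf1 is 40 MHz, exactly the offset between an 80 MHz segment centre and the centre of the contiguous 160 MHz channel; anything larger means a non-contiguous 80+80 pair. With the fix, each case is additionally gated on the capability bits. The values and the support_* / width variables below are assumed for illustration:

/* Assumed example: an 80 MHz segment centred at index 42, second index 50. */
int ccf0 = 42, ccf1 = 50;
unsigned int diff = abs(ccf1 - ccf0);	/* 8 units * 5 MHz = 40 MHz */

if ((diff == 8) && support_160)		/* contiguous: one 160 MHz channel */
	width = NL80211_CHAN_WIDTH_160;
else if ((diff > 8) && support_80_80)	/* gap: non-contiguous 80+80 */
	width = NL80211_CHAN_WIDTH_80P80;
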
@@ -643,7 +643,7 @@ static struct ipv6_pinfo *mptcp_inet6_sk(const struct sock *sk)
 }
 #endif
 
-struct sock *mptcp_sk_clone_lock(const struct sock *sk)
+static struct sock *mptcp_sk_clone_lock(const struct sock *sk)
 {
 	struct sock *nsk = sk_clone_lock(sk, GFP_ATOMIC);
 
@@ -691,6 +691,7 @@ static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
 					    .len = 128 / BITS_PER_BYTE },
 	[TCA_FLOWER_KEY_CT_LABELS_MASK]	= { .type = NLA_BINARY,
 					    .len = 128 / BITS_PER_BYTE },
+	[TCA_FLOWER_FLAGS]		= { .type = NLA_U32 },
 };
 
 static const struct nla_policy
@@ -157,6 +157,7 @@ static void *mall_get(struct tcf_proto *tp, u32 handle)
 static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = {
 	[TCA_MATCHALL_UNSPEC]		= { .type = NLA_UNSPEC },
 	[TCA_MATCHALL_CLASSID]		= { .type = NLA_U32 },
+	[TCA_MATCHALL_FLAGS]		= { .type = NLA_U32 },
 };
 
 static int mall_set_parms(struct net *net, struct tcf_proto *tp,
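Both policy hunks close the same hole: an attribute with no nla_policy entry is accepted without length validation, so a truncated TCA_FLOWER_FLAGS or TCA_MATCHALL_FLAGS payload could previously reach nla_get_u32(). A minimal sketch of the mechanism; the EXAMPLE_* names are illustrative, not from the patch:

/* Illustrative policy table gating a parse. */
static const struct nla_policy example_policy[EXAMPLE_MAX + 1] = {
	[EXAMPLE_FLAGS] = { .type = NLA_U32 },	/* enforces a 4-byte payload */
};

err = nla_parse_nested(tb, EXAMPLE_MAX, nla, example_policy, extack);
if (err < 0)
	return err;			/* malformed attribute rejected early */
if (tb[EXAMPLE_FLAGS])
	flags = nla_get_u32(tb[EXAMPLE_FLAGS]);	/* now known well-formed */
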
@@ -470,6 +470,8 @@ static void smc_switch_to_fallback(struct smc_sock *smc)
 	if (smc->sk.sk_socket && smc->sk.sk_socket->file) {
 		smc->clcsock->file = smc->sk.sk_socket->file;
 		smc->clcsock->file->private_data = smc->clcsock;
+		smc->clcsock->wq.fasync_list =
+			smc->sk.sk_socket->wq.fasync_list;
 	}
 }

@@ -372,7 +372,9 @@ int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info)
 	dclc.hdr.length = htons(sizeof(struct smc_clc_msg_decline));
 	dclc.hdr.version = SMC_CLC_V1;
 	dclc.hdr.flag = (peer_diag_info == SMC_CLC_DECL_SYNCERR) ? 1 : 0;
-	memcpy(dclc.id_for_peer, local_systemid, sizeof(local_systemid));
+	if (smc->conn.lgr && !smc->conn.lgr->is_smcd)
+		memcpy(dclc.id_for_peer, local_systemid,
+		       sizeof(local_systemid));
 	dclc.peer_diagnosis = htonl(peer_diag_info);
 	memcpy(dclc.trl.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));

@@ -39,16 +39,15 @@ static void smc_diag_msg_common_fill(struct smc_diag_msg *r, struct sock *sk)
 {
 	struct smc_sock *smc = smc_sk(sk);
 
+	memset(r, 0, sizeof(*r));
 	r->diag_family = sk->sk_family;
+	sock_diag_save_cookie(sk, r->id.idiag_cookie);
 	if (!smc->clcsock)
 		return;
 	r->id.idiag_sport = htons(smc->clcsock->sk->sk_num);
 	r->id.idiag_dport = smc->clcsock->sk->sk_dport;
 	r->id.idiag_if = smc->clcsock->sk->sk_bound_dev_if;
-	sock_diag_save_cookie(sk, r->id.idiag_cookie);
 	if (sk->sk_protocol == SMCPROTO_SMC) {
-		memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
-		memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
 		r->id.idiag_src[0] = smc->clcsock->sk->sk_rcv_saddr;
 		r->id.idiag_dst[0] = smc->clcsock->sk->sk_daddr;
 #if IS_ENABLED(CONFIG_IPV6)
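The smc_diag change is a classic infoleak fix: the reply struct was filled field by field, and an early return (no clcsock) left the rest of the record uninitialized on its way to user space. The defensive pattern the hunk adopts, with an illustrative struct:

/* Sketch of the pattern: zero the whole record first so padding and any
 * fields skipped on an early return never carry kernel memory to userspace.
 */
struct example_diag_reply r;

memset(&r, 0, sizeof(r));	/* no uninitialized bytes reach userspace */
r.family = sk->sk_family;	/* then fill only what is actually known */
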
@@ -278,7 +278,7 @@ struct tipc_crypto *tipc_node_crypto_rx_by_list(struct list_head *pos)
 }
 #endif
 
-void tipc_node_free(struct rcu_head *rp)
+static void tipc_node_free(struct rcu_head *rp)
 {
 	struct tipc_node *n = container_of(rp, struct tipc_node, rcu);
 
@@ -2798,7 +2798,7 @@ static int tipc_nl_retrieve_nodeid(struct nlattr **attrs, u8 **node_id)
 	return 0;
 }
 
-int __tipc_nl_node_set_key(struct sk_buff *skb, struct genl_info *info)
+static int __tipc_nl_node_set_key(struct sk_buff *skb, struct genl_info *info)
 {
 	struct nlattr *attrs[TIPC_NLA_NODE_MAX + 1];
 	struct net *net = sock_net(skb->sk);
@@ -2875,7 +2875,8 @@ int tipc_nl_node_set_key(struct sk_buff *skb, struct genl_info *info)
 	return err;
 }
 
-int __tipc_nl_node_flush_key(struct sk_buff *skb, struct genl_info *info)
+static int __tipc_nl_node_flush_key(struct sk_buff *skb,
+				    struct genl_info *info)
 {
 	struct net *net = sock_net(skb->sk);
 	struct tipc_net *tn = tipc_net(net);
@@ -2441,6 +2441,8 @@ static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
 			return -ETIMEDOUT;
 		if (signal_pending(current))
 			return sock_intr_errno(*timeo_p);
+		if (sk->sk_state == TIPC_DISCONNECTING)
+			break;
 
 		add_wait_queue(sk_sleep(sk), &wait);
 		done = sk_wait_event(sk, timeo_p, tipc_sk_connected(sk),
@@ -7,9 +7,13 @@
 void cfg80211_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
+	struct device *pdev = wiphy_dev(wdev->wiphy);
 
-	strlcpy(info->driver, wiphy_dev(wdev->wiphy)->driver->name,
-		sizeof(info->driver));
+	if (pdev->driver)
+		strlcpy(info->driver, pdev->driver->name,
+			sizeof(info->driver));
+	else
+		strlcpy(info->driver, "N/A", sizeof(info->driver));
 
 	strlcpy(info->version, init_utsname()->release, sizeof(info->version));

@@ -437,6 +437,7 @@ const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
 	[NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT] = { .type = NLA_FLAG },
 	[NL80211_ATTR_CONTROL_PORT_OVER_NL80211] = { .type = NLA_FLAG },
 	[NL80211_ATTR_PRIVACY] = { .type = NLA_FLAG },
+	[NL80211_ATTR_STATUS_CODE] = { .type = NLA_U16 },
 	[NL80211_ATTR_CIPHER_SUITE_GROUP] = { .type = NLA_U32 },
 	[NL80211_ATTR_WPA_VERSIONS] = { .type = NLA_U32 },
 	[NL80211_ATTR_PID] = { .type = NLA_U32 },
@@ -300,10 +300,10 @@ xfrmi_xmit2(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
 			if (mtu < IPV6_MIN_MTU)
 				mtu = IPV6_MIN_MTU;
 
-			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
+			icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
 		} else {
-			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
-				  htonl(mtu));
+			icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
+				      htonl(mtu));
 		}
 
 		dst_release(dst);

@@ -24,6 +24,7 @@
 set -e
 
+exec 3>&1
 export LANG=C
 export WG_HIDE_KEYS=never
 netns0="wg-test-$$-0"
 netns1="wg-test-$$-1"
@@ -297,7 +298,17 @@ ip1 -4 rule add table main suppress_prefixlength 0
 n1 ping -W 1 -c 100 -f 192.168.99.7
 n1 ping -W 1 -c 100 -f abab::1111
 
+# Have ns2 NAT into wg0 packets from ns0, but return an icmp error along the right route.
+n2 iptables -t nat -A POSTROUTING -s 10.0.0.0/24 -d 192.168.241.0/24 -j SNAT --to 192.168.241.2
+n0 iptables -t filter -A INPUT \! -s 10.0.0.0/24 -i vethrs -j DROP # Manual rpfilter just to be explicit.
+n2 bash -c 'printf 1 > /proc/sys/net/ipv4/ip_forward'
+ip0 -4 route add 192.168.241.1 via 10.0.0.100
+n2 wg set wg0 peer "$pub1" remove
+[[ $(! n0 ping -W 1 -c 1 192.168.241.1 || false) == *"From 10.0.0.100 icmp_seq=1 Destination Host Unreachable"* ]]
+
 n0 iptables -t nat -F
+n0 iptables -t filter -F
+n2 iptables -t nat -F
 ip0 link del vethrc
 ip0 link del vethrs
 ip1 link del wg0