Merge branch '40GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue
Jeff Kirsher says:

====================
40GbE Intel Wired LAN Driver Updates 2016-02-17

This series contains updates to i40e/i40evf once again.

Mitch updates the use of a define instead of a magic number. Adds support
for packet split receive on VFs, which is disabled by default. Expands on
a code comment which was not verbose or really helpful. Fixes an issue
where a reset that fails to complete would leave the adapter state set
incorrectly, which would cause a panic on rmmod; the adapter state is now
set to DOWN to avoid the panic.

Jesse cleans up a "dump" entry in debugfs that never panned out to be
useful.

Anjali adds a workaround for cases where we might have interrupts that
get lost but write-back (WB) happened. Fixes an issue by falling back to
enabling unicast, multicast and broadcast promiscuous mode when the
driver must disable its use of "default port" (defport mode) due to an
internal incompatibility with Multiple Function per Port (MFP). Fixes an
issue where queues were being enabled/disabled in the interrupt handler,
which should never happen.

Kiran cleans up the code which used the hard-coded base VEB SEID, since
it was removed from the specification.

Shannon adds a few bits for better debug messages. Fixes an obscure
corner case where it was possible to clear the NVM update wait flag when
no update_done message was actually received.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit d56fddaa84
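A note on the recurring i40e_get_tx_pending()/i40evf_get_tx_pending() change in the diff below: the new in_sw argument selects whether "pending" is measured against the hardware's head write-back or against the software's next_to_clean. The following stand-alone C sketch (illustrative only, not driver code; the ring size and index values are made up) shows why the two views differ when an interrupt is lost:

    /* Sketch of the tx_pending arithmetic the series changes: with
     * in_sw the head comes from the software copy (next_to_clean)
     * instead of the hardware head write-back, so a ring whose
     * interrupt was lost still reports work left to do.
     */
    #include <stdio.h>

    #define RING_COUNT 512 /* hypothetical descriptor count */

    static unsigned int tx_pending(unsigned int head, unsigned int tail)
    {
            /* same wrap-around logic as i40e_get_tx_pending() */
            if (head != tail)
                    return (head < tail) ?
                           tail - head : (tail + RING_COUNT - head);
            return 0;
    }

    int main(void)
    {
            unsigned int hw_head = 100; /* head write-back: HW finished here */
            unsigned int sw_head = 40;  /* next_to_clean: SW cleaned to here */
            unsigned int tail = 100;    /* next descriptor SW would post */

            /* HW view: nothing pending, the old hang check stays quiet */
            printf("pending (HW) = %u\n", tx_pending(hw_head, tail)); /* 0 */
            /* SW view: 60 descriptors written back but never cleaned --
             * the lost-interrupt case the new code reschedules NAPI for */
            printf("pending (SW) = %u\n", tx_pending(sw_head, tail)); /* 60 */
            return 0;
    }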
drivers/net/ethernet/intel/i40e/i40e.h
@@ -64,9 +64,6 @@
 #include "i40e_dcb.h"
 
 /* Useful i40e defaults */
-#define I40E_BASE_PF_SEID	16
-#define I40E_BASE_VSI_SEID	512
-#define I40E_BASE_VEB_SEID	288
 #define I40E_MAX_VEB		16
 
 #define I40E_MAX_NUM_DESCRIPTORS	4096
@@ -512,6 +509,7 @@ struct i40e_vsi {
 	u32 tx_busy;
 	u64 tx_linearize;
 	u64 tx_force_wb;
+	u64 tx_lost_interrupt;
 	u32 rx_buf_failed;
 	u32 rx_page_failed;
drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -953,6 +953,9 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
 	u16 flags;
 	u16 ntu;
 
+	/* pre-clean the event info */
+	memset(&e->desc, 0, sizeof(e->desc));
+
 	/* take the lock before we start messing with the ring */
 	mutex_lock(&hw->aq.arq_mutex);
 
@@ -1020,14 +1023,6 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
 	hw->aq.arq.next_to_clean = ntc;
 	hw->aq.arq.next_to_use = ntu;
 
-clean_arq_element_out:
-	/* Set pending if needed, unlock and return */
-	if (pending != NULL)
-		*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
-
-clean_arq_element_err:
-	mutex_unlock(&hw->aq.arq_mutex);
-
 	if (i40e_is_nvm_update_op(&e->desc)) {
 		if (hw->aq.nvm_release_on_done) {
 			i40e_release_nvm(hw);
@@ -1048,6 +1043,13 @@ clean_arq_element_err:
 		}
 	}
 
+clean_arq_element_out:
+	/* Set pending if needed, unlock and return */
+	if (pending)
+		*pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
+
+clean_arq_element_err:
+	mutex_unlock(&hw->aq.arq_mutex);
+
 	return ret_code;
 }
drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -1087,6 +1087,7 @@ struct i40e_aqc_set_vsi_promiscuous_modes {
 #define I40E_AQC_SET_VSI_PROMISC_BROADCAST	0x04
 #define I40E_AQC_SET_VSI_DEFAULT		0x08
 #define I40E_AQC_SET_VSI_PROMISC_VLAN		0x10
+#define I40E_AQC_SET_VSI_PROMISC_TX		0x8000
 	__le16	seid;
 #define I40E_AQC_VSI_PROM_CMD_SEID_MASK		0x3FF
 	__le16	vlan_tag;
drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -1952,12 +1952,19 @@ i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
 	i40e_fill_default_direct_cmd_desc(&desc,
 					  i40e_aqc_opc_set_vsi_promiscuous_modes);
 
-	if (set)
+	if (set) {
 		flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
+		if (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver >= 5)) ||
+		    (hw->aq.api_maj_ver > 1))
+			flags |= I40E_AQC_SET_VSI_PROMISC_TX;
+	}
 
 	cmd->promiscuous_flags = cpu_to_le16(flags);
 
 	cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
+	if (((hw->aq.api_maj_ver >= 1) && (hw->aq.api_min_ver >= 5)) ||
+	    (hw->aq.api_maj_ver > 1))
+		cmd->valid_flags |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_TX);
 
 	cmd->seid = cpu_to_le16(seid);
 	status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -61,258 +61,12 @@ static struct i40e_veb *i40e_dbg_find_veb(struct i40e_pf *pf, int seid)
 {
 	int i;
 
-	if ((seid < I40E_BASE_VEB_SEID) ||
-	    (seid > (I40E_BASE_VEB_SEID + I40E_MAX_VEB)))
-		dev_info(&pf->pdev->dev, "%d: bad seid\n", seid);
-	else
-		for (i = 0; i < I40E_MAX_VEB; i++)
-			if (pf->veb[i] && pf->veb[i]->seid == seid)
-				return pf->veb[i];
+	for (i = 0; i < I40E_MAX_VEB; i++)
+		if (pf->veb[i] && pf->veb[i]->seid == seid)
+			return pf->veb[i];
 	return NULL;
 }
 
-/**************************************************************
- * dump
- * The dump entry in debugfs is for getting a data snapshot of
- * the driver's current configuration and runtime details.
- * When the filesystem entry is written, a snapshot is taken.
- * When the entry is read, the most recent snapshot data is dumped.
- **************************************************************/
-static char *i40e_dbg_dump_buf;
-static ssize_t i40e_dbg_dump_data_len;
-static ssize_t i40e_dbg_dump_buffer_len;
-
-/**
- * i40e_dbg_dump_read - read the dump data
- * @filp: the opened file
- * @buffer: where to write the data for the user to read
- * @count: the size of the user's buffer
- * @ppos: file position offset
- **/
-static ssize_t i40e_dbg_dump_read(struct file *filp, char __user *buffer,
-				  size_t count, loff_t *ppos)
-{
-	int bytes_not_copied;
-	int len;
-
-	/* is *ppos bigger than the available data? */
-	if (*ppos >= i40e_dbg_dump_data_len || !i40e_dbg_dump_buf)
-		return 0;
-
-	/* be sure to not read beyond the end of available data */
-	len = min_t(int, count, (i40e_dbg_dump_data_len - *ppos));
-
-	bytes_not_copied = copy_to_user(buffer, &i40e_dbg_dump_buf[*ppos], len);
-	if (bytes_not_copied)
-		return -EFAULT;
-
-	*ppos += len;
-	return len;
-}
-
-/**
- * i40e_dbg_prep_dump_buf
- * @pf: the PF we're working with
- * @buflen: the desired buffer length
- *
- * Return positive if success, 0 if failed
- **/
-static int i40e_dbg_prep_dump_buf(struct i40e_pf *pf, int buflen)
-{
-	/* if not already big enough, prep for re alloc */
-	if (i40e_dbg_dump_buffer_len && i40e_dbg_dump_buffer_len < buflen) {
-		kfree(i40e_dbg_dump_buf);
-		i40e_dbg_dump_buffer_len = 0;
-		i40e_dbg_dump_buf = NULL;
-	}
-
-	/* get a new buffer if needed */
-	if (!i40e_dbg_dump_buf) {
-		i40e_dbg_dump_buf = kzalloc(buflen, GFP_KERNEL);
-		if (i40e_dbg_dump_buf != NULL)
-			i40e_dbg_dump_buffer_len = buflen;
-	}
-
-	return i40e_dbg_dump_buffer_len;
-}
-
-/**
- * i40e_dbg_dump_write - trigger a datadump snapshot
- * @filp: the opened file
- * @buffer: where to find the user's data
- * @count: the length of the user's data
- * @ppos: file position offset
- *
- * Any write clears the stats
- **/
-static ssize_t i40e_dbg_dump_write(struct file *filp,
-				   const char __user *buffer,
-				   size_t count, loff_t *ppos)
-{
-	struct i40e_pf *pf = filp->private_data;
-	bool seid_found = false;
-	long seid = -1;
-	int buflen = 0;
-	int i, ret;
-	int len;
-	u8 *p;
-
-	/* don't allow partial writes */
-	if (*ppos != 0)
-		return 0;
-
-	/* decode the SEID given to be dumped */
-	ret = kstrtol_from_user(buffer, count, 0, &seid);
-
-	if (ret) {
-		dev_info(&pf->pdev->dev, "bad seid value\n");
-	} else if (seid == 0) {
-		seid_found = true;
-
-		kfree(i40e_dbg_dump_buf);
-		i40e_dbg_dump_buffer_len = 0;
-		i40e_dbg_dump_data_len = 0;
-		i40e_dbg_dump_buf = NULL;
-		dev_info(&pf->pdev->dev, "debug buffer freed\n");
-
-	} else if (seid == pf->pf_seid || seid == 1) {
-		seid_found = true;
-
-		buflen = sizeof(struct i40e_pf);
-		buflen += (sizeof(struct i40e_aq_desc)
-		     * (pf->hw.aq.num_arq_entries + pf->hw.aq.num_asq_entries));
-
-		if (i40e_dbg_prep_dump_buf(pf, buflen)) {
-			p = i40e_dbg_dump_buf;
-
-			/* avoid use of memcpy here due to sparse warning
-			 * about copy size.
-			 */
-			*((struct i40e_pf *)p) = *pf;
-			p += sizeof(struct i40e_pf);
-
-			len = (sizeof(struct i40e_aq_desc)
-					* pf->hw.aq.num_asq_entries);
-			memcpy(p, pf->hw.aq.asq.desc_buf.va, len);
-			p += len;
-
-			len = (sizeof(struct i40e_aq_desc)
-					* pf->hw.aq.num_arq_entries);
-			memcpy(p, pf->hw.aq.arq.desc_buf.va, len);
-			p += len;
-
-			i40e_dbg_dump_data_len = buflen;
-			dev_info(&pf->pdev->dev,
-				 "PF seid %ld dumped %d bytes\n",
-				 seid, (int)i40e_dbg_dump_data_len);
-		}
-	} else if (seid >= I40E_BASE_VSI_SEID) {
-		struct i40e_vsi *vsi = NULL;
-		struct i40e_mac_filter *f;
-		int filter_count = 0;
-
-		mutex_lock(&pf->switch_mutex);
-		vsi = i40e_dbg_find_vsi(pf, seid);
-		if (!vsi) {
-			mutex_unlock(&pf->switch_mutex);
-			goto write_exit;
-		}
-
-		buflen = sizeof(struct i40e_vsi);
-		buflen += sizeof(struct i40e_q_vector) * vsi->num_q_vectors;
-		buflen += sizeof(struct i40e_ring) * 2 * vsi->num_queue_pairs;
-		buflen += sizeof(struct i40e_tx_buffer) * vsi->num_queue_pairs;
-		buflen += sizeof(struct i40e_rx_buffer) * vsi->num_queue_pairs;
-		list_for_each_entry(f, &vsi->mac_filter_list, list)
-			filter_count++;
-		buflen += sizeof(struct i40e_mac_filter) * filter_count;
-
-		if (i40e_dbg_prep_dump_buf(pf, buflen)) {
-			p = i40e_dbg_dump_buf;
-			seid_found = true;
-
-			len = sizeof(struct i40e_vsi);
-			memcpy(p, vsi, len);
-			p += len;
-
-			if (vsi->num_q_vectors) {
-				len = (sizeof(struct i40e_q_vector)
-					* vsi->num_q_vectors);
-				memcpy(p, vsi->q_vectors, len);
-				p += len;
-			}
-
-			if (vsi->num_queue_pairs) {
-				len = (sizeof(struct i40e_ring) *
-				      vsi->num_queue_pairs);
-				memcpy(p, vsi->tx_rings, len);
-				p += len;
-				memcpy(p, vsi->rx_rings, len);
-				p += len;
-			}
-
-			if (vsi->tx_rings[0]) {
-				len = sizeof(struct i40e_tx_buffer);
-				for (i = 0; i < vsi->num_queue_pairs; i++) {
-					memcpy(p, vsi->tx_rings[i]->tx_bi, len);
-					p += len;
-				}
-				len = sizeof(struct i40e_rx_buffer);
-				for (i = 0; i < vsi->num_queue_pairs; i++) {
-					memcpy(p, vsi->rx_rings[i]->rx_bi, len);
-					p += len;
-				}
-			}
-
-			/* macvlan filter list */
-			len = sizeof(struct i40e_mac_filter);
-			list_for_each_entry(f, &vsi->mac_filter_list, list) {
-				memcpy(p, f, len);
-				p += len;
-			}
-
-			i40e_dbg_dump_data_len = buflen;
-			dev_info(&pf->pdev->dev,
-				 "VSI seid %ld dumped %d bytes\n",
-				 seid, (int)i40e_dbg_dump_data_len);
-		}
-		mutex_unlock(&pf->switch_mutex);
-	} else if (seid >= I40E_BASE_VEB_SEID) {
-		struct i40e_veb *veb = NULL;
-
-		mutex_lock(&pf->switch_mutex);
-		veb = i40e_dbg_find_veb(pf, seid);
-		if (!veb) {
-			mutex_unlock(&pf->switch_mutex);
-			goto write_exit;
-		}
-
-		buflen = sizeof(struct i40e_veb);
-		if (i40e_dbg_prep_dump_buf(pf, buflen)) {
-			seid_found = true;
-			memcpy(i40e_dbg_dump_buf, veb, buflen);
-			i40e_dbg_dump_data_len = buflen;
-			dev_info(&pf->pdev->dev,
-				 "VEB seid %ld dumped %d bytes\n",
-				 seid, (int)i40e_dbg_dump_data_len);
-		}
-		mutex_unlock(&pf->switch_mutex);
-	}
-
-write_exit:
-	if (!seid_found)
-		dev_info(&pf->pdev->dev, "unknown seid %ld\n", seid);
-
-	return count;
-}
-
-static const struct file_operations i40e_dbg_dump_fops = {
-	.owner = THIS_MODULE,
-	.open = simple_open,
-	.read = i40e_dbg_dump_read,
-	.write = i40e_dbg_dump_write,
-};
-
 /**************************************************************
  * command
  * The command entry in debugfs is for giving the driver commands
@@ -933,12 +687,6 @@ static void i40e_dbg_dump_veb_seid(struct i40e_pf *pf, int seid)
 {
 	struct i40e_veb *veb;
 
-	if ((seid < I40E_BASE_VEB_SEID) ||
-	    (seid >= (I40E_MAX_VEB + I40E_BASE_VEB_SEID))) {
-		dev_info(&pf->pdev->dev, "%d: bad seid\n", seid);
-		return;
-	}
-
 	veb = i40e_dbg_find_veb(pf, seid);
 	if (!veb) {
 		dev_info(&pf->pdev->dev, "can't find veb %d\n", seid);
@@ -2217,11 +1965,6 @@ void i40e_dbg_pf_init(struct i40e_pf *pf)
 	if (!pfile)
 		goto create_failed;
 
-	pfile = debugfs_create_file("dump", 0600, pf->i40e_dbg_pf, pf,
-				    &i40e_dbg_dump_fops);
-	if (!pfile)
-		goto create_failed;
-
 	pfile = debugfs_create_file("netdev_ops", 0600, pf->i40e_dbg_pf, pf,
 				    &i40e_dbg_netdev_ops_fops);
 	if (!pfile)
@@ -2242,9 +1985,6 @@ void i40e_dbg_pf_exit(struct i40e_pf *pf)
 {
 	debugfs_remove_recursive(pf->i40e_dbg_pf);
 	pf->i40e_dbg_pf = NULL;
-
-	kfree(i40e_dbg_dump_buf);
-	i40e_dbg_dump_buf = NULL;
 }
 
 /**
drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -89,6 +89,7 @@ static const struct i40e_stats i40e_gstrings_misc_stats[] = {
 	I40E_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol),
 	I40E_VSI_STAT("tx_linearize", tx_linearize),
 	I40E_VSI_STAT("tx_force_wb", tx_force_wb),
+	I40E_VSI_STAT("tx_lost_interrupt", tx_lost_interrupt),
 	I40E_VSI_STAT("rx_alloc_fail", rx_buf_failed),
 	I40E_VSI_STAT("rx_pg_alloc_fail", rx_page_failed),
 };
@@ -1004,16 +1005,19 @@ static int i40e_get_eeprom(struct net_device *netdev,
 	/* check for NVMUpdate access method */
 	magic = hw->vendor_id | (hw->device_id << 16);
 	if (eeprom->magic && eeprom->magic != magic) {
-		struct i40e_nvm_access *cmd;
-		int errno;
+		struct i40e_nvm_access *cmd = (struct i40e_nvm_access *)eeprom;
+		int errno = 0;
 
 		/* make sure it is the right magic for NVMUpdate */
 		if ((eeprom->magic >> 16) != hw->device_id)
-			return -EINVAL;
+			errno = -EINVAL;
+		else if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) ||
+			 test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state))
+			errno = -EBUSY;
+		else
+			ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno);
 
-		cmd = (struct i40e_nvm_access *)eeprom;
-		ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno);
-		if (ret_val && (hw->debug_mask & I40E_DEBUG_NVM))
+		if ((errno || ret_val) && (hw->debug_mask & I40E_DEBUG_NVM))
 			dev_info(&pf->pdev->dev,
 				 "NVMUpdate read failed err=%d status=0x%x errno=%d module=%d offset=0x%x size=%d\n",
 				 ret_val, hw->aq.asq_last_status, errno,
@@ -1097,27 +1101,25 @@ static int i40e_set_eeprom(struct net_device *netdev,
 	struct i40e_netdev_priv *np = netdev_priv(netdev);
 	struct i40e_hw *hw = &np->vsi->back->hw;
 	struct i40e_pf *pf = np->vsi->back;
-	struct i40e_nvm_access *cmd;
+	struct i40e_nvm_access *cmd = (struct i40e_nvm_access *)eeprom;
 	int ret_val = 0;
-	int errno;
+	int errno = 0;
 	u32 magic;
 
 	/* normal ethtool set_eeprom is not supported */
 	magic = hw->vendor_id | (hw->device_id << 16);
 	if (eeprom->magic == magic)
-		return -EOPNOTSUPP;
-
+		errno = -EOPNOTSUPP;
 	/* check for NVMUpdate access method */
-	if (!eeprom->magic || (eeprom->magic >> 16) != hw->device_id)
-		return -EINVAL;
-
-	if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) ||
-	    test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state))
-		return -EBUSY;
-
-	cmd = (struct i40e_nvm_access *)eeprom;
-	ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno);
-	if (ret_val && (hw->debug_mask & I40E_DEBUG_NVM))
+	else if (!eeprom->magic || (eeprom->magic >> 16) != hw->device_id)
+		errno = -EINVAL;
+	else if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) ||
+		 test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state))
+		errno = -EBUSY;
+	else
+		ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno);
+
+	if ((errno || ret_val) && (hw->debug_mask & I40E_DEBUG_NVM))
 		dev_info(&pf->pdev->dev,
 			 "NVMUpdate write failed err=%d status=0x%x errno=%d module=%d offset=0x%x size=%d\n",
 			 ret_val, hw->aq.asq_last_status, errno,
drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -46,7 +46,7 @@ static const char i40e_driver_string[] =
 
 #define DRV_VERSION_MAJOR 1
 #define DRV_VERSION_MINOR 4
-#define DRV_VERSION_BUILD 13
+#define DRV_VERSION_BUILD 15
 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
 	     __stringify(DRV_VERSION_MINOR) "." \
 	     __stringify(DRV_VERSION_BUILD)    DRV_KERN
@@ -768,7 +768,7 @@ static void i40e_update_fcoe_stats(struct i40e_vsi *vsi)
 	if (vsi->type != I40E_VSI_FCOE)
 		return;
 
-	idx = (pf->pf_seid - I40E_BASE_PF_SEID) + I40E_FCOE_PF_STAT_OFFSET;
+	idx = hw->pf_id + I40E_FCOE_PF_STAT_OFFSET;
 	fs = &vsi->fcoe_stats;
 	ofs = &vsi->fcoe_stats_offsets;
 
@@ -819,6 +819,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
 	struct i40e_eth_stats *oes;
 	struct i40e_eth_stats *es;     /* device's eth stats */
 	u32 tx_restart, tx_busy;
+	u64 tx_lost_interrupt;
 	struct i40e_ring *p;
 	u32 rx_page, rx_buf;
 	u64 bytes, packets;
@@ -844,6 +845,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
 	rx_b = rx_p = 0;
 	tx_b = tx_p = 0;
 	tx_restart = tx_busy = tx_linearize = tx_force_wb = 0;
+	tx_lost_interrupt = 0;
 	rx_page = 0;
 	rx_buf = 0;
 	rcu_read_lock();
@@ -862,6 +864,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
 		tx_busy += p->tx_stats.tx_busy;
 		tx_linearize += p->tx_stats.tx_linearize;
 		tx_force_wb += p->tx_stats.tx_force_wb;
+		tx_lost_interrupt += p->tx_stats.tx_lost_interrupt;
 
 		/* Rx queue is part of the same block as Tx queue */
 		p = &p[1];
@@ -880,6 +883,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
 	vsi->tx_busy = tx_busy;
 	vsi->tx_linearize = tx_linearize;
 	vsi->tx_force_wb = tx_force_wb;
+	vsi->tx_lost_interrupt = tx_lost_interrupt;
 	vsi->rx_page_failed = rx_page;
 	vsi->rx_buf_failed = rx_buf;
 
@@ -2118,7 +2122,9 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
 		cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
 			       test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
 					&vsi->state));
-		if (vsi->type == I40E_VSI_MAIN && pf->lan_veb != I40E_NO_VEB) {
+		if ((vsi->type == I40E_VSI_MAIN) &&
+		    (pf->lan_veb != I40E_NO_VEB) &&
+		    !(pf->flags & I40E_FLAG_MFP_ENABLED)) {
 			/* set defport ON for Main VSI instead of true promisc
 			 * this way we will get all unicast/multicast and VLAN
 			 * promisc behavior but will not get VF or VMDq traffic
@@ -3456,16 +3462,12 @@ static irqreturn_t i40e_intr(int irq, void *data)
 		struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
 		struct i40e_q_vector *q_vector = vsi->q_vectors[0];
 
-		/* temporarily disable queue cause for NAPI processing */
-		u32 qval = rd32(hw, I40E_QINT_RQCTL(0));
-
-		qval &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
-		wr32(hw, I40E_QINT_RQCTL(0), qval);
-
-		qval = rd32(hw, I40E_QINT_TQCTL(0));
-		qval &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
-		wr32(hw, I40E_QINT_TQCTL(0), qval);
-
+		/* We do not have a way to disarm Queue causes while leaving
+		 * interrupt enabled for all other causes, ideally
+		 * interrupt should be disabled while we are in NAPI but
+		 * this is not a performance path and napi_schedule()
+		 * can deal with rescheduling.
+		 */
 		if (!test_bit(__I40E_DOWN, &pf->state))
 			napi_schedule_irqoff(&q_vector->napi);
 	}
@@ -3473,6 +3475,7 @@ static irqreturn_t i40e_intr(int irq, void *data)
 	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
 		ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
 		set_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
+		i40e_debug(&pf->hw, I40E_DEBUG_NVM, "AdminQ event\n");
 	}
 
 	if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
@@ -4349,7 +4352,7 @@ static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi)
 {
 	struct i40e_ring *tx_ring = NULL;
 	struct i40e_pf	*pf;
-	u32 head, val, tx_pending;
+	u32 head, val, tx_pending_hw;
 	int i;
 
 	pf = vsi->back;
@@ -4375,16 +4378,9 @@ static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi)
 	else
 		val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);
 
-	/* Bail out if interrupts are disabled because napi_poll
-	 * execution in-progress or will get scheduled soon.
-	 * napi_poll cleans TX and RX queues and updates 'next_to_clean'.
-	 */
-	if (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK))
-		return;
-
 	head = i40e_get_head(tx_ring);
 
-	tx_pending = i40e_get_tx_pending(tx_ring);
+	tx_pending_hw = i40e_get_tx_pending(tx_ring, false);
 
 	/* HW is done executing descriptors, updated HEAD write back,
 	 * but SW hasn't processed those descriptors. If interrupt is
@@ -4392,12 +4388,12 @@ static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi)
 	 * dev_watchdog detecting timeout on those netdev_queue,
 	 * hence proactively trigger SW interrupt.
 	 */
-	if (tx_pending) {
+	if (tx_pending_hw && (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK))) {
 		/* NAPI Poll didn't run and clear since it was set */
 		if (test_and_clear_bit(I40E_Q_VECTOR_HUNG_DETECT,
 				       &tx_ring->q_vector->hung_detected)) {
-			netdev_info(vsi->netdev, "VSI_seid %d, Hung TX queue %d, tx_pending: %d, NTC:0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x\n",
-				    vsi->seid, q_idx, tx_pending,
+			netdev_info(vsi->netdev, "VSI_seid %d, Hung TX queue %d, tx_pending_hw: %d, NTC:0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x\n",
+				    vsi->seid, q_idx, tx_pending_hw,
 				    tx_ring->next_to_clean, head,
 				    tx_ring->next_to_use,
 				    readl(tx_ring->tail));
@@ -4410,6 +4406,17 @@ static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi)
 			      &tx_ring->q_vector->hung_detected);
 		}
 	}
+
+	/* This is the case where we have interrupts missing,
+	 * so the tx_pending in HW will most likely be 0, but we
+	 * will have tx_pending in SW since the WB happened but the
+	 * interrupt got lost.
+	 */
+	if ((!tx_pending_hw) && i40e_get_tx_pending(tx_ring, true) &&
+	    (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK))) {
+		if (napi_reschedule(&tx_ring->q_vector->napi))
+			tx_ring->tx_stats.tx_lost_interrupt++;
+	}
 }
 
 /**
@@ -6326,7 +6333,9 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
 		case i40e_aqc_opc_nvm_erase:
 		case i40e_aqc_opc_nvm_update:
 		case i40e_aqc_opc_oem_post_update:
-			i40e_debug(&pf->hw, I40E_DEBUG_NVM, "ARQ NVM operation completed\n");
+			i40e_debug(&pf->hw, I40E_DEBUG_NVM,
+				   "ARQ NVM operation 0x%04x completed\n",
+				   opcode);
 			break;
 		default:
 			dev_info(&pf->pdev->dev,
@@ -11219,8 +11228,8 @@ static void i40e_remove(struct pci_dev *pdev)
 	i40e_vsi_release(pf->vsi[pf->lan_vsi]);
 
 	/* shutdown and destroy the HMC */
-	if (pf->hw.hmc.hmc_obj) {
-		ret_code = i40e_shutdown_lan_hmc(&pf->hw);
+	if (hw->hmc.hmc_obj) {
+		ret_code = i40e_shutdown_lan_hmc(hw);
 		if (ret_code)
 			dev_warn(&pdev->dev,
 				 "Failed to destroy the HMC resources: %d\n",
@@ -11228,7 +11237,7 @@ static void i40e_remove(struct pci_dev *pdev)
 	}
 
 	/* shutdown the adminq */
-	ret_code = i40e_shutdown_adminq(&pf->hw);
+	ret_code = i40e_shutdown_adminq(hw);
 	if (ret_code)
 		dev_warn(&pdev->dev,
 			 "Failed to destroy the Admin Queue resources: %d\n",
@@ -11256,7 +11265,7 @@ static void i40e_remove(struct pci_dev *pdev)
 	kfree(pf->qp_pile);
 	kfree(pf->vsi);
 
-	iounmap(pf->hw.hw_addr);
+	iounmap(hw->hw_addr);
 	kfree(pf);
 	pci_release_selected_regions(pdev,
 				     pci_select_bars(pdev, IORESOURCE_MEM));
drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -610,15 +610,19 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring)
 /**
  * i40e_get_tx_pending - how many tx descriptors not processed
  * @tx_ring: the ring of descriptors
+ * @in_sw: is tx_pending being checked in SW or HW
  *
  * Since there is no access to the ring head register
  * in XL710, we need to use our local copies
 **/
-u32 i40e_get_tx_pending(struct i40e_ring *ring)
+u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw)
 {
 	u32 head, tail;
 
-	head = i40e_get_head(ring);
+	if (!in_sw)
+		head = i40e_get_head(ring);
+	else
+		head = ring->next_to_clean;
 	tail = readl(ring->tail);
 
 	if (head != tail)
@@ -741,7 +745,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
 	 * them to be written back in case we stay in NAPI.
 	 * In this mode on X722 we do not enable Interrupt.
 	 */
-	j = i40e_get_tx_pending(tx_ring);
+	j = i40e_get_tx_pending(tx_ring, false);
 
 	if (budget &&
 	    ((j / (WB_STRIDE + 1)) == 0) && (j != 0) &&
@@ -1659,28 +1663,33 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, const int budget)
 					rx_bi->page_offset + copysize,
 					rx_packet_len, I40E_RXBUFFER_2048);
 
-			get_page(rx_bi->page);
-			/* switch to the other half-page here; the allocation
-			 * code programs the right addr into HW. If we haven't
-			 * used this half-page, the address won't be changed,
-			 * and HW can just use it next time through.
-			 */
-			rx_bi->page_offset ^= PAGE_SIZE / 2;
+			/* If the page count is more than 2, then both halves
+			 * of the page are used and we need to free it. Do it
+			 * here instead of in the alloc code. Otherwise one
+			 * of the half-pages might be released between now and
+			 * then, and we wouldn't know which one to use.
+			 * Don't call get_page and free_page since those are
+			 * both expensive atomic operations that just change
+			 * the refcount in opposite directions. Just give the
+			 * page to the stack; he can have our refcount.
+			 */
+			if (page_count(rx_bi->page) > 2) {
+				dma_unmap_page(rx_ring->dev,
+					       rx_bi->page_dma,
+					       PAGE_SIZE,
+					       DMA_FROM_DEVICE);
+				__free_page(rx_bi->page);
+				rx_bi->page = NULL;
+				rx_bi->page_dma = 0;
+				rx_ring->rx_stats.realloc_count++;
+			} else {
+				get_page(rx_bi->page);
+				/* switch to the other half-page here; the
+				 * allocation code programs the right addr
+				 * into HW. If we haven't used this half-page,
+				 * the address won't be changed, and HW can
+				 * just use it next time through.
+				 */
+				rx_bi->page_offset ^= PAGE_SIZE / 2;
+			}
 
 		}
@@ -2042,19 +2051,6 @@ tx_only:
 	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
 		i40e_update_enable_itr(vsi, q_vector);
 	} else { /* Legacy mode */
-		struct i40e_hw *hw = &vsi->back->hw;
-		/* We re-enable the queue 0 cause, but
-		 * don't worry about dynamic_enable
-		 * because we left it on for the other
-		 * possible interrupts during napi
-		 */
-		u32 qval = rd32(hw, I40E_QINT_RQCTL(0)) |
-			   I40E_QINT_RQCTL_CAUSE_ENA_MASK;
-
-		wr32(hw, I40E_QINT_RQCTL(0), qval);
-		qval = rd32(hw, I40E_QINT_TQCTL(0)) |
-		       I40E_QINT_TQCTL_CAUSE_ENA_MASK;
-		wr32(hw, I40E_QINT_TQCTL(0), qval);
 		i40e_irq_dynamic_enable_icr0(vsi->back, false);
 	}
 	return 0;
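The Rx half-page change above keys off the page refcount: each 4K page backs two 2K receive buffers, and page_count() > 2 means the stack still holds references to both halves, so the driver drops the page and reallocates instead of flipping page_offset. A small stand-alone sketch of that decision (illustrative only, not driver code; the plain refcount field stands in for the kernel's page_count()):

    #include <stdio.h>
    #include <stdbool.h>

    #define PAGE_SIZE 4096

    struct rx_buf {
            int page_refcount;          /* stands in for page_count(rx_bi->page) */
            unsigned int page_offset;   /* which 2K half is in use */
    };

    /* Returns true when the page must be released and a fresh one mapped
     * (counted as realloc_count in the diff above). */
    static bool recycle_half_page(struct rx_buf *b)
    {
            if (b->page_refcount > 2)
                    return true;        /* both halves out with the stack */
            /* other half is free: flip to it and keep the page mapped */
            b->page_offset ^= PAGE_SIZE / 2;
            b->page_refcount++;         /* models the get_page() call */
            return false;
    }

    int main(void)
    {
            struct rx_buf half_free = { .page_refcount = 2, .page_offset = 0 };
            struct rx_buf both_used = { .page_refcount = 3, .page_offset = 0 };

            printf("half free: realloc=%d offset=%u\n",
                   recycle_half_page(&half_free), half_free.page_offset);
            printf("both used: realloc=%d offset=%u\n",
                   recycle_half_page(&both_used), both_used.page_offset);
            return 0;
    }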
drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -203,6 +203,7 @@ struct i40e_tx_queue_stats {
 	u64 tx_done_old;
 	u64 tx_linearize;
 	u64 tx_force_wb;
+	u64 tx_lost_interrupt;
 };
 
 struct i40e_rx_queue_stats {
@@ -338,7 +339,7 @@ int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
 			       struct i40e_ring *tx_ring, u32 *flags);
 #endif
 void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector);
-u32 i40e_get_tx_pending(struct i40e_ring *ring);
+u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw);
 
 /**
  * i40e_get_head - Retrieve head from head writeback
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -461,7 +461,7 @@ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_id,
 		rx_ctx.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
 
 		/* set splitalways mode 10b */
-		rx_ctx.dtype = 0x2;
+		rx_ctx.dtype = I40E_RX_DTYPE_HEADER_SPLIT;
 	}
 
 	/* databuffer length validation */
@@ -2037,7 +2037,11 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf)
 	if (!test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
 		return 0;
 
-	/* re-enable vflr interrupt cause */
+	/* Re-enable the VFLR interrupt cause here, before looking for which
+	 * VF got reset. Otherwise, if another VF gets a reset while the
+	 * first one is being processed, that interrupt will be lost, and
+	 * that VF will be stuck in reset forever.
+	 */
 	reg = rd32(hw, I40E_PFINT_ICR0_ENA);
 	reg |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
 	wr32(hw, I40E_PFINT_ICR0_ENA, reg);
drivers/net/ethernet/intel/i40evf/i40e_adminq.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -887,6 +887,9 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
 	u16 flags;
 	u16 ntu;
 
+	/* pre-clean the event info */
+	memset(&e->desc, 0, sizeof(e->desc));
+
 	/* take the lock before we start messing with the ring */
 	mutex_lock(&hw->aq.arq_mutex);
 
drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -1084,6 +1084,7 @@ struct i40e_aqc_set_vsi_promiscuous_modes {
 #define I40E_AQC_SET_VSI_PROMISC_BROADCAST	0x04
 #define I40E_AQC_SET_VSI_DEFAULT		0x08
 #define I40E_AQC_SET_VSI_PROMISC_VLAN		0x10
+#define I40E_AQC_SET_VSI_PROMISC_TX		0x8000
 	__le16	seid;
 #define I40E_AQC_VSI_PROM_CMD_SEID_MASK		0x3FF
 	__le16	vlan_tag;
drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -129,15 +129,19 @@ void i40evf_free_tx_resources(struct i40e_ring *tx_ring)
 /**
  * i40evf_get_tx_pending - how many Tx descriptors not processed
  * @tx_ring: the ring of descriptors
+ * @in_sw: is tx_pending being checked in SW or HW
  *
  * Since there is no access to the ring head register
  * in XL710, we need to use our local copies
 **/
-u32 i40evf_get_tx_pending(struct i40e_ring *ring)
+u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw)
 {
 	u32 head, tail;
 
-	head = i40e_get_head(ring);
+	if (!in_sw)
+		head = i40e_get_head(ring);
+	else
+		head = ring->next_to_clean;
 	tail = readl(ring->tail);
 
 	if (head != tail)
@@ -259,7 +263,7 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
 	 * them to be written back in case we stay in NAPI.
 	 * In this mode on X722 we do not enable Interrupt.
 	 */
-	j = i40evf_get_tx_pending(tx_ring);
+	j = i40evf_get_tx_pending(tx_ring, false);
 
 	if (budget &&
 	    ((j / (WB_STRIDE + 1)) == 0) && (j > 0) &&
@@ -1122,28 +1126,33 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, const int budget)
 					rx_bi->page_offset + copysize,
 					rx_packet_len, I40E_RXBUFFER_2048);
 
-			get_page(rx_bi->page);
-			/* switch to the other half-page here; the allocation
-			 * code programs the right addr into HW. If we haven't
-			 * used this half-page, the address won't be changed,
-			 * and HW can just use it next time through.
-			 */
-			rx_bi->page_offset ^= PAGE_SIZE / 2;
+			/* If the page count is more than 2, then both halves
+			 * of the page are used and we need to free it. Do it
+			 * here instead of in the alloc code. Otherwise one
+			 * of the half-pages might be released between now and
+			 * then, and we wouldn't know which one to use.
+			 * Don't call get_page and free_page since those are
+			 * both expensive atomic operations that just change
+			 * the refcount in opposite directions. Just give the
+			 * page to the stack; he can have our refcount.
+			 */
+			if (page_count(rx_bi->page) > 2) {
+				dma_unmap_page(rx_ring->dev,
+					       rx_bi->page_dma,
+					       PAGE_SIZE,
+					       DMA_FROM_DEVICE);
+				__free_page(rx_bi->page);
+				rx_bi->page = NULL;
+				rx_bi->page_dma = 0;
+				rx_ring->rx_stats.realloc_count++;
+			} else {
+				get_page(rx_bi->page);
+				/* switch to the other half-page here; the
+				 * allocation code programs the right addr
+				 * into HW. If we haven't used this half-page,
+				 * the address won't be changed, and HW can
+				 * just use it next time through.
+				 */
+				rx_bi->page_offset ^= PAGE_SIZE / 2;
+			}
 
 		}
drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -202,6 +202,7 @@ struct i40e_tx_queue_stats {
 	u64 tx_done_old;
 	u64 tx_linearize;
 	u64 tx_force_wb;
+	u64 tx_lost_interrupt;
 };
 
 struct i40e_rx_queue_stats {
@@ -326,7 +327,7 @@ void i40evf_free_tx_resources(struct i40e_ring *tx_ring);
 void i40evf_free_rx_resources(struct i40e_ring *rx_ring);
 int i40evf_napi_poll(struct napi_struct *napi, int budget);
 void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector);
-u32 i40evf_get_tx_pending(struct i40e_ring *ring);
+u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw);
 
 /**
  * i40e_get_head - Retrieve head from head writeback
drivers/net/ethernet/intel/i40evf/i40evf.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -274,6 +274,9 @@ struct i40evf_adapter {
 };
 
+/* Ethtool Private Flags */
+#define I40EVF_PRIV_FLAGS_PS	BIT(0)
+
 /* needed by i40evf_ethtool.c */
 extern char i40evf_driver_name[];
 extern const char i40evf_driver_version[];
@@ -281,6 +284,7 @@ extern const char i40evf_driver_version[];
 int i40evf_up(struct i40evf_adapter *adapter);
 void i40evf_down(struct i40evf_adapter *adapter);
 int i40evf_process_config(struct i40evf_adapter *adapter);
+void i40evf_schedule_reset(struct i40evf_adapter *adapter);
 void i40evf_reset(struct i40evf_adapter *adapter);
 void i40evf_set_ethtool_ops(struct net_device *netdev);
 void i40evf_update_stats(struct i40evf_adapter *adapter);
drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+ * Copyright(c) 2013 - 2016 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
@@ -63,6 +63,12 @@ static const struct i40evf_stats i40evf_gstrings_stats[] = {
 #define I40EVF_STATS_LEN(_dev) \
 	(I40EVF_GLOBAL_STATS_LEN + I40EVF_QUEUE_STATS_LEN(_dev))
 
+static const char i40evf_priv_flags_strings[][ETH_GSTRING_LEN] = {
+	"packet-split",
+};
+
+#define I40EVF_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40evf_priv_flags_strings)
+
 /**
  * i40evf_get_settings - Get Link Speed and Duplex settings
  * @netdev: network interface device structure
@@ -97,6 +103,8 @@ static int i40evf_get_sset_count(struct net_device *netdev, int sset)
 {
 	if (sset == ETH_SS_STATS)
 		return I40EVF_STATS_LEN(netdev);
+	else if (sset == ETH_SS_PRIV_FLAGS)
+		return I40EVF_PRIV_FLAGS_STR_LEN;
 	else
 		return -EINVAL;
 }
@@ -162,6 +170,12 @@ static void i40evf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
 			snprintf(p, ETH_GSTRING_LEN, "rx-%u.bytes", i);
 			p += ETH_GSTRING_LEN;
 		}
+	} else if (sset == ETH_SS_PRIV_FLAGS) {
+		for (i = 0; i < I40EVF_PRIV_FLAGS_STR_LEN; i++) {
+			memcpy(data, i40evf_priv_flags_strings[i],
+			       ETH_GSTRING_LEN);
+			data += ETH_GSTRING_LEN;
+		}
 	}
 }
 
@@ -211,6 +225,7 @@ static void i40evf_get_drvinfo(struct net_device *netdev,
 	strlcpy(drvinfo->version, i40evf_driver_version, 32);
 	strlcpy(drvinfo->fw_version, "N/A", 4);
 	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+	drvinfo->n_priv_flags = I40EVF_PRIV_FLAGS_STR_LEN;
 }
 
 /**
@@ -710,6 +725,54 @@ static int i40evf_set_rxfh(struct net_device *netdev, const u32 *indir,
 				I40EVF_HLUT_ARRAY_SIZE);
 }
 
+/**
+ * i40evf_get_priv_flags - report device private flags
+ * @dev: network interface device structure
+ *
+ * The get string set count and the string set should be matched for each
+ * flag returned.  Add new strings for each flag to the i40e_priv_flags_strings
+ * array.
+ *
+ * Returns a u32 bitmap of flags.
+ **/
+static u32 i40evf_get_priv_flags(struct net_device *dev)
+{
+	struct i40evf_adapter *adapter = netdev_priv(dev);
+	u32 ret_flags = 0;
+
+	ret_flags |= adapter->flags & I40EVF_FLAG_RX_PS_ENABLED ?
+		     I40EVF_PRIV_FLAGS_PS : 0;
+
+	return ret_flags;
+}
+
+/**
+ * i40evf_set_priv_flags - set private flags
+ * @dev: network interface device structure
+ * @flags: bit flags to be set
+ **/
+static int i40evf_set_priv_flags(struct net_device *dev, u32 flags)
+{
+	struct i40evf_adapter *adapter = netdev_priv(dev);
+	bool reset_required = false;
+
+	if ((flags & I40EVF_PRIV_FLAGS_PS) &&
+	    !(adapter->flags & I40EVF_FLAG_RX_PS_ENABLED)) {
+		adapter->flags |= I40EVF_FLAG_RX_PS_ENABLED;
+		reset_required = true;
+	} else if (!(flags & I40EVF_PRIV_FLAGS_PS) &&
+		   (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED)) {
+		adapter->flags &= ~I40EVF_FLAG_RX_PS_ENABLED;
+		reset_required = true;
+	}
+
+	/* if needed, issue reset to cause things to take effect */
+	if (reset_required)
+		i40evf_schedule_reset(adapter);
+
+	return 0;
+}
+
 static const struct ethtool_ops i40evf_ethtool_ops = {
 	.get_settings		= i40evf_get_settings,
 	.get_drvinfo		= i40evf_get_drvinfo,
@@ -719,6 +782,8 @@ static const struct ethtool_ops i40evf_ethtool_ops = {
 	.get_strings		= i40evf_get_strings,
 	.get_ethtool_stats	= i40evf_get_ethtool_stats,
 	.get_sset_count		= i40evf_get_sset_count,
+	.get_priv_flags		= i40evf_get_priv_flags,
+	.set_priv_flags		= i40evf_set_priv_flags,
 	.get_msglevel		= i40evf_get_msglevel,
 	.set_msglevel		= i40evf_set_msglevel,
 	.get_coalesce		= i40evf_get_coalesce,
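With the "packet-split" string and the get/set_priv_flags hooks added above, the VF's packet-split mode should be togglable from userspace through ethtool's standard private-flags interface, along these lines (interface name hypothetical):

    ethtool --show-priv-flags eth0
    ethtool --set-priv-flags eth0 packet-split on

Setting the flag flips I40EVF_FLAG_RX_PS_ENABLED and schedules a reset so the new buffer layout takes effect, matching i40evf_set_priv_flags() above.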
drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -38,7 +38,7 @@ static const char i40evf_driver_string[] =
 
 #define DRV_VERSION_MAJOR 1
 #define DRV_VERSION_MINOR 4
-#define DRV_VERSION_BUILD 9
+#define DRV_VERSION_BUILD 11
 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
 	     __stringify(DRV_VERSION_MINOR) "." \
 	     __stringify(DRV_VERSION_BUILD) \
@@ -172,6 +172,19 @@ void i40evf_debug_d(void *hw, u32 mask, char *fmt_str, ...)
 	pr_info("%s", buf);
 }
 
+/**
+ * i40evf_schedule_reset - Set the flags and schedule a reset event
+ * @adapter: board private structure
+ **/
+void i40evf_schedule_reset(struct i40evf_adapter *adapter)
+{
+	if (!(adapter->flags &
+	      (I40EVF_FLAG_RESET_PENDING | I40EVF_FLAG_RESET_NEEDED))) {
+		adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
+		schedule_work(&adapter->reset_task);
+	}
+}
+
 /**
  * i40evf_tx_timeout - Respond to a Tx Hang
  * @netdev: network interface device structure
@@ -181,11 +194,7 @@ static void i40evf_tx_timeout(struct net_device *netdev)
 	struct i40evf_adapter *adapter = netdev_priv(netdev);
 
 	adapter->tx_timeout_count++;
-	if (!(adapter->flags & (I40EVF_FLAG_RESET_PENDING |
-				I40EVF_FLAG_RESET_NEEDED))) {
-		adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
-		queue_work(i40evf_wq, &adapter->reset_task);
-	}
+	i40evf_schedule_reset(adapter);
 }
 
 /**
@@ -638,35 +647,22 @@ static void i40evf_configure_rx(struct i40evf_adapter *adapter)
 	int rx_buf_len;
 
-	adapter->flags &= ~I40EVF_FLAG_RX_PS_CAPABLE;
-	adapter->flags |= I40EVF_FLAG_RX_1BUF_CAPABLE;
-
-	/* Decide whether to use packet split mode or not */
-	if (netdev->mtu > ETH_DATA_LEN) {
-		if (adapter->flags & I40EVF_FLAG_RX_PS_CAPABLE)
-			adapter->flags |= I40EVF_FLAG_RX_PS_ENABLED;
-		else
-			adapter->flags &= ~I40EVF_FLAG_RX_PS_ENABLED;
-	} else {
-		if (adapter->flags & I40EVF_FLAG_RX_1BUF_CAPABLE)
-			adapter->flags &= ~I40EVF_FLAG_RX_PS_ENABLED;
-		else
-			adapter->flags |= I40EVF_FLAG_RX_PS_ENABLED;
-	}
-
 	/* Set the RX buffer length according to the mode */
-	if (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED) {
-		rx_buf_len = I40E_RX_HDR_SIZE;
-	} else {
-		if (netdev->mtu <= ETH_DATA_LEN)
-			rx_buf_len = I40EVF_RXBUFFER_2048;
-		else
-			rx_buf_len = ALIGN(max_frame, 1024);
-	}
+	if (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED ||
+	    netdev->mtu <= ETH_DATA_LEN)
+		rx_buf_len = I40EVF_RXBUFFER_2048;
+	else
+		rx_buf_len = ALIGN(max_frame, 1024);
 
 	for (i = 0; i < adapter->num_active_queues; i++) {
 		adapter->rx_rings[i].tail = hw->hw_addr + I40E_QRX_TAIL1(i);
 		adapter->rx_rings[i].rx_buf_len = rx_buf_len;
+		if (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED) {
+			set_ring_ps_enabled(&adapter->rx_rings[i]);
+			adapter->rx_rings[i].rx_hdr_len = I40E_RX_HDR_SIZE;
+		} else {
+			clear_ring_ps_enabled(&adapter->rx_rings[i]);
+		}
 	}
 }
@@ -1003,7 +999,12 @@ static void i40evf_configure(struct i40evf_adapter *adapter)
 	for (i = 0; i < adapter->num_active_queues; i++) {
 		struct i40e_ring *ring = &adapter->rx_rings[i];
 
-		i40evf_alloc_rx_buffers_1buf(ring, ring->count);
+		if (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED) {
+			i40evf_alloc_rx_headers(ring);
+			i40evf_alloc_rx_buffers_ps(ring, ring->count);
+		} else {
+			i40evf_alloc_rx_buffers_1buf(ring, ring->count);
+		}
 		ring->next_to_use = ring->count - 1;
 		writel(ring->next_to_use, ring->tail);
 	}
@@ -1882,6 +1883,7 @@ static void i40evf_reset_task(struct work_struct *work)
 		adapter->netdev->flags &= ~IFF_UP;
 		clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
 		adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
+		adapter->state = __I40EVF_DOWN;
 		dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n");
 		return; /* Do not attempt to reinit. It's dead, Jim. */
 	}
@@ -2481,6 +2483,11 @@ static void i40evf_init_task(struct work_struct *work)
 	adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
 
 	adapter->flags |= I40EVF_FLAG_RX_CSUM_ENABLED;
+	adapter->flags |= I40EVF_FLAG_RX_1BUF_CAPABLE;
+	adapter->flags |= I40EVF_FLAG_RX_PS_CAPABLE;
+
+	/* Default to single buffer rx, can be changed through ethtool. */
+	adapter->flags &= ~I40EVF_FLAG_RX_PS_ENABLED;
 
 	netdev->netdev_ops = &i40evf_netdev_ops;
 	i40evf_set_ethtool_ops(netdev);
drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -270,6 +270,10 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter)
 		vqpi->rxq.max_pkt_size = adapter->netdev->mtu
 					+ ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN;
 		vqpi->rxq.databuffer_size = adapter->rx_rings[i].rx_buf_len;
+		if (adapter->flags & I40EVF_FLAG_RX_PS_ENABLED) {
+			vqpi->rxq.splithdr_enabled = true;
+			vqpi->rxq.hdr_size = I40E_RX_HDR_SIZE;
+		}
 		vqpi++;
 	}