Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 031554eab0
drivers/net/ethernet/intel/ixgbe/Makefile
@@ -32,7 +32,7 @@
obj-$(CONFIG_IXGBE) += ixgbe.o

ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o ixgbe_debugfs.o\
ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
              ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
              ixgbe_mbx.o ixgbe_x540.o ixgbe_lib.o ixgbe_ptp.o
@@ -40,4 +40,5 @@ ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \
                              ixgbe_dcb_82599.o ixgbe_dcb_nl.o

ixgbe-$(CONFIG_IXGBE_HWMON) += ixgbe_sysfs.o
ixgbe-$(CONFIG_DEBUG_FS) += ixgbe_debugfs.o
ixgbe-$(CONFIG_FCOE:m=y) += ixgbe_fcoe.o

drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -35,6 +35,7 @@
#include <linux/cpumask.h>
#include <linux/aer.h>
#include <linux/if_vlan.h>
#include <linux/jiffies.h>

#include <linux/clocksource.h>
#include <linux/net_tstamp.h>
@@ -231,6 +232,7 @@ struct ixgbe_ring {
        struct ixgbe_tx_buffer *tx_buffer_info;
        struct ixgbe_rx_buffer *rx_buffer_info;
        };
        unsigned long last_rx_timestamp;
        unsigned long state;
        u8 __iomem *tail;
        dma_addr_t dma;                 /* phys. address of descriptor ring */
@@ -580,11 +582,14 @@ struct ixgbe_adapter {

        struct ptp_clock *ptp_clock;
        struct ptp_clock_info ptp_caps;
        struct work_struct ptp_tx_work;
        struct sk_buff *ptp_tx_skb;
        unsigned long ptp_tx_start;
        unsigned long last_overflow_check;
        unsigned long last_rx_ptp_check;
        spinlock_t tmreg_lock;
        struct cyclecounter cc;
        struct timecounter tc;
        int rx_hwtstamp_filter;
        u32 base_incval;

        /* SR-IOV */
@@ -749,15 +754,32 @@ static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring)
extern void ixgbe_ptp_init(struct ixgbe_adapter *adapter);
extern void ixgbe_ptp_stop(struct ixgbe_adapter *adapter);
extern void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter);
extern void ixgbe_ptp_tx_hwtstamp(struct ixgbe_q_vector *q_vector,
                                  struct sk_buff *skb);
extern void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
                                  union ixgbe_adv_rx_desc *rx_desc,
                                  struct sk_buff *skb);
extern void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter);
extern void __ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
                                    struct sk_buff *skb);
static inline void ixgbe_ptp_rx_hwtstamp(struct ixgbe_ring *rx_ring,
                                         union ixgbe_adv_rx_desc *rx_desc,
                                         struct sk_buff *skb)
{
        if (unlikely(!ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS)))
                return;

        __ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, skb);

        /*
         * Update the last_rx_timestamp timer in order to enable watchdog check
         * for error case of latched timestamp on a dropped packet.
         */
        rx_ring->last_rx_timestamp = jiffies;
}

extern int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
                                    struct ifreq *ifr, int cmd);
extern void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter);
extern void ixgbe_ptp_reset(struct ixgbe_adapter *adapter);
extern void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr);
#ifdef CONFIG_PCI_IOV
void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter);
#endif

#endif /* _IXGBE_H_ */

drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
@@ -24,9 +24,6 @@
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#ifdef CONFIG_DEBUG_FS

#include <linux/debugfs.h>
#include <linux/module.h>

@@ -277,5 +274,3 @@ void ixgbe_dbg_exit(void)
{
        debugfs_remove_recursive(ixgbe_dbg_root);
}

#endif /* CONFIG_DEBUG_FS */

drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -1837,19 +1837,11 @@ static void ixgbe_diag_test(struct net_device *netdev,
                            struct ethtool_test *eth_test, u64 *data)
{
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
        bool if_running = netif_running(netdev);

        set_bit(__IXGBE_TESTING, &adapter->state);
        if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
                /* Offline tests */

                e_info(hw, "offline testing starting\n");

                /* Link test performed before hardware reset so autoneg doesn't
                 * interfere with test result */
                if (ixgbe_link_test(adapter, &data[4]))
                        eth_test->flags |= ETH_TEST_FL_FAILED;

                if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
                        int i;
                        for (i = 0; i < adapter->num_vfs; i++) {
@@ -1870,12 +1862,24 @@ static void ixgbe_diag_test(struct net_device *netdev,
                        }
                }

                /* Offline tests */
                e_info(hw, "offline testing starting\n");

                if (if_running)
                        /* indicate we're in test mode */
                        dev_close(netdev);
                else
                        ixgbe_reset(adapter);

                /* bringing adapter down disables SFP+ optics */
                if (hw->mac.ops.enable_tx_laser)
                        hw->mac.ops.enable_tx_laser(hw);

                /* Link test performed before hardware reset so autoneg doesn't
                 * interfere with test result
                 */
                if (ixgbe_link_test(adapter, &data[4]))
                        eth_test->flags |= ETH_TEST_FL_FAILED;

                ixgbe_reset(adapter);
                e_info(hw, "register testing starting\n");
                if (ixgbe_reg_test(adapter, &data[0]))
                        eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -1908,16 +1912,22 @@ static void ixgbe_diag_test(struct net_device *netdev,
skip_loopback:
                ixgbe_reset(adapter);

                /* clear testing bit and return adapter to previous state */
                clear_bit(__IXGBE_TESTING, &adapter->state);
                if (if_running)
                        dev_open(netdev);
        } else {
                e_info(hw, "online testing starting\n");

                /* if adapter is down, SFP+ optics will be disabled */
                if (!if_running && hw->mac.ops.enable_tx_laser)
                        hw->mac.ops.enable_tx_laser(hw);

                /* Online tests */
                if (ixgbe_link_test(adapter, &data[4]))
                        eth_test->flags |= ETH_TEST_FL_FAILED;

                /* Online tests aren't run; pass by default */
                /* Offline tests aren't run; pass by default */
                data[0] = 0;
                data[1] = 0;
                data[2] = 0;
@@ -1925,6 +1935,10 @@ skip_loopback:

                clear_bit(__IXGBE_TESTING, &adapter->state);
        }

        /* if adapter was down, ensure SFP+ optics are disabled again */
        if (!if_running && hw->mac.ops.disable_tx_laser)
                hw->mac.ops.disable_tx_laser(hw);
skip_ol_tests:
        msleep_interruptible(4 * 1000);
}
@@ -2695,6 +2709,14 @@ static int ixgbe_get_ts_info(struct net_device *dev,
                        (1 << HWTSTAMP_FILTER_NONE) |
                        (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
                        (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
                        (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
                        (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
                        (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
                        (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
                        (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
                        (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
                        (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
                        (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
                        (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
                break;
        default:
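
The HWTSTAMP_FILTER_* capability bits reported by ixgbe_get_ts_info() above are the same values a user program passes to the SIOCSHWTSTAMP ioctl, which the driver services in ixgbe_ptp_hwtstamp_ioctl(). As a rough, hedged sketch of the userspace side (the interface name "eth0" is only a placeholder and error handling is minimal):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
        struct hwtstamp_config cfg = {
                .tx_type   = HWTSTAMP_TX_ON,
                .rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
        };
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0) {
                perror("socket");
                return 1;
        }

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);    /* placeholder NIC */
        ifr.ifr_data = (char *)&cfg;

        /* The driver may widen the requested filter; on success the kernel
         * writes the configuration actually in effect back into cfg. */
        if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
                perror("SIOCSHWTSTAMP");
        else
                printf("rx_filter in effect: %d\n", cfg.rx_filter);

        close(fd);
        return 0;
}

The same capability set is what "ethtool -T <dev>" prints, since that command goes through the get_ts_info path.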
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -803,6 +803,7 @@ static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter)
        /* Do the reset outside of interrupt context */
        if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
                adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
                e_warn(drv, "initiating reset due to tx timeout\n");
                ixgbe_service_event_schedule(adapter);
        }
}
@@ -850,9 +851,6 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
                total_bytes += tx_buffer->bytecount;
                total_packets += tx_buffer->gso_segs;

                if (unlikely(tx_buffer->tx_flags & IXGBE_TX_FLAGS_TSTAMP))
                        ixgbe_ptp_tx_hwtstamp(q_vector, tx_buffer->skb);

                /* free the skb */
                dev_kfree_skb_any(tx_buffer->skb);

@@ -1441,7 +1439,7 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,

        ixgbe_rx_checksum(rx_ring, rx_desc, skb);

        ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, rx_desc, skb);
        ixgbe_ptp_rx_hwtstamp(rx_ring, rx_desc, skb);

        if ((dev->features & NETIF_F_HW_VLAN_RX) &&
            ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
@@ -5534,6 +5532,8 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
                break;
        }

        adapter->last_rx_ptp_check = jiffies;

        if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED)
                ixgbe_ptp_start_cyclecounter(adapter);

@@ -5614,6 +5614,7 @@ static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter)
                 * to get done, so reset controller to flush Tx.
                 * (Do the reset outside of interrupt context).
                 */
                e_warn(drv, "initiating reset to clear Tx work after link loss\n");
                adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
        }
}
@@ -5878,7 +5879,6 @@ static void ixgbe_service_task(struct work_struct *work)
        struct ixgbe_adapter *adapter = container_of(work,
                                                     struct ixgbe_adapter,
                                                     service_task);

        ixgbe_reset_subtask(adapter);
        ixgbe_sfp_detection_subtask(adapter);
        ixgbe_sfp_link_config_subtask(adapter);
@@ -5886,7 +5886,11 @@ static void ixgbe_service_task(struct work_struct *work)
        ixgbe_watchdog_subtask(adapter);
        ixgbe_fdir_reinit_subtask(adapter);
        ixgbe_check_hang_subtask(adapter);
        ixgbe_ptp_overflow_check(adapter);

        if (adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED) {
                ixgbe_ptp_overflow_check(adapter);
                ixgbe_ptp_rx_hang(adapter);
        }

        ixgbe_service_event_complete(adapter);
}
@@ -6432,6 +6436,11 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
        if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
                skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
                tx_flags |= IXGBE_TX_FLAGS_TSTAMP;

                /* schedule check for Tx timestamp */
                adapter->ptp_tx_skb = skb_get(skb);
                adapter->ptp_tx_start = jiffies;
                schedule_work(&adapter->ptp_tx_work);
        }

#ifdef CONFIG_PCI_IOV
@@ -6827,6 +6836,26 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
}

#endif /* CONFIG_IXGBE_DCB */
#ifdef CONFIG_PCI_IOV
void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;

        rtnl_lock();
#ifdef CONFIG_IXGBE_DCB
        ixgbe_setup_tc(netdev, netdev_get_num_tc(netdev));
#else
        if (netif_running(netdev))
                ixgbe_close(netdev);
        ixgbe_clear_interrupt_scheme(adapter);
        ixgbe_init_interrupt_scheme(adapter);
        if (netif_running(netdev))
                ixgbe_open(netdev);
#endif
        rtnl_unlock();
}

#endif
void ixgbe_do_reset(struct net_device *netdev)
{
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -7353,7 +7382,15 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        }

#ifdef CONFIG_PCI_IOV
        ixgbe_enable_sriov(adapter, ii);
        /* SR-IOV not supported on the 82598 */
        if (adapter->hw.mac.type == ixgbe_mac_82598EB)
                goto skip_sriov;
        /* Mailbox */
        ixgbe_init_mbx_params_pf(hw);
        memcpy(&hw->mbx.ops, ii->mbx_ops, sizeof(hw->mbx.ops));
        ixgbe_enable_sriov(adapter);
        pci_sriov_set_totalvfs(pdev, 63);
skip_sriov:

#endif
        netdev->features = NETIF_F_SG |
@@ -7609,8 +7646,14 @@ static void ixgbe_remove(struct pci_dev *pdev)
        if (netdev->reg_state == NETREG_REGISTERED)
                unregister_netdev(netdev);

        ixgbe_disable_sriov(adapter);

#ifdef CONFIG_PCI_IOV
        /*
         * Only disable SR-IOV on unload if the user specified the now
         * deprecated max_vfs module parameter.
         */
        if (max_vfs)
                ixgbe_disable_sriov(adapter);
#endif
        ixgbe_clear_interrupt_scheme(adapter);

        ixgbe_release_hw_control(adapter);
@@ -7824,6 +7867,7 @@ static struct pci_driver ixgbe_driver = {
        .resume = ixgbe_resume,
#endif
        .shutdown = ixgbe_shutdown,
        .sriov_configure = ixgbe_pci_sriov_configure,
        .err_handler = &ixgbe_err_handler
};
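
The Tx timestamp plumbing added above follows a common deferral pattern: the transmit hot path in ixgbe_xmit_frame_ring() only records the skb and calls schedule_work(), the work item does the slow register polling later, and teardown uses cancel_work_sync() (see ixgbe_ptp_stop() further down). A minimal, self-contained sketch of that pattern, with purely illustrative names that are not part of the driver:

/* Hedged sketch of the defer-to-workqueue pattern used for ptp_tx_work:
 * the fast path only schedules, the work item polls and gives up after a
 * timeout. Names are illustrative only. */
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

static unsigned long demo_start;
static int demo_polls;
static struct work_struct demo_work;

static void demo_work_fn(struct work_struct *work)
{
        /* Give up if we have been polling too long, as
         * ixgbe_ptp_tx_hwtstamp_work() does with IXGBE_PTP_TX_TIMEOUT. */
        if (time_is_before_jiffies(demo_start + 15 * HZ)) {
                pr_warn("demo: giving up after timeout\n");
                return;
        }

        if (++demo_polls < 5) {
                /* condition not met yet: re-queue and check again later,
                 * like the driver re-queuing ptp_tx_work */
                schedule_work(&demo_work);
                return;
        }

        pr_info("demo: condition met after %d polls\n", demo_polls);
}

static int __init demo_init(void)
{
        INIT_WORK(&demo_work, demo_work_fn);
        demo_start = jiffies;
        schedule_work(&demo_work);      /* what the hot path would do */
        return 0;
}

static void __exit demo_exit(void)
{
        cancel_work_sync(&demo_work);   /* what ixgbe_ptp_stop() does */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");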
drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -96,15 +96,12 @@
#define IXGBE_MAX_TIMEADJ_VALUE 0x7FFFFFFFFFFFFFFFULL

#define IXGBE_OVERFLOW_PERIOD   (HZ * 30)
#define IXGBE_PTP_TX_TIMEOUT    (HZ * 15)

#ifndef NSECS_PER_SEC
#define NSECS_PER_SEC 1000000000ULL
#endif

static struct sock_filter ptp_filter[] = {
        PTP_FILTER
};

/**
 * ixgbe_ptp_setup_sdp
 * @hw: the hardware private structure
@@ -405,149 +402,145 @@ void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr)
        }
}

/**
 * ixgbe_ptp_overflow_check - delayed work to detect SYSTIME overflow
 * @work: structure containing information about this work task
 * ixgbe_ptp_overflow_check - watchdog task to detect SYSTIME overflow
 * @adapter: private adapter struct
 *
 * this work function is scheduled to continue reading the timecounter
 * this watchdog task periodically reads the timecounter
 * in order to prevent missing when the system time registers wrap
 * around. This needs to be run approximately twice a minute when no
 * PTP activity is occurring.
 * around. This needs to be run approximately twice a minute.
 */
void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter)
{
        unsigned long elapsed_jiffies = adapter->last_overflow_check - jiffies;
        bool timeout = time_is_before_jiffies(adapter->last_overflow_check +
                                              IXGBE_OVERFLOW_PERIOD);
        struct timespec ts;

        if ((adapter->flags2 & IXGBE_FLAG2_PTP_ENABLED) &&
            (elapsed_jiffies >= IXGBE_OVERFLOW_PERIOD)) {
        if (timeout) {
                ixgbe_ptp_gettime(&adapter->ptp_caps, &ts);
                adapter->last_overflow_check = jiffies;
        }
}

/**
 * ixgbe_ptp_match - determine if this skb matches a ptp packet
 * @skb: pointer to the skb
 * @hwtstamp: pointer to the hwtstamp_config to check
 * ixgbe_ptp_rx_hang - detect error case when Rx timestamp registers latched
 * @adapter: private network adapter structure
 *
 * Determine whether the skb should have been timestamped, assuming the
 * hwtstamp was set via the hwtstamp ioctl. Returns non-zero when the packet
 * should have a timestamp waiting in the registers, and 0 otherwise.
 *
 * V1 packets have to check the version type to determine whether they are
 * correct. However, we can't directly access the data because it might be
 * fragmented in the SKB, in paged memory. In order to work around this, we
 * use skb_copy_bits which will properly copy the data whether it is in the
 * paged memory fragments or not. We have to copy the IP header as well as the
 * message type.
 * this watchdog task is scheduled to detect error case where hardware has
 * dropped an Rx packet that was timestamped when the ring is full. The
 * particular error is rare but leaves the device in a state unable to timestamp
 * any future packets.
 */
static int ixgbe_ptp_match(struct sk_buff *skb, int rx_filter)
void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter)
{
        struct iphdr iph;
        u8 msgtype;
        unsigned int type, offset;
        struct ixgbe_hw *hw = &adapter->hw;
        struct ixgbe_ring *rx_ring;
        u32 tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
        unsigned long rx_event;
        int n;

        if (rx_filter == HWTSTAMP_FILTER_NONE)
                return 0;

        type = sk_run_filter(skb, ptp_filter);

        if (likely(rx_filter == HWTSTAMP_FILTER_PTP_V2_EVENT))
                return type & PTP_CLASS_V2;

        /* For the remaining cases actually check message type */
        switch (type) {
        case PTP_CLASS_V1_IPV4:
                skb_copy_bits(skb, OFF_IHL, &iph, sizeof(iph));
                offset = ETH_HLEN + (iph.ihl << 2) + UDP_HLEN + OFF_PTP_CONTROL;
                break;
        case PTP_CLASS_V1_IPV6:
                offset = OFF_PTP6 + OFF_PTP_CONTROL;
                break;
        default:
                /* other cases invalid or handled above */
                return 0;
        /* if we don't have a valid timestamp in the registers, just update the
         * timeout counter and exit
         */
        if (!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID)) {
                adapter->last_rx_ptp_check = jiffies;
                return;
        }

        /* Make sure our buffer is long enough */
        if (skb->len < offset)
                return 0;
        /* determine the most recent watchdog or rx_timestamp event */
        rx_event = adapter->last_rx_ptp_check;
        for (n = 0; n < adapter->num_rx_queues; n++) {
                rx_ring = adapter->rx_ring[n];
                if (time_after(rx_ring->last_rx_timestamp, rx_event))
                        rx_event = rx_ring->last_rx_timestamp;
        }

        skb_copy_bits(skb, offset, &msgtype, sizeof(msgtype));
        /* only need to read the high RXSTMP register to clear the lock */
        if (time_is_before_jiffies(rx_event + 5*HZ)) {
                IXGBE_READ_REG(hw, IXGBE_RXSTMPH);
                adapter->last_rx_ptp_check = jiffies;

        switch (rx_filter) {
        case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
                return (msgtype == IXGBE_RXMTRL_V1_SYNC_MSG);
                break;
        case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
                return (msgtype == IXGBE_RXMTRL_V1_DELAY_REQ_MSG);
                break;
        default:
                return 0;
                e_warn(drv, "clearing RX Timestamp hang");
        }
}

/**
 * ixgbe_ptp_tx_hwtstamp - utility function which checks for TX time stamp
 * @q_vector: structure containing interrupt and ring information
 * @skb: particular skb to send timestamp with
 * @adapter: the private adapter struct
 *
 * if the timestamp is valid, we convert it into the timecounter ns
 * value, then store that result into the shhwtstamps structure which
 * is passed up the network stack
 */
void ixgbe_ptp_tx_hwtstamp(struct ixgbe_q_vector *q_vector,
                           struct sk_buff *skb)
static void ixgbe_ptp_tx_hwtstamp(struct ixgbe_adapter *adapter)
{
        struct ixgbe_adapter *adapter;
        struct ixgbe_hw *hw;
        struct ixgbe_hw *hw = &adapter->hw;
        struct skb_shared_hwtstamps shhwtstamps;
        u64 regval = 0, ns;
        u32 tsynctxctl;
        unsigned long flags;

        /* we cannot process timestamps on a ring without a q_vector */
        if (!q_vector || !q_vector->adapter)
                return;

        adapter = q_vector->adapter;
        hw = &adapter->hw;

        tsynctxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
        regval |= (u64)IXGBE_READ_REG(hw, IXGBE_TXSTMPL);
        regval |= (u64)IXGBE_READ_REG(hw, IXGBE_TXSTMPH) << 32;

        /*
         * if TX timestamp is not valid, exit after clearing the
         * timestamp registers
         */
        if (!(tsynctxctl & IXGBE_TSYNCTXCTL_VALID))
                return;

        spin_lock_irqsave(&adapter->tmreg_lock, flags);
        ns = timecounter_cyc2time(&adapter->tc, regval);
        spin_unlock_irqrestore(&adapter->tmreg_lock, flags);

        memset(&shhwtstamps, 0, sizeof(shhwtstamps));
        shhwtstamps.hwtstamp = ns_to_ktime(ns);
        skb_tstamp_tx(skb, &shhwtstamps);
        skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps);

        dev_kfree_skb_any(adapter->ptp_tx_skb);
        adapter->ptp_tx_skb = NULL;
}

/**
 * ixgbe_ptp_rx_hwtstamp - utility function which checks for RX time stamp
 * ixgbe_ptp_tx_hwtstamp_work
 * @work: pointer to the work struct
 *
 * This work item polls TSYNCTXCTL valid bit to determine when a Tx hardware
 * timestamp has been taken for the current skb. It is necesary, because the
 * descriptor's "done" bit does not correlate with the timestamp event.
 */
static void ixgbe_ptp_tx_hwtstamp_work(struct work_struct *work)
{
        struct ixgbe_adapter *adapter = container_of(work, struct ixgbe_adapter,
                                                     ptp_tx_work);
        struct ixgbe_hw *hw = &adapter->hw;
        bool timeout = time_is_before_jiffies(adapter->ptp_tx_start +
                                              IXGBE_PTP_TX_TIMEOUT);
        u32 tsynctxctl;

        /* we have to have a valid skb */
        if (!adapter->ptp_tx_skb)
                return;

        if (timeout) {
                dev_kfree_skb_any(adapter->ptp_tx_skb);
                adapter->ptp_tx_skb = NULL;
                e_warn(drv, "clearing Tx Timestamp hang");
                return;
        }

        tsynctxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
        if (tsynctxctl & IXGBE_TSYNCTXCTL_VALID)
                ixgbe_ptp_tx_hwtstamp(adapter);
        else
                /* reschedule to keep checking if it's not available yet */
                schedule_work(&adapter->ptp_tx_work);
}

/**
 * __ixgbe_ptp_rx_hwtstamp - utility function which checks for RX time stamp
 * @q_vector: structure containing interrupt and ring information
 * @rx_desc: the rx descriptor
 * @skb: particular skb to send timestamp with
 *
 * if the timestamp is valid, we convert it into the timecounter ns
 * value, then store that result into the shhwtstamps structure which
 * is passed up the network stack
 */
void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
                           union ixgbe_adv_rx_desc *rx_desc,
                           struct sk_buff *skb)
void __ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
                             struct sk_buff *skb)
{
        struct ixgbe_adapter *adapter;
        struct ixgbe_hw *hw;
@@ -563,37 +556,17 @@ void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
        adapter = q_vector->adapter;
        hw = &adapter->hw;

        if (likely(!ixgbe_ptp_match(skb, adapter->rx_hwtstamp_filter)))
                return;

        /*
         * Read the tsyncrxctl register afterwards in order to prevent taking an
         * I/O hit on every packet.
         */
        tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);

        /* Check if we have a valid timestamp and make sure the skb should
         * have been timestamped */
        if (!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID))
                return;

        /*
         * Always read the registers, in order to clear a possible fault
         * because of stagnant RX timestamp values for a packet that never
         * reached the queue.
         */
        regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPL);
        regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) << 32;

        /*
         * If the timestamp bit is set in the packet's descriptor, we know the
         * timestamp belongs to this packet. No other packet can be
         * timestamped until the registers for timestamping have been read.
         * Therefor only one packet with this bit can be in the queue at a
         * time, and the rx timestamp values that were in the registers belong
         * to this packet.
         *
         * If nothing went wrong, then it should have a skb_shared_tx that we
         * can turn into a skb_shared_hwtstamps.
         */
        if (unlikely(!ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS)))
                return;

        spin_lock_irqsave(&adapter->tmreg_lock, flags);
        ns = timecounter_cyc2time(&adapter->tc, regval);
@@ -660,11 +633,11 @@ int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
                break;
        case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
                tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1;
                tsync_rx_mtrl = IXGBE_RXMTRL_V1_SYNC_MSG;
                tsync_rx_mtrl |= IXGBE_RXMTRL_V1_SYNC_MSG;
                break;
        case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
                tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1;
                tsync_rx_mtrl = IXGBE_RXMTRL_V1_DELAY_REQ_MSG;
                tsync_rx_mtrl |= IXGBE_RXMTRL_V1_DELAY_REQ_MSG;
                break;
        case HWTSTAMP_FILTER_PTP_V2_EVENT:
        case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
@@ -698,9 +671,6 @@ int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
                return 0;
        }

        /* Store filter value for later use */
        adapter->rx_hwtstamp_filter = config.rx_filter;

        /* define ethertype filter for timestamping L2 packets */
        if (is_l2)
                IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588),
@@ -902,11 +872,8 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
                return;
        }

        /* initialize the ptp filter */
        if (ptp_filter_init(ptp_filter, ARRAY_SIZE(ptp_filter)))
                e_dev_warn("ptp_filter_init failed\n");

        spin_lock_init(&adapter->tmreg_lock);
        INIT_WORK(&adapter->ptp_tx_work, ixgbe_ptp_tx_hwtstamp_work);

        adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
                                                &adapter->pdev->dev);
@@ -938,6 +905,12 @@ void ixgbe_ptp_stop(struct ixgbe_adapter *adapter)

        ixgbe_ptp_setup_sdp(adapter);

        cancel_work_sync(&adapter->ptp_tx_work);
        if (adapter->ptp_tx_skb) {
                dev_kfree_skb_any(adapter->ptp_tx_skb);
                adapter->ptp_tx_skb = NULL;
        }

        if (adapter->ptp_clock) {
                ptp_clock_unregister(adapter->ptp_clock);
                adapter->ptp_clock = NULL;
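
On the application side, the timestamp that ixgbe_ptp_tx_hwtstamp() attaches via skb_tstamp_tx() is delivered through the socket error queue under SO_TIMESTAMPING. A hedged userspace sketch follows; it assumes hardware timestamping has already been enabled on the interface (for example with the SIOCSHWTSTAMP snippet earlier), and the peer address is only a placeholder:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <linux/net_tstamp.h>
#include <time.h>

int main(void)
{
        int fd = socket(AF_INET, SOCK_DGRAM, 0);
        int flags = SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE;
        struct sockaddr_in dst = {
                .sin_family = AF_INET,
                .sin_port   = htons(319),              /* PTP event port */
        };
        char payload[64] = { 0 }, data[256];
        union { char buf[512]; struct cmsghdr align; } ctrl;
        struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
        struct msghdr msg = {
                .msg_iov        = &iov,
                .msg_iovlen     = 1,
                .msg_control    = ctrl.buf,
                .msg_controllen = sizeof(ctrl.buf),
        };
        struct cmsghdr *cmsg;
        ssize_t n = -1;
        int i;

        inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr); /* placeholder peer */
        setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &flags, sizeof(flags));
        sendto(fd, payload, sizeof(payload), 0,
               (struct sockaddr *)&dst, sizeof(dst));

        /* The timestamped clone comes back on the error queue once the work
         * item sees TSYNCTXCTL_VALID; error-queue reads never block, so poll
         * briefly instead of blocking. */
        for (i = 0; i < 100 && n < 0; i++) {
                n = recvmsg(fd, &msg, MSG_ERRQUEUE);
                if (n < 0)
                        usleep(10000);
        }
        if (n < 0) {
                perror("recvmsg(MSG_ERRQUEUE)");
                return 1;
        }

        for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
                if (cmsg->cmsg_level == SOL_SOCKET &&
                    cmsg->cmsg_type == SO_TIMESTAMPING) {
                        struct timespec ts[3];

                        memcpy(ts, CMSG_DATA(cmsg), sizeof(ts));
                        /* ts[2] is the raw hardware timestamp */
                        printf("hw tx stamp: %ld.%09ld\n",
                               (long)ts[2].tv_sec, ts[2].tv_nsec);
                }
        }

        close(fd);
        return 0;
}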
drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -44,50 +44,11 @@
#include "ixgbe_sriov.h"

#ifdef CONFIG_PCI_IOV
void ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
                        const struct ixgbe_info *ii)
static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;
        int num_vf_macvlans, i;
        struct vf_macvlans *mv_list;
        int pre_existing_vfs = 0;

        pre_existing_vfs = pci_num_vf(adapter->pdev);
        if (!pre_existing_vfs && !adapter->num_vfs)
                return;

        /* If there are pre-existing VFs then we have to force
         * use of that many because they were not deleted the last
         * time someone removed the PF driver. That would have
         * been because they were allocated to guest VMs and can't
         * be removed. Go ahead and just re-enable the old amount.
         * If the user wants to change the number of VFs they can
         * use ethtool while making sure no VFs are allocated to
         * guest VMs... i.e. the right way.
         */
        if (pre_existing_vfs) {
                adapter->num_vfs = pre_existing_vfs;
                dev_warn(&adapter->pdev->dev, "Virtual Functions already "
                         "enabled for this device - Please reload all "
                         "VF drivers to avoid spoofed packet errors\n");
        } else {
                int err;
                /*
                 * The 82599 supports up to 64 VFs per physical function
                 * but this implementation limits allocation to 63 so that
                 * basic networking resources are still available to the
                 * physical function. If the user requests greater thn
                 * 63 VFs then it is an error - reset to default of zero.
                 */
                adapter->num_vfs = min_t(unsigned int, adapter->num_vfs, 63);

                err = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
                if (err) {
                        e_err(probe, "Failed to enable PCI sriov: %d\n", err);
                        adapter->num_vfs = 0;
                        return;
                }
        }

        adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED;
        e_info(probe, "SR-IOV enabled with %d VFs\n", adapter->num_vfs);
@@ -128,12 +89,6 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
                kcalloc(adapter->num_vfs,
                        sizeof(struct vf_data_storage), GFP_KERNEL);
        if (adapter->vfinfo) {
                /* Now that we're sure SR-IOV is enabled
                 * and memory allocated set up the mailbox parameters
                 */
                ixgbe_init_mbx_params_pf(hw);
                memcpy(&hw->mbx.ops, ii->mbx_ops, sizeof(hw->mbx.ops));

                /* limit trafffic classes based on VFs enabled */
                if ((adapter->hw.mac.type == ixgbe_mac_82599EB) &&
                    (adapter->num_vfs < 16)) {
@@ -157,10 +112,62 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
                /* enable spoof checking for all VFs */
                for (i = 0; i < adapter->num_vfs; i++)
                        adapter->vfinfo[i].spoofchk_enabled = true;
                return;
                return 0;
        }

        /* Oh oh */
        return -ENOMEM;
}

/* Note this function is called when the user wants to enable SR-IOV
 * VFs using the now deprecated module parameter
 */
void ixgbe_enable_sriov(struct ixgbe_adapter *adapter)
{
        int pre_existing_vfs = 0;

        pre_existing_vfs = pci_num_vf(adapter->pdev);
        if (!pre_existing_vfs && !adapter->num_vfs)
                return;

        if (!pre_existing_vfs)
                dev_warn(&adapter->pdev->dev,
                         "Enabling SR-IOV VFs using the module parameter is deprecated - please use the pci sysfs interface.\n");

        /* If there are pre-existing VFs then we have to force
         * use of that many - over ride any module parameter value.
         * This may result from the user unloading the PF driver
         * while VFs were assigned to guest VMs or because the VFs
         * have been created via the new PCI SR-IOV sysfs interface.
         */
        if (pre_existing_vfs) {
                adapter->num_vfs = pre_existing_vfs;
                dev_warn(&adapter->pdev->dev,
                         "Virtual Functions already enabled for this device - Please reload all VF drivers to avoid spoofed packet errors\n");
        } else {
                int err;
                /*
                 * The 82599 supports up to 64 VFs per physical function
                 * but this implementation limits allocation to 63 so that
                 * basic networking resources are still available to the
                 * physical function. If the user requests greater thn
                 * 63 VFs then it is an error - reset to default of zero.
                 */
                adapter->num_vfs = min_t(unsigned int, adapter->num_vfs, 63);

                err = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
                if (err) {
                        e_err(probe, "Failed to enable PCI sriov: %d\n", err);
                        adapter->num_vfs = 0;
                        return;
                }
        }

        if (!__ixgbe_enable_sriov(adapter))
                return;

        /* If we have gotten to this point then there is no memory available
         * to manage the VF devices - print message and bail.
         */
        e_err(probe, "Unable to allocate memory for VF Data Storage - "
              "SRIOV disabled\n");
        ixgbe_disable_sriov(adapter);
@@ -200,11 +207,12 @@ static bool ixgbe_vfs_are_assigned(struct ixgbe_adapter *adapter)
}

#endif /* #ifdef CONFIG_PCI_IOV */
void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
int ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
{
        struct ixgbe_hw *hw = &adapter->hw;
        u32 gpie;
        u32 vmdctl;
        int rss;

        /* set num VFs to 0 to prevent access to vfinfo */
        adapter->num_vfs = 0;
@@ -219,7 +227,7 @@ void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)

        /* if SR-IOV is already disabled then there is nothing to do */
        if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
                return;
                return 0;

#ifdef CONFIG_PCI_IOV
        /*
@@ -229,7 +237,7 @@ void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
         */
        if (ixgbe_vfs_are_assigned(adapter)) {
                e_dev_warn("Unloading driver while VFs are assigned - VFs will not be deallocated\n");
                return;
                return -EPERM;
        }
        /* disable iov and allow time for transactions to clear */
        pci_disable_sriov(adapter->pdev);
@@ -252,10 +260,94 @@ void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
        adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
        adapter->ring_feature[RING_F_VMDQ].offset = 0;

        rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus());
        adapter->ring_feature[RING_F_RSS].limit = rss;

        /* take a breather then clean up driver data */
        msleep(100);

        adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
        return 0;
}

static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs)
{
#ifdef CONFIG_PCI_IOV
        struct ixgbe_adapter *adapter = pci_get_drvdata(dev);
        int err = 0;
        int i;
        int pre_existing_vfs = pci_num_vf(dev);

        if (pre_existing_vfs && pre_existing_vfs != num_vfs)
                err = ixgbe_disable_sriov(adapter);
        else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
                goto out;

        if (err)
                goto err_out;

        /* While the SR-IOV capability structure reports total VFs to be
         * 64 we limit the actual number that can be allocated to 63 so
         * that some transmit/receive resources can be reserved to the
         * PF. The PCI bus driver already checks for other values out of
         * range.
         */
        if (num_vfs > 63) {
                err = -EPERM;
                goto err_out;
        }

        adapter->num_vfs = num_vfs;

        err = __ixgbe_enable_sriov(adapter);
        if (err)
                goto err_out;

        for (i = 0; i < adapter->num_vfs; i++)
                ixgbe_vf_configuration(dev, (i | 0x10000000));

        err = pci_enable_sriov(dev, num_vfs);
        if (err) {
                e_dev_warn("Failed to enable PCI sriov: %d\n", err);
                goto err_out;
        }
        ixgbe_sriov_reinit(adapter);

out:
        return num_vfs;

err_out:
        return err;
#endif
        return 0;
}

static int ixgbe_pci_sriov_disable(struct pci_dev *dev)
{
        struct ixgbe_adapter *adapter = pci_get_drvdata(dev);
        int err;
        u32 current_flags = adapter->flags;

        err = ixgbe_disable_sriov(adapter);

        /* Only reinit if no error and state changed */
        if (!err && current_flags != adapter->flags) {
                /* ixgbe_disable_sriov() doesn't clear VMDQ flag */
                adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED;
#ifdef CONFIG_PCI_IOV
                ixgbe_sriov_reinit(adapter);
#endif
        }

        return err;
}

int ixgbe_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
{
        if (num_vfs == 0)
                return ixgbe_pci_sriov_disable(dev);
        else
                return ixgbe_pci_sriov_enable(dev, num_vfs);
}

static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
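
The new ixgbe_pci_sriov_configure() entry point (wired into ixgbe_driver.sriov_configure in the pci_driver hunk above) is not called by the driver itself; the PCI core invokes it when userspace writes a VF count to the device's sriov_numvfs sysfs attribute, and pci_sriov_set_totalvfs(pdev, 63) in ixgbe_probe() caps what that file will accept. A small hedged sketch that drives it (the PCI address below is a placeholder; writing 0 tears the VFs down again):

#include <stdio.h>

int main(void)
{
        const char *path = "/sys/bus/pci/devices/0000:01:00.0/sriov_numvfs";
        FILE *f = fopen(path, "w");

        if (!f) {
                perror("fopen");
                return 1;
        }

        fprintf(f, "4\n");              /* request four VFs */
        if (fclose(f) != 0) {           /* sysfs reports errors on flush/close */
                perror("sriov_numvfs");
                return 1;
        }

        return 0;
}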
drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
@@ -41,11 +41,11 @@ int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting);
int ixgbe_ndo_get_vf_config(struct net_device *netdev,
                            int vf, struct ifla_vf_info *ivi);
void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter);
void ixgbe_disable_sriov(struct ixgbe_adapter *adapter);
int ixgbe_disable_sriov(struct ixgbe_adapter *adapter);
#ifdef CONFIG_PCI_IOV
void ixgbe_enable_sriov(struct ixgbe_adapter *adapter,
                        const struct ixgbe_info *ii);
void ixgbe_enable_sriov(struct ixgbe_adapter *adapter);
#endif
int ixgbe_pci_sriov_configure(struct pci_dev *dev, int num_vfs);

static inline void ixgbe_set_vmvir(struct ixgbe_adapter *adapter,
                                   u16 vid, u16 qos, u32 vf)
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -2245,10 +2245,23 @@ static void ixgbevf_watchdog_task(struct work_struct *work)

        if (link_up) {
                if (!netif_carrier_ok(netdev)) {
                        char *link_speed_string;
                        switch (link_speed) {
                        case IXGBE_LINK_SPEED_10GB_FULL:
                                link_speed_string = "10 Gbps";
                                break;
                        case IXGBE_LINK_SPEED_1GB_FULL:
                                link_speed_string = "1 Gbps";
                                break;
                        case IXGBE_LINK_SPEED_100_FULL:
                                link_speed_string = "100 Mbps";
                                break;
                        default:
                                link_speed_string = "unknown speed";
                                break;
                        }
                        dev_info(&adapter->pdev->dev,
                                 "NIC Link is Up, %u Gbps\n",
                                 (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
                                 10 : 1);
                                 "NIC Link is Up, %s\n", link_speed_string);
                        netif_carrier_on(netdev);
                        netif_tx_wake_all_queues(netdev);
                }