/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Driver
 * Copyright(c) 2013 - 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

/* ethtool support for i40e */

#include "i40e.h"
#include "i40e_diag.h"

struct i40e_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int stat_offset;
};

#define I40E_STAT(_type, _name, _stat) { \
	.stat_string = _name, \
	.sizeof_stat = FIELD_SIZEOF(_type, _stat), \
	.stat_offset = offsetof(_type, _stat) \
}

#define I40E_NETDEV_STAT(_net_stat) \
		I40E_STAT(struct rtnl_link_stats64, #_net_stat, _net_stat)
#define I40E_PF_STAT(_name, _stat) \
		I40E_STAT(struct i40e_pf, _name, _stat)
#define I40E_VSI_STAT(_name, _stat) \
		I40E_STAT(struct i40e_vsi, _name, _stat)
#define I40E_VEB_STAT(_name, _stat) \
		I40E_STAT(struct i40e_veb, _name, _stat)

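/* For illustration only: an entry such as I40E_PF_STAT("rx_bytes", stats.eth.rx_bytes)
 * expands to { "rx_bytes", FIELD_SIZEOF(struct i40e_pf, stats.eth.rx_bytes),
 * offsetof(struct i40e_pf, stats.eth.rx_bytes) }, i.e. a name plus the size
 * and offset of the field, so the ethtool stats code can copy the value out
 * of the containing structure without knowing the field itself.
 */
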
static const struct i40e_stats i40e_gstrings_net_stats[] = {
	I40E_NETDEV_STAT(rx_packets),
	I40E_NETDEV_STAT(tx_packets),
	I40E_NETDEV_STAT(rx_bytes),
	I40E_NETDEV_STAT(tx_bytes),
	I40E_NETDEV_STAT(rx_errors),
	I40E_NETDEV_STAT(tx_errors),
	I40E_NETDEV_STAT(rx_dropped),
	I40E_NETDEV_STAT(tx_dropped),
	I40E_NETDEV_STAT(collisions),
	I40E_NETDEV_STAT(rx_length_errors),
	I40E_NETDEV_STAT(rx_crc_errors),
};

static const struct i40e_stats i40e_gstrings_veb_stats[] = {
	I40E_VEB_STAT("rx_bytes", stats.rx_bytes),
	I40E_VEB_STAT("tx_bytes", stats.tx_bytes),
	I40E_VEB_STAT("rx_unicast", stats.rx_unicast),
	I40E_VEB_STAT("tx_unicast", stats.tx_unicast),
	I40E_VEB_STAT("rx_multicast", stats.rx_multicast),
	I40E_VEB_STAT("tx_multicast", stats.tx_multicast),
	I40E_VEB_STAT("rx_broadcast", stats.rx_broadcast),
	I40E_VEB_STAT("tx_broadcast", stats.tx_broadcast),
	I40E_VEB_STAT("rx_discards", stats.rx_discards),
	I40E_VEB_STAT("tx_discards", stats.tx_discards),
	I40E_VEB_STAT("tx_errors", stats.tx_errors),
	I40E_VEB_STAT("rx_unknown_protocol", stats.rx_unknown_protocol),
};

static const struct i40e_stats i40e_gstrings_misc_stats[] = {
	I40E_VSI_STAT("rx_unicast", eth_stats.rx_unicast),
	I40E_VSI_STAT("tx_unicast", eth_stats.tx_unicast),
	I40E_VSI_STAT("rx_multicast", eth_stats.rx_multicast),
	I40E_VSI_STAT("tx_multicast", eth_stats.tx_multicast),
	I40E_VSI_STAT("rx_broadcast", eth_stats.rx_broadcast),
	I40E_VSI_STAT("tx_broadcast", eth_stats.tx_broadcast),
	I40E_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol),
};

static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
				 struct ethtool_rxnfc *cmd);

/* These PF_STATs might look like duplicates of some NETDEV_STATs,
 * but they are separate.  This device supports Virtualization, and
 * as such might have several netdevs supporting VMDq and FCoE going
 * through a single port.  The NETDEV_STATs are for individual netdevs
 * seen at the top of the stack, and the PF_STATs are for the physical
 * function at the bottom of the stack hosting those netdevs.
 *
 * The PF_STATs are appended to the netdev stats only when ethtool -S
 * is queried on the base PF netdev, not on the VMDq or FCoE netdev.
 */
static struct i40e_stats i40e_gstrings_stats[] = {
	I40E_PF_STAT("rx_bytes", stats.eth.rx_bytes),
	I40E_PF_STAT("tx_bytes", stats.eth.tx_bytes),
	I40E_PF_STAT("rx_unicast", stats.eth.rx_unicast),
	I40E_PF_STAT("tx_unicast", stats.eth.tx_unicast),
	I40E_PF_STAT("rx_multicast", stats.eth.rx_multicast),
	I40E_PF_STAT("tx_multicast", stats.eth.tx_multicast),
	I40E_PF_STAT("rx_broadcast", stats.eth.rx_broadcast),
	I40E_PF_STAT("tx_broadcast", stats.eth.tx_broadcast),
	I40E_PF_STAT("tx_errors", stats.eth.tx_errors),
	I40E_PF_STAT("rx_dropped", stats.eth.rx_discards),
	I40E_PF_STAT("tx_dropped_link_down", stats.tx_dropped_link_down),
	I40E_PF_STAT("crc_errors", stats.crc_errors),
	I40E_PF_STAT("illegal_bytes", stats.illegal_bytes),
	I40E_PF_STAT("mac_local_faults", stats.mac_local_faults),
	I40E_PF_STAT("mac_remote_faults", stats.mac_remote_faults),
	I40E_PF_STAT("tx_timeout", tx_timeout_count),
	I40E_PF_STAT("rx_csum_bad", hw_csum_rx_error),
	I40E_PF_STAT("rx_length_errors", stats.rx_length_errors),
	I40E_PF_STAT("link_xon_rx", stats.link_xon_rx),
	I40E_PF_STAT("link_xoff_rx", stats.link_xoff_rx),
	I40E_PF_STAT("link_xon_tx", stats.link_xon_tx),
	I40E_PF_STAT("link_xoff_tx", stats.link_xoff_tx),
	I40E_PF_STAT("rx_size_64", stats.rx_size_64),
	I40E_PF_STAT("rx_size_127", stats.rx_size_127),
	I40E_PF_STAT("rx_size_255", stats.rx_size_255),
	I40E_PF_STAT("rx_size_511", stats.rx_size_511),
	I40E_PF_STAT("rx_size_1023", stats.rx_size_1023),
	I40E_PF_STAT("rx_size_1522", stats.rx_size_1522),
	I40E_PF_STAT("rx_size_big", stats.rx_size_big),
	I40E_PF_STAT("tx_size_64", stats.tx_size_64),
	I40E_PF_STAT("tx_size_127", stats.tx_size_127),
	I40E_PF_STAT("tx_size_255", stats.tx_size_255),
	I40E_PF_STAT("tx_size_511", stats.tx_size_511),
	I40E_PF_STAT("tx_size_1023", stats.tx_size_1023),
	I40E_PF_STAT("tx_size_1522", stats.tx_size_1522),
	I40E_PF_STAT("tx_size_big", stats.tx_size_big),
	I40E_PF_STAT("rx_undersize", stats.rx_undersize),
	I40E_PF_STAT("rx_fragments", stats.rx_fragments),
	I40E_PF_STAT("rx_oversize", stats.rx_oversize),
	I40E_PF_STAT("rx_jabber", stats.rx_jabber),
	I40E_PF_STAT("VF_admin_queue_requests", vf_aq_requests),
	I40E_PF_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
	I40E_PF_STAT("fdir_flush_cnt", fd_flush_cnt),
	I40E_PF_STAT("fdir_atr_match", stats.fd_atr_match),
	I40E_PF_STAT("fdir_sb_match", stats.fd_sb_match),

	/* LPI stats */
	I40E_PF_STAT("tx_lpi_status", stats.tx_lpi_status),
	I40E_PF_STAT("rx_lpi_status", stats.rx_lpi_status),
	I40E_PF_STAT("tx_lpi_count", stats.tx_lpi_count),
	I40E_PF_STAT("rx_lpi_count", stats.rx_lpi_count),
};

#ifdef I40E_FCOE
static const struct i40e_stats i40e_gstrings_fcoe_stats[] = {
	I40E_VSI_STAT("fcoe_bad_fccrc", fcoe_stats.fcoe_bad_fccrc),
	I40E_VSI_STAT("rx_fcoe_dropped", fcoe_stats.rx_fcoe_dropped),
	I40E_VSI_STAT("rx_fcoe_packets", fcoe_stats.rx_fcoe_packets),
	I40E_VSI_STAT("rx_fcoe_dwords", fcoe_stats.rx_fcoe_dwords),
	I40E_VSI_STAT("fcoe_ddp_count", fcoe_stats.fcoe_ddp_count),
	I40E_VSI_STAT("fcoe_last_error", fcoe_stats.fcoe_last_error),
	I40E_VSI_STAT("tx_fcoe_packets", fcoe_stats.tx_fcoe_packets),
	I40E_VSI_STAT("tx_fcoe_dwords", fcoe_stats.tx_fcoe_dwords),
};
#endif /* I40E_FCOE */

#define I40E_QUEUE_STATS_LEN(n) \
	(((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs \
	    * 2 /* Tx and Rx together */ \
	    * (sizeof(struct i40e_queue_stats) / sizeof(u64)))
#define I40E_GLOBAL_STATS_LEN	ARRAY_SIZE(i40e_gstrings_stats)
#define I40E_NETDEV_STATS_LEN	ARRAY_SIZE(i40e_gstrings_net_stats)
#define I40E_MISC_STATS_LEN	ARRAY_SIZE(i40e_gstrings_misc_stats)
#ifdef I40E_FCOE
#define I40E_FCOE_STATS_LEN	ARRAY_SIZE(i40e_gstrings_fcoe_stats)
#define I40E_VSI_STATS_LEN(n)	(I40E_NETDEV_STATS_LEN + \
				 I40E_FCOE_STATS_LEN + \
				 I40E_MISC_STATS_LEN + \
				 I40E_QUEUE_STATS_LEN((n)))
#else
#define I40E_VSI_STATS_LEN(n)	(I40E_NETDEV_STATS_LEN + \
				 I40E_MISC_STATS_LEN + \
				 I40E_QUEUE_STATS_LEN((n)))
#endif /* I40E_FCOE */
#define I40E_PFC_STATS_LEN ( \
		(FIELD_SIZEOF(struct i40e_pf, stats.priority_xoff_rx) + \
		 FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_rx) + \
		 FIELD_SIZEOF(struct i40e_pf, stats.priority_xoff_tx) + \
		 FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_tx) + \
		 FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_2_xoff)) \
		 / sizeof(u64))
#define I40E_VEB_STATS_LEN	ARRAY_SIZE(i40e_gstrings_veb_stats)
#define I40E_PF_STATS_LEN(n)	(I40E_GLOBAL_STATS_LEN + \
				 I40E_PFC_STATS_LEN + \
				 I40E_VSI_STATS_LEN((n)))

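/* Rough sketch of what ethtool -S reports for the main PF netdev:
 * I40E_PF_STATS_LEN(netdev) = global PF stats + per-priority flow control
 * counters + I40E_VSI_STATS_LEN(netdev), where the VSI portion covers the
 * netdev stats, the misc VSI stats and, per Tx and Rx queue, one entry for
 * each u64 in struct i40e_queue_stats (assumed here to be packets and bytes).
 */
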
enum i40e_ethtool_test_id {
	I40E_ETH_TEST_REG = 0,
	I40E_ETH_TEST_EEPROM,
	I40E_ETH_TEST_INTR,
	I40E_ETH_TEST_LOOPBACK,
	I40E_ETH_TEST_LINK,
};

static const char i40e_gstrings_test[][ETH_GSTRING_LEN] = {
	"Register test  (offline)",
	"Eeprom test    (offline)",
	"Interrupt test (offline)",
	"Loopback test  (offline)",
	"Link test   (on/offline)"
};

#define I40E_TEST_LEN (sizeof(i40e_gstrings_test) / ETH_GSTRING_LEN)

/**
 * i40e_partition_setting_complaint - generic complaint for MFP restriction
 * @pf: the PF struct
 **/
static void i40e_partition_setting_complaint(struct i40e_pf *pf)
{
	dev_info(&pf->pdev->dev,
		 "The link settings are allowed to be changed only from the first partition of a given port. Please switch to the first partition in order to change the setting.\n");
}

/**
 * i40e_get_settings - Get Link Speed and Duplex settings
 * @netdev: network interface device structure
 * @ecmd: ethtool command
 *
 * Reports speed/duplex settings based on media_type
 **/
static int i40e_get_settings(struct net_device *netdev,
			     struct ethtool_cmd *ecmd)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_link_status *hw_link_info = &hw->phy.link_info;
	bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP;
	u32 link_speed = hw_link_info->link_speed;

	/* hardware is either in 40G mode or 10G mode
	 * NOTE: this section initializes supported and advertising
	 */
	if (!link_up) {
		/* link is down and the driver needs to fall back on
		 * device ID to determine what kinds of info to display,
		 * it's mostly a guess that may change when link is up
		 */
		switch (hw->device_id) {
		case I40E_DEV_ID_QSFP_A:
		case I40E_DEV_ID_QSFP_B:
		case I40E_DEV_ID_QSFP_C:
			/* pluggable QSFP */
			ecmd->supported = SUPPORTED_40000baseSR4_Full |
					  SUPPORTED_40000baseCR4_Full |
					  SUPPORTED_40000baseLR4_Full;
			ecmd->advertising = ADVERTISED_40000baseSR4_Full |
					    ADVERTISED_40000baseCR4_Full |
					    ADVERTISED_40000baseLR4_Full;
			break;
		case I40E_DEV_ID_KX_B:
			/* backplane 40G */
			ecmd->supported = SUPPORTED_40000baseKR4_Full;
			ecmd->advertising = ADVERTISED_40000baseKR4_Full;
			break;
		case I40E_DEV_ID_KX_C:
			/* backplane 10G */
			ecmd->supported = SUPPORTED_10000baseKR_Full;
			ecmd->advertising = ADVERTISED_10000baseKR_Full;
			break;
		case I40E_DEV_ID_10G_BASE_T:
			ecmd->supported = SUPPORTED_10000baseT_Full |
					  SUPPORTED_1000baseT_Full |
					  SUPPORTED_100baseT_Full;
			ecmd->advertising = ADVERTISED_10000baseT_Full |
					    ADVERTISED_1000baseT_Full |
					    ADVERTISED_100baseT_Full;
			break;
		default:
			/* all the rest are 10G/1G */
			ecmd->supported = SUPPORTED_10000baseT_Full |
					  SUPPORTED_1000baseT_Full;
			ecmd->advertising = ADVERTISED_10000baseT_Full |
					    ADVERTISED_1000baseT_Full;
			break;
		}

		/* skip phy_type use as it is zero when link is down */
		goto no_valid_phy_type;
	}

	switch (hw_link_info->phy_type) {
	case I40E_PHY_TYPE_40GBASE_CR4:
	case I40E_PHY_TYPE_40GBASE_CR4_CU:
		ecmd->supported = SUPPORTED_Autoneg |
				  SUPPORTED_40000baseCR4_Full;
		ecmd->advertising = ADVERTISED_Autoneg |
				    ADVERTISED_40000baseCR4_Full;
		break;
	case I40E_PHY_TYPE_40GBASE_KR4:
		ecmd->supported = SUPPORTED_Autoneg |
				  SUPPORTED_40000baseKR4_Full;
		ecmd->advertising = ADVERTISED_Autoneg |
				    ADVERTISED_40000baseKR4_Full;
		break;
	case I40E_PHY_TYPE_40GBASE_SR4:
	case I40E_PHY_TYPE_XLPPI:
	case I40E_PHY_TYPE_XLAUI:
		ecmd->supported = SUPPORTED_40000baseSR4_Full;
		break;
	case I40E_PHY_TYPE_40GBASE_LR4:
		ecmd->supported = SUPPORTED_40000baseLR4_Full;
		break;
	case I40E_PHY_TYPE_10GBASE_KX4:
		ecmd->supported = SUPPORTED_Autoneg |
				  SUPPORTED_10000baseKX4_Full;
		ecmd->advertising = ADVERTISED_Autoneg |
				    ADVERTISED_10000baseKX4_Full;
		break;
	case I40E_PHY_TYPE_10GBASE_KR:
		ecmd->supported = SUPPORTED_Autoneg |
				  SUPPORTED_10000baseKR_Full;
		ecmd->advertising = ADVERTISED_Autoneg |
				    ADVERTISED_10000baseKR_Full;
		break;
	case I40E_PHY_TYPE_10GBASE_SR:
	case I40E_PHY_TYPE_10GBASE_LR:
	case I40E_PHY_TYPE_1000BASE_SX:
	case I40E_PHY_TYPE_1000BASE_LX:
		ecmd->supported = SUPPORTED_10000baseT_Full;
		ecmd->supported |= SUPPORTED_1000baseT_Full;
		break;
	case I40E_PHY_TYPE_10GBASE_CR1_CU:
	case I40E_PHY_TYPE_10GBASE_CR1:
	case I40E_PHY_TYPE_10GBASE_T:
		ecmd->supported = SUPPORTED_Autoneg |
				  SUPPORTED_10000baseT_Full |
				  SUPPORTED_1000baseT_Full |
				  SUPPORTED_100baseT_Full;
		ecmd->advertising = ADVERTISED_Autoneg |
				    ADVERTISED_10000baseT_Full |
				    ADVERTISED_1000baseT_Full |
				    ADVERTISED_100baseT_Full;
		break;
	case I40E_PHY_TYPE_XAUI:
	case I40E_PHY_TYPE_XFI:
	case I40E_PHY_TYPE_SFI:
	case I40E_PHY_TYPE_10GBASE_SFPP_CU:
		ecmd->supported = SUPPORTED_10000baseT_Full;
		break;
	case I40E_PHY_TYPE_1000BASE_KX:
	case I40E_PHY_TYPE_1000BASE_T:
		ecmd->supported = SUPPORTED_Autoneg |
				  SUPPORTED_10000baseT_Full |
				  SUPPORTED_1000baseT_Full |
				  SUPPORTED_100baseT_Full;
		ecmd->advertising = ADVERTISED_Autoneg |
				    ADVERTISED_10000baseT_Full |
				    ADVERTISED_1000baseT_Full |
				    ADVERTISED_100baseT_Full;
		break;
	case I40E_PHY_TYPE_100BASE_TX:
		ecmd->supported = SUPPORTED_Autoneg |
				  SUPPORTED_10000baseT_Full |
				  SUPPORTED_1000baseT_Full |
				  SUPPORTED_100baseT_Full;
		ecmd->advertising = ADVERTISED_Autoneg |
				    ADVERTISED_10000baseT_Full |
				    ADVERTISED_1000baseT_Full |
				    ADVERTISED_100baseT_Full;
		break;
	case I40E_PHY_TYPE_SGMII:
		ecmd->supported = SUPPORTED_Autoneg |
				  SUPPORTED_1000baseT_Full |
				  SUPPORTED_100baseT_Full;
		ecmd->advertising = ADVERTISED_Autoneg |
				    ADVERTISED_1000baseT_Full |
				    ADVERTISED_100baseT_Full;
		break;
	default:
		/* if we got here and link is up something bad is afoot */
		netdev_info(netdev, "WARNING: Link is up but PHY type 0x%x is not recognized.\n",
			    hw_link_info->phy_type);
	}

no_valid_phy_type:
	/* this is if autoneg is enabled or disabled */
	ecmd->autoneg = ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
			  AUTONEG_ENABLE : AUTONEG_DISABLE);

	switch (hw->phy.media_type) {
	case I40E_MEDIA_TYPE_BACKPLANE:
		ecmd->supported |= SUPPORTED_Autoneg |
				   SUPPORTED_Backplane;
		ecmd->advertising |= ADVERTISED_Autoneg |
				     ADVERTISED_Backplane;
		ecmd->port = PORT_NONE;
		break;
	case I40E_MEDIA_TYPE_BASET:
		ecmd->supported |= SUPPORTED_TP;
		ecmd->advertising |= ADVERTISED_TP;
		ecmd->port = PORT_TP;
		break;
	case I40E_MEDIA_TYPE_DA:
	case I40E_MEDIA_TYPE_CX4:
		ecmd->supported |= SUPPORTED_FIBRE;
		ecmd->advertising |= ADVERTISED_FIBRE;
		ecmd->port = PORT_DA;
		break;
	case I40E_MEDIA_TYPE_FIBER:
		ecmd->supported |= SUPPORTED_FIBRE;
		ecmd->port = PORT_FIBRE;
		break;
	case I40E_MEDIA_TYPE_UNKNOWN:
	default:
		ecmd->port = PORT_OTHER;
		break;
	}

	ecmd->transceiver = XCVR_EXTERNAL;

	ecmd->supported |= SUPPORTED_Pause;

	switch (hw->fc.current_mode) {
	case I40E_FC_FULL:
		ecmd->advertising |= ADVERTISED_Pause;
		break;
	case I40E_FC_TX_PAUSE:
		ecmd->advertising |= ADVERTISED_Asym_Pause;
		break;
	case I40E_FC_RX_PAUSE:
		ecmd->advertising |= (ADVERTISED_Pause |
				      ADVERTISED_Asym_Pause);
		break;
	default:
		ecmd->advertising &= ~(ADVERTISED_Pause |
				       ADVERTISED_Asym_Pause);
		break;
	}

	if (link_up) {
		switch (link_speed) {
		case I40E_LINK_SPEED_40GB:
			/* need a SPEED_40000 in ethtool.h */
			ethtool_cmd_speed_set(ecmd, 40000);
			break;
		case I40E_LINK_SPEED_10GB:
			ethtool_cmd_speed_set(ecmd, SPEED_10000);
			break;
		case I40E_LINK_SPEED_1GB:
			ethtool_cmd_speed_set(ecmd, SPEED_1000);
			break;
		case I40E_LINK_SPEED_100MB:
			ethtool_cmd_speed_set(ecmd, SPEED_100);
			break;
		default:
			break;
		}
		ecmd->duplex = DUPLEX_FULL;
	} else {
		ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
		ecmd->duplex = DUPLEX_UNKNOWN;
	}

	return 0;
}

/**
 * i40e_set_settings - Set Speed and Duplex
 * @netdev: network interface device structure
 * @ecmd: ethtool command
 *
 * Set speed/duplex per media_types advertised/forced
 **/
static int i40e_set_settings(struct net_device *netdev,
			     struct ethtool_cmd *ecmd)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_aq_get_phy_abilities_resp abilities;
	struct i40e_aq_set_phy_config config;
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_hw *hw = &pf->hw;
	struct ethtool_cmd safe_ecmd;
	i40e_status status = 0;
	bool change = false;
	int err = 0;
	u8 autoneg;
	u32 advertise;

	/* Changing port settings is not supported if this isn't the
	 * port's controlling PF
	 */
	if (hw->partition_id != 1) {
		i40e_partition_setting_complaint(pf);
		return -EOPNOTSUPP;
	}

	if (vsi != pf->vsi[pf->lan_vsi])
		return -EOPNOTSUPP;

	if (hw->phy.media_type != I40E_MEDIA_TYPE_BASET &&
	    hw->phy.media_type != I40E_MEDIA_TYPE_FIBER &&
	    hw->phy.media_type != I40E_MEDIA_TYPE_BACKPLANE &&
	    hw->phy.link_info.link_info & I40E_AQ_LINK_UP)
		return -EOPNOTSUPP;

	/* get our own copy of the bits to check against */
	memset(&safe_ecmd, 0, sizeof(struct ethtool_cmd));
	i40e_get_settings(netdev, &safe_ecmd);

	/* save autoneg and speed out of ecmd */
	autoneg = ecmd->autoneg;
	advertise = ecmd->advertising;

	/* set autoneg and speed back to what they currently are */
	ecmd->autoneg = safe_ecmd.autoneg;
	ecmd->advertising = safe_ecmd.advertising;
	ecmd->cmd = safe_ecmd.cmd;

	/* If ecmd and safe_ecmd are not the same now, then they are
	 * trying to set something that we do not support
	 */
	if (memcmp(ecmd, &safe_ecmd, sizeof(struct ethtool_cmd)))
		return -EOPNOTSUPP;

	while (test_bit(__I40E_CONFIG_BUSY, &vsi->state))
		usleep_range(1000, 2000);

	/* Get the current phy config */
	status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
					      NULL);
	if (status)
		return -EAGAIN;

	/* Copy abilities to config in case autoneg is not
	 * set below
	 */
	memset(&config, 0, sizeof(struct i40e_aq_set_phy_config));
	config.abilities = abilities.abilities;

	/* Check autoneg */
	if (autoneg == AUTONEG_ENABLE) {
		/* If autoneg is not supported, return error */
		if (!(safe_ecmd.supported & SUPPORTED_Autoneg)) {
			netdev_info(netdev, "Autoneg not supported on this phy\n");
			return -EINVAL;
		}
		/* If autoneg was not already enabled */
		if (!(hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED)) {
			config.abilities = abilities.abilities |
					   I40E_AQ_PHY_ENABLE_AN;
			change = true;
		}
	} else {
		/* If autoneg is supported 10GBASE_T is the only phy that
		 * can disable it, so otherwise return error
		 */
		if (safe_ecmd.supported & SUPPORTED_Autoneg &&
		    hw->phy.link_info.phy_type != I40E_PHY_TYPE_10GBASE_T) {
			netdev_info(netdev, "Autoneg cannot be disabled on this phy\n");
			return -EINVAL;
		}
		/* If autoneg is currently enabled */
		if (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) {
			config.abilities = abilities.abilities &
					   ~I40E_AQ_PHY_ENABLE_AN;
			change = true;
		}
	}

	if (advertise & ~safe_ecmd.supported)
		return -EINVAL;

	if (advertise & ADVERTISED_100baseT_Full)
		config.link_speed |= I40E_LINK_SPEED_100MB;
	if (advertise & ADVERTISED_1000baseT_Full ||
	    advertise & ADVERTISED_1000baseKX_Full)
		config.link_speed |= I40E_LINK_SPEED_1GB;
	if (advertise & ADVERTISED_10000baseT_Full ||
	    advertise & ADVERTISED_10000baseKX4_Full ||
	    advertise & ADVERTISED_10000baseKR_Full)
		config.link_speed |= I40E_LINK_SPEED_10GB;
	if (advertise & ADVERTISED_40000baseKR4_Full ||
	    advertise & ADVERTISED_40000baseCR4_Full ||
	    advertise & ADVERTISED_40000baseSR4_Full ||
	    advertise & ADVERTISED_40000baseLR4_Full)
		config.link_speed |= I40E_LINK_SPEED_40GB;

	if (change || (abilities.link_speed != config.link_speed)) {
		/* copy over the rest of the abilities */
		config.phy_type = abilities.phy_type;
		config.eee_capability = abilities.eee_capability;
		config.eeer = abilities.eeer_val;
		config.low_power_ctrl = abilities.d3_lpan;

		/* set link and auto negotiation so changes take effect */
		config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
		/* If link is up put link down */
		if (hw->phy.link_info.link_info & I40E_AQ_LINK_UP) {
			/* Tell the OS link is going down, the link will go
			 * back up when fw says it is ready asynchronously
			 */
			netdev_info(netdev, "PHY settings change requested, NIC Link is going down.\n");
			netif_carrier_off(netdev);
			netif_tx_stop_all_queues(netdev);
		}

		/* make the aq call */
		status = i40e_aq_set_phy_config(hw, &config, NULL);
		if (status) {
			netdev_info(netdev, "Set phy config failed with error %d.\n",
				    status);
			return -EAGAIN;
		}

		status = i40e_aq_get_link_info(hw, true, NULL, NULL);
		if (status)
			netdev_info(netdev, "Updating link info failed with error %d\n",
				    status);

	} else {
		netdev_info(netdev, "Nothing changed, exiting without setting anything.\n");
	}

	return err;
}

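/**
 * i40e_nway_reset - restart autonegotiation
 * @netdev: network interface device structure
 *
 * Asks the firmware to restart autonegotiation on the port, keeping the
 * link in its current up/down state.
 **/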
static int i40e_nway_reset(struct net_device *netdev)
{
	/* restart autonegotiation */
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_hw *hw = &pf->hw;
	bool link_up = hw->phy.link_info.link_info & I40E_AQ_LINK_UP;
	i40e_status ret = 0;

	ret = i40e_aq_set_link_restart_an(hw, link_up, NULL);
	if (ret) {
		netdev_info(netdev, "link restart failed, aq_err=%d\n",
			    pf->hw.aq.asq_last_status);
		return -EIO;
	}

	return 0;
}

/**
 * i40e_get_pauseparam -  Get Flow Control status
 * Return tx/rx-pause status
 **/
static void i40e_get_pauseparam(struct net_device *netdev,
				struct ethtool_pauseparam *pause)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_link_status *hw_link_info = &hw->phy.link_info;
	struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config;

	pause->autoneg =
		((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
		  AUTONEG_ENABLE : AUTONEG_DISABLE);

	/* PFC enabled so report LFC as off */
	if (dcbx_cfg->pfc.pfcenable) {
		pause->rx_pause = 0;
		pause->tx_pause = 0;
		return;
	}

	if (hw->fc.current_mode == I40E_FC_RX_PAUSE) {
		pause->rx_pause = 1;
	} else if (hw->fc.current_mode == I40E_FC_TX_PAUSE) {
		pause->tx_pause = 1;
	} else if (hw->fc.current_mode == I40E_FC_FULL) {
		pause->rx_pause = 1;
		pause->tx_pause = 1;
	}
}

/**
 * i40e_set_pauseparam - Set Flow Control parameter
 * @netdev: network interface device structure
 * @pause: return tx/rx flow control status
 **/
static int i40e_set_pauseparam(struct net_device *netdev,
			       struct ethtool_pauseparam *pause)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_link_status *hw_link_info = &hw->phy.link_info;
	struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config;
	bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP;
	i40e_status status;
	u8 aq_failures;
	int err = 0;

	/* Changing the port's flow control is not supported if this isn't the
	 * port's controlling PF
	 */
	if (hw->partition_id != 1) {
		i40e_partition_setting_complaint(pf);
		return -EOPNOTSUPP;
	}

	if (vsi != pf->vsi[pf->lan_vsi])
		return -EOPNOTSUPP;

	if (pause->autoneg != ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
	    AUTONEG_ENABLE : AUTONEG_DISABLE)) {
		netdev_info(netdev, "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
		return -EOPNOTSUPP;
	}

	/* If we have link and don't have autoneg */
	if (!test_bit(__I40E_DOWN, &pf->state) &&
	    !(hw_link_info->an_info & I40E_AQ_AN_COMPLETED)) {
		/* Send message that it might not necessarily work*/
		netdev_info(netdev, "Autoneg did not complete so changing settings may not result in an actual change.\n");
	}

	if (dcbx_cfg->pfc.pfcenable) {
		netdev_info(netdev,
			    "Priority flow control enabled. Cannot set link flow control.\n");
		return -EOPNOTSUPP;
	}

	if (pause->rx_pause && pause->tx_pause)
		hw->fc.requested_mode = I40E_FC_FULL;
	else if (pause->rx_pause && !pause->tx_pause)
		hw->fc.requested_mode = I40E_FC_RX_PAUSE;
	else if (!pause->rx_pause && pause->tx_pause)
		hw->fc.requested_mode = I40E_FC_TX_PAUSE;
	else if (!pause->rx_pause && !pause->tx_pause)
		hw->fc.requested_mode = I40E_FC_NONE;
	else
		return -EINVAL;

	/* Tell the OS link is going down, the link will go back up when fw
	 * says it is ready asynchronously
	 */
	netdev_info(netdev, "Flow control settings change requested, NIC Link is going down.\n");
	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(netdev);

	/* Set the fc mode and only restart an if link is up*/
	status = i40e_set_fc(hw, &aq_failures, link_up);

	if (aq_failures & I40E_SET_FC_AQ_FAIL_GET) {
		netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with error %d and status %d\n",
			    status, hw->aq.asq_last_status);
		err = -EAGAIN;
	}
	if (aq_failures & I40E_SET_FC_AQ_FAIL_SET) {
		netdev_info(netdev, "Set fc failed on the set_phy_config call with error %d and status %d\n",
			    status, hw->aq.asq_last_status);
		err = -EAGAIN;
	}
	if (aq_failures & I40E_SET_FC_AQ_FAIL_UPDATE) {
		netdev_info(netdev, "Set fc failed on the get_link_info call with error %d and status %d\n",
			    status, hw->aq.asq_last_status);
		err = -EAGAIN;
	}

	if (!test_bit(__I40E_DOWN, &pf->state)) {
		/* Give it a little more time to try to come back */
		msleep(75);
		if (!test_bit(__I40E_DOWN, &pf->state))
			return i40e_nway_reset(netdev);
	}

	return err;
}

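/* i40e_get_msglevel and i40e_set_msglevel report and update the driver's
 * message level (pf->msg_enable); any value with an I40E_DEBUG_USER bit set
 * is also copied into hw.debug_mask to enable the shared-code debug prints.
 */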
static u32 i40e_get_msglevel(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;

	return pf->msg_enable;
}

static void i40e_set_msglevel(struct net_device *netdev, u32 data)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;

	if (I40E_DEBUG_USER & data)
		pf->hw.debug_mask = data;
	pf->msg_enable = data;
}

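/* The two register-dump helpers below walk i40e_reg_list (provided by the
 * diag code), first summing and then reading each described register range
 * of the form offset + j * stride for 'elements' entries.
 */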
static int i40e_get_regs_len(struct net_device *netdev)
{
	int reg_count = 0;
	int i;

	for (i = 0; i40e_reg_list[i].offset != 0; i++)
		reg_count += i40e_reg_list[i].elements;

	return reg_count * sizeof(u32);
}

static void i40e_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
			  void *p)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u32 *reg_buf = p;
	int i, j, ri;
	u32 reg;

	/* Tell ethtool which driver-version-specific regs output we have.
	 *
	 * At some point, if we have ethtool doing special formatting of
	 * this data, it will rely on this version number to know how to
	 * interpret things.  Hence, this needs to be updated if/when the
	 * diags register table is changed.
	 */
	regs->version = 1;

	/* loop through the diags reg table for what to print */
	ri = 0;
	for (i = 0; i40e_reg_list[i].offset != 0; i++) {
		for (j = 0; j < i40e_reg_list[i].elements; j++) {
			reg = i40e_reg_list[i].offset
				+ (j * i40e_reg_list[i].stride);
			reg_buf[ri++] = rd32(hw, reg);
		}
	}
}

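/* EEPROM/NVM access: a plain ethtool read (magic of zero, or the default
 * vendor_id | (device_id << 16) value) uses the sector-by-sector NVM read
 * below, while the userland NVMUpdate tool encodes additional control
 * information in the magic and is routed to i40e_nvmupd_command() instead.
 */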
static int i40e_get_eeprom(struct net_device *netdev,
			   struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_hw *hw = &np->vsi->back->hw;
	struct i40e_pf *pf = np->vsi->back;
	int ret_val = 0, len, offset;
	u8 *eeprom_buff;
	u16 i, sectors;
	bool last;
	u32 magic;

#define I40E_NVM_SECTOR_SIZE  4096
	if (eeprom->len == 0)
		return -EINVAL;

	/* check for NVMUpdate access method */
	magic = hw->vendor_id | (hw->device_id << 16);
	if (eeprom->magic && eeprom->magic != magic) {
		struct i40e_nvm_access *cmd;
		int errno;

		/* make sure it is the right magic for NVMUpdate */
		if ((eeprom->magic >> 16) != hw->device_id)
			return -EINVAL;

		cmd = (struct i40e_nvm_access *)eeprom;
		ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno);
		if (ret_val)
			dev_info(&pf->pdev->dev,
				 "NVMUpdate read failed err=%d status=0x%x errno=%d module=%d offset=0x%x size=%d\n",
				 ret_val, hw->aq.asq_last_status, errno,
				 (u8)(cmd->config & I40E_NVM_MOD_PNT_MASK),
				 cmd->offset, cmd->data_size);

		return errno;
	}

	/* normal ethtool get_eeprom support */
	eeprom->magic = hw->vendor_id | (hw->device_id << 16);

	eeprom_buff = kzalloc(eeprom->len, GFP_KERNEL);
	if (!eeprom_buff)
		return -ENOMEM;

	ret_val = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
	if (ret_val) {
		dev_info(&pf->pdev->dev,
			 "Failed Acquiring NVM resource for read err=%d status=0x%x\n",
			 ret_val, hw->aq.asq_last_status);
		goto free_buff;
	}

	sectors = eeprom->len / I40E_NVM_SECTOR_SIZE;
	sectors += (eeprom->len % I40E_NVM_SECTOR_SIZE) ? 1 : 0;
	len = I40E_NVM_SECTOR_SIZE;
	last = false;
	for (i = 0; i < sectors; i++) {
		if (i == (sectors - 1)) {
			len = eeprom->len - (I40E_NVM_SECTOR_SIZE * i);
			last = true;
		}
		offset = eeprom->offset + (I40E_NVM_SECTOR_SIZE * i),
		ret_val = i40e_aq_read_nvm(hw, 0x0, offset, len,
				(u8 *)eeprom_buff + (I40E_NVM_SECTOR_SIZE * i),
				last, NULL);
		if (ret_val && hw->aq.asq_last_status == I40E_AQ_RC_EPERM) {
			dev_info(&pf->pdev->dev,
				 "read NVM failed, invalid offset 0x%x\n",
				 offset);
			break;
		} else if (ret_val &&
			   hw->aq.asq_last_status == I40E_AQ_RC_EACCES) {
			dev_info(&pf->pdev->dev,
				 "read NVM failed, access, offset 0x%x\n",
				 offset);
			break;
		} else if (ret_val) {
			dev_info(&pf->pdev->dev,
				 "read NVM failed offset %d err=%d status=0x%x\n",
				 offset, ret_val, hw->aq.asq_last_status);
			break;
		}
	}

	i40e_release_nvm(hw);
	memcpy(bytes, (u8 *)eeprom_buff, eeprom->len);
free_buff:
	kfree(eeprom_buff);
	return ret_val;
}

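/**
 * i40e_get_eeprom_len - report the flash size exposed through ethtool
 * @netdev: network interface device structure
 *
 * Derived from the FL_SIZE field of GLPCI_LBARCTRL, which encodes the
 * flash size as a power of two in 64KB units.
 **/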
static int i40e_get_eeprom_len(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_hw *hw = &np->vsi->back->hw;
	u32 val;

	val = (rd32(hw, I40E_GLPCI_LBARCTRL)
		& I40E_GLPCI_LBARCTRL_FL_SIZE_MASK)
		>> I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT;
	/* register returns value in power of 2, 64Kbyte chunks. */
	val = (64 * 1024) * (1 << val);
	return val;
}

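/* Writes are only accepted through the NVMUpdate access method; the standard
 * ethtool write path is rejected because multipart writes need the NVM
 * semaphore management and checksum handling that the userland tool provides.
 */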
static int i40e_set_eeprom(struct net_device *netdev,
			   struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_hw *hw = &np->vsi->back->hw;
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_nvm_access *cmd;
	int ret_val = 0;
	int errno;
	u32 magic;

	/* normal ethtool set_eeprom is not supported */
	magic = hw->vendor_id | (hw->device_id << 16);
	if (eeprom->magic == magic)
		return -EOPNOTSUPP;

	/* check for NVMUpdate access method */
	if (!eeprom->magic || (eeprom->magic >> 16) != hw->device_id)
		return -EINVAL;

	if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) ||
	    test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state))
		return -EBUSY;

	cmd = (struct i40e_nvm_access *)eeprom;
	ret_val = i40e_nvmupd_command(hw, cmd, bytes, &errno);
	if (ret_val && hw->aq.asq_last_status != I40E_AQ_RC_EBUSY)
		dev_info(&pf->pdev->dev,
			 "NVMUpdate write failed err=%d status=0x%x errno=%d module=%d offset=0x%x size=%d\n",
			 ret_val, hw->aq.asq_last_status, errno,
			 (u8)(cmd->config & I40E_NVM_MOD_PNT_MASK),
			 cmd->offset, cmd->data_size);

	return errno;
}

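/**
 * i40e_get_drvinfo - report driver, firmware and bus information
 * @netdev: network interface device structure
 * @drvinfo: ethtool driver info structure to fill in
 **/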
static void i40e_get_drvinfo(struct net_device *netdev,
			     struct ethtool_drvinfo *drvinfo)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;

	strlcpy(drvinfo->driver, i40e_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, i40e_driver_version_str,
		sizeof(drvinfo->version));
	strlcpy(drvinfo->fw_version, i40e_fw_version_str(&pf->hw),
		sizeof(drvinfo->fw_version));
	strlcpy(drvinfo->bus_info, pci_name(pf->pdev),
		sizeof(drvinfo->bus_info));
}

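/**
 * i40e_get_ringparam - report Tx/Rx descriptor ring sizes
 * @netdev: network interface device structure
 * @ring: ethtool ringparam structure to fill in
 *
 * Reports the current and maximum descriptor counts using the LAN VSI's
 * first Tx and Rx rings.
 **/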
static void i40e_get_ringparam(struct net_device *netdev,
			       struct ethtool_ringparam *ring)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_pf *pf = np->vsi->back;
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];

	ring->rx_max_pending = I40E_MAX_NUM_DESCRIPTORS;
	ring->tx_max_pending = I40E_MAX_NUM_DESCRIPTORS;
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->rx_pending = vsi->rx_rings[0]->count;
	ring->tx_pending = vsi->tx_rings[0]->count;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
}

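/**
 * i40e_set_ringparam - change Tx/Rx descriptor ring sizes
 * @netdev: network interface device structure
 * @ring: requested descriptor counts (typically set with ethtool -G)
 *
 * Validates the request, then either stores the new counts for the next
 * time the netdev is brought up or, if it is running, allocates replacement
 * rings, briefly brings the VSI down and swaps them in.
 **/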
static int i40e_set_ringparam ( struct net_device * netdev ,
struct ethtool_ringparam * ring )
{
struct i40e_ring * tx_rings = NULL , * rx_rings = NULL ;
struct i40e_netdev_priv * np = netdev_priv ( netdev ) ;
struct i40e_vsi * vsi = np - > vsi ;
struct i40e_pf * pf = vsi - > back ;
u32 new_rx_count , new_tx_count ;
int i , err = 0 ;
if ( ( ring - > rx_mini_pending ) | | ( ring - > rx_jumbo_pending ) )
return - EINVAL ;
2013-11-20 10:03:08 +00:00
if ( ring - > tx_pending > I40E_MAX_NUM_DESCRIPTORS | |
ring - > tx_pending < I40E_MIN_NUM_DESCRIPTORS | |
ring - > rx_pending > I40E_MAX_NUM_DESCRIPTORS | |
ring - > rx_pending < I40E_MIN_NUM_DESCRIPTORS ) {
netdev_info ( netdev ,
" Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d] \n " ,
ring - > tx_pending , ring - > rx_pending ,
I40E_MIN_NUM_DESCRIPTORS , I40E_MAX_NUM_DESCRIPTORS ) ;
return - EINVAL ;
}
new_tx_count = ALIGN ( ring - > tx_pending , I40E_REQ_DESCRIPTOR_MULTIPLE ) ;
new_rx_count = ALIGN ( ring - > rx_pending , I40E_REQ_DESCRIPTOR_MULTIPLE ) ;
2013-09-11 08:39:56 +00:00
/* if nothing to do return success */
2013-09-28 06:00:58 +00:00
if ( ( new_tx_count = = vsi - > tx_rings [ 0 ] - > count ) & &
( new_rx_count = = vsi - > rx_rings [ 0 ] - > count ) )
2013-09-11 08:39:56 +00:00
return 0 ;
while ( test_and_set_bit ( __I40E_CONFIG_BUSY , & pf - > state ) )
usleep_range ( 1000 , 2000 ) ;
if ( ! netif_running ( vsi - > netdev ) ) {
/* simple case - set for the next time the netdev is started */
for ( i = 0 ; i < vsi - > num_queue_pairs ; i + + ) {
2013-09-28 06:00:58 +00:00
vsi - > tx_rings [ i ] - > count = new_tx_count ;
vsi - > rx_rings [ i ] - > count = new_rx_count ;
2013-09-11 08:39:56 +00:00
}
goto done ;
}
/* We can't just free everything and then setup again,
* because the ISRs in MSI - X mode get passed pointers
* to the Tx and Rx ring structs .
*/
/* alloc updated Tx resources */
2013-09-28 06:00:58 +00:00
if ( new_tx_count ! = vsi - > tx_rings [ 0 ] - > count ) {
2013-09-11 08:39:56 +00:00
netdev_info ( netdev ,
" Changing Tx descriptor count from %d to %d. \n " ,
2013-09-28 06:00:58 +00:00
vsi - > tx_rings [ 0 ] - > count , new_tx_count ) ;
2013-09-11 08:39:56 +00:00
tx_rings = kcalloc ( vsi - > alloc_queue_pairs ,
sizeof ( struct i40e_ring ) , GFP_KERNEL ) ;
if ( ! tx_rings ) {
err = - ENOMEM ;
goto done ;
}
for ( i = 0 ; i < vsi - > num_queue_pairs ; i + + ) {
/* clone ring and setup updated count */
tx_rings [ i ] = * vsi - > tx_rings [ i ] ;
tx_rings [ i ] . count = new_tx_count ;
err = i40e_setup_tx_descriptors ( & tx_rings [ i ] ) ;
if ( err ) {
while ( i ) {
i - - ;
i40e_free_tx_resources ( & tx_rings [ i ] ) ;
}
kfree ( tx_rings ) ;
tx_rings = NULL ;
goto done ;
}
}
}
/* alloc updated Rx resources */
if ( new_rx_count ! = vsi - > rx_rings [ 0 ] - > count ) {
netdev_info ( netdev ,
" Changing Rx descriptor count from %d to %d \n " ,
vsi - > rx_rings [ 0 ] - > count , new_rx_count ) ;
rx_rings = kcalloc ( vsi - > alloc_queue_pairs ,
sizeof ( struct i40e_ring ) , GFP_KERNEL ) ;
if ( ! rx_rings ) {
err = - ENOMEM ;
goto free_tx ;
}
for ( i = 0 ; i < vsi - > num_queue_pairs ; i + + ) {
/* clone ring and setup updated count */
rx_rings [ i ] = * vsi - > rx_rings [ i ] ;
rx_rings [ i ] . count = new_rx_count ;
err = i40e_setup_rx_descriptors ( & rx_rings [ i ] ) ;
if ( err ) {
while ( i ) {
i - - ;
i40e_free_rx_resources ( & rx_rings [ i ] ) ;
}
kfree ( rx_rings ) ;
rx_rings = NULL ;
goto free_tx ;
}
}
}
/* Bring interface down, copy in the new ring info,
* then restore the interface
*/
i40e_down ( vsi ) ;
if ( tx_rings ) {
for ( i = 0 ; i < vsi - > num_queue_pairs ; i + + ) {
i40e_free_tx_resources ( vsi - > tx_rings [ i ] ) ;
* vsi - > tx_rings [ i ] = tx_rings [ i ] ;
}
kfree ( tx_rings ) ;
tx_rings = NULL ;
}
if ( rx_rings ) {
for ( i = 0 ; i < vsi - > num_queue_pairs ; i + + ) {
i40e_free_rx_resources ( vsi - > rx_rings [ i ] ) ;
* vsi - > rx_rings [ i ] = rx_rings [ i ] ;
}
kfree ( rx_rings ) ;
rx_rings = NULL ;
}
i40e_up ( vsi ) ;
free_tx :
/* error cleanup if the Rx allocations failed after getting Tx */
if ( tx_rings ) {
for ( i = 0 ; i < vsi - > num_queue_pairs ; i + + )
i40e_free_tx_resources ( & tx_rings [ i ] ) ;
kfree ( tx_rings ) ;
tx_rings = NULL ;
}
done :
clear_bit ( __I40E_CONFIG_BUSY , & pf - > state ) ;
return err ;
}
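/* For illustration only (typical ethtool usage, assumed rather than defined here):
 *
 *	ethtool -G <ifname> rx 1024 tx 1024
 *
 * lands in the handler above, which range-checks the request, aligns it to
 * I40E_REQ_DESCRIPTOR_MULTIPLE and rebuilds the rings only if a count changed.
 */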
static int i40e_get_sset_count ( struct net_device * netdev , int sset )
{
struct i40e_netdev_priv * np = netdev_priv ( netdev ) ;
struct i40e_vsi * vsi = np - > vsi ;
struct i40e_pf * pf = vsi - > back ;
switch ( sset ) {
case ETH_SS_TEST :
return I40E_TEST_LEN ;
case ETH_SS_STATS :
if ( vsi = = pf - > vsi [ pf - > lan_vsi ] ) {
int len = I40E_PF_STATS_LEN ( netdev ) ;
if ( pf - > lan_veb ! = I40E_NO_VEB )
len + = I40E_VEB_STATS_LEN ;
return len ;
} else {
return I40E_VSI_STATS_LEN ( netdev ) ;
}
default :
return - EOPNOTSUPP ;
}
}
static void i40e_get_ethtool_stats ( struct net_device * netdev ,
struct ethtool_stats * stats , u64 * data )
{
struct i40e_netdev_priv * np = netdev_priv ( netdev ) ;
struct i40e_ring * tx_ring , * rx_ring ;
struct i40e_vsi * vsi = np - > vsi ;
struct i40e_pf * pf = vsi - > back ;
int i = 0 ;
char * p ;
int j ;
struct rtnl_link_stats64 * net_stats = i40e_get_vsi_stats_struct ( vsi ) ;
unsigned int start ;
i40e_update_stats ( vsi ) ;
for ( j = 0 ; j < I40E_NETDEV_STATS_LEN ; j + + ) {
p = ( char * ) net_stats + i40e_gstrings_net_stats [ j ] . stat_offset ;
data [ i + + ] = ( i40e_gstrings_net_stats [ j ] . sizeof_stat = =
sizeof ( u64 ) ) ? * ( u64 * ) p : * ( u32 * ) p ;
}
for ( j = 0 ; j < I40E_MISC_STATS_LEN ; j + + ) {
p = ( char * ) vsi + i40e_gstrings_misc_stats [ j ] . stat_offset ;
data [ i + + ] = ( i40e_gstrings_misc_stats [ j ] . sizeof_stat = =
sizeof ( u64 ) ) ? * ( u64 * ) p : * ( u32 * ) p ;
}
# ifdef I40E_FCOE
for ( j = 0 ; j < I40E_FCOE_STATS_LEN ; j + + ) {
p = ( char * ) vsi + i40e_gstrings_fcoe_stats [ j ] . stat_offset ;
data [ i + + ] = ( i40e_gstrings_fcoe_stats [ j ] . sizeof_stat = =
sizeof ( u64 ) ) ? * ( u64 * ) p : * ( u32 * ) p ;
}
# endif
rcu_read_lock ( ) ;
for ( j = 0 ; j < vsi - > num_queue_pairs ; j + + ) {
tx_ring = ACCESS_ONCE ( vsi - > tx_rings [ j ] ) ;
if ( ! tx_ring )
continue ;
/* process Tx ring statistics */
do {
start = u64_stats_fetch_begin_irq ( & tx_ring - > syncp ) ;
data [ i ] = tx_ring - > stats . packets ;
data [ i + 1 ] = tx_ring - > stats . bytes ;
} while ( u64_stats_fetch_retry_irq ( & tx_ring - > syncp , start ) ) ;
i + = 2 ;
/* Rx ring is the 2nd half of the queue pair */
rx_ring = & tx_ring [ 1 ] ;
do {
start = u64_stats_fetch_begin_irq ( & rx_ring - > syncp ) ;
data [ i ] = rx_ring - > stats . packets ;
data [ i + 1 ] = rx_ring - > stats . bytes ;
} while ( u64_stats_fetch_retry_irq ( & rx_ring - > syncp , start ) ) ;
i + = 2 ;
}
rcu_read_unlock ( ) ;
if ( vsi ! = pf - > vsi [ pf - > lan_vsi ] )
return ;
if ( pf - > lan_veb ! = I40E_NO_VEB ) {
struct i40e_veb * veb = pf - > veb [ pf - > lan_veb ] ;
for ( j = 0 ; j < I40E_VEB_STATS_LEN ; j + + ) {
p = ( char * ) veb ;
p + = i40e_gstrings_veb_stats [ j ] . stat_offset ;
data [ i + + ] = ( i40e_gstrings_veb_stats [ j ] . sizeof_stat = =
sizeof ( u64 ) ) ? * ( u64 * ) p : * ( u32 * ) p ;
}
}
for ( j = 0 ; j < I40E_GLOBAL_STATS_LEN ; j + + ) {
p = ( char * ) pf + i40e_gstrings_stats [ j ] . stat_offset ;
data [ i + + ] = ( i40e_gstrings_stats [ j ] . sizeof_stat = =
sizeof ( u64 ) ) ? * ( u64 * ) p : * ( u32 * ) p ;
}
for ( j = 0 ; j < I40E_MAX_USER_PRIORITY ; j + + ) {
data [ i + + ] = pf - > stats . priority_xon_tx [ j ] ;
data [ i + + ] = pf - > stats . priority_xoff_tx [ j ] ;
}
for ( j = 0 ; j < I40E_MAX_USER_PRIORITY ; j + + ) {
data [ i + + ] = pf - > stats . priority_xon_rx [ j ] ;
data [ i + + ] = pf - > stats . priority_xoff_rx [ j ] ;
}
for ( j = 0 ; j < I40E_MAX_USER_PRIORITY ; j + + )
data [ i + + ] = pf - > stats . priority_xon_2_xoff [ j ] ;
}
static void i40e_get_strings ( struct net_device * netdev , u32 stringset ,
u8 * data )
{
struct i40e_netdev_priv * np = netdev_priv ( netdev ) ;
struct i40e_vsi * vsi = np - > vsi ;
struct i40e_pf * pf = vsi - > back ;
char * p = ( char * ) data ;
int i ;
switch ( stringset ) {
case ETH_SS_TEST :
for ( i = 0 ; i < I40E_TEST_LEN ; i + + ) {
memcpy ( data , i40e_gstrings_test [ i ] , ETH_GSTRING_LEN ) ;
data + = ETH_GSTRING_LEN ;
}
break ;
case ETH_SS_STATS :
for ( i = 0 ; i < I40E_NETDEV_STATS_LEN ; i + + ) {
snprintf ( p , ETH_GSTRING_LEN , " %s " ,
i40e_gstrings_net_stats [ i ] . stat_string ) ;
p + = ETH_GSTRING_LEN ;
}
for ( i = 0 ; i < I40E_MISC_STATS_LEN ; i + + ) {
snprintf ( p , ETH_GSTRING_LEN , " %s " ,
i40e_gstrings_misc_stats [ i ] . stat_string ) ;
p + = ETH_GSTRING_LEN ;
}
# ifdef I40E_FCOE
for ( i = 0 ; i < I40E_FCOE_STATS_LEN ; i + + ) {
snprintf ( p , ETH_GSTRING_LEN , " %s " ,
i40e_gstrings_fcoe_stats [ i ] . stat_string ) ;
p + = ETH_GSTRING_LEN ;
}
# endif
for ( i = 0 ; i < vsi - > num_queue_pairs ; i + + ) {
snprintf ( p , ETH_GSTRING_LEN , " tx-%u.tx_packets " , i ) ;
p + = ETH_GSTRING_LEN ;
snprintf ( p , ETH_GSTRING_LEN , " tx-%u.tx_bytes " , i ) ;
p + = ETH_GSTRING_LEN ;
snprintf ( p , ETH_GSTRING_LEN , " rx-%u.rx_packets " , i ) ;
p + = ETH_GSTRING_LEN ;
snprintf ( p , ETH_GSTRING_LEN , " rx-%u.rx_bytes " , i ) ;
p + = ETH_GSTRING_LEN ;
}
if ( vsi ! = pf - > vsi [ pf - > lan_vsi ] )
return ;
if ( pf - > lan_veb ! = I40E_NO_VEB ) {
for ( i = 0 ; i < I40E_VEB_STATS_LEN ; i + + ) {
snprintf ( p , ETH_GSTRING_LEN , " veb.%s " ,
i40e_gstrings_veb_stats [ i ] . stat_string ) ;
p + = ETH_GSTRING_LEN ;
}
}
for ( i = 0 ; i < I40E_GLOBAL_STATS_LEN ; i + + ) {
snprintf ( p , ETH_GSTRING_LEN , " port.%s " ,
i40e_gstrings_stats [ i ] . stat_string ) ;
p + = ETH_GSTRING_LEN ;
}
for ( i = 0 ; i < I40E_MAX_USER_PRIORITY ; i + + ) {
snprintf ( p , ETH_GSTRING_LEN ,
" port.tx_priority_%u_xon " , i ) ;
p + = ETH_GSTRING_LEN ;
snprintf ( p , ETH_GSTRING_LEN ,
" port.tx_priority_%u_xoff " , i ) ;
p + = ETH_GSTRING_LEN ;
}
for ( i = 0 ; i < I40E_MAX_USER_PRIORITY ; i + + ) {
snprintf ( p , ETH_GSTRING_LEN ,
" port.rx_priority_%u_xon " , i ) ;
p + = ETH_GSTRING_LEN ;
snprintf ( p , ETH_GSTRING_LEN ,
" port.rx_priority_%u_xoff " , i ) ;
p + = ETH_GSTRING_LEN ;
}
for ( i = 0 ; i < I40E_MAX_USER_PRIORITY ; i + + ) {
snprintf ( p , ETH_GSTRING_LEN ,
" port.rx_priority_%u_xon_2_xoff " , i ) ;
p + = ETH_GSTRING_LEN ;
}
/* BUG_ON(p - data != I40E_STATS_LEN * ETH_GSTRING_LEN); */
break ;
}
}
static int i40e_get_ts_info ( struct net_device * dev ,
struct ethtool_ts_info * info )
{
struct i40e_pf * pf = i40e_netdev_to_pf ( dev ) ;
/* only report HW timestamping if PTP is enabled */
if ( ! ( pf - > flags & I40E_FLAG_PTP ) )
return ethtool_op_get_ts_info ( dev , info ) ;
info - > so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
SOF_TIMESTAMPING_RX_SOFTWARE |
SOF_TIMESTAMPING_SOFTWARE |
SOF_TIMESTAMPING_TX_HARDWARE |
SOF_TIMESTAMPING_RX_HARDWARE |
SOF_TIMESTAMPING_RAW_HARDWARE ;
if ( pf - > ptp_clock )
info - > phc_index = ptp_clock_index ( pf - > ptp_clock ) ;
else
info - > phc_index = - 1 ;
info - > tx_types = ( 1 < < HWTSTAMP_TX_OFF ) | ( 1 < < HWTSTAMP_TX_ON ) ;
info - > rx_filters = ( 1 < < HWTSTAMP_FILTER_NONE ) |
( 1 < < HWTSTAMP_FILTER_PTP_V1_L4_SYNC ) |
( 1 < < HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ ) |
( 1 < < HWTSTAMP_FILTER_PTP_V2_EVENT ) |
( 1 < < HWTSTAMP_FILTER_PTP_V2_L2_EVENT ) |
( 1 < < HWTSTAMP_FILTER_PTP_V2_L4_EVENT ) |
( 1 < < HWTSTAMP_FILTER_PTP_V2_SYNC ) |
( 1 < < HWTSTAMP_FILTER_PTP_V2_L2_SYNC ) |
( 1 < < HWTSTAMP_FILTER_PTP_V2_L4_SYNC ) |
( 1 < < HWTSTAMP_FILTER_PTP_V2_DELAY_REQ ) |
( 1 < < HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ ) |
( 1 < < HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ ) ;
return 0 ;
}
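/* Illustrative example of how userspace reads this (assumed ethtool usage):
 *
 *	ethtool -T <ifname>
 *
 * reports the timestamping capabilities filled in above, including the PHC
 * index once a PTP clock has been registered for the port.
 */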
static int i40e_link_test ( struct net_device * netdev , u64 * data )
{
struct i40e_netdev_priv * np = netdev_priv ( netdev ) ;
struct i40e_pf * pf = np - > vsi - > back ;
netif_info ( pf , hw , netdev , " link test \n " ) ;
if ( i40e_get_link_status ( & pf - > hw ) )
* data = 0 ;
else
* data = 1 ;
return * data ;
}
static int i40e_reg_test ( struct net_device * netdev , u64 * data )
{
struct i40e_netdev_priv * np = netdev_priv ( netdev ) ;
struct i40e_pf * pf = np - > vsi - > back ;
netif_info ( pf , hw , netdev , " register test \n " ) ;
* data = i40e_diag_reg_test ( & pf - > hw ) ;
return * data ;
}
static int i40e_eeprom_test ( struct net_device * netdev , u64 * data )
{
struct i40e_netdev_priv * np = netdev_priv ( netdev ) ;
struct i40e_pf * pf = np - > vsi - > back ;
netif_info ( pf , hw , netdev , " eeprom test \n " ) ;
* data = i40e_diag_eeprom_test ( & pf - > hw ) ;
/* forcibly clear the NVM Update state machine */
pf - > hw . nvmupd_state = I40E_NVMUPD_STATE_INIT ;
return * data ;
}
static int i40e_intr_test ( struct net_device * netdev , u64 * data )
{
struct i40e_netdev_priv * np = netdev_priv ( netdev ) ;
struct i40e_pf * pf = np - > vsi - > back ;
u16 swc_old = pf - > sw_int_count ;
netif_info ( pf , hw , netdev , " interrupt test \n " ) ;
wr32 ( & pf - > hw , I40E_PFINT_DYN_CTL0 ,
( I40E_PFINT_DYN_CTL0_INTENA_MASK |
I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK |
I40E_PFINT_DYN_CTL0_ITR_INDX_MASK |
I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK |
I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK ) ) ;
usleep_range ( 1000 , 2000 ) ;
* data = ( swc_old = = pf - > sw_int_count ) ;
return * data ;
}
static int i40e_loopback_test ( struct net_device * netdev , u64 * data )
{
struct i40e_netdev_priv * np = netdev_priv ( netdev ) ;
struct i40e_pf * pf = np - > vsi - > back ;
netif_info ( pf , hw , netdev , " loopback test not implemented \n " ) ;
* data = 0 ;
return * data ;
}
static void i40e_diag_test ( struct net_device * netdev ,
struct ethtool_test * eth_test , u64 * data )
{
struct i40e_netdev_priv * np = netdev_priv ( netdev ) ;
struct i40e_pf * pf = np - > vsi - > back ;
if ( eth_test - > flags = = ETH_TEST_FL_OFFLINE ) {
/* Offline tests */
netif_info ( pf , drv , netdev , " offline testing starting \n " ) ;
set_bit ( __I40E_TESTING , & pf - > state ) ;
/* Link test performed before hardware reset
* so autoneg doesn ' t interfere with test result
*/
if ( i40e_link_test ( netdev , & data [ I40E_ETH_TEST_LINK ] ) )
eth_test - > flags | = ETH_TEST_FL_FAILED ;
if ( i40e_eeprom_test ( netdev , & data [ I40E_ETH_TEST_EEPROM ] ) )
eth_test - > flags | = ETH_TEST_FL_FAILED ;
if ( i40e_intr_test ( netdev , & data [ I40E_ETH_TEST_INTR ] ) )
eth_test - > flags | = ETH_TEST_FL_FAILED ;
if ( i40e_loopback_test ( netdev , & data [ I40E_ETH_TEST_LOOPBACK ] ) )
eth_test - > flags | = ETH_TEST_FL_FAILED ;
/* run reg test last, a reset is required after it */
if ( i40e_reg_test ( netdev , & data [ I40E_ETH_TEST_REG ] ) )
eth_test - > flags | = ETH_TEST_FL_FAILED ;
clear_bit ( __I40E_TESTING , & pf - > state ) ;
i40e_do_reset ( pf , ( 1 < < __I40E_PF_RESET_REQUESTED ) ) ;
} else {
/* Online tests */
netif_info ( pf , drv , netdev , " online testing starting \n " ) ;
if ( i40e_link_test ( netdev , & data [ I40E_ETH_TEST_LINK ] ) )
eth_test - > flags | = ETH_TEST_FL_FAILED ;
/* Offline only tests, not run in online; pass by default */
data [ I40E_ETH_TEST_REG ] = 0 ;
data [ I40E_ETH_TEST_EEPROM ] = 0 ;
data [ I40E_ETH_TEST_INTR ] = 0 ;
data [ I40E_ETH_TEST_LOOPBACK ] = 0 ;
}
netif_info ( pf , drv , netdev , " testing finished \n " ) ;
}
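/* Example invocation from userspace (illustrative, not part of the driver):
 *
 *	ethtool -t <ifname> offline
 *	ethtool -t <ifname> online
 *
 * The offline run exercises the link, eeprom, interrupt, loopback and register
 * tests above and finishes with a PF reset; the online run only checks link.
 */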
static void i40e_get_wol ( struct net_device * netdev ,
struct ethtool_wolinfo * wol )
{
struct i40e_netdev_priv * np = netdev_priv ( netdev ) ;
struct i40e_pf * pf = np - > vsi - > back ;
struct i40e_hw * hw = & pf - > hw ;
u16 wol_nvm_bits ;
/* NVM bit on means WoL disabled for the port */
i40e_read_nvm_word ( hw , I40E_SR_NVM_WAKE_ON_LAN , & wol_nvm_bits ) ;
if ( ( 1 < < hw - > port ) & wol_nvm_bits | | hw - > partition_id ! = 1 ) {
wol - > supported = 0 ;
wol - > wolopts = 0 ;
} else {
wol - > supported = WAKE_MAGIC ;
wol - > wolopts = ( pf - > wol_en ? WAKE_MAGIC : 0 ) ;
}
}
/**
* i40e_set_wol - set the WakeOnLAN configuration
* @ netdev : the netdev in question
* @ wol : the ethtool WoL setting data
* */
static int i40e_set_wol ( struct net_device * netdev , struct ethtool_wolinfo * wol )
{
struct i40e_netdev_priv * np = netdev_priv ( netdev ) ;
struct i40e_pf * pf = np - > vsi - > back ;
struct i40e_vsi * vsi = np - > vsi ;
struct i40e_hw * hw = & pf - > hw ;
u16 wol_nvm_bits ;
/* WoL not supported if this isn't the controlling PF on the port */
if ( hw - > partition_id ! = 1 ) {
i40e_partition_setting_complaint ( pf ) ;
return - EOPNOTSUPP ;
}
if ( vsi ! = pf - > vsi [ pf - > lan_vsi ] )
return - EOPNOTSUPP ;
/* NVM bit on means WoL disabled for the port */
i40e_read_nvm_word ( hw , I40E_SR_NVM_WAKE_ON_LAN , & wol_nvm_bits ) ;
if ( ( ( 1 < < hw - > port ) & wol_nvm_bits ) )
return - EOPNOTSUPP ;
/* only magic packet is supported */
if ( wol - > wolopts & & ( wol - > wolopts ! = WAKE_MAGIC ) )
return - EOPNOTSUPP ;
/* is this a new value? */
if ( pf - > wol_en ! = ! ! wol - > wolopts ) {
pf - > wol_en = ! ! wol - > wolopts ;
device_set_wakeup_enable ( & pf - > pdev - > dev , pf - > wol_en ) ;
}
return 0 ;
}
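/* A hedged example of typical usage (assumed, not defined in this file):
 *
 *	ethtool -s <ifname> wol g
 *
 * requests magic-packet wake through the handler above; the request is refused
 * when the NVM disables WoL for the port or this is not the controlling
 * partition.
 */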
static int i40e_set_phys_id ( struct net_device * netdev ,
enum ethtool_phys_id_state state )
{
struct i40e_netdev_priv * np = netdev_priv ( netdev ) ;
struct i40e_pf * pf = np - > vsi - > back ;
struct i40e_hw * hw = & pf - > hw ;
int blink_freq = 2 ;
switch ( state ) {
case ETHTOOL_ID_ACTIVE :
pf - > led_status = i40e_led_get ( hw ) ;
return blink_freq ;
case ETHTOOL_ID_ON :
i40e_led_set ( hw , 0xF , false ) ;
break ;
case ETHTOOL_ID_OFF :
i40e_led_set ( hw , 0x0 , false ) ;
break ;
case ETHTOOL_ID_INACTIVE :
i40e_led_set ( hw , pf - > led_status , false ) ;
break ;
}
return 0 ;
}
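/* Typical userspace trigger (illustrative):
 *
 *	ethtool -p <ifname> 10
 *
 * blinks the port LED for about ten seconds through the handler above and then
 * restores the previously saved LED state.
 */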
/* NOTE: i40e hardware uses a conversion factor of 2 for Interrupt
* Throttle Rate ( ITR ) , i.e. ITR ( 1 ) = 2 us , ITR ( 10 ) = 20 us , and also
* 125 us ( 8000 interrupts per second ) = = ITR ( 62 )
*/
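/* A minimal sketch of the conversion noted above, for illustration only; the
 * helper name is hypothetical and is not used anywhere in the driver.
 */
static inline u16 i40e_example_usecs_to_itr(u16 usecs)
{
	/* hardware counts ITR in 2 usec units, so 125 usecs maps to ITR(62) */
	return usecs / 2;
}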
static int i40e_get_coalesce ( struct net_device * netdev ,
struct ethtool_coalesce * ec )
{
struct i40e_netdev_priv * np = netdev_priv ( netdev ) ;
struct i40e_vsi * vsi = np - > vsi ;
ec - > tx_max_coalesced_frames_irq = vsi - > work_limit ;
ec - > rx_max_coalesced_frames_irq = vsi - > work_limit ;
if ( ITR_IS_DYNAMIC ( vsi - > rx_itr_setting ) )
ec - > use_adaptive_rx_coalesce = 1 ;
if ( ITR_IS_DYNAMIC ( vsi - > tx_itr_setting ) )
ec - > use_adaptive_tx_coalesce = 1 ;
ec - > rx_coalesce_usecs = vsi - > rx_itr_setting & ~ I40E_ITR_DYNAMIC ;
ec - > tx_coalesce_usecs = vsi - > tx_itr_setting & ~ I40E_ITR_DYNAMIC ;
return 0 ;
}
static int i40e_set_coalesce ( struct net_device * netdev ,
struct ethtool_coalesce * ec )
{
struct i40e_netdev_priv * np = netdev_priv ( netdev ) ;
struct i40e_q_vector * q_vector ;
struct i40e_vsi * vsi = np - > vsi ;
struct i40e_pf * pf = vsi - > back ;
struct i40e_hw * hw = & pf - > hw ;
u16 vector ;
int i ;
if ( ec - > tx_max_coalesced_frames_irq | | ec - > rx_max_coalesced_frames_irq )
vsi - > work_limit = ec - > tx_max_coalesced_frames_irq ;
vector = vsi - > base_vector ;
if ( ( ec - > rx_coalesce_usecs > = ( I40E_MIN_ITR < < 1 ) ) & &
( ec - > rx_coalesce_usecs < = ( I40E_MAX_ITR < < 1 ) ) ) {
vsi - > rx_itr_setting = ec - > rx_coalesce_usecs ;
} else if ( ec - > rx_coalesce_usecs = = 0 ) {
vsi - > rx_itr_setting = ec - > rx_coalesce_usecs ;
if ( ec - > use_adaptive_rx_coalesce )
netif_info ( pf , drv , netdev , " rx-usecs=0, need to disable adaptive-rx for a complete disable \n " ) ;
} else {
netif_info ( pf , drv , netdev , " Invalid value, rx-usecs range is 0-8160 \n " ) ;
return - EINVAL ;
}
if ( ( ec - > tx_coalesce_usecs > = ( I40E_MIN_ITR < < 1 ) ) & &
( ec - > tx_coalesce_usecs < = ( I40E_MAX_ITR < < 1 ) ) ) {
vsi - > tx_itr_setting = ec - > tx_coalesce_usecs ;
} else if ( ec - > tx_coalesce_usecs = = 0 ) {
vsi - > tx_itr_setting = ec - > tx_coalesce_usecs ;
if ( ec - > use_adaptive_tx_coalesce )
netif_info ( pf , drv , netdev , " tx-usecs=0, need to disable adaptive-tx for a complete disable \n " ) ;
} else {
netif_info ( pf , drv , netdev ,
" Invalid value, tx-usecs range is 0-8160 \n " ) ;
return - EINVAL ;
}
if ( ec - > use_adaptive_rx_coalesce )
vsi - > rx_itr_setting | = I40E_ITR_DYNAMIC ;
else
vsi - > rx_itr_setting & = ~ I40E_ITR_DYNAMIC ;
if ( ec - > use_adaptive_tx_coalesce )
vsi - > tx_itr_setting | = I40E_ITR_DYNAMIC ;
else
vsi - > tx_itr_setting & = ~ I40E_ITR_DYNAMIC ;
for ( i = 0 ; i < vsi - > num_q_vectors ; i + + , vector + + ) {
q_vector = vsi - > q_vectors [ i ] ;
q_vector - > rx . itr = ITR_TO_REG ( vsi - > rx_itr_setting ) ;
wr32 ( hw , I40E_PFINT_ITRN ( 0 , vector - 1 ) , q_vector - > rx . itr ) ;
q_vector - > tx . itr = ITR_TO_REG ( vsi - > tx_itr_setting ) ;
wr32 ( hw , I40E_PFINT_ITRN ( 1 , vector - 1 ) , q_vector - > tx . itr ) ;
i40e_flush ( hw ) ;
}
return 0 ;
}
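/* Illustrative tuning example (assumed ethtool usage, not defined here):
 *
 *	ethtool -C <ifname> adaptive-rx off rx-usecs 50
 *
 * tunes Rx interrupt moderation through the handler above; a value of 0 with
 * the adaptive mode disabled turns moderation off for that direction.
 */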
/**
* i40e_get_rss_hash_opts - Get RSS hash Input Set for each flow type
* @ pf : pointer to the physical function struct
* @ cmd : ethtool rxnfc command
*
* Returns Success if the flow is supported , else Invalid Input .
* */
static int i40e_get_rss_hash_opts ( struct i40e_pf * pf , struct ethtool_rxnfc * cmd )
{
cmd - > data = 0 ;
/* Report default options for RSS on i40e */
switch ( cmd - > flow_type ) {
case TCP_V4_FLOW :
case UDP_V4_FLOW :
cmd - > data | = RXH_L4_B_0_1 | RXH_L4_B_2_3 ;
/* fall through to add IP fields */
case SCTP_V4_FLOW :
case AH_ESP_V4_FLOW :
case AH_V4_FLOW :
case ESP_V4_FLOW :
case IPV4_FLOW :
cmd - > data | = RXH_IP_SRC | RXH_IP_DST ;
break ;
case TCP_V6_FLOW :
case UDP_V6_FLOW :
cmd - > data | = RXH_L4_B_0_1 | RXH_L4_B_2_3 ;
/* fall through to add IP fields */
case SCTP_V6_FLOW :
case AH_ESP_V6_FLOW :
case AH_V6_FLOW :
case ESP_V6_FLOW :
case IPV6_FLOW :
cmd - > data | = RXH_IP_SRC | RXH_IP_DST ;
break ;
default :
return - EINVAL ;
}
return 0 ;
}
/**
* i40e_get_ethtool_fdir_all - Populates the rule count of a command
* @ pf : Pointer to the physical function struct
* @ cmd : The command to get or set Rx flow classification rules
* @ rule_locs : Array of used rule locations
*
* This function populates both the total and actual rule count of
* the ethtool flow classification command
*
* Returns 0 on success or - EMSGSIZE if entry not found
* */
static int i40e_get_ethtool_fdir_all ( struct i40e_pf * pf ,
struct ethtool_rxnfc * cmd ,
u32 * rule_locs )
{
struct i40e_fdir_filter * rule ;
struct hlist_node * node2 ;
int cnt = 0 ;
/* report total rule count */
cmd - > data = i40e_get_fd_cnt_all ( pf ) ;
hlist_for_each_entry_safe ( rule , node2 ,
& pf - > fdir_filter_list , fdir_node ) {
if ( cnt = = cmd - > rule_cnt )
return - EMSGSIZE ;
rule_locs [ cnt ] = rule - > fd_id ;
cnt + + ;
}
cmd - > rule_cnt = cnt ;
return 0 ;
}
/**
* i40e_get_ethtool_fdir_entry - Look up a filter based on Rx flow
* @ pf : Pointer to the physical function struct
* @ cmd : The command to get or set Rx flow classification rules
*
* This function looks up a filter based on the Rx flow classification
* command and fills the flow spec info for it if found
*
* Returns 0 on success or - EINVAL if filter not found
* */
static int i40e_get_ethtool_fdir_entry ( struct i40e_pf * pf ,
struct ethtool_rxnfc * cmd )
{
struct ethtool_rx_flow_spec * fsp =
( struct ethtool_rx_flow_spec * ) & cmd - > fs ;
struct i40e_fdir_filter * rule = NULL ;
struct hlist_node * node2 ;
hlist_for_each_entry_safe ( rule , node2 ,
& pf - > fdir_filter_list , fdir_node ) {
if ( fsp - > location < = rule - > fd_id )
break ;
}
if ( ! rule | | fsp - > location ! = rule - > fd_id )
return - EINVAL ;
fsp - > flow_type = rule - > flow_type ;
if ( fsp - > flow_type = = IP_USER_FLOW ) {
fsp - > h_u . usr_ip4_spec . ip_ver = ETH_RX_NFC_IP4 ;
fsp - > h_u . usr_ip4_spec . proto = 0 ;
fsp - > m_u . usr_ip4_spec . proto = 0 ;
}
/* Reverse the src and dest notion, since the HW views them from
* Tx perspective whereas the user expects it from the Rx filter view .
*/
fsp - > h_u . tcp_ip4_spec . psrc = rule - > dst_port ;
fsp - > h_u . tcp_ip4_spec . pdst = rule - > src_port ;
fsp - > h_u . tcp_ip4_spec . ip4src = rule - > dst_ip [ 0 ] ;
fsp - > h_u . tcp_ip4_spec . ip4dst = rule - > src_ip [ 0 ] ;
if ( rule - > dest_ctl = = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET )
fsp - > ring_cookie = RX_CLS_FLOW_DISC ;
else
fsp - > ring_cookie = rule - > q_index ;
return 0 ;
}
/**
* i40e_get_rxnfc - command to get RX flow classification rules
* @ netdev : network interface device structure
* @ cmd : ethtool rxnfc command
*
* Returns Success if the command is supported .
* */
static int i40e_get_rxnfc ( struct net_device * netdev , struct ethtool_rxnfc * cmd ,
u32 * rule_locs )
{
struct i40e_netdev_priv * np = netdev_priv ( netdev ) ;
struct i40e_vsi * vsi = np - > vsi ;
struct i40e_pf * pf = vsi - > back ;
int ret = - EOPNOTSUPP ;
switch ( cmd - > cmd ) {
case ETHTOOL_GRXRINGS :
cmd - > data = vsi - > alloc_queue_pairs ;
ret = 0 ;
break ;
case ETHTOOL_GRXFH :
ret = i40e_get_rss_hash_opts ( pf , cmd ) ;
break ;
case ETHTOOL_GRXCLSRLCNT :
cmd - > rule_cnt = pf - > fdir_pf_active_filters ;
/* report total rule count */
cmd - > data = i40e_get_fd_cnt_all ( pf ) ;
ret = 0 ;
break ;
case ETHTOOL_GRXCLSRULE :
ret = i40e_get_ethtool_fdir_entry ( pf , cmd ) ;
break ;
case ETHTOOL_GRXCLSRLALL :
ret = i40e_get_ethtool_fdir_all ( pf , cmd , rule_locs ) ;
break ;
default :
break ;
}
return ret ;
}
/**
* i40e_set_rss_hash_opt - Enable / Disable flow types for RSS hash
* @ pf : pointer to the physical function struct
* @ cmd : ethtool rxnfc command
*
* Returns Success if the flow input set is supported .
* */
static int i40e_set_rss_hash_opt ( struct i40e_pf * pf , struct ethtool_rxnfc * nfc )
{
struct i40e_hw * hw = & pf - > hw ;
u64 hena = ( u64 ) rd32 ( hw , I40E_PFQF_HENA ( 0 ) ) |
( ( u64 ) rd32 ( hw , I40E_PFQF_HENA ( 1 ) ) < < 32 ) ;
/* RSS does not support anything other than hashing
* to queues on src and dst IPs and ports
*/
if ( nfc - > data & ~ ( RXH_IP_SRC | RXH_IP_DST |
RXH_L4_B_0_1 | RXH_L4_B_2_3 ) )
return - EINVAL ;
/* We need at least the IP SRC and DEST fields for hashing */
if ( ! ( nfc - > data & RXH_IP_SRC ) | |
! ( nfc - > data & RXH_IP_DST ) )
return - EINVAL ;
switch ( nfc - > flow_type ) {
case TCP_V4_FLOW :
switch ( nfc - > data & ( RXH_L4_B_0_1 | RXH_L4_B_2_3 ) ) {
case 0 :
hena & = ~ ( ( u64 ) 1 < < I40E_FILTER_PCTYPE_NONF_IPV4_TCP ) ;
break ;
case ( RXH_L4_B_0_1 | RXH_L4_B_2_3 ) :
hena | = ( ( u64 ) 1 < < I40E_FILTER_PCTYPE_NONF_IPV4_TCP ) ;
break ;
default :
return - EINVAL ;
}
break ;
case TCP_V6_FLOW :
switch ( nfc - > data & ( RXH_L4_B_0_1 | RXH_L4_B_2_3 ) ) {
case 0 :
hena & = ~ ( ( u64 ) 1 < < I40E_FILTER_PCTYPE_NONF_IPV6_TCP ) ;
break ;
case ( RXH_L4_B_0_1 | RXH_L4_B_2_3 ) :
hena | = ( ( u64 ) 1 < < I40E_FILTER_PCTYPE_NONF_IPV6_TCP ) ;
break ;
default :
return - EINVAL ;
}
break ;
case UDP_V4_FLOW :
switch ( nfc - > data & ( RXH_L4_B_0_1 | RXH_L4_B_2_3 ) ) {
case 0 :
hena & = ~ ( ( ( u64 ) 1 < < I40E_FILTER_PCTYPE_NONF_IPV4_UDP ) |
( ( u64 ) 1 < < I40E_FILTER_PCTYPE_FRAG_IPV4 ) ) ;
break ;
case ( RXH_L4_B_0_1 | RXH_L4_B_2_3 ) :
hena | = ( ( ( u64 ) 1 < < I40E_FILTER_PCTYPE_NONF_IPV4_UDP ) |
( ( u64 ) 1 < < I40E_FILTER_PCTYPE_FRAG_IPV4 ) ) ;
break ;
default :
return - EINVAL ;
}
break ;
case UDP_V6_FLOW :
switch ( nfc - > data & ( RXH_L4_B_0_1 | RXH_L4_B_2_3 ) ) {
case 0 :
hena & = ~ ( ( ( u64 ) 1 < < I40E_FILTER_PCTYPE_NONF_IPV6_UDP ) |
( ( u64 ) 1 < < I40E_FILTER_PCTYPE_FRAG_IPV6 ) ) ;
break ;
case ( RXH_L4_B_0_1 | RXH_L4_B_2_3 ) :
hena | = ( ( ( u64 ) 1 < < I40E_FILTER_PCTYPE_NONF_IPV6_UDP ) |
( ( u64 ) 1 < < I40E_FILTER_PCTYPE_FRAG_IPV6 ) ) ;
break ;
default :
return - EINVAL ;
}
break ;
case AH_ESP_V4_FLOW :
case AH_V4_FLOW :
case ESP_V4_FLOW :
case SCTP_V4_FLOW :
if ( ( nfc - > data & RXH_L4_B_0_1 ) | |
( nfc - > data & RXH_L4_B_2_3 ) )
return - EINVAL ;
hena | = ( ( u64 ) 1 < < I40E_FILTER_PCTYPE_NONF_IPV4_OTHER ) ;
break ;
case AH_ESP_V6_FLOW :
case AH_V6_FLOW :
case ESP_V6_FLOW :
case SCTP_V6_FLOW :
if ( ( nfc - > data & RXH_L4_B_0_1 ) | |
( nfc - > data & RXH_L4_B_2_3 ) )
return - EINVAL ;
hena | = ( ( u64 ) 1 < < I40E_FILTER_PCTYPE_NONF_IPV6_OTHER ) ;
break ;
case IPV4_FLOW :
hena | = ( ( u64 ) 1 < < I40E_FILTER_PCTYPE_NONF_IPV4_OTHER ) |
( ( u64 ) 1 < < I40E_FILTER_PCTYPE_FRAG_IPV4 ) ;
break ;
case IPV6_FLOW :
hena | = ( ( u64 ) 1 < < I40E_FILTER_PCTYPE_NONF_IPV6_OTHER ) |
( ( u64 ) 1 < < I40E_FILTER_PCTYPE_FRAG_IPV6 ) ;
break ;
default :
return - EINVAL ;
}
wr32 ( hw , I40E_PFQF_HENA ( 0 ) , ( u32 ) hena ) ;
wr32 ( hw , I40E_PFQF_HENA ( 1 ) , ( u32 ) ( hena > > 32 ) ) ;
i40e_flush ( hw ) ;
return 0 ;
}
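/* Illustrative example (assumed ethtool usage, not defined in this file):
 *
 *	ethtool -N <ifname> rx-flow-hash tcp4 sdfn
 *
 * requests hashing on src/dst IP plus both L4 port halves, i.e. RXH_IP_SRC |
 * RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3, which the handler above translates
 * into the PFQF_HENA bits.
 */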
/**
* i40e_match_fdir_input_set - Match a new filter against an existing one
* @ rule : The filter already added
* @ input : The new filter to compare against
*
* Returns true if the two input sets match
* */
static bool i40e_match_fdir_input_set ( struct i40e_fdir_filter * rule ,
struct i40e_fdir_filter * input )
{
if ( ( rule - > dst_ip [ 0 ] ! = input - > dst_ip [ 0 ] ) | |
( rule - > src_ip [ 0 ] ! = input - > src_ip [ 0 ] ) | |
( rule - > dst_port ! = input - > dst_port ) | |
( rule - > src_port ! = input - > src_port ) )
return false ;
return true ;
}
/**
* i40e_update_ethtool_fdir_entry - Updates the fdir filter entry
* @ vsi : Pointer to the targeted VSI
* @ input : The filter to update or NULL to indicate deletion
* @ sw_idx : Software index to the filter
* @ cmd : The command to get or set Rx flow classification rules
*
* This function updates ( or deletes ) a Flow Director entry from
* the hlist of the corresponding PF
*
* Returns 0 on success
* */
static int i40e_update_ethtool_fdir_entry ( struct i40e_vsi * vsi ,
struct i40e_fdir_filter * input ,
u16 sw_idx ,
struct ethtool_rxnfc * cmd )
{
struct i40e_fdir_filter * rule , * parent ;
struct i40e_pf * pf = vsi - > back ;
struct hlist_node * node2 ;
int err = - EINVAL ;
parent = NULL ;
rule = NULL ;
hlist_for_each_entry_safe ( rule , node2 ,
& pf - > fdir_filter_list , fdir_node ) {
/* hash found, or no matching entry */
if ( rule - > fd_id > = sw_idx )
break ;
parent = rule ;
}
/* if there is an old rule occupying our place remove it */
if ( rule & & ( rule - > fd_id = = sw_idx ) ) {
if ( input & & ! i40e_match_fdir_input_set ( rule , input ) )
err = i40e_add_del_fdir ( vsi , rule , false ) ;
else if ( ! input )
err = i40e_add_del_fdir ( vsi , rule , false ) ;
hlist_del ( & rule - > fdir_node ) ;
kfree ( rule ) ;
pf - > fdir_pf_active_filters - - ;
}
/* If no input this was a delete ; err should be 0 if a rule was
 * successfully found and removed from the list , else - EINVAL
 */
if ( ! input )
return err ;
/* initialize node and set software index */
INIT_HLIST_NODE ( & input - > fdir_node ) ;
/* add filter to the list */
if ( parent )
hlist_add_behind ( & input - > fdir_node , & parent - > fdir_node ) ;
else
hlist_add_head ( & input - > fdir_node ,
& pf - > fdir_filter_list ) ;
/* update counts */
pf - > fdir_pf_active_filters + + ;
return 0 ;
}
/**
* i40e_del_fdir_entry - Deletes a Flow Director filter entry
* @ vsi : Pointer to the targeted VSI
* @ cmd : The command to get or set Rx flow classification rules
*
* The function removes a Flow Director filter entry from the
* hlist of the corresponding PF
*
* Returns 0 on success
*/
static int i40e_del_fdir_entry ( struct i40e_vsi * vsi ,
struct ethtool_rxnfc * cmd )
{
struct ethtool_rx_flow_spec * fsp =
( struct ethtool_rx_flow_spec * ) & cmd - > fs ;
struct i40e_pf * pf = vsi - > back ;
int ret = 0 ;
if ( test_bit ( __I40E_RESET_RECOVERY_PENDING , & pf - > state ) | |
test_bit ( __I40E_RESET_INTR_RECEIVED , & pf - > state ) )
return - EBUSY ;
if ( test_bit ( __I40E_FD_FLUSH_REQUESTED , & pf - > state ) )
return - EBUSY ;
ret = i40e_update_ethtool_fdir_entry ( vsi , NULL , fsp - > location , cmd ) ;
i40e_fdir_check_and_reenable ( pf ) ;
return ret ;
}
/**
* i40e_add_fdir_ethtool - Add / Remove Flow Director filters
* @ vsi : pointer to the targeted VSI
* @ cmd : command to get or set RX flow classification rules
*
* Add Flow Director filters for a specific flow spec based on their
* protocol . Returns 0 if the filters were successfully added .
* */
static int i40e_add_fdir_ethtool ( struct i40e_vsi * vsi ,
struct ethtool_rxnfc * cmd )
{
struct ethtool_rx_flow_spec * fsp ;
struct i40e_fdir_filter * input ;
struct i40e_pf * pf ;
int ret = - EINVAL ;
if ( ! vsi )
return - EINVAL ;
pf = vsi - > back ;
if ( ! ( pf - > flags & I40E_FLAG_FD_SB_ENABLED ) )
return - EOPNOTSUPP ;
if ( pf - > auto_disable_flags & I40E_FLAG_FD_SB_ENABLED )
return - ENOSPC ;
if ( test_bit ( __I40E_RESET_RECOVERY_PENDING , & pf - > state ) | |
test_bit ( __I40E_RESET_INTR_RECEIVED , & pf - > state ) )
return - EBUSY ;
if ( test_bit ( __I40E_FD_FLUSH_REQUESTED , & pf - > state ) )
return - EBUSY ;
fsp = ( struct ethtool_rx_flow_spec * ) & cmd - > fs ;
if ( fsp - > location > = ( pf - > hw . func_caps . fd_filters_best_effort +
pf - > hw . func_caps . fd_filters_guaranteed ) ) {
return - EINVAL ;
}
if ( ( fsp - > ring_cookie ! = RX_CLS_FLOW_DISC ) & &
( fsp - > ring_cookie > = vsi - > num_queue_pairs ) )
return - EINVAL ;
input = kzalloc ( sizeof ( * input ) , GFP_KERNEL ) ;
if ( ! input )
return - ENOMEM ;
input - > fd_id = fsp - > location ;
if ( fsp - > ring_cookie = = RX_CLS_FLOW_DISC )
input - > dest_ctl = I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET ;
else
input - > dest_ctl =
I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX ;
input - > q_index = fsp - > ring_cookie ;
input - > flex_off = 0 ;
input - > pctype = 0 ;
input - > dest_vsi = vsi - > id ;
input - > fd_status = I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID ;
input - > cnt_index = pf - > fd_sb_cnt_idx ;
input - > flow_type = fsp - > flow_type ;
input - > ip4_proto = fsp - > h_u . usr_ip4_spec . proto ;
/* Reverse the src and dest notion, since the HW expects them to be from
* Tx perspective whereas the input from the user is from the Rx filter view .
*/
input - > dst_port = fsp - > h_u . tcp_ip4_spec . psrc ;
input - > src_port = fsp - > h_u . tcp_ip4_spec . pdst ;
input - > dst_ip [ 0 ] = fsp - > h_u . tcp_ip4_spec . ip4src ;
input - > src_ip [ 0 ] = fsp - > h_u . tcp_ip4_spec . ip4dst ;
ret = i40e_add_del_fdir ( vsi , input , true ) ;
if ( ret )
kfree ( input ) ;
else
i40e_update_ethtool_fdir_entry ( vsi , input , fsp - > location , NULL ) ;
return ret ;
}
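/* Illustrative rule-insertion example (assumed ethtool usage, not defined here):
 *
 *	ethtool -N <ifname> flow-type tcp4 src-ip 192.168.10.1 \
 *		dst-ip 192.168.10.2 src-port 2000 dst-port 2001 action 4 loc 32
 *
 * adds a Flow Director rule through the handler above; "action -1" would drop
 * matching packets, and "loc" is checked against the guaranteed plus
 * best-effort filter budget.
 */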
/**
* i40e_set_rxnfc - command to set RX flow classification rules
* @ netdev : network interface device structure
* @ cmd : ethtool rxnfc command
*
* Returns Success if the command is supported .
* */
static int i40e_set_rxnfc ( struct net_device * netdev , struct ethtool_rxnfc * cmd )
{
struct i40e_netdev_priv * np = netdev_priv ( netdev ) ;
struct i40e_vsi * vsi = np - > vsi ;
struct i40e_pf * pf = vsi - > back ;
int ret = - EOPNOTSUPP ;
switch ( cmd - > cmd ) {
case ETHTOOL_SRXFH :
ret = i40e_set_rss_hash_opt ( pf , cmd ) ;
break ;
case ETHTOOL_SRXCLSRLINS :
ret = i40e_add_fdir_ethtool ( vsi , cmd ) ;
break ;
case ETHTOOL_SRXCLSRLDEL :
ret = i40e_del_fdir_entry ( vsi , cmd ) ;
break ;
default :
break ;
}
return ret ;
}
/**
* i40e_max_channels - get Max number of combined channels supported
* @ vsi : vsi pointer
* */
static unsigned int i40e_max_channels ( struct i40e_vsi * vsi )
{
/* TODO: This code assumes DCB and FD are disabled for now. */
return vsi - > alloc_queue_pairs ;
}
/**
* i40e_get_channels - Get the current channels enabled and max supported etc .
* @ netdev : network interface device structure
* @ ch : ethtool channels structure
*
* We don ' t support separate tx and rx queues as channels . The other count
* represents how many queues are being used for control . max_combined counts
* how many queue pairs we can support . They may not be mapped 1 to 1 with
* q_vectors since we support a lot more queue pairs than q_vectors .
* */
static void i40e_get_channels ( struct net_device * dev ,
struct ethtool_channels * ch )
{
struct i40e_netdev_priv * np = netdev_priv ( dev ) ;
struct i40e_vsi * vsi = np - > vsi ;
struct i40e_pf * pf = vsi - > back ;
/* report maximum channels */
ch - > max_combined = i40e_max_channels ( vsi ) ;
/* report info for other vector */
ch - > other_count = ( pf - > flags & I40E_FLAG_FD_SB_ENABLED ) ? 1 : 0 ;
ch - > max_other = ch - > other_count ;
/* Note: This code assumes DCB is disabled for now. */
ch - > combined_count = vsi - > num_queue_pairs ;
}
/**
* i40e_set_channels - Set the new channels count .
* @ netdev : network interface device structure
* @ ch : ethtool channels structure
*
* The new channels count may not be the same as requested by the user
* since it gets rounded down to a power of 2 value .
* */
static int i40e_set_channels ( struct net_device * dev ,
struct ethtool_channels * ch )
{
struct i40e_netdev_priv * np = netdev_priv ( dev ) ;
unsigned int count = ch - > combined_count ;
struct i40e_vsi * vsi = np - > vsi ;
struct i40e_pf * pf = vsi - > back ;
int new_count ;
/* We do not support setting channels for any other VSI at present */
if ( vsi - > type ! = I40E_VSI_MAIN )
return - EINVAL ;
/* verify they are not requesting separate vectors */
if ( ! count | | ch - > rx_count | | ch - > tx_count )
return - EINVAL ;
/* verify other_count has not changed */
if ( ch - > other_count ! = ( ( pf - > flags & I40E_FLAG_FD_SB_ENABLED ) ? 1 : 0 ) )
return - EINVAL ;
/* verify the number of channels does not exceed hardware limits */
if ( count > i40e_max_channels ( vsi ) )
return - EINVAL ;
/* update feature limits from largest to smallest supported values */
/* TODO: Flow director limit, DCB etc */
/* cap RSS limit */
if ( count > pf - > rss_size_max )
count = pf - > rss_size_max ;
/* use rss_reconfig to rebuild with new queue count and update traffic
* class queue mapping
*/
new_count = i40e_reconfig_rss_queues ( pf , count ) ;
if ( new_count > 0 )
return 0 ;
else
return - EINVAL ;
}
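/* Illustrative example of querying and changing channels (assumed ethtool usage):
 *
 *	ethtool -l <ifname>
 *	ethtool -L <ifname> combined 8
 *
 * query and change the combined queue count through the handlers above; the
 * result may be rounded down when the RSS rebuild settles on a power-of-two
 * queue count.
 */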
static const struct ethtool_ops i40e_ethtool_ops = {
. get_settings = i40e_get_settings ,
. set_settings = i40e_set_settings ,
. get_drvinfo = i40e_get_drvinfo ,
. get_regs_len = i40e_get_regs_len ,
. get_regs = i40e_get_regs ,
. nway_reset = i40e_nway_reset ,
. get_link = ethtool_op_get_link ,
. get_wol = i40e_get_wol ,
. set_wol = i40e_set_wol ,
. set_eeprom = i40e_set_eeprom ,
. get_eeprom_len = i40e_get_eeprom_len ,
. get_eeprom = i40e_get_eeprom ,
. get_ringparam = i40e_get_ringparam ,
. set_ringparam = i40e_set_ringparam ,
. get_pauseparam = i40e_get_pauseparam ,
. set_pauseparam = i40e_set_pauseparam ,
. get_msglevel = i40e_get_msglevel ,
. set_msglevel = i40e_set_msglevel ,
. get_rxnfc = i40e_get_rxnfc ,
. set_rxnfc = i40e_set_rxnfc ,
. self_test = i40e_diag_test ,
. get_strings = i40e_get_strings ,
. set_phys_id = i40e_set_phys_id ,
. get_sset_count = i40e_get_sset_count ,
. get_ethtool_stats = i40e_get_ethtool_stats ,
. get_coalesce = i40e_get_coalesce ,
. set_coalesce = i40e_set_coalesce ,
. get_channels = i40e_get_channels ,
. set_channels = i40e_set_channels ,
. get_ts_info = i40e_get_ts_info ,
} ;
void i40e_set_ethtool_ops ( struct net_device * netdev )
{
netdev - > ethtool_ops = & i40e_ethtool_ops ;
}