Merge branch 'xgbe-next'
Tom Lendacky says:

====================
amd-xgbe: AMD XGBE driver updates 2015-04-09

The following series of patches includes functional updates and changes
to the driver.

- Allow ethtool rx-frames coalescing to be changed while the device is up
- Consolidate initialization routine into the init function
- Add support for the TX watchdog timeout

This patch series is based on net-next.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 2e9095234d
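The core of the rx-frames change is that the interrupt-on-completion decision is now made per descriptor in xgbe_rx_desc_reset(), using the current coalescing settings, rather than being fixed once at ring-init time; that is what allows rx-frames to be changed while the interface is up. Below is a minimal standalone sketch of that decision, mirroring the logic added in the diff; the helper name and free-standing form are illustrative only, not part of the driver.

/* Sketch only: mirrors the per-descriptor INTE decision added to
 * xgbe_rx_desc_reset() in this series; the helper itself is hypothetical.
 */
unsigned int rx_desc_wants_interrupt(unsigned int rx_usecs,
				     unsigned int rx_frames,
				     unsigned int index)
{
	/* No coalescing configured: interrupt for every descriptor */
	if (!rx_usecs && !rx_frames)
		return 1;

	/* Frame coalescing: interrupt on every rx_frames-th descriptor */
	if (rx_frames && !((index + 1) % rx_frames))
		return 1;

	return 0;
}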
@@ -853,6 +853,22 @@ static int xgbe_set_mac_address(struct xgbe_prv_data *pdata, u8 *addr)
 	return 0;
 }
 
+static int xgbe_config_rx_mode(struct xgbe_prv_data *pdata)
+{
+	struct net_device *netdev = pdata->netdev;
+	unsigned int pr_mode, am_mode;
+
+	pr_mode = ((netdev->flags & IFF_PROMISC) != 0);
+	am_mode = ((netdev->flags & IFF_ALLMULTI) != 0);
+
+	xgbe_set_promiscuous_mode(pdata, pr_mode);
+	xgbe_set_all_multicast_mode(pdata, am_mode);
+
+	xgbe_add_mac_addresses(pdata);
+
+	return 0;
+}
+
 static int xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
 			      int mmd_reg)
 {
@@ -1101,9 +1117,24 @@ static void xgbe_tx_desc_init(struct xgbe_channel *channel)
 	DBGPR("<--tx_desc_init\n");
 }
 
-static void xgbe_rx_desc_reset(struct xgbe_ring_data *rdata)
+static void xgbe_rx_desc_reset(struct xgbe_prv_data *pdata,
+			       struct xgbe_ring_data *rdata, unsigned int index)
 {
 	struct xgbe_ring_desc *rdesc = rdata->rdesc;
+	unsigned int rx_usecs = pdata->rx_usecs;
+	unsigned int rx_frames = pdata->rx_frames;
+	unsigned int inte;
+
+	if (!rx_usecs && !rx_frames) {
+		/* No coalescing, interrupt for every descriptor */
+		inte = 1;
+	} else {
+		/* Set interrupt based on Rx frame coalescing setting */
+		if (rx_frames && !((index + 1) % rx_frames))
+			inte = 1;
+		else
+			inte = 0;
+	}
 
 	/* Reset the Rx descriptor
 	 * Set buffer 1 (lo) address to header dma address (lo)
@@ -1117,8 +1148,7 @@ static void xgbe_rx_desc_reset(struct xgbe_ring_data *rdata)
 	rdesc->desc2 = cpu_to_le32(lower_32_bits(rdata->rx.buf.dma));
 	rdesc->desc3 = cpu_to_le32(upper_32_bits(rdata->rx.buf.dma));
 
-	XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE,
-			  rdata->interrupt ? 1 : 0);
+	XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, inte);
 
 	/* Since the Rx DMA engine is likely running, make sure everything
 	 * is written to the descriptor(s) before setting the OWN bit
@@ -1138,26 +1168,16 @@ static void xgbe_rx_desc_init(struct xgbe_channel *channel)
 	struct xgbe_ring *ring = channel->rx_ring;
 	struct xgbe_ring_data *rdata;
 	unsigned int start_index = ring->cur;
-	unsigned int rx_coalesce, rx_frames;
 	unsigned int i;
 
 	DBGPR("-->rx_desc_init\n");
 
-	rx_coalesce = (pdata->rx_riwt || pdata->rx_frames) ? 1 : 0;
-	rx_frames = pdata->rx_frames;
-
 	/* Initialize all descriptors */
 	for (i = 0; i < ring->rdesc_count; i++) {
 		rdata = XGBE_GET_DESC_DATA(ring, i);
 
-		/* Set interrupt on completion bit as appropriate */
-		if (rx_coalesce && (!rx_frames || ((i + 1) % rx_frames)))
-			rdata->interrupt = 0;
-		else
-			rdata->interrupt = 1;
-
 		/* Initialize Rx descriptor */
-		xgbe_rx_desc_reset(rdata);
+		xgbe_rx_desc_reset(pdata, rdata, i);
 	}
 
 	/* Update the total number of Rx descriptors */
@@ -2804,6 +2824,7 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
 	 * Initialize MAC related features
 	 */
 	xgbe_config_mac_address(pdata);
+	xgbe_config_rx_mode(pdata);
 	xgbe_config_jumbo_enable(pdata);
 	xgbe_config_flow_control(pdata);
 	xgbe_config_mac_speed(pdata);
@@ -2823,10 +2844,8 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
 
 	hw_if->tx_complete = xgbe_tx_complete;
 
-	hw_if->set_promiscuous_mode = xgbe_set_promiscuous_mode;
-	hw_if->set_all_multicast_mode = xgbe_set_all_multicast_mode;
-	hw_if->add_mac_addresses = xgbe_add_mac_addresses;
 	hw_if->set_mac_address = xgbe_set_mac_address;
+	hw_if->config_rx_mode = xgbe_config_rx_mode;
 
 	hw_if->enable_rx_csum = xgbe_enable_rx_csum;
 	hw_if->disable_rx_csum = xgbe_disable_rx_csum;
@@ -129,7 +129,6 @@
 
 static int xgbe_one_poll(struct napi_struct *, int);
 static int xgbe_all_poll(struct napi_struct *, int);
-static void xgbe_set_rx_mode(struct net_device *);
 
 static int xgbe_alloc_channels(struct xgbe_prv_data *pdata)
 {
@@ -952,8 +951,6 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
 
 	DBGPR("-->xgbe_start\n");
 
-	xgbe_set_rx_mode(netdev);
-
 	hw_if->init(pdata);
 
 	phy_start(pdata->phydev);
@@ -1533,17 +1530,10 @@ static void xgbe_set_rx_mode(struct net_device *netdev)
 {
 	struct xgbe_prv_data *pdata = netdev_priv(netdev);
 	struct xgbe_hw_if *hw_if = &pdata->hw_if;
-	unsigned int pr_mode, am_mode;
 
 	DBGPR("-->xgbe_set_rx_mode\n");
 
-	pr_mode = ((netdev->flags & IFF_PROMISC) != 0);
-	am_mode = ((netdev->flags & IFF_ALLMULTI) != 0);
-
-	hw_if->set_promiscuous_mode(pdata, pr_mode);
-	hw_if->set_all_multicast_mode(pdata, am_mode);
-
-	hw_if->add_mac_addresses(pdata);
+	hw_if->config_rx_mode(pdata);
 
 	DBGPR("<--xgbe_set_rx_mode\n");
 }
@@ -1610,6 +1600,14 @@ static int xgbe_change_mtu(struct net_device *netdev, int mtu)
 	return 0;
 }
 
+static void xgbe_tx_timeout(struct net_device *netdev)
+{
+	struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+	netdev_warn(netdev, "tx timeout, device restarting\n");
+	schedule_work(&pdata->restart_work);
+}
+
 static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev,
 						  struct rtnl_link_stats64 *s)
 {
@@ -1774,6 +1772,7 @@ static const struct net_device_ops xgbe_netdev_ops = {
 	.ndo_validate_addr = eth_validate_addr,
 	.ndo_do_ioctl = xgbe_ioctl,
 	.ndo_change_mtu = xgbe_change_mtu,
+	.ndo_tx_timeout = xgbe_tx_timeout,
 	.ndo_get_stats64 = xgbe_get_stats64,
 	.ndo_vlan_rx_add_vid = xgbe_vlan_rx_add_vid,
 	.ndo_vlan_rx_kill_vid = xgbe_vlan_rx_kill_vid,
@@ -1806,7 +1805,7 @@ static void xgbe_rx_refresh(struct xgbe_channel *channel)
 		if (desc_if->map_rx_buffer(pdata, ring, rdata))
 			break;
 
-		hw_if->rx_desc_reset(rdata);
+		hw_if->rx_desc_reset(pdata, rdata, ring->dirty);
 
 		ring->dirty++;
 	}
@@ -424,16 +424,6 @@ static int xgbe_set_coalesce(struct net_device *netdev,
 	    (ec->rate_sample_interval))
 		return -EOPNOTSUPP;
 
-	/* Can only change rx-frames when interface is down (see
-	 * rx_descriptor_init in xgbe-dev.c)
-	 */
-	rx_frames = pdata->rx_frames;
-	if (rx_frames != ec->rx_max_coalesced_frames && netif_running(netdev)) {
-		netdev_alert(netdev,
-			     "interface must be down to change rx-frames\n");
-		return -EINVAL;
-	}
-
 	rx_riwt = hw_if->usec_to_riwt(pdata, ec->rx_coalesce_usecs);
 	rx_usecs = ec->rx_coalesce_usecs;
 	rx_frames = ec->rx_max_coalesced_frames;
@@ -491,6 +491,9 @@ static int xgbe_probe(struct platform_device *pdev)
 
 	netdev->priv_flags |= IFF_UNICAST_FLT;
 
+	/* Use default watchdog timeout */
+	netdev->watchdog_timeo = 0;
+
 	xgbe_init_rx_coalesce(pdata);
 	xgbe_init_tx_coalesce(pdata);
 
@@ -325,8 +325,6 @@ struct xgbe_ring_data {
 	struct xgbe_tx_ring_data tx;	/* Tx-related data */
 	struct xgbe_rx_ring_data rx;	/* Rx-related data */
 
-	unsigned int interrupt;		/* Interrupt indicator */
-
 	unsigned int mapped_as_page;
 
 	/* Incomplete receive save location.  If the budget is exhausted
@@ -497,10 +495,8 @@ struct xgbe_mmc_stats {
 struct xgbe_hw_if {
 	int (*tx_complete)(struct xgbe_ring_desc *);
 
-	int (*set_promiscuous_mode)(struct xgbe_prv_data *, unsigned int);
-	int (*set_all_multicast_mode)(struct xgbe_prv_data *, unsigned int);
-	int (*add_mac_addresses)(struct xgbe_prv_data *);
 	int (*set_mac_address)(struct xgbe_prv_data *, u8 *addr);
+	int (*config_rx_mode)(struct xgbe_prv_data *);
 
 	int (*enable_rx_csum)(struct xgbe_prv_data *);
 	int (*disable_rx_csum)(struct xgbe_prv_data *);
@@ -536,8 +532,9 @@ struct xgbe_hw_if {
 	int (*dev_read)(struct xgbe_channel *);
 	void (*tx_desc_init)(struct xgbe_channel *);
 	void (*rx_desc_init)(struct xgbe_channel *);
-	void (*rx_desc_reset)(struct xgbe_ring_data *);
 	void (*tx_desc_reset)(struct xgbe_ring_data *);
+	void (*rx_desc_reset)(struct xgbe_prv_data *, struct xgbe_ring_data *,
+			      unsigned int);
 	int (*is_last_desc)(struct xgbe_ring_desc *);
 	int (*is_context_desc)(struct xgbe_ring_desc *);
 	void (*tx_start_xmit)(struct xgbe_channel *, struct xgbe_ring *);