// SPDX-License-Identifier: GPL-2.0-only
/*
 * Broadcom BCM7xxx System Port Ethernet MAC driver
 *
 * Copyright (C) 2014 Broadcom Corporation
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <net/dsa.h>
#include <net/ip.h>
#include <net/ipv6.h>

#include "bcmsysport.h"

/* I/O accessors register helpers */
#define BCM_SYSPORT_IO_MACRO(name, offset) \
static inline u32 name##_readl(struct bcm_sysport_priv *priv, u32 off)	\
{									\
	u32 reg = readl_relaxed(priv->base + offset + off);		\
	return reg;							\
}									\
static inline void name##_writel(struct bcm_sysport_priv *priv,	\
				  u32 val, u32 off)			\
{									\
	writel_relaxed(val, priv->base + offset + off);			\
}									\

BCM_SYSPORT_IO_MACRO(intrl2_0, SYS_PORT_INTRL2_0_OFFSET);
BCM_SYSPORT_IO_MACRO(intrl2_1, SYS_PORT_INTRL2_1_OFFSET);
BCM_SYSPORT_IO_MACRO(umac, SYS_PORT_UMAC_OFFSET);
BCM_SYSPORT_IO_MACRO(gib, SYS_PORT_GIB_OFFSET);
BCM_SYSPORT_IO_MACRO(tdma, SYS_PORT_TDMA_OFFSET);
BCM_SYSPORT_IO_MACRO(rxchk, SYS_PORT_RXCHK_OFFSET);
BCM_SYSPORT_IO_MACRO(txchk, SYS_PORT_TXCHK_OFFSET);
BCM_SYSPORT_IO_MACRO(rbuf, SYS_PORT_RBUF_OFFSET);
BCM_SYSPORT_IO_MACRO(tbuf, SYS_PORT_TBUF_OFFSET);
BCM_SYSPORT_IO_MACRO(topctrl, SYS_PORT_TOPCTRL_OFFSET);

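/* Note (illustration, not part of the original driver): each
 * BCM_SYSPORT_IO_MACRO() invocation above generates a <name>_readl()/
 * <name>_writel() accessor pair bound to one register block, e.g.
 * umac_readl(priv, UMAC_CMD) reads priv->base + SYS_PORT_UMAC_OFFSET +
 * UMAC_CMD through the relaxed MMIO accessors.
 */
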
/* On SYSTEMPORT Lite, any register after RDMA_STATUS has the exact
 * same layout, except it has been moved by 4 bytes up, *sigh*
 */
static inline u32 rdma_readl(struct bcm_sysport_priv *priv, u32 off)
{
	if (priv->is_lite && off >= RDMA_STATUS)
		off += 4;
	return readl_relaxed(priv->base + SYS_PORT_RDMA_OFFSET + off);
}

static inline void rdma_writel(struct bcm_sysport_priv *priv, u32 val, u32 off)
{
	if (priv->is_lite && off >= RDMA_STATUS)
		off += 4;
	writel_relaxed(val, priv->base + SYS_PORT_RDMA_OFFSET + off);
}

static inline u32 tdma_control_bit(struct bcm_sysport_priv *priv, u32 bit)
{
	if (!priv->is_lite) {
		return BIT(bit);
	} else {
		if (bit >= ACB_ALGO)
			return BIT(bit + 1);
		else
			return BIT(bit);
	}
}

/* L2-interrupt masking/unmasking helpers, which automatically save the
 * applied mask in a software copy to avoid CPU_MASK_STATUS reads in
 * hot-paths.
 */
#define BCM_SYSPORT_INTR_L2(which)	\
static inline void intrl2_##which##_mask_clear(struct bcm_sysport_priv *priv, \
						u32 mask)		\
{									\
	priv->irq##which##_mask &= ~(mask);				\
	intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR);	\
}									\
static inline void intrl2_##which##_mask_set(struct bcm_sysport_priv *priv, \
						u32 mask)		\
{									\
	intrl2_## which##_writel(priv, mask, INTRL2_CPU_MASK_SET);	\
	priv->irq##which##_mask |= (mask);				\
}									\

BCM_SYSPORT_INTR_L2(0)
BCM_SYSPORT_INTR_L2(1)

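/* Note (illustration, not part of the original driver): BCM_SYSPORT_INTR_L2(0)
 * and BCM_SYSPORT_INTR_L2(1) above generate the intrl2_0_mask_set()/
 * intrl2_0_mask_clear() and intrl2_1_mask_set()/intrl2_1_mask_clear() helpers.
 * For example, intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE) writes the bit to
 * INTRL2_CPU_MASK_SET and records it in priv->irq0_mask, the software copy
 * mentioned in the comment above.
 */
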
/* Register accesses to GISB/RBUS registers are expensive (few hundred
 * nanoseconds), so keep the check for 64-bit explicit here to save
 * one register write per packet on 32-bit platforms.
 */
static inline void dma_desc_set_addr(struct bcm_sysport_priv *priv,
				     void __iomem *d,
				     dma_addr_t addr)
{
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	writel_relaxed(upper_32_bits(addr) & DESC_ADDR_HI_MASK,
		       d + DESC_ADDR_HI_STATUS_LEN);
#endif
	writel_relaxed(lower_32_bits(addr), d + DESC_ADDR_LO);
}

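/* Note (illustration, not part of the original driver): with
 * CONFIG_PHYS_ADDR_T_64BIT a 40-bit DMA address such as 0x1_2345_6780 is
 * split by dma_desc_set_addr() into 0x1 (masked with DESC_ADDR_HI_MASK and
 * written to DESC_ADDR_HI_STATUS_LEN) and 0x23456780 (written to
 * DESC_ADDR_LO); on 32-bit platforms only the low-word write is issued.
 */
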
/* Ethtool operations */
static void bcm_sysport_set_rx_csum(struct net_device *dev,
				    netdev_features_t wanted)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	priv->rx_chk_en = !!(wanted & NETIF_F_RXCSUM);
	reg = rxchk_readl(priv, RXCHK_CONTROL);
	/* Clear L2 header checks, which would prevent BPDUs
	 * from being received.
	 */
	reg &= ~RXCHK_L2_HDR_DIS;
	if (priv->rx_chk_en)
		reg |= RXCHK_EN;
	else
		reg &= ~RXCHK_EN;

	/* If UniMAC forwards CRC, we need to skip over it to get
	 * a valid CHK bit to be set in the per-packet status word
	 */
	if (priv->rx_chk_en && priv->crc_fwd)
		reg |= RXCHK_SKIP_FCS;
	else
		reg &= ~RXCHK_SKIP_FCS;

	/* If Broadcom tags are enabled (e.g: using a switch), make
	 * sure we tell the RXCHK hardware to expect a 4-byte Broadcom
	 * tag after the Ethernet MAC Source Address.
	 */
	if (netdev_uses_dsa(dev))
		reg |= RXCHK_BRCM_TAG_EN;
	else
		reg &= ~RXCHK_BRCM_TAG_EN;

	rxchk_writel(priv, reg, RXCHK_CONTROL);
}

static void bcm_sysport_set_tx_csum(struct net_device *dev,
				    netdev_features_t wanted)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	/* Hardware transmit checksum requires us to enable the Transmit status
	 * block prepended to the packet contents
	 */
	priv->tsb_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
	reg = tdma_readl(priv, TDMA_CONTROL);
	if (priv->tsb_en)
		reg |= tdma_control_bit(priv, TSB_EN);
	else
		reg &= ~tdma_control_bit(priv, TSB_EN);
	tdma_writel(priv, reg, TDMA_CONTROL);
}

static int bcm_sysport_set_features(struct net_device *dev,
				    netdev_features_t features)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	/* Read CRC forward */
	if (!priv->is_lite)
		priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);
	else
		priv->crc_fwd = !((gib_readl(priv, GIB_CONTROL) &
				  GIB_FCS_STRIP) >> GIB_FCS_STRIP_SHIFT);

	bcm_sysport_set_rx_csum(dev, features);
	bcm_sysport_set_tx_csum(dev, features);

	return 0;
}

/* Hardware counters must be kept in sync because the order/offset
 * is important here (order in structure declaration = order in hardware)
 */
static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
	/* general stats */
	STAT_NETDEV64(rx_packets),
	STAT_NETDEV64(tx_packets),
	STAT_NETDEV64(rx_bytes),
	STAT_NETDEV64(tx_bytes),
	STAT_NETDEV(rx_errors),
	STAT_NETDEV(tx_errors),
	STAT_NETDEV(rx_dropped),
	STAT_NETDEV(tx_dropped),
	STAT_NETDEV(multicast),
	/* UniMAC RSV counters */
	STAT_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
	STAT_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
	STAT_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
	STAT_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
	STAT_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
	STAT_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
	STAT_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
	STAT_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
	STAT_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
	STAT_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
	STAT_MIB_RX("rx_pkts", mib.rx.pkt),
	STAT_MIB_RX("rx_bytes", mib.rx.bytes),
	STAT_MIB_RX("rx_multicast", mib.rx.mca),
	STAT_MIB_RX("rx_broadcast", mib.rx.bca),
	STAT_MIB_RX("rx_fcs", mib.rx.fcs),
	STAT_MIB_RX("rx_control", mib.rx.cf),
	STAT_MIB_RX("rx_pause", mib.rx.pf),
	STAT_MIB_RX("rx_unknown", mib.rx.uo),
	STAT_MIB_RX("rx_align", mib.rx.aln),
	STAT_MIB_RX("rx_outrange", mib.rx.flr),
	STAT_MIB_RX("rx_code", mib.rx.cde),
	STAT_MIB_RX("rx_carrier", mib.rx.fcr),
	STAT_MIB_RX("rx_oversize", mib.rx.ovr),
	STAT_MIB_RX("rx_jabber", mib.rx.jbr),
	STAT_MIB_RX("rx_mtu_err", mib.rx.mtue),
	STAT_MIB_RX("rx_good_pkts", mib.rx.pok),
	STAT_MIB_RX("rx_unicast", mib.rx.uc),
	STAT_MIB_RX("rx_ppp", mib.rx.ppp),
	STAT_MIB_RX("rx_crc", mib.rx.rcrc),
	/* UniMAC TSV counters */
	STAT_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
	STAT_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
	STAT_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
	STAT_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
	STAT_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
	STAT_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
	STAT_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
	STAT_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
	STAT_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
	STAT_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
	STAT_MIB_TX("tx_pkts", mib.tx.pkts),
	STAT_MIB_TX("tx_multicast", mib.tx.mca),
	STAT_MIB_TX("tx_broadcast", mib.tx.bca),
	STAT_MIB_TX("tx_pause", mib.tx.pf),
	STAT_MIB_TX("tx_control", mib.tx.cf),
	STAT_MIB_TX("tx_fcs_err", mib.tx.fcs),
	STAT_MIB_TX("tx_oversize", mib.tx.ovr),
	STAT_MIB_TX("tx_defer", mib.tx.drf),
	STAT_MIB_TX("tx_excess_defer", mib.tx.edf),
	STAT_MIB_TX("tx_single_col", mib.tx.scl),
	STAT_MIB_TX("tx_multi_col", mib.tx.mcl),
	STAT_MIB_TX("tx_late_col", mib.tx.lcl),
	STAT_MIB_TX("tx_excess_col", mib.tx.ecl),
	STAT_MIB_TX("tx_frags", mib.tx.frg),
	STAT_MIB_TX("tx_total_col", mib.tx.ncl),
	STAT_MIB_TX("tx_jabber", mib.tx.jbr),
	STAT_MIB_TX("tx_bytes", mib.tx.bytes),
	STAT_MIB_TX("tx_good_pkts", mib.tx.pok),
	STAT_MIB_TX("tx_unicast", mib.tx.uc),
	/* UniMAC RUNT counters */
	STAT_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
	STAT_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
	STAT_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
	STAT_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
	/* RXCHK misc statistics */
	STAT_RXCHK("rxchk_bad_csum", mib.rxchk_bad_csum, RXCHK_BAD_CSUM_CNTR),
	STAT_RXCHK("rxchk_other_pkt_disc", mib.rxchk_other_pkt_disc,
		   RXCHK_OTHER_DISC_CNTR),
	/* RBUF misc statistics */
	STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR),
	STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR),
	STAT_MIB_SOFT("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
	STAT_MIB_SOFT("rx_dma_failed", mib.rx_dma_failed),
	STAT_MIB_SOFT("tx_dma_failed", mib.tx_dma_failed),
	STAT_MIB_SOFT("tx_realloc_tsb", mib.tx_realloc_tsb),
	STAT_MIB_SOFT("tx_realloc_tsb_failed", mib.tx_realloc_tsb_failed),
	/* Per TX-queue statistics are dynamically appended */
};

#define BCM_SYSPORT_STATS_LEN	ARRAY_SIZE(bcm_sysport_gstrings_stats)

static void bcm_sysport_get_drvinfo(struct net_device *dev,
				    struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
}

static u32 bcm_sysport_get_msglvl(struct net_device *dev)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	return priv->msg_enable;
}

static void bcm_sysport_set_msglvl(struct net_device *dev, u32 enable)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	priv->msg_enable = enable;
}

static inline bool bcm_sysport_lite_stat_valid(enum bcm_sysport_stat_type type)
{
	switch (type) {
	case BCM_SYSPORT_STAT_NETDEV:
	case BCM_SYSPORT_STAT_NETDEV64:
	case BCM_SYSPORT_STAT_RXCHK:
	case BCM_SYSPORT_STAT_RBUF:
	case BCM_SYSPORT_STAT_SOFT:
		return true;
	default:
		return false;
	}
}

static int bcm_sysport_get_sset_count(struct net_device *dev, int string_set)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	const struct bcm_sysport_stats *s;
	unsigned int i, j;

	switch (string_set) {
	case ETH_SS_STATS:
		for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
			s = &bcm_sysport_gstrings_stats[i];
			if (priv->is_lite &&
			    !bcm_sysport_lite_stat_valid(s->type))
				continue;
			j++;
		}
		/* Include per-queue statistics */
		return j + dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT;
	default:
		return -EOPNOTSUPP;
	}
}

static void bcm_sysport_get_strings(struct net_device *dev,
				    u32 stringset, u8 *data)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	const struct bcm_sysport_stats *s;
	char buf[128];
	int i, j;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
			s = &bcm_sysport_gstrings_stats[i];
			if (priv->is_lite &&
			    !bcm_sysport_lite_stat_valid(s->type))
				continue;

			memcpy(data + j * ETH_GSTRING_LEN, s->stat_string,
			       ETH_GSTRING_LEN);
			j++;
		}

		for (i = 0; i < dev->num_tx_queues; i++) {
			snprintf(buf, sizeof(buf), "txq%d_packets", i);
			memcpy(data + j * ETH_GSTRING_LEN, buf,
			       ETH_GSTRING_LEN);
			j++;

			snprintf(buf, sizeof(buf), "txq%d_bytes", i);
			memcpy(data + j * ETH_GSTRING_LEN, buf,
			       ETH_GSTRING_LEN);
			j++;
		}
		break;
	default:
		break;
	}
}

static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
{
	int i, j = 0;

	for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
		const struct bcm_sysport_stats *s;
		u8 offset = 0;
		u32 val = 0;
		char *p;

		s = &bcm_sysport_gstrings_stats[i];
		switch (s->type) {
		case BCM_SYSPORT_STAT_NETDEV:
		case BCM_SYSPORT_STAT_NETDEV64:
		case BCM_SYSPORT_STAT_SOFT:
			continue;
		case BCM_SYSPORT_STAT_MIB_RX:
		case BCM_SYSPORT_STAT_MIB_TX:
		case BCM_SYSPORT_STAT_RUNT:
			if (priv->is_lite)
				continue;

			if (s->type != BCM_SYSPORT_STAT_MIB_RX)
				offset = UMAC_MIB_STAT_OFFSET;
			val = umac_readl(priv, UMAC_MIB_START + j + offset);
			break;
		case BCM_SYSPORT_STAT_RXCHK:
			val = rxchk_readl(priv, s->reg_offset);
			if (val == ~0)
				rxchk_writel(priv, 0, s->reg_offset);
			break;
		case BCM_SYSPORT_STAT_RBUF:
			val = rbuf_readl(priv, s->reg_offset);
			if (val == ~0)
				rbuf_writel(priv, 0, s->reg_offset);
			break;
		}

		j += s->stat_sizeof;
		p = (char *)priv + s->stat_offset;
		*(u32 *)p = val;
	}

	netif_dbg(priv, hw, priv->netdev, "updated MIB counters\n");
}

static void bcm_sysport_update_tx_stats(struct bcm_sysport_priv *priv,
					u64 *tx_bytes, u64 *tx_packets)
{
	struct bcm_sysport_tx_ring *ring;
	u64 bytes = 0, packets = 0;
	unsigned int start;
	unsigned int q;

	for (q = 0; q < priv->netdev->num_tx_queues; q++) {
		ring = &priv->tx_rings[q];
		do {
			start = u64_stats_fetch_begin_irq(&priv->syncp);
			bytes = ring->bytes;
			packets = ring->packets;
		} while (u64_stats_fetch_retry_irq(&priv->syncp, start));

		*tx_bytes += bytes;
		*tx_packets += packets;
	}
}

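/* Note (illustration, not part of the original driver): the
 * u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() loops used above
 * and in bcm_sysport_get_stats() re-read the counters until no writer raced
 * the read, so 64-bit ring->packets/ring->bytes values are snapshotted
 * consistently even on 32-bit hosts.
 */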
static void bcm_sysport_get_stats(struct net_device *dev,
				  struct ethtool_stats *stats, u64 *data)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_stats64 *stats64 = &priv->stats64;
	struct u64_stats_sync *syncp = &priv->syncp;
	struct bcm_sysport_tx_ring *ring;
	u64 tx_bytes = 0, tx_packets = 0;
	unsigned int start;
	int i, j;

	if (netif_running(dev)) {
		bcm_sysport_update_mib_counters(priv);
		bcm_sysport_update_tx_stats(priv, &tx_bytes, &tx_packets);
		stats64->tx_bytes = tx_bytes;
		stats64->tx_packets = tx_packets;
	}

	for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
		const struct bcm_sysport_stats *s;
		char *p;

		s = &bcm_sysport_gstrings_stats[i];
		if (s->type == BCM_SYSPORT_STAT_NETDEV)
			p = (char *)&dev->stats;
		else if (s->type == BCM_SYSPORT_STAT_NETDEV64)
			p = (char *)stats64;
		else
			p = (char *)priv;

		if (priv->is_lite && !bcm_sysport_lite_stat_valid(s->type))
			continue;
		p += s->stat_offset;

		if (s->stat_sizeof == sizeof(u64) &&
		    s->type == BCM_SYSPORT_STAT_NETDEV64) {
			do {
				start = u64_stats_fetch_begin_irq(syncp);
				data[i] = *(u64 *)p;
			} while (u64_stats_fetch_retry_irq(syncp, start));
		} else
			data[i] = *(u32 *)p;
		j++;
	}

	/* For SYSTEMPORT Lite since we have holes in our statistics, j would
	 * be equal to BCM_SYSPORT_STATS_LEN at the end of the loop, but it
	 * needs to point to how many total statistics we have minus the
	 * number of per TX queue statistics
	 */
	j = bcm_sysport_get_sset_count(dev, ETH_SS_STATS) -
	    dev->num_tx_queues * NUM_SYSPORT_TXQ_STAT;

	for (i = 0; i < dev->num_tx_queues; i++) {
		ring = &priv->tx_rings[i];
		data[j] = ring->packets;
		j++;
		data[j] = ring->bytes;
		j++;
	}
}

static void bcm_sysport_get_wol(struct net_device *dev,
				struct ethtool_wolinfo *wol)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);

	wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;
	wol->wolopts = priv->wolopts;

	if (!(priv->wolopts & WAKE_MAGICSECURE))
		return;

	memcpy(wol->sopass, priv->sopass, sizeof(priv->sopass));
}

static int bcm_sysport_set_wol(struct net_device *dev,
			       struct ethtool_wolinfo *wol)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct device *kdev = &priv->pdev->dev;
	u32 supported = WAKE_MAGIC | WAKE_MAGICSECURE | WAKE_FILTER;

	if (!device_can_wakeup(kdev))
		return -ENOTSUPP;

	if (wol->wolopts & ~supported)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGICSECURE)
		memcpy(priv->sopass, wol->sopass, sizeof(priv->sopass));

	/* Flag the device and relevant IRQ as wakeup capable */
	if (wol->wolopts) {
		device_set_wakeup_enable(kdev, 1);
		if (priv->wol_irq_disabled)
			enable_irq_wake(priv->wol_irq);
		priv->wol_irq_disabled = 0;
	} else {
		device_set_wakeup_enable(kdev, 0);
		/* Avoid unbalanced disable_irq_wake calls */
		if (!priv->wol_irq_disabled)
			disable_irq_wake(priv->wol_irq);
		priv->wol_irq_disabled = 1;
	}

	priv->wolopts = wol->wolopts;

	return 0;
}

static void bcm_sysport_set_rx_coalesce(struct bcm_sysport_priv *priv,
					u32 usecs, u32 pkts)
{
	u32 reg;

	reg = rdma_readl(priv, RDMA_MBDONE_INTR);
	reg &= ~(RDMA_INTR_THRESH_MASK |
		 RDMA_TIMEOUT_MASK << RDMA_TIMEOUT_SHIFT);
	reg |= pkts;
	reg |= DIV_ROUND_UP(usecs * 1000, 8192) << RDMA_TIMEOUT_SHIFT;
	rdma_writel(priv, reg, RDMA_MBDONE_INTR);
}

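/* Worked example (illustration, not part of the original driver): each
 * timeout tick is the 125 MHz system clock divided by 1024, i.e. ~8.192 us.
 * For usecs = 50, DIV_ROUND_UP(50 * 1000, 8192) = 7 ticks, an effective
 * timeout of roughly 57.3 us; bcm_sysport_get_coalesce() converts back with
 * (ticks * 8192) / 1000.
 */
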
static void bcm_sysport_set_tx_coalesce(struct bcm_sysport_tx_ring *ring,
					struct ethtool_coalesce *ec)
{
	struct bcm_sysport_priv *priv = ring->priv;
	u32 reg;

	reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(ring->index));
	reg &= ~(RING_INTR_THRESH_MASK |
		 RING_TIMEOUT_MASK << RING_TIMEOUT_SHIFT);
	reg |= ec->tx_max_coalesced_frames;
	reg |= DIV_ROUND_UP(ec->tx_coalesce_usecs * 1000, 8192) <<
	       RING_TIMEOUT_SHIFT;
	tdma_writel(priv, reg, TDMA_DESC_RING_INTR_CONTROL(ring->index));
}

static int bcm_sysport_get_coalesce(struct net_device *dev,
				    struct ethtool_coalesce *ec)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	u32 reg;

	reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(0));

	ec->tx_coalesce_usecs = (reg >> RING_TIMEOUT_SHIFT) * 8192 / 1000;
	ec->tx_max_coalesced_frames = reg & RING_INTR_THRESH_MASK;

	reg = rdma_readl(priv, RDMA_MBDONE_INTR);

	ec->rx_coalesce_usecs = (reg >> RDMA_TIMEOUT_SHIFT) * 8192 / 1000;
	ec->rx_max_coalesced_frames = reg & RDMA_INTR_THRESH_MASK;
	ec->use_adaptive_rx_coalesce = priv->dim.use_dim;

	return 0;
}

static int bcm_sysport_set_coalesce(struct net_device *dev,
				    struct ethtool_coalesce *ec)
{
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct dim_cq_moder moder;
	u32 usecs, pkts;
	unsigned int i;

	/* Base system clock is 125Mhz, DMA timeout is this reference clock
	 * divided by 1024, which yields roughly 8.192 us, our maximum value
	 * has to fit in the RING_TIMEOUT_MASK (16 bits).
	 */
	if (ec->tx_max_coalesced_frames > RING_INTR_THRESH_MASK ||
	    ec->tx_coalesce_usecs > (RING_TIMEOUT_MASK * 8) + 1 ||
	    ec->rx_max_coalesced_frames > RDMA_INTR_THRESH_MASK ||
	    ec->rx_coalesce_usecs > (RDMA_TIMEOUT_MASK * 8) + 1)
		return -EINVAL;

	if ((ec->tx_coalesce_usecs == 0 && ec->tx_max_coalesced_frames == 0) ||
	    (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0))
		return -EINVAL;

	for (i = 0; i < dev->num_tx_queues; i++)
		bcm_sysport_set_tx_coalesce(&priv->tx_rings[i], ec);

	priv->rx_coalesce_usecs = ec->rx_coalesce_usecs;
	priv->rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
	usecs = priv->rx_coalesce_usecs;
	pkts = priv->rx_max_coalesced_frames;

	if (ec->use_adaptive_rx_coalesce && !priv->dim.use_dim) {
		moder = net_dim_get_def_rx_moderation(priv->dim.dim.mode);
		usecs = moder.usec;
		pkts = moder.pkts;
	}

	priv->dim.use_dim = ec->use_adaptive_rx_coalesce;

	/* Apply desired coalescing parameters */
	bcm_sysport_set_rx_coalesce(priv, usecs, pkts);

	return 0;
}

static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb)
{
	dev_consume_skb_any(cb->skb);
	cb->skb = NULL;
	dma_unmap_addr_set(cb, dma_addr, 0);
}

static struct sk_buff *bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,
					     struct bcm_sysport_cb *cb)
{
	struct device *kdev = &priv->pdev->dev;
	struct net_device *ndev = priv->netdev;
	struct sk_buff *skb, *rx_skb;
	dma_addr_t mapping;

	/* Allocate a new SKB for a new packet */
	skb = __netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH,
				 GFP_ATOMIC | __GFP_NOWARN);
	if (!skb) {
		priv->mib.alloc_rx_buff_failed++;
		netif_err(priv, rx_err, ndev, "SKB alloc failed\n");
		return NULL;
	}

	mapping = dma_map_single(kdev, skb->data,
				 RX_BUF_LENGTH, DMA_FROM_DEVICE);
	if (dma_mapping_error(kdev, mapping)) {
		priv->mib.rx_dma_failed++;
		dev_kfree_skb_any(skb);
		netif_err(priv, rx_err, ndev, "DMA mapping failure\n");
		return NULL;
	}

	/* Grab the current SKB on the ring */
	rx_skb = cb->skb;
	if (likely(rx_skb))
		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
				 RX_BUF_LENGTH, DMA_FROM_DEVICE);

	/* Put the new SKB on the ring */
	cb->skb = skb;
	dma_unmap_addr_set(cb, dma_addr, mapping);
	dma_desc_set_addr(priv, cb->bd_addr, mapping);

	netif_dbg(priv, rx_status, ndev, "RX refill\n");

	/* Return the current SKB to the caller */
	return rx_skb;
}

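/* Note (illustration, not part of the original driver): during the initial
 * fill below each control block has no previous skb, so
 * bcm_sysport_rx_refill() returns NULL and dev_kfree_skb() is a no-op; if a
 * slot was already populated, the skb previously attached to it is returned
 * and freed here instead of being leaked.
 */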
static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv)
{
	struct bcm_sysport_cb *cb;
	struct sk_buff *skb;
	unsigned int i;

	for (i = 0; i < priv->num_rx_bds; i++) {
		cb = &priv->rx_cbs[i];
		skb = bcm_sysport_rx_refill(priv, cb);
		dev_kfree_skb(skb);
		if (!cb->skb)
			return -ENOMEM;
	}

	return 0;
}

/* Poll the hardware for up to budget packets to process */
static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
					unsigned int budget)
{
	struct bcm_sysport_stats64 *stats64 = &priv->stats64;
	struct net_device *ndev = priv->netdev;
	unsigned int processed = 0, to_process;
	unsigned int processed_bytes = 0;
	struct bcm_sysport_cb *cb;
	struct sk_buff *skb;
	unsigned int p_index;
	u16 len, status;
	struct bcm_rsb *rsb;

	/* Clear status before servicing to reduce spurious interrupts */
	intrl2_0_writel(priv, INTRL2_0_RDMA_MBDONE, INTRL2_CPU_CLEAR);

	/* Determine how much we should process since last call, SYSTEMPORT Lite
	 * groups the producer and consumer indexes into the same 32-bit
	 * register, which we access using RDMA_CONS_INDEX
	 */
	if (!priv->is_lite)
		p_index = rdma_readl(priv, RDMA_PROD_INDEX);
	else
		p_index = rdma_readl(priv, RDMA_CONS_INDEX);
	p_index &= RDMA_PROD_INDEX_MASK;

	to_process = (p_index - priv->rx_c_index) & RDMA_CONS_INDEX_MASK;

	netif_dbg(priv, rx_status, ndev,
		  "p_index=%d rx_c_index=%d to_process=%d\n",
		  p_index, priv->rx_c_index, to_process);

	while ((processed < to_process) && (processed < budget)) {
		cb = &priv->rx_cbs[priv->rx_read_ptr];
		skb = bcm_sysport_rx_refill(priv, cb);

		/* We do not have a backing SKB, so we do not have a
		 * corresponding DMA mapping for this incoming packet since
		 * bcm_sysport_rx_refill always either has both skb and mapping
		 * or none.
		 */
		if (unlikely(!skb)) {
			netif_err(priv, rx_err, ndev, "out of memory!\n");
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			goto next;
		}

		/* Extract the Receive Status Block prepended */
		rsb = (struct bcm_rsb *)skb->data;
		len = (rsb->rx_status_len >> DESC_LEN_SHIFT) & DESC_LEN_MASK;
		status = (rsb->rx_status_len >> DESC_STATUS_SHIFT) &
			  DESC_STATUS_MASK;

		netif_dbg(priv, rx_status, ndev,
			  "p=%d, c=%d, rd_ptr=%d, len=%d, flag=0x%04x\n",
			  p_index, priv->rx_c_index, priv->rx_read_ptr,
			  len, status);

		if (unlikely(len > RX_BUF_LENGTH)) {
			netif_err(priv, rx_status, ndev, "oversized packet\n");
			ndev->stats.rx_length_errors++;
			ndev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) {
			netif_err(priv, rx_status, ndev, "fragmented packet!\n");
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		if (unlikely(status & (RX_STATUS_ERR | RX_STATUS_OVFLOW))) {
			netif_err(priv, rx_err, ndev, "error packet\n");
			if (status & RX_STATUS_OVFLOW)
				ndev->stats.rx_over_errors++;
			ndev->stats.rx_dropped++;
			ndev->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto next;
		}

		skb_put(skb, len);

		/* Hardware validated our checksum */
		if (likely(status & DESC_L4_CSUM))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		/* Hardware pre-pends packets with 2 bytes before the Ethernet
		 * header, plus we have the Receive Status Block; strip all of
		 * this from the SKB.
		 */
		skb_pull(skb, sizeof(*rsb) + 2);
		len -= (sizeof(*rsb) + 2);
		processed_bytes += len;

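		/* Note (illustration, not part of the original driver): at
		 * this point skb->data points at the Ethernet header; the
		 * buffer as written by hardware was laid out as
		 * [RSB][2-byte pad][Ethernet frame][FCS when crc_fwd is set],
		 * and the RSB plus pad have just been pulled off above.
		 */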
		/* UniMAC may forward CRC */
		if (priv->crc_fwd) {
			skb_trim(skb, len - ETH_FCS_LEN);
			len -= ETH_FCS_LEN;
		}

		skb->protocol = eth_type_trans(skb, ndev);
		ndev->stats.rx_packets++;
		ndev->stats.rx_bytes += len;
		u64_stats_update_begin(&priv->syncp);
		stats64->rx_packets++;
		stats64->rx_bytes += len;
		u64_stats_update_end(&priv->syncp);

		napi_gro_receive(&priv->napi, skb);
next:
		processed++;
		priv->rx_read_ptr++;

		if (priv->rx_read_ptr == priv->num_rx_bds)
			priv->rx_read_ptr = 0;
	}

	priv->dim.packets = processed;
	priv->dim.bytes = processed_bytes;

	return processed;
}

static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_tx_ring *ring,
				       struct bcm_sysport_cb *cb,
				       unsigned int *bytes_compl,
				       unsigned int *pkts_compl)
{
	struct bcm_sysport_priv *priv = ring->priv;
	struct device *kdev = &priv->pdev->dev;

	if (cb->skb) {
		*bytes_compl += cb->skb->len;
		dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
				 dma_unmap_len(cb, dma_len),
				 DMA_TO_DEVICE);
		(*pkts_compl)++;
		bcm_sysport_free_cb(cb);
	/* SKB fragment */
	} else if (dma_unmap_addr(cb, dma_addr)) {
		*bytes_compl += dma_unmap_len(cb, dma_len);
		dma_unmap_page(kdev, dma_unmap_addr(cb, dma_addr),
			       dma_unmap_len(cb, dma_len), DMA_TO_DEVICE);
		dma_unmap_addr_set(cb, dma_addr, 0);
	}
}

/* Reclaim queued SKBs for transmission completion, lockless version */
static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
					     struct bcm_sysport_tx_ring *ring)
{
	unsigned int pkts_compl = 0, bytes_compl = 0;
	struct net_device *ndev = priv->netdev;
	unsigned int txbds_processed = 0;
	struct bcm_sysport_cb *cb;
	unsigned int txbds_ready;
	unsigned int c_index;
	u32 hw_ind;

	/* Clear status before servicing to reduce spurious interrupts */
	if (!ring->priv->is_lite)
		intrl2_1_writel(ring->priv, BIT(ring->index), INTRL2_CPU_CLEAR);
	else
		intrl2_0_writel(ring->priv, BIT(ring->index +
				INTRL2_0_TDMA_MBDONE_SHIFT), INTRL2_CPU_CLEAR);

	/* Compute how many descriptors have been processed since last call */
	hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
	c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
	/* A simple modular subtraction against the hardware consumer index
	 * also handles wrap-around of the 16-bit index, so the queue cannot
	 * be left permanently stalled when the difference would otherwise
	 * compute to zero.
	 */
	txbds_ready = (c_index - ring->c_index) & RING_CONS_INDEX_MASK;

	netif_dbg(priv, tx_done, ndev,
		  "ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
		  ring->index, ring->c_index, c_index, txbds_ready);

	while (txbds_processed < txbds_ready) {
		cb = &ring->cbs[ring->clean_index];
		bcm_sysport_tx_reclaim_one(ring, cb, &bytes_compl, &pkts_compl);

		ring->desc_count++;
		txbds_processed++;

		if (likely(ring->clean_index < ring->size - 1))
			ring->clean_index++;
		else
			ring->clean_index = 0;
	}

	u64_stats_update_begin(&priv->syncp);
	ring->packets += pkts_compl;
	ring->bytes += bytes_compl;
	u64_stats_update_end(&priv->syncp);

	ring->c_index = c_index;

	netif_dbg(priv, tx_done, ndev,
		  "ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n",
		  ring->index, ring->c_index, pkts_compl, bytes_compl);

	return pkts_compl;
}

/* Locked version of the per-ring TX reclaim routine */
static unsigned int bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
					   struct bcm_sysport_tx_ring *ring)
{
	struct netdev_queue *txq;
	unsigned int released;
	unsigned long flags;

	txq = netdev_get_tx_queue(priv->netdev, ring->index);

	spin_lock_irqsave(&ring->lock, flags);
	released = __bcm_sysport_tx_reclaim(priv, ring);
	if (released)
		netif_tx_wake_queue(txq);

	spin_unlock_irqrestore(&ring->lock, flags);

	return released;
}

/* Locked version of the per-ring TX reclaim, but does not wake the queue */
static void bcm_sysport_tx_clean(struct bcm_sysport_priv *priv,
				 struct bcm_sysport_tx_ring *ring)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	__bcm_sysport_tx_reclaim(priv, ring);
	spin_unlock_irqrestore(&ring->lock, flags);
}

static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
{
	struct bcm_sysport_tx_ring *ring =
		container_of(napi, struct bcm_sysport_tx_ring, napi);
	unsigned int work_done = 0;

	work_done = bcm_sysport_tx_reclaim(ring->priv, ring);

	if (work_done == 0) {
		napi_complete(napi);
		/* re-enable TX interrupt */
		if (!ring->priv->is_lite)
			intrl2_1_mask_clear(ring->priv, BIT(ring->index));
		else
			intrl2_0_mask_clear(ring->priv, BIT(ring->index +
					    INTRL2_0_TDMA_MBDONE_SHIFT));

		return 0;
	}

	return budget;
}

static void bcm_sysport_tx_reclaim_all(struct bcm_sysport_priv *priv)
{
	unsigned int q;

	for (q = 0; q < priv->netdev->num_tx_queues; q++)
		bcm_sysport_tx_reclaim(priv, &priv->tx_rings[q]);
}

static int bcm_sysport_poll(struct napi_struct *napi, int budget)
{
	struct bcm_sysport_priv *priv =
		container_of(napi, struct bcm_sysport_priv, napi);
	struct dim_sample dim_sample = {};
	unsigned int work_done = 0;

	work_done = bcm_sysport_desc_rx(priv, budget);

	priv->rx_c_index += work_done;
	priv->rx_c_index &= RDMA_CONS_INDEX_MASK;

	/* SYSTEMPORT Lite groups the producer/consumer index, producer is
	 * maintained by HW, but writes to it will be ignored while RDMA
	 * is active
	 */
	if (!priv->is_lite)
		rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX);
	else
		rdma_writel(priv, priv->rx_c_index << 16, RDMA_CONS_INDEX);

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		/* re-enable RX interrupts */
		intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE);
	}

	if (priv->dim.use_dim) {
		dim_update_sample(priv->dim.event_ctr, priv->dim.packets,
				  priv->dim.bytes, &dim_sample);
		net_dim(&priv->dim.dim, dim_sample);
	}

	return work_done;
}

static void mpd_enable_set(struct bcm_sysport_priv *priv, bool enable)
{
	u32 reg, bit;

	reg = umac_readl(priv, UMAC_MPD_CTRL);
	if (enable)
		reg |= MPD_EN;
	else
		reg &= ~MPD_EN;
	umac_writel(priv, reg, UMAC_MPD_CTRL);

	if (priv->is_lite)
		bit = RBUF_ACPI_EN_LITE;
	else
		bit = RBUF_ACPI_EN;

	reg = rbuf_readl(priv, RBUF_CONTROL);
	if (enable)
		reg |= bit;
	else
		reg &= ~bit;
	rbuf_writel(priv, reg, RBUF_CONTROL);
}

static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
{
	unsigned int index;
	u32 reg;

	/* Disable RXCHK, active filters and Broadcom tag matching */
	reg = rxchk_readl(priv, RXCHK_CONTROL);
	reg &= ~(RXCHK_BRCM_TAG_MATCH_MASK <<
		 RXCHK_BRCM_TAG_MATCH_SHIFT | RXCHK_EN | RXCHK_BRCM_TAG_EN);
	rxchk_writel(priv, reg, RXCHK_CONTROL);

	/* Make sure we restore correct CID index in case HW lost
	 * its context during deep idle state
	 */
	for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) {
		rxchk_writel(priv, priv->filters_loc[index] <<
			     RXCHK_BRCM_TAG_CID_SHIFT, RXCHK_BRCM_TAG(index));
		rxchk_writel(priv, 0xff00ffff, RXCHK_BRCM_TAG_MASK(index));
	}

	/* Clear the MagicPacket detection logic */
	mpd_enable_set(priv, false);

	reg = intrl2_0_readl(priv, INTRL2_CPU_STATUS);
	if (reg & INTRL2_0_MPD)
		netdev_info(priv->netdev, "Wake-on-LAN (MPD) interrupt!\n");

	if (reg & INTRL2_0_BRCM_MATCH_TAG) {
		reg = rxchk_readl(priv, RXCHK_BRCM_TAG_MATCH_STATUS) &
				  RXCHK_BRCM_TAG_MATCH_MASK;
		netdev_info(priv->netdev,
			    "Wake-on-LAN (filters 0x%02x) interrupt!\n", reg);
	}

	netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n");
}

static void bcm_sysport_dim_work(struct work_struct *work)
{
	struct dim *dim = container_of(work, struct dim, work);
	struct bcm_sysport_net_dim *ndim =
			container_of(dim, struct bcm_sysport_net_dim, dim);
	struct bcm_sysport_priv *priv =
			container_of(ndim, struct bcm_sysport_priv, dim);
	struct dim_cq_moder cur_profile = net_dim_get_rx_moderation(dim->mode,
								dim->profile_ix);

	bcm_sysport_set_rx_coalesce(priv, cur_profile.usec, cur_profile.pkts);
	dim->state = DIM_START_MEASURE;
}

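/* Note (illustration, not part of the original driver): the DIM plumbing
 * works as follows: bcm_sysport_rx_isr() counts interrupt events in
 * priv->dim.event_ctr, bcm_sysport_desc_rx() records packets/bytes per NAPI
 * run, bcm_sysport_poll() feeds them to net_dim() via dim_update_sample(),
 * and bcm_sysport_dim_work() above applies the selected profile through
 * bcm_sysport_set_rx_coalesce().
 */
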
/* RX and misc interrupt routine */
static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct bcm_sysport_priv *priv = netdev_priv(dev);
	struct bcm_sysport_tx_ring *txr;
	unsigned int ring, ring_bit;

	priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
			  ~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
	intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);

	if (unlikely(priv->irq0_stat == 0)) {
		netdev_warn(priv->netdev, "spurious RX interrupt\n");
		return IRQ_NONE;
	}

	if (priv->irq0_stat & INTRL2_0_RDMA_MBDONE) {
		priv->dim.event_ctr++;
		if (likely(napi_schedule_prep(&priv->napi))) {
			/* disable RX interrupts */
			intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE);
			__napi_schedule_irqoff(&priv->napi);
		}
	}

	/* TX ring is full, perform a full reclaim since we do not know
	 * which one would trigger this interrupt
	 */
	if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
		bcm_sysport_tx_reclaim_all(priv);

	if (!priv->is_lite)
		goto out;

	for (ring = 0; ring < dev->num_tx_queues; ring++) {
		ring_bit = BIT(ring + INTRL2_0_TDMA_MBDONE_SHIFT);
		if (!(priv->irq0_stat & ring_bit))
			continue;

		txr = &priv->tx_rings[ring];

		if (likely(napi_schedule_prep(&txr->napi))) {
			intrl2_0_mask_set(priv, ring_bit);
			__napi_schedule(&txr->napi);
		}
	}
out:
	return IRQ_HANDLED;
}

/* TX interrupt service routine */
|
|
|
|
static irqreturn_t bcm_sysport_tx_isr(int irq, void *dev_id)
|
|
|
|
{
|
|
|
|
struct net_device *dev = dev_id;
|
|
|
|
struct bcm_sysport_priv *priv = netdev_priv(dev);
|
|
|
|
struct bcm_sysport_tx_ring *txr;
|
|
|
|
unsigned int ring;
|
|
|
|
|
|
|
|
priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
|
|
|
|
~intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
|
|
|
|
intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
|
|
|
|
|
|
|
|
if (unlikely(priv->irq1_stat == 0)) {
|
|
|
|
netdev_warn(priv->netdev, "spurious TX interrupt\n");
|
|
|
|
return IRQ_NONE;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (ring = 0; ring < dev->num_tx_queues; ring++) {
|
|
|
|
if (!(priv->irq1_stat & BIT(ring)))
|
|
|
|
continue;
|
|
|
|
|
|
|
|
txr = &priv->tx_rings[ring];
|
|
|
|
|
|
|
|
if (likely(napi_schedule_prep(&txr->napi))) {
|
|
|
|
intrl2_1_mask_set(priv, BIT(ring));
|
2016-04-20 18:37:08 +00:00
|
|
|
__napi_schedule_irqoff(&txr->napi);
|
2014-04-25 01:08:57 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return IRQ_HANDLED;
|
|
|
|
}
|
|
|
|
|
2014-07-02 04:08:40 +00:00
|
|
|
static irqreturn_t bcm_sysport_wol_isr(int irq, void *dev_id)
|
|
|
|
{
|
|
|
|
struct bcm_sysport_priv *priv = dev_id;
|
|
|
|
|
|
|
|
pm_wakeup_event(&priv->pdev->dev, 0);
|
|
|
|
|
|
|
|
return IRQ_HANDLED;
|
|
|
|
}
|
|
|
|
|
2015-07-31 18:42:55 +00:00
|
|
|
#ifdef CONFIG_NET_POLL_CONTROLLER
|
|
|
|
static void bcm_sysport_poll_controller(struct net_device *dev)
|
|
|
|
{
|
|
|
|
struct bcm_sysport_priv *priv = netdev_priv(dev);
|
|
|
|
|
|
|
|
disable_irq(priv->irq0);
|
|
|
|
bcm_sysport_rx_isr(priv->irq0, priv);
|
|
|
|
enable_irq(priv->irq0);
|
|
|
|
|
2017-01-20 19:08:27 +00:00
|
|
|
if (!priv->is_lite) {
|
|
|
|
disable_irq(priv->irq1);
|
|
|
|
bcm_sysport_tx_isr(priv->irq1, priv);
|
|
|
|
enable_irq(priv->irq1);
|
|
|
|
}
|
2015-07-31 18:42:55 +00:00
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2014-10-02 16:43:16 +00:00
|
|
|
static struct sk_buff *bcm_sysport_insert_tsb(struct sk_buff *skb,
|
|
|
|
struct net_device *dev)
|
2014-04-25 01:08:57 +00:00
|
|
|
{
|
2018-09-27 22:36:14 +00:00
|
|
|
struct bcm_sysport_priv *priv = netdev_priv(dev);
|
2014-04-25 01:08:57 +00:00
|
|
|
struct sk_buff *nskb;
|
2014-05-30 19:39:30 +00:00
|
|
|
struct bcm_tsb *tsb;
|
2014-04-25 01:08:57 +00:00
|
|
|
u32 csum_info;
|
|
|
|
u8 ip_proto;
|
|
|
|
u16 csum_start;
|
2018-04-02 22:58:56 +00:00
|
|
|
__be16 ip_ver;
|
2014-04-25 01:08:57 +00:00
|
|
|
|
|
|
|
/* Re-allocate SKB if needed */
|
|
|
|
if (unlikely(skb_headroom(skb) < sizeof(*tsb))) {
|
|
|
|
nskb = skb_realloc_headroom(skb, sizeof(*tsb));
|
|
|
|
if (!nskb) {
|
2018-09-27 22:36:13 +00:00
|
|
|
dev_kfree_skb_any(skb);
|
2018-09-27 22:36:14 +00:00
|
|
|
priv->mib.tx_realloc_tsb_failed++;
|
2014-04-25 01:08:57 +00:00
|
|
|
dev->stats.tx_errors++;
|
|
|
|
dev->stats.tx_dropped++;
|
2014-10-02 16:43:16 +00:00
|
|
|
return NULL;
|
2014-04-25 01:08:57 +00:00
|
|
|
}
|
2018-09-27 22:36:13 +00:00
|
|
|
dev_consume_skb_any(skb);
|
2014-04-25 01:08:57 +00:00
|
|
|
skb = nskb;
|
2018-09-27 22:36:14 +00:00
|
|
|
priv->mib.tx_realloc_tsb++;
|
2014-04-25 01:08:57 +00:00
|
|
|
}
|
|
|
|
|
networking: make skb_push & __skb_push return void pointers
It seems like a historic accident that these return unsigned char *,
and in many places that means casts are required, more often than not.
Make these functions return void * and remove all the casts across
the tree, adding a (u8 *) cast only where the unsigned char pointer
was used directly, all done with the following spatch:
@@
expression SKB, LEN;
typedef u8;
identifier fn = { skb_push, __skb_push, skb_push_rcsum };
@@
- *(fn(SKB, LEN))
+ *(u8 *)fn(SKB, LEN)
@@
expression E, SKB, LEN;
identifier fn = { skb_push, __skb_push, skb_push_rcsum };
type T;
@@
- E = ((T *)(fn(SKB, LEN)))
+ E = fn(SKB, LEN)
@@
expression SKB, LEN;
identifier fn = { skb_push, __skb_push, skb_push_rcsum };
@@
- fn(SKB, LEN)[0]
+ *(u8 *)fn(SKB, LEN)
Note that the last part there converts from push(...)[0] to the
more idiomatic *(u8 *)push(...).
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-06-16 12:29:23 +00:00
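As a concrete illustration of that conversion applied to the assignment just below (the "before" form is inferred from the old unsigned char * return type, not taken from this file's history):

/* before: skb_push() returned unsigned char *, so a cast was needed */
tsb = (struct bcm_tsb *)skb_push(skb, sizeof(*tsb));

/* after: skb_push() returns void *, which converts implicitly */
tsb = skb_push(skb, sizeof(*tsb));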
|
|
|
tsb = skb_push(skb, sizeof(*tsb));
|
2014-04-25 01:08:57 +00:00
|
|
|
/* Zero-out TSB by default */
|
|
|
|
memset(tsb, 0, sizeof(*tsb));
|
|
|
|
|
|
|
|
if (skb->ip_summed == CHECKSUM_PARTIAL) {
|
2018-04-02 22:58:56 +00:00
|
|
|
ip_ver = skb->protocol;
|
2014-04-25 01:08:57 +00:00
|
|
|
switch (ip_ver) {
|
2018-04-02 22:58:56 +00:00
|
|
|
case htons(ETH_P_IP):
|
2014-04-25 01:08:57 +00:00
|
|
|
ip_proto = ip_hdr(skb)->protocol;
|
|
|
|
break;
|
2018-04-02 22:58:56 +00:00
|
|
|
case htons(ETH_P_IPV6):
|
2014-04-25 01:08:57 +00:00
|
|
|
ip_proto = ipv6_hdr(skb)->nexthdr;
|
|
|
|
break;
|
|
|
|
default:
|
2014-10-02 16:43:16 +00:00
|
|
|
return skb;
|
2014-04-25 01:08:57 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Get the checksum offset and the L4 (transport) offset */
|
|
|
|
csum_start = skb_checksum_start_offset(skb) - sizeof(*tsb);
|
|
|
|
csum_info = (csum_start + skb->csum_offset) & L4_CSUM_PTR_MASK;
|
|
|
|
csum_info |= (csum_start << L4_PTR_SHIFT);
|
|
|
|
|
|
|
|
if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
|
|
|
|
csum_info |= L4_LENGTH_VALID;
|
2018-04-02 22:58:56 +00:00
|
|
|
if (ip_proto == IPPROTO_UDP &&
|
|
|
|
ip_ver == htons(ETH_P_IP))
|
2014-04-25 01:08:57 +00:00
|
|
|
csum_info |= L4_UDP;
|
2014-07-10 00:36:46 +00:00
|
|
|
} else {
|
2014-04-25 01:08:57 +00:00
|
|
|
csum_info = 0;
|
2014-07-10 00:36:46 +00:00
|
|
|
}
|
2014-04-25 01:08:57 +00:00
|
|
|
|
|
|
|
tsb->l4_ptr_dest_map = csum_info;
|
|
|
|
}
|
|
|
|
|
2014-10-02 16:43:16 +00:00
|
|
|
return skb;
|
2014-04-25 01:08:57 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
|
|
|
|
struct net_device *dev)
|
|
|
|
{
|
|
|
|
struct bcm_sysport_priv *priv = netdev_priv(dev);
|
|
|
|
struct device *kdev = &priv->pdev->dev;
|
|
|
|
struct bcm_sysport_tx_ring *ring;
|
|
|
|
struct bcm_sysport_cb *cb;
|
|
|
|
struct netdev_queue *txq;
|
2019-04-22 16:46:44 +00:00
|
|
|
u32 len_status, addr_lo;
|
2014-05-15 02:32:14 +00:00
|
|
|
unsigned int skb_len;
|
2014-06-05 17:22:15 +00:00
|
|
|
unsigned long flags;
|
2014-04-25 01:08:57 +00:00
|
|
|
dma_addr_t mapping;
|
|
|
|
u16 queue;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
queue = skb_get_queue_mapping(skb);
|
|
|
|
txq = netdev_get_tx_queue(dev, queue);
|
|
|
|
ring = &priv->tx_rings[queue];
|
|
|
|
|
2014-06-05 17:22:15 +00:00
|
|
|
/* lock against tx reclaim in BH context and TX ring full interrupt */
|
|
|
|
spin_lock_irqsave(&ring->lock, flags);
|
2014-04-25 01:08:57 +00:00
|
|
|
if (unlikely(ring->desc_count == 0)) {
|
|
|
|
netif_tx_stop_queue(txq);
|
|
|
|
netdev_err(dev, "queue %d awake and ring full!\n", queue);
|
|
|
|
ret = NETDEV_TX_BUSY;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
2017-01-04 00:34:49 +00:00
|
|
|
/* Insert TSB and checksum info */
|
|
|
|
if (priv->tsb_en) {
|
|
|
|
skb = bcm_sysport_insert_tsb(skb, dev);
|
|
|
|
if (!skb) {
|
|
|
|
ret = NETDEV_TX_OK;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-01-04 00:34:48 +00:00
|
|
|
skb_len = skb->len;
|
2014-05-15 02:32:14 +00:00
|
|
|
|
|
|
|
mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
|
2014-04-25 01:08:57 +00:00
|
|
|
if (dma_mapping_error(kdev, mapping)) {
|
2014-11-19 18:29:55 +00:00
|
|
|
priv->mib.tx_dma_failed++;
|
2014-04-25 01:08:57 +00:00
|
|
|
netif_err(priv, tx_err, dev, "DMA map failed at %p (len=%d)\n",
|
2014-07-10 00:36:46 +00:00
|
|
|
skb->data, skb_len);
|
2014-04-25 01:08:57 +00:00
|
|
|
ret = NETDEV_TX_OK;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Remember the SKB for future freeing */
|
|
|
|
cb = &ring->cbs[ring->curr_desc];
|
|
|
|
cb->skb = skb;
|
|
|
|
dma_unmap_addr_set(cb, dma_addr, mapping);
|
2014-05-15 02:32:14 +00:00
|
|
|
dma_unmap_len_set(cb, dma_len, skb_len);
|
2014-04-25 01:08:57 +00:00
|
|
|
|
2019-04-22 16:46:44 +00:00
|
|
|
addr_lo = lower_32_bits(mapping);
|
2014-04-25 01:08:57 +00:00
|
|
|
len_status = upper_32_bits(mapping) & DESC_ADDR_HI_MASK;
|
2014-05-15 02:32:14 +00:00
|
|
|
len_status |= (skb_len << DESC_LEN_SHIFT);
|
2014-04-25 01:08:57 +00:00
|
|
|
len_status |= (DESC_SOP | DESC_EOP | TX_STATUS_APP_CRC) <<
|
2014-07-10 00:36:46 +00:00
|
|
|
DESC_STATUS_SHIFT;
|
2014-04-25 01:08:57 +00:00
|
|
|
if (skb->ip_summed == CHECKSUM_PARTIAL)
|
|
|
|
len_status |= (DESC_L4_CSUM << DESC_STATUS_SHIFT);
|
|
|
|
|
|
|
|
ring->curr_desc++;
|
|
|
|
if (ring->curr_desc == ring->size)
|
|
|
|
ring->curr_desc = 0;
|
|
|
|
ring->desc_count--;
|
|
|
|
|
2019-04-22 16:46:44 +00:00
|
|
|
/* Ports are latched, so write upper address first */
|
|
|
|
tdma_writel(priv, len_status, TDMA_WRITE_PORT_HI(ring->index));
|
|
|
|
tdma_writel(priv, addr_lo, TDMA_WRITE_PORT_LO(ring->index));
|
2014-04-25 01:08:57 +00:00
|
|
|
|
|
|
|
/* Check ring space and update SW control flow */
|
|
|
|
if (ring->desc_count == 0)
|
|
|
|
netif_tx_stop_queue(txq);
|
|
|
|
|
|
|
|
netif_dbg(priv, tx_queued, dev, "ring=%d desc_count=%d, curr_desc=%d\n",
|
2014-07-10 00:36:46 +00:00
|
|
|
ring->index, ring->desc_count, ring->curr_desc);
|
2014-04-25 01:08:57 +00:00
|
|
|
|
|
|
|
ret = NETDEV_TX_OK;
|
|
|
|
out:
|
2014-06-05 17:22:15 +00:00
|
|
|
spin_unlock_irqrestore(&ring->lock, flags);
|
2014-04-25 01:08:57 +00:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
netdev: pass the stuck queue to the timeout handler
This allows incrementing the correct timeout statistic without any mess.
Down the road, devices can learn to reset just the specific queue.
The patch was generated with the following script:
use strict;
use warnings;
our $^I = '.bak';
my @work = (
["arch/m68k/emu/nfeth.c", "nfeth_tx_timeout"],
["arch/um/drivers/net_kern.c", "uml_net_tx_timeout"],
["arch/um/drivers/vector_kern.c", "vector_net_tx_timeout"],
["arch/xtensa/platforms/iss/network.c", "iss_net_tx_timeout"],
["drivers/char/pcmcia/synclink_cs.c", "hdlcdev_tx_timeout"],
["drivers/infiniband/ulp/ipoib/ipoib_main.c", "ipoib_timeout"],
["drivers/infiniband/ulp/ipoib/ipoib_main.c", "ipoib_timeout"],
["drivers/message/fusion/mptlan.c", "mpt_lan_tx_timeout"],
["drivers/misc/sgi-xp/xpnet.c", "xpnet_dev_tx_timeout"],
["drivers/net/appletalk/cops.c", "cops_timeout"],
["drivers/net/arcnet/arcdevice.h", "arcnet_timeout"],
["drivers/net/arcnet/arcnet.c", "arcnet_timeout"],
["drivers/net/arcnet/com20020.c", "arcnet_timeout"],
["drivers/net/ethernet/3com/3c509.c", "el3_tx_timeout"],
["drivers/net/ethernet/3com/3c515.c", "corkscrew_timeout"],
["drivers/net/ethernet/3com/3c574_cs.c", "el3_tx_timeout"],
["drivers/net/ethernet/3com/3c589_cs.c", "el3_tx_timeout"],
["drivers/net/ethernet/3com/3c59x.c", "vortex_tx_timeout"],
["drivers/net/ethernet/3com/3c59x.c", "vortex_tx_timeout"],
["drivers/net/ethernet/3com/typhoon.c", "typhoon_tx_timeout"],
["drivers/net/ethernet/8390/8390.h", "ei_tx_timeout"],
["drivers/net/ethernet/8390/8390.h", "eip_tx_timeout"],
["drivers/net/ethernet/8390/8390.c", "ei_tx_timeout"],
["drivers/net/ethernet/8390/8390p.c", "eip_tx_timeout"],
["drivers/net/ethernet/8390/ax88796.c", "ax_ei_tx_timeout"],
["drivers/net/ethernet/8390/axnet_cs.c", "axnet_tx_timeout"],
["drivers/net/ethernet/8390/etherh.c", "__ei_tx_timeout"],
["drivers/net/ethernet/8390/hydra.c", "__ei_tx_timeout"],
["drivers/net/ethernet/8390/mac8390.c", "__ei_tx_timeout"],
["drivers/net/ethernet/8390/mcf8390.c", "__ei_tx_timeout"],
["drivers/net/ethernet/8390/lib8390.c", "__ei_tx_timeout"],
["drivers/net/ethernet/8390/ne2k-pci.c", "ei_tx_timeout"],
["drivers/net/ethernet/8390/pcnet_cs.c", "ei_tx_timeout"],
["drivers/net/ethernet/8390/smc-ultra.c", "ei_tx_timeout"],
["drivers/net/ethernet/8390/wd.c", "ei_tx_timeout"],
["drivers/net/ethernet/8390/zorro8390.c", "__ei_tx_timeout"],
["drivers/net/ethernet/adaptec/starfire.c", "tx_timeout"],
["drivers/net/ethernet/agere/et131x.c", "et131x_tx_timeout"],
["drivers/net/ethernet/allwinner/sun4i-emac.c", "emac_timeout"],
["drivers/net/ethernet/alteon/acenic.c", "ace_watchdog"],
["drivers/net/ethernet/amazon/ena/ena_netdev.c", "ena_tx_timeout"],
["drivers/net/ethernet/amd/7990.h", "lance_tx_timeout"],
["drivers/net/ethernet/amd/7990.c", "lance_tx_timeout"],
["drivers/net/ethernet/amd/a2065.c", "lance_tx_timeout"],
["drivers/net/ethernet/amd/am79c961a.c", "am79c961_timeout"],
["drivers/net/ethernet/amd/amd8111e.c", "amd8111e_tx_timeout"],
["drivers/net/ethernet/amd/ariadne.c", "ariadne_tx_timeout"],
["drivers/net/ethernet/amd/atarilance.c", "lance_tx_timeout"],
["drivers/net/ethernet/amd/au1000_eth.c", "au1000_tx_timeout"],
["drivers/net/ethernet/amd/declance.c", "lance_tx_timeout"],
["drivers/net/ethernet/amd/lance.c", "lance_tx_timeout"],
["drivers/net/ethernet/amd/mvme147.c", "lance_tx_timeout"],
["drivers/net/ethernet/amd/ni65.c", "ni65_timeout"],
["drivers/net/ethernet/amd/nmclan_cs.c", "mace_tx_timeout"],
["drivers/net/ethernet/amd/pcnet32.c", "pcnet32_tx_timeout"],
["drivers/net/ethernet/amd/sunlance.c", "lance_tx_timeout"],
["drivers/net/ethernet/amd/xgbe/xgbe-drv.c", "xgbe_tx_timeout"],
["drivers/net/ethernet/apm/xgene-v2/main.c", "xge_timeout"],
["drivers/net/ethernet/apm/xgene/xgene_enet_main.c", "xgene_enet_timeout"],
["drivers/net/ethernet/apple/macmace.c", "mace_tx_timeout"],
["drivers/net/ethernet/atheros/ag71xx.c", "ag71xx_tx_timeout"],
["drivers/net/ethernet/atheros/alx/main.c", "alx_tx_timeout"],
["drivers/net/ethernet/atheros/atl1c/atl1c_main.c", "atl1c_tx_timeout"],
["drivers/net/ethernet/atheros/atl1e/atl1e_main.c", "atl1e_tx_timeout"],
["drivers/net/ethernet/atheros/atlx/atl.c", "atlx_tx_timeout"],
["drivers/net/ethernet/atheros/atlx/atl1.c", "atlx_tx_timeout"],
["drivers/net/ethernet/atheros/atlx/atl2.c", "atl2_tx_timeout"],
["drivers/net/ethernet/broadcom/b44.c", "b44_tx_timeout"],
["drivers/net/ethernet/broadcom/bcmsysport.c", "bcm_sysport_tx_timeout"],
["drivers/net/ethernet/broadcom/bnx2.c", "bnx2_tx_timeout"],
["drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h", "bnx2x_tx_timeout"],
["drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c", "bnx2x_tx_timeout"],
["drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c", "bnx2x_tx_timeout"],
["drivers/net/ethernet/broadcom/bnxt/bnxt.c", "bnxt_tx_timeout"],
["drivers/net/ethernet/broadcom/genet/bcmgenet.c", "bcmgenet_timeout"],
["drivers/net/ethernet/broadcom/sb1250-mac.c", "sbmac_tx_timeout"],
["drivers/net/ethernet/broadcom/tg3.c", "tg3_tx_timeout"],
["drivers/net/ethernet/calxeda/xgmac.c", "xgmac_tx_timeout"],
["drivers/net/ethernet/cavium/liquidio/lio_main.c", "liquidio_tx_timeout"],
["drivers/net/ethernet/cavium/liquidio/lio_vf_main.c", "liquidio_tx_timeout"],
["drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c", "lio_vf_rep_tx_timeout"],
["drivers/net/ethernet/cavium/thunder/nicvf_main.c", "nicvf_tx_timeout"],
["drivers/net/ethernet/cirrus/cs89x0.c", "net_timeout"],
["drivers/net/ethernet/cisco/enic/enic_main.c", "enic_tx_timeout"],
["drivers/net/ethernet/cisco/enic/enic_main.c", "enic_tx_timeout"],
["drivers/net/ethernet/cortina/gemini.c", "gmac_tx_timeout"],
["drivers/net/ethernet/davicom/dm9000.c", "dm9000_timeout"],
["drivers/net/ethernet/dec/tulip/de2104x.c", "de_tx_timeout"],
["drivers/net/ethernet/dec/tulip/tulip_core.c", "tulip_tx_timeout"],
["drivers/net/ethernet/dec/tulip/winbond-840.c", "tx_timeout"],
["drivers/net/ethernet/dlink/dl2k.c", "rio_tx_timeout"],
["drivers/net/ethernet/dlink/sundance.c", "tx_timeout"],
["drivers/net/ethernet/emulex/benet/be_main.c", "be_tx_timeout"],
["drivers/net/ethernet/ethoc.c", "ethoc_tx_timeout"],
["drivers/net/ethernet/faraday/ftgmac100.c", "ftgmac100_tx_timeout"],
["drivers/net/ethernet/fealnx.c", "fealnx_tx_timeout"],
["drivers/net/ethernet/freescale/dpaa/dpaa_eth.c", "dpaa_tx_timeout"],
["drivers/net/ethernet/freescale/fec_main.c", "fec_timeout"],
["drivers/net/ethernet/freescale/fec_mpc52xx.c", "mpc52xx_fec_tx_timeout"],
["drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c", "fs_timeout"],
["drivers/net/ethernet/freescale/gianfar.c", "gfar_timeout"],
["drivers/net/ethernet/freescale/ucc_geth.c", "ucc_geth_timeout"],
["drivers/net/ethernet/fujitsu/fmvj18x_cs.c", "fjn_tx_timeout"],
["drivers/net/ethernet/google/gve/gve_main.c", "gve_tx_timeout"],
["drivers/net/ethernet/hisilicon/hip04_eth.c", "hip04_timeout"],
["drivers/net/ethernet/hisilicon/hix5hd2_gmac.c", "hix5hd2_net_timeout"],
["drivers/net/ethernet/hisilicon/hns/hns_enet.c", "hns_nic_net_timeout"],
["drivers/net/ethernet/hisilicon/hns3/hns3_enet.c", "hns3_nic_net_timeout"],
["drivers/net/ethernet/huawei/hinic/hinic_main.c", "hinic_tx_timeout"],
["drivers/net/ethernet/i825xx/82596.c", "i596_tx_timeout"],
["drivers/net/ethernet/i825xx/ether1.c", "ether1_timeout"],
["drivers/net/ethernet/i825xx/lib82596.c", "i596_tx_timeout"],
["drivers/net/ethernet/i825xx/sun3_82586.c", "sun3_82586_timeout"],
["drivers/net/ethernet/ibm/ehea/ehea_main.c", "ehea_tx_watchdog"],
["drivers/net/ethernet/ibm/emac/core.c", "emac_tx_timeout"],
["drivers/net/ethernet/ibm/emac/core.c", "emac_tx_timeout"],
["drivers/net/ethernet/ibm/ibmvnic.c", "ibmvnic_tx_timeout"],
["drivers/net/ethernet/intel/e100.c", "e100_tx_timeout"],
["drivers/net/ethernet/intel/e1000/e1000_main.c", "e1000_tx_timeout"],
["drivers/net/ethernet/intel/e1000e/netdev.c", "e1000_tx_timeout"],
["drivers/net/ethernet/intel/fm10k/fm10k_netdev.c", "fm10k_tx_timeout"],
["drivers/net/ethernet/intel/i40e/i40e_main.c", "i40e_tx_timeout"],
["drivers/net/ethernet/intel/iavf/iavf_main.c", "iavf_tx_timeout"],
["drivers/net/ethernet/intel/ice/ice_main.c", "ice_tx_timeout"],
["drivers/net/ethernet/intel/ice/ice_main.c", "ice_tx_timeout"],
["drivers/net/ethernet/intel/igb/igb_main.c", "igb_tx_timeout"],
["drivers/net/ethernet/intel/igbvf/netdev.c", "igbvf_tx_timeout"],
["drivers/net/ethernet/intel/ixgb/ixgb_main.c", "ixgb_tx_timeout"],
["drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c", "adapter->netdev->netdev_ops->ndo_tx_timeout(adapter->netdev);"],
["drivers/net/ethernet/intel/ixgbe/ixgbe_main.c", "ixgbe_tx_timeout"],
["drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c", "ixgbevf_tx_timeout"],
["drivers/net/ethernet/jme.c", "jme_tx_timeout"],
["drivers/net/ethernet/korina.c", "korina_tx_timeout"],
["drivers/net/ethernet/lantiq_etop.c", "ltq_etop_tx_timeout"],
["drivers/net/ethernet/marvell/mv643xx_eth.c", "mv643xx_eth_tx_timeout"],
["drivers/net/ethernet/marvell/pxa168_eth.c", "pxa168_eth_tx_timeout"],
["drivers/net/ethernet/marvell/skge.c", "skge_tx_timeout"],
["drivers/net/ethernet/marvell/sky2.c", "sky2_tx_timeout"],
["drivers/net/ethernet/marvell/sky2.c", "sky2_tx_timeout"],
["drivers/net/ethernet/mediatek/mtk_eth_soc.c", "mtk_tx_timeout"],
["drivers/net/ethernet/mellanox/mlx4/en_netdev.c", "mlx4_en_tx_timeout"],
["drivers/net/ethernet/mellanox/mlx4/en_netdev.c", "mlx4_en_tx_timeout"],
["drivers/net/ethernet/mellanox/mlx5/core/en_main.c", "mlx5e_tx_timeout"],
["drivers/net/ethernet/micrel/ks8842.c", "ks8842_tx_timeout"],
["drivers/net/ethernet/micrel/ksz884x.c", "netdev_tx_timeout"],
["drivers/net/ethernet/microchip/enc28j60.c", "enc28j60_tx_timeout"],
["drivers/net/ethernet/microchip/encx24j600.c", "encx24j600_tx_timeout"],
["drivers/net/ethernet/natsemi/sonic.h", "sonic_tx_timeout"],
["drivers/net/ethernet/natsemi/sonic.c", "sonic_tx_timeout"],
["drivers/net/ethernet/natsemi/jazzsonic.c", "sonic_tx_timeout"],
["drivers/net/ethernet/natsemi/macsonic.c", "sonic_tx_timeout"],
["drivers/net/ethernet/natsemi/natsemi.c", "ns_tx_timeout"],
["drivers/net/ethernet/natsemi/ns83820.c", "ns83820_tx_timeout"],
["drivers/net/ethernet/natsemi/xtsonic.c", "sonic_tx_timeout"],
["drivers/net/ethernet/neterion/s2io.h", "s2io_tx_watchdog"],
["drivers/net/ethernet/neterion/s2io.c", "s2io_tx_watchdog"],
["drivers/net/ethernet/neterion/vxge/vxge-main.c", "vxge_tx_watchdog"],
["drivers/net/ethernet/netronome/nfp/nfp_net_common.c", "nfp_net_tx_timeout"],
["drivers/net/ethernet/nvidia/forcedeth.c", "nv_tx_timeout"],
["drivers/net/ethernet/nvidia/forcedeth.c", "nv_tx_timeout"],
["drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c", "pch_gbe_tx_timeout"],
["drivers/net/ethernet/packetengines/hamachi.c", "hamachi_tx_timeout"],
["drivers/net/ethernet/packetengines/yellowfin.c", "yellowfin_tx_timeout"],
["drivers/net/ethernet/pensando/ionic/ionic_lif.c", "ionic_tx_timeout"],
["drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c", "netxen_tx_timeout"],
["drivers/net/ethernet/qlogic/qla3xxx.c", "ql3xxx_tx_timeout"],
["drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c", "qlcnic_tx_timeout"],
["drivers/net/ethernet/qualcomm/emac/emac.c", "emac_tx_timeout"],
["drivers/net/ethernet/qualcomm/qca_spi.c", "qcaspi_netdev_tx_timeout"],
["drivers/net/ethernet/qualcomm/qca_uart.c", "qcauart_netdev_tx_timeout"],
["drivers/net/ethernet/rdc/r6040.c", "r6040_tx_timeout"],
["drivers/net/ethernet/realtek/8139cp.c", "cp_tx_timeout"],
["drivers/net/ethernet/realtek/8139too.c", "rtl8139_tx_timeout"],
["drivers/net/ethernet/realtek/atp.c", "tx_timeout"],
["drivers/net/ethernet/realtek/r8169_main.c", "rtl8169_tx_timeout"],
["drivers/net/ethernet/renesas/ravb_main.c", "ravb_tx_timeout"],
["drivers/net/ethernet/renesas/sh_eth.c", "sh_eth_tx_timeout"],
["drivers/net/ethernet/renesas/sh_eth.c", "sh_eth_tx_timeout"],
["drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c", "sxgbe_tx_timeout"],
["drivers/net/ethernet/seeq/ether3.c", "ether3_timeout"],
["drivers/net/ethernet/seeq/sgiseeq.c", "timeout"],
["drivers/net/ethernet/sfc/efx.c", "efx_watchdog"],
["drivers/net/ethernet/sfc/falcon/efx.c", "ef4_watchdog"],
["drivers/net/ethernet/sgi/ioc3-eth.c", "ioc3_timeout"],
["drivers/net/ethernet/sgi/meth.c", "meth_tx_timeout"],
["drivers/net/ethernet/silan/sc92031.c", "sc92031_tx_timeout"],
["drivers/net/ethernet/sis/sis190.c", "sis190_tx_timeout"],
["drivers/net/ethernet/sis/sis900.c", "sis900_tx_timeout"],
["drivers/net/ethernet/smsc/epic100.c", "epic_tx_timeout"],
["drivers/net/ethernet/smsc/smc911x.c", "smc911x_timeout"],
["drivers/net/ethernet/smsc/smc9194.c", "smc_timeout"],
["drivers/net/ethernet/smsc/smc91c92_cs.c", "smc_tx_timeout"],
["drivers/net/ethernet/smsc/smc91x.c", "smc_timeout"],
["drivers/net/ethernet/stmicro/stmmac/stmmac_main.c", "stmmac_tx_timeout"],
["drivers/net/ethernet/sun/cassini.c", "cas_tx_timeout"],
["drivers/net/ethernet/sun/ldmvsw.c", "sunvnet_tx_timeout_common"],
["drivers/net/ethernet/sun/niu.c", "niu_tx_timeout"],
["drivers/net/ethernet/sun/sunbmac.c", "bigmac_tx_timeout"],
["drivers/net/ethernet/sun/sungem.c", "gem_tx_timeout"],
["drivers/net/ethernet/sun/sunhme.c", "happy_meal_tx_timeout"],
["drivers/net/ethernet/sun/sunqe.c", "qe_tx_timeout"],
["drivers/net/ethernet/sun/sunvnet.c", "sunvnet_tx_timeout_common"],
["drivers/net/ethernet/sun/sunvnet_common.c", "sunvnet_tx_timeout_common"],
["drivers/net/ethernet/sun/sunvnet_common.h", "sunvnet_tx_timeout_common"],
["drivers/net/ethernet/synopsys/dwc-xlgmac-net.c", "xlgmac_tx_timeout"],
["drivers/net/ethernet/ti/cpmac.c", "cpmac_tx_timeout"],
["drivers/net/ethernet/ti/cpsw.c", "cpsw_ndo_tx_timeout"],
["drivers/net/ethernet/ti/cpsw_priv.c", "cpsw_ndo_tx_timeout"],
["drivers/net/ethernet/ti/cpsw_priv.h", "cpsw_ndo_tx_timeout"],
["drivers/net/ethernet/ti/davinci_emac.c", "emac_dev_tx_timeout"],
["drivers/net/ethernet/ti/netcp_core.c", "netcp_ndo_tx_timeout"],
["drivers/net/ethernet/ti/tlan.c", "tlan_tx_timeout"],
["drivers/net/ethernet/toshiba/ps3_gelic_net.h", "gelic_net_tx_timeout"],
["drivers/net/ethernet/toshiba/ps3_gelic_net.c", "gelic_net_tx_timeout"],
["drivers/net/ethernet/toshiba/ps3_gelic_wireless.c", "gelic_net_tx_timeout"],
["drivers/net/ethernet/toshiba/spider_net.c", "spider_net_tx_timeout"],
["drivers/net/ethernet/toshiba/tc35815.c", "tc35815_tx_timeout"],
["drivers/net/ethernet/via/via-rhine.c", "rhine_tx_timeout"],
["drivers/net/ethernet/wiznet/w5100.c", "w5100_tx_timeout"],
["drivers/net/ethernet/wiznet/w5300.c", "w5300_tx_timeout"],
["drivers/net/ethernet/xilinx/xilinx_emaclite.c", "xemaclite_tx_timeout"],
["drivers/net/ethernet/xircom/xirc2ps_cs.c", "xirc_tx_timeout"],
["drivers/net/fjes/fjes_main.c", "fjes_tx_retry"],
["drivers/net/slip/slip.c", "sl_tx_timeout"],
["include/linux/usb/usbnet.h", "usbnet_tx_timeout"],
["drivers/net/usb/aqc111.c", "usbnet_tx_timeout"],
["drivers/net/usb/asix_devices.c", "usbnet_tx_timeout"],
["drivers/net/usb/asix_devices.c", "usbnet_tx_timeout"],
["drivers/net/usb/asix_devices.c", "usbnet_tx_timeout"],
["drivers/net/usb/ax88172a.c", "usbnet_tx_timeout"],
["drivers/net/usb/ax88179_178a.c", "usbnet_tx_timeout"],
["drivers/net/usb/catc.c", "catc_tx_timeout"],
["drivers/net/usb/cdc_mbim.c", "usbnet_tx_timeout"],
["drivers/net/usb/cdc_ncm.c", "usbnet_tx_timeout"],
["drivers/net/usb/dm9601.c", "usbnet_tx_timeout"],
["drivers/net/usb/hso.c", "hso_net_tx_timeout"],
["drivers/net/usb/int51x1.c", "usbnet_tx_timeout"],
["drivers/net/usb/ipheth.c", "ipheth_tx_timeout"],
["drivers/net/usb/kaweth.c", "kaweth_tx_timeout"],
["drivers/net/usb/lan78xx.c", "lan78xx_tx_timeout"],
["drivers/net/usb/mcs7830.c", "usbnet_tx_timeout"],
["drivers/net/usb/pegasus.c", "pegasus_tx_timeout"],
["drivers/net/usb/qmi_wwan.c", "usbnet_tx_timeout"],
["drivers/net/usb/r8152.c", "rtl8152_tx_timeout"],
["drivers/net/usb/rndis_host.c", "usbnet_tx_timeout"],
["drivers/net/usb/rtl8150.c", "rtl8150_tx_timeout"],
["drivers/net/usb/sierra_net.c", "usbnet_tx_timeout"],
["drivers/net/usb/smsc75xx.c", "usbnet_tx_timeout"],
["drivers/net/usb/smsc95xx.c", "usbnet_tx_timeout"],
["drivers/net/usb/sr9700.c", "usbnet_tx_timeout"],
["drivers/net/usb/sr9800.c", "usbnet_tx_timeout"],
["drivers/net/usb/usbnet.c", "usbnet_tx_timeout"],
["drivers/net/vmxnet3/vmxnet3_drv.c", "vmxnet3_tx_timeout"],
["drivers/net/wan/cosa.c", "cosa_net_timeout"],
["drivers/net/wan/farsync.c", "fst_tx_timeout"],
["drivers/net/wan/fsl_ucc_hdlc.c", "uhdlc_tx_timeout"],
["drivers/net/wan/lmc/lmc_main.c", "lmc_driver_timeout"],
["drivers/net/wan/x25_asy.c", "x25_asy_timeout"],
["drivers/net/wimax/i2400m/netdev.c", "i2400m_tx_timeout"],
["drivers/net/wireless/intel/ipw2x00/ipw2100.c", "ipw2100_tx_timeout"],
["drivers/net/wireless/intersil/hostap/hostap_main.c", "prism2_tx_timeout"],
["drivers/net/wireless/intersil/hostap/hostap_main.c", "prism2_tx_timeout"],
["drivers/net/wireless/intersil/hostap/hostap_main.c", "prism2_tx_timeout"],
["drivers/net/wireless/intersil/orinoco/main.c", "orinoco_tx_timeout"],
["drivers/net/wireless/intersil/orinoco/orinoco_usb.c", "orinoco_tx_timeout"],
["drivers/net/wireless/intersil/orinoco/orinoco.h", "orinoco_tx_timeout"],
["drivers/net/wireless/intersil/prism54/islpci_dev.c", "islpci_eth_tx_timeout"],
["drivers/net/wireless/intersil/prism54/islpci_eth.c", "islpci_eth_tx_timeout"],
["drivers/net/wireless/intersil/prism54/islpci_eth.h", "islpci_eth_tx_timeout"],
["drivers/net/wireless/marvell/mwifiex/main.c", "mwifiex_tx_timeout"],
["drivers/net/wireless/quantenna/qtnfmac/core.c", "qtnf_netdev_tx_timeout"],
["drivers/net/wireless/quantenna/qtnfmac/core.h", "qtnf_netdev_tx_timeout"],
["drivers/net/wireless/rndis_wlan.c", "usbnet_tx_timeout"],
["drivers/net/wireless/wl3501_cs.c", "wl3501_tx_timeout"],
["drivers/net/wireless/zydas/zd1201.c", "zd1201_tx_timeout"],
["drivers/s390/net/qeth_core.h", "qeth_tx_timeout"],
["drivers/s390/net/qeth_core_main.c", "qeth_tx_timeout"],
["drivers/s390/net/qeth_l2_main.c", "qeth_tx_timeout"],
["drivers/s390/net/qeth_l2_main.c", "qeth_tx_timeout"],
["drivers/s390/net/qeth_l3_main.c", "qeth_tx_timeout"],
["drivers/s390/net/qeth_l3_main.c", "qeth_tx_timeout"],
["drivers/staging/ks7010/ks_wlan_net.c", "ks_wlan_tx_timeout"],
["drivers/staging/qlge/qlge_main.c", "qlge_tx_timeout"],
["drivers/staging/rtl8192e/rtl8192e/rtl_core.c", "_rtl92e_tx_timeout"],
["drivers/staging/rtl8192u/r8192U_core.c", "tx_timeout"],
["drivers/staging/unisys/visornic/visornic_main.c", "visornic_xmit_timeout"],
["drivers/staging/wlan-ng/p80211netdev.c", "p80211knetdev_tx_timeout"],
["drivers/tty/n_gsm.c", "gsm_mux_net_tx_timeout"],
["drivers/tty/synclink.c", "hdlcdev_tx_timeout"],
["drivers/tty/synclink_gt.c", "hdlcdev_tx_timeout"],
["drivers/tty/synclinkmp.c", "hdlcdev_tx_timeout"],
["net/atm/lec.c", "lec_tx_timeout"],
["net/bluetooth/bnep/netdev.c", "bnep_net_timeout"]
);
for my $p (@work) {
my @pair = @$p;
my $file = $pair[0];
my $func = $pair[1];
print STDERR $file , ": ", $func,"\n";
our @ARGV = ($file);
while (<ARGV>) {
if (m/($func\s*\(struct\s+net_device\s+\*[A-Za-z_]?[A-Za-z-0-9_]*)(\))/) {
print STDERR "found $1+$2 in $file\n";
}
if (s/($func\s*\(struct\s+net_device\s+\*[A-Za-z_]?[A-Za-z-0-9_]*)(\))/$1, unsigned int txqueue$2/) {
print STDERR "$func found in $file\n";
}
print;
}
}
where the list of files and functions is simply from:
git grep ndo_tx_timeout, with manual addition of headers
in the rare cases where the function is from a header,
then manually changing the few places which actually
call ndo_tx_timeout.
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Acked-by: Heiner Kallweit <hkallweit1@gmail.com>
Acked-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Acked-by: Shannon Nelson <snelson@pensando.io>
Reviewed-by: Martin Habets <mhabets@solarflare.com>
changes from v9:
fixup a forward declaration
changes from v9:
more leftovers from v3 change
changes from v8:
fix up a missing direct call to timeout
rebased on net-next
changes from v7:
fixup leftovers from v3 change
changes from v6:
fix typo in rtl driver
changes from v5:
add missing files (allow any net device argument name)
changes from v4:
add a missing driver header
changes from v3:
change queue # to unsigned
Changes from v2:
added headers
Changes from v1:
Fix errors found by kbuild:
generalize the pattern a bit, to pick up
a couple of instances missed by the previous
version.
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-12-10 14:23:51 +00:00
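The extra argument identifies the queue that triggered the timeout. A hypothetical sketch of a handler that uses it, shown only to illustrate the new signature (the driver's actual bcm_sysport_tx_timeout() below ignores the argument and wakes all queues):

#include <linux/netdevice.h>

/* Hypothetical example: warn about and wake only the queue reported as stuck */
static void example_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, txqueue);

	netdev_warn(dev, "transmit timeout on queue %u!\n", txqueue);
	netif_tx_wake_queue(txq);
}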
|
|
|
static void bcm_sysport_tx_timeout(struct net_device *dev, unsigned int txqueue)
|
2014-04-25 01:08:57 +00:00
|
|
|
{
|
|
|
|
netdev_warn(dev, "transmit timeout!\n");
|
|
|
|
|
2016-05-03 14:33:13 +00:00
|
|
|
netif_trans_update(dev);
|
2014-04-25 01:08:57 +00:00
|
|
|
dev->stats.tx_errors++;
|
|
|
|
|
|
|
|
netif_tx_wake_all_queues(dev);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* phylib adjust link callback */
|
|
|
|
static void bcm_sysport_adj_link(struct net_device *dev)
|
|
|
|
{
|
|
|
|
struct bcm_sysport_priv *priv = netdev_priv(dev);
|
2016-06-19 18:39:08 +00:00
|
|
|
struct phy_device *phydev = dev->phydev;
|
2014-04-25 01:08:57 +00:00
|
|
|
unsigned int changed = 0;
|
|
|
|
u32 cmd_bits = 0, reg;
|
|
|
|
|
|
|
|
if (priv->old_link != phydev->link) {
|
|
|
|
changed = 1;
|
|
|
|
priv->old_link = phydev->link;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (priv->old_duplex != phydev->duplex) {
|
|
|
|
changed = 1;
|
|
|
|
priv->old_duplex = phydev->duplex;
|
|
|
|
}
|
|
|
|
|
2017-01-20 19:08:27 +00:00
|
|
|
if (priv->is_lite)
|
|
|
|
goto out;
|
|
|
|
|
2014-04-25 01:08:57 +00:00
|
|
|
switch (phydev->speed) {
|
|
|
|
case SPEED_2500:
|
|
|
|
cmd_bits = CMD_SPEED_2500;
|
|
|
|
break;
|
|
|
|
case SPEED_1000:
|
|
|
|
cmd_bits = CMD_SPEED_1000;
|
|
|
|
break;
|
|
|
|
case SPEED_100:
|
|
|
|
cmd_bits = CMD_SPEED_100;
|
|
|
|
break;
|
|
|
|
case SPEED_10:
|
|
|
|
cmd_bits = CMD_SPEED_10;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
cmd_bits <<= CMD_SPEED_SHIFT;
|
|
|
|
|
|
|
|
if (phydev->duplex == DUPLEX_HALF)
|
|
|
|
cmd_bits |= CMD_HD_EN;
|
|
|
|
|
|
|
|
if (priv->old_pause != phydev->pause) {
|
|
|
|
changed = 1;
|
|
|
|
priv->old_pause = phydev->pause;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!phydev->pause)
|
|
|
|
cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE;
|
|
|
|
|
2014-09-02 18:17:07 +00:00
|
|
|
if (!changed)
|
|
|
|
return;
|
|
|
|
|
|
|
|
if (phydev->link) {
|
2014-05-15 02:32:13 +00:00
|
|
|
reg = umac_readl(priv, UMAC_CMD);
|
|
|
|
reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
|
2014-04-25 01:08:57 +00:00
|
|
|
CMD_HD_EN | CMD_RX_PAUSE_IGNORE |
|
|
|
|
CMD_TX_PAUSE_IGNORE);
|
2014-05-15 02:32:13 +00:00
|
|
|
reg |= cmd_bits;
|
|
|
|
umac_writel(priv, reg, UMAC_CMD);
|
|
|
|
}
|
2017-01-20 19:08:27 +00:00
|
|
|
out:
|
|
|
|
if (changed)
|
|
|
|
phy_print_status(phydev);
|
2014-04-25 01:08:57 +00:00
|
|
|
}
|
|
|
|
|
2018-03-28 22:15:37 +00:00
|
|
|
static void bcm_sysport_init_dim(struct bcm_sysport_priv *priv,
|
2018-03-23 01:19:32 +00:00
|
|
|
void (*cb)(struct work_struct *work))
|
|
|
|
{
|
2018-03-28 22:15:37 +00:00
|
|
|
struct bcm_sysport_net_dim *dim = &priv->dim;
|
|
|
|
|
2018-03-23 01:19:32 +00:00
|
|
|
INIT_WORK(&dim->dim.work, cb);
|
2018-11-05 10:07:52 +00:00
|
|
|
dim->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
|
2018-03-23 01:19:32 +00:00
|
|
|
dim->event_ctr = 0;
|
|
|
|
dim->packets = 0;
|
|
|
|
dim->bytes = 0;
|
|
|
|
}
|
|
|
|
|
2018-03-28 22:15:37 +00:00
|
|
|
static void bcm_sysport_init_rx_coalesce(struct bcm_sysport_priv *priv)
|
|
|
|
{
|
|
|
|
struct bcm_sysport_net_dim *dim = &priv->dim;
|
2019-01-31 14:44:48 +00:00
|
|
|
struct dim_cq_moder moder;
|
2018-03-28 22:15:37 +00:00
|
|
|
u32 usecs, pkts;
|
|
|
|
|
|
|
|
usecs = priv->rx_coalesce_usecs;
|
|
|
|
pkts = priv->rx_max_coalesced_frames;
|
|
|
|
|
|
|
|
/* If DIM was enabled, re-apply default parameters */
|
|
|
|
if (dim->use_dim) {
|
2018-04-24 10:36:01 +00:00
|
|
|
moder = net_dim_get_def_rx_moderation(dim->dim.mode);
|
2018-03-28 22:15:37 +00:00
|
|
|
usecs = moder.usec;
|
|
|
|
pkts = moder.pkts;
|
|
|
|
}
|
|
|
|
|
|
|
|
bcm_sysport_set_rx_coalesce(priv, usecs, pkts);
|
|
|
|
}
|
|
|
|
|
2014-04-25 01:08:57 +00:00
|
|
|
static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
|
|
|
|
unsigned int index)
|
|
|
|
{
|
|
|
|
struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
|
|
|
|
size_t size;
|
|
|
|
u32 reg;
|
|
|
|
|
|
|
|
/* Simple descriptor partitioning for now */
|
|
|
|
size = 256;
|
|
|
|
|
2014-07-10 00:36:47 +00:00
|
|
|
ring->cbs = kcalloc(size, sizeof(struct bcm_sysport_cb), GFP_KERNEL);
|
2014-04-25 01:08:57 +00:00
|
|
|
if (!ring->cbs) {
|
|
|
|
netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Initialize SW view of the ring */
|
|
|
|
spin_lock_init(&ring->lock);
|
|
|
|
ring->priv = priv;
|
2015-11-18 14:31:00 +00:00
|
|
|
netif_tx_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64);
|
2014-04-25 01:08:57 +00:00
|
|
|
ring->index = index;
|
|
|
|
ring->size = size;
|
net: systemport: Rewrite __bcm_sysport_tx_reclaim()
There is no need for complex checking between the last consumed index
and current consumed index, a simple subtraction will do.
This also eliminates the possibility of a permanent transmit queue stall
under the following conditions:
- one CPU bursts ring->size worth of traffic (up to 256 buffers), to the
point where we run out of free descriptors, so we stop the transmit
queue at the end of bcm_sysport_xmit()
- because of our locking, we have the transmit process disable
interrupts which means we can be blocking the TX reclamation process
- when TX reclamation finally runs, we will be computing the difference
between ring->c_index (last consumed index by SW) and what the HW
reports through its register
- this register is masked with (ring->size - 1) = 0xff, which will lead
to stripping the upper bits of the index (register is 16-bits wide)
- we will be computing last_tx_cn as 0, which means there is no work to
be done, and we never wake up the transmit queue, leaving it
permanently disabled
A practical example is e.g: ring->c_index aka last_c_index = 12, we
pushed 256 entries, HW consumer index = 268, we mask it with 0xff = 12,
so last_tx_cn == 0, nothing happens.
Fixes: 80105befdb4b ("net: systemport: add Broadcom SYSTEMPORT Ethernet MAC driver")
Signed-off-by: Florian Fainelli <f.fainelli@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2018-03-13 21:45:07 +00:00
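A standalone sketch of the index arithmetic described above, using the numbers from the commit message (the mask name below is illustrative, not necessarily the driver's identifier): with a 16-bit hardware consumer index, an unsigned subtraction masked to the full index width is wrap-around safe, whereas masking the raw register value with ring->size - 1 first strips the upper bits and can make the difference appear to be zero.

#include <stdio.h>

#define HW_CONS_INDEX_MASK 0xffff	/* full width of the 16-bit HW consumer index */

/* Number of completed buffers since the last reclaim pass */
static unsigned int txbds_ready(unsigned int hw_c_index, unsigned int last_c_index)
{
	/* unsigned difference masked to the index width handles wrap-around */
	return (hw_c_index - last_c_index) & HW_CONS_INDEX_MASK;
}

int main(void)
{
	/* Example from the commit message: last index 12, 256 buffers pushed,
	 * HW reports 268. Masking 268 with ring->size - 1 (0xff) yields 12,
	 * i.e. an apparent difference of 0 and a stalled queue; the
	 * full-width difference yields the expected 256.
	 */
	printf("%u\n", txbds_ready(268, 12));	/* prints 256 */
	return 0;
}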
|
|
|
ring->clean_index = 0;
|
2014-04-25 01:08:57 +00:00
|
|
|
ring->alloc_size = ring->size;
|
|
|
|
ring->desc_count = ring->size;
|
|
|
|
ring->curr_desc = 0;
|
|
|
|
|
|
|
|
/* Initialize HW ring */
|
|
|
|
tdma_writel(priv, RING_EN, TDMA_DESC_RING_HEAD_TAIL_PTR(index));
|
|
|
|
tdma_writel(priv, 0, TDMA_DESC_RING_COUNT(index));
|
|
|
|
tdma_writel(priv, 1, TDMA_DESC_RING_INTR_CONTROL(index));
|
|
|
|
tdma_writel(priv, 0, TDMA_DESC_RING_PROD_CONS_INDEX(index));
|
net: systemport: Establish lower/upper queue mapping
Establish a queue mapping between the DSA slave network device queues
created that correspond to switch port queues, and the transmit queue
that SYSTEMPORT manages.
We need to configure the SYSTEMPORT transmit queue with the switch port number
and switch port queue number in order for the switch and SYSTEMPORT hardware to
utilize the out-of-band congestion notification. This hardware mechanism works
by looking at the switch port egress queue and determining whether there are
enough buffers for this queue, with that class of service, for a successful
transmission; if not, it backpressures the SYSTEMPORT queue that is being used.
For this to work, we implement a notifier which looks at the
DSA_PORT_REGISTER event. When DSA network devices are registered, the
framework calls the DSA notifiers when that happens, extracts the number
of queues for these devices and their associated port number, remembers
that in the driver private structure and linearly maps those queues to
TX rings/queues that we manage.
This scheme works because DSA slave network devices always transmit
through SYSTEMPORT so when DSA slave network devices are
destroyed/brought down, the corresponding SYSTEMPORT queues are no
longer used. Also, by design of the DSA framework, the master network
device (SYSTEMPORT) is registered first.
For faster lookups we use an array of up to DSA_MAX_PORTS * number of
queues per port, and then map pointers to bcm_sysport_tx_ring such that
our ndo_select_queue() implementation can just index into that array to
locate the corresponding ring index.
Signed-off-by: Florian Fainelli <f.fainelli@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-10-11 17:57:50 +00:00
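A minimal sketch of the linear mapping the commit describes, using hypothetical names (the real driver has its own structures and constants): each (switch port, switch queue) pair maps to one SYSTEMPORT TX ring pointer in a flat array, so an ndo_select_queue()-style lookup is a single index computation.

/* Illustrative sketch only; all names here are hypothetical. */
#define MAX_PORTS	8	/* e.g. DSA_MAX_PORTS */
#define QUEUES_PER_PORT	4	/* queues exposed by each switch port */

struct tx_ring;			/* stands in for struct bcm_sysport_tx_ring */

struct ring_map {
	struct tx_ring *map[MAX_PORTS * QUEUES_PER_PORT];
};

/* Remember which ring backs a (port, queue) pair when the DSA device registers */
static void map_port_queue(struct ring_map *rm, unsigned int port,
			   unsigned int queue, struct tx_ring *ring)
{
	rm->map[port * QUEUES_PER_PORT + queue] = ring;
}

/* ndo_select_queue()-style lookup: one array index, no search */
static struct tx_ring *lookup_ring(struct ring_map *rm, unsigned int port,
				   unsigned int queue)
{
	return rm->map[port * QUEUES_PER_PORT + queue];
}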
|
|
|
|
|
|
|
/* Configure QID and port mapping */
|
|
|
|
reg = tdma_readl(priv, TDMA_DESC_RING_MAPPING(index));
|
|
|
|
reg &= ~(RING_QID_MASK | RING_PORT_ID_MASK << RING_PORT_ID_SHIFT);
|
2017-11-01 18:29:47 +00:00
|
|
|
if (ring->inspect) {
|
|
|
|
reg |= ring->switch_queue & RING_QID_MASK;
|
|
|
|
reg |= ring->switch_port << RING_PORT_ID_SHIFT;
|
|
|
|
} else {
|
|
|
|
reg |= RING_IGNORE_STATUS;
|
|
|
|
}
|
2017-10-11 17:57:50 +00:00
|
|
|
tdma_writel(priv, reg, TDMA_DESC_RING_MAPPING(index));
|
2014-04-25 01:08:57 +00:00
|
|
|
tdma_writel(priv, 0, TDMA_DESC_RING_PCP_DEI_VID(index));
|
|
|
|
|
2017-10-11 17:57:52 +00:00
|
|
|
/* Enable ACB algorithm 2 */
|
|
|
|
reg = tdma_readl(priv, TDMA_CONTROL);
|
|
|
|
reg |= tdma_control_bit(priv, ACB_ALGO);
|
|
|
|
tdma_writel(priv, reg, TDMA_CONTROL);
|
|
|
|
|
2017-09-02 00:32:34 +00:00
|
|
|
/* Do not use tdma_control_bit() here because TSB_SWAP1 collides
|
|
|
|
* with the original definition of ACB_ALGO
|
|
|
|
*/
|
|
|
|
reg = tdma_readl(priv, TDMA_CONTROL);
|
|
|
|
if (priv->is_lite)
|
|
|
|
reg &= ~BIT(TSB_SWAP1);
|
|
|
|
/* Set a correct TSB format based on host endian */
|
|
|
|
if (!IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
|
|
|
|
reg |= tdma_control_bit(priv, TSB_SWAP0);
|
|
|
|
else
|
|
|
|
reg &= ~tdma_control_bit(priv, TSB_SWAP0);
|
|
|
|
tdma_writel(priv, reg, TDMA_CONTROL);
|
|
|
|
|
2014-04-25 01:08:57 +00:00
|
|
|
/* Program the number of descriptors as MAX_THRESHOLD and half of
|
|
|
|
* its size for the hysteresis trigger
|
|
|
|
*/
|
|
|
|
tdma_writel(priv, ring->size |
|
|
|
|
1 << RING_HYST_THRESH_SHIFT,
|
|
|
|
TDMA_DESC_RING_MAX_HYST(index));
|
|
|
|
|
|
|
|
/* Enable the ring queue in the arbiter */
|
|
|
|
reg = tdma_readl(priv, TDMA_TIER1_ARB_0_QUEUE_EN);
|
|
|
|
reg |= (1 << index);
|
|
|
|
tdma_writel(priv, reg, TDMA_TIER1_ARB_0_QUEUE_EN);
|
|
|
|
|
|
|
|
napi_enable(&ring->napi);
|
|
|
|
|
|
|
|
netif_dbg(priv, hw, priv->netdev,
|
2019-04-22 16:46:44 +00:00
|
|
|
"TDMA cfg, size=%d, switch q=%d,port=%d\n",
|
|
|
|
ring->size, ring->switch_queue,
|
2017-10-11 17:57:50 +00:00
|
|
|
ring->switch_port);
|
2014-04-25 01:08:57 +00:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
|
2014-07-10 00:36:46 +00:00
|
|
|
unsigned int index)
|
2014-04-25 01:08:57 +00:00
|
|
|
{
|
|
|
|
struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
|
|
|
|
u32 reg;
|
|
|
|
|
|
|
|
/* Caller should stop the TDMA engine */
|
|
|
|
reg = tdma_readl(priv, TDMA_STATUS);
|
|
|
|
if (!(reg & TDMA_DISABLED))
|
|
|
|
netdev_warn(priv->netdev, "TDMA not stopped!\n");
|
|
|
|
|
2014-10-31 22:51:35 +00:00
|
|
|
/* ring->cbs is the last part in bcm_sysport_init_tx_ring which could
|
|
|
|
* fail, so by checking this pointer we know whether the TX ring was
|
|
|
|
* fully initialized or not.
|
|
|
|
*/
|
|
|
|
if (!ring->cbs)
|
|
|
|
return;
|
|
|
|
|
2014-04-25 01:08:57 +00:00
|
|
|
napi_disable(&ring->napi);
|
|
|
|
netif_napi_del(&ring->napi);
|
|
|
|
|
2017-01-12 20:09:09 +00:00
|
|
|
bcm_sysport_tx_clean(priv, ring);
|
2014-04-25 01:08:57 +00:00
|
|
|
|
|
|
|
kfree(ring->cbs);
|
|
|
|
ring->cbs = NULL;
|
|
|
|
ring->size = 0;
|
|
|
|
ring->alloc_size = 0;
|
|
|
|
|
|
|
|
netif_dbg(priv, hw, priv->netdev, "TDMA fini done\n");
|
|
|
|
}
|
|
|
|
|
|
|
|
/* RDMA helper */
|
|
|
|
static inline int rdma_enable_set(struct bcm_sysport_priv *priv,
|
2014-07-10 00:36:46 +00:00
|
|
|
unsigned int enable)
|
2014-04-25 01:08:57 +00:00
|
|
|
{
|
|
|
|
unsigned int timeout = 1000;
|
|
|
|
u32 reg;
|
|
|
|
|
|
|
|
reg = rdma_readl(priv, RDMA_CONTROL);
|
|
|
|
if (enable)
|
|
|
|
reg |= RDMA_EN;
|
|
|
|
else
|
|
|
|
reg &= ~RDMA_EN;
|
|
|
|
rdma_writel(priv, reg, RDMA_CONTROL);
|
|
|
|
|
|
|
|
/* Poll for RDMA disabling completion */
|
|
|
|
do {
|
|
|
|
reg = rdma_readl(priv, RDMA_STATUS);
|
|
|
|
if (!!(reg & RDMA_DISABLED) == !enable)
|
|
|
|
return 0;
|
|
|
|
usleep_range(1000, 2000);
|
|
|
|
} while (timeout-- > 0);
|
|
|
|
|
|
|
|
netdev_err(priv->netdev, "timeout waiting for RDMA to finish\n");
|
|
|
|
|
|
|
|
return -ETIMEDOUT;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* TDMA helper */
|
|
|
|
static inline int tdma_enable_set(struct bcm_sysport_priv *priv,
|
2014-07-10 00:36:46 +00:00
|
|
|
unsigned int enable)
|
2014-04-25 01:08:57 +00:00
|
|
|
{
|
|
|
|
unsigned int timeout = 1000;
|
|
|
|
u32 reg;
|
|
|
|
|
|
|
|
reg = tdma_readl(priv, TDMA_CONTROL);
|
|
|
|
if (enable)
|
2017-01-20 19:08:27 +00:00
|
|
|
reg |= tdma_control_bit(priv, TDMA_EN);
|
2014-04-25 01:08:57 +00:00
|
|
|
else
|
2017-01-20 19:08:27 +00:00
|
|
|
reg &= ~tdma_control_bit(priv, TDMA_EN);
|
2014-04-25 01:08:57 +00:00
|
|
|
tdma_writel(priv, reg, TDMA_CONTROL);
|
|
|
|
|
|
|
|
/* Poll for TDMA disabling completion */
|
|
|
|
do {
|
|
|
|
reg = tdma_readl(priv, TDMA_STATUS);
|
|
|
|
if (!!(reg & TDMA_DISABLED) == !enable)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
usleep_range(1000, 2000);
|
|
|
|
} while (timeout-- > 0);
|
|
|
|
|
|
|
|
netdev_err(priv->netdev, "timeout waiting for TDMA to finish\n");
|
|
|
|
|
|
|
|
return -ETIMEDOUT;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)
|
|
|
|
{
|
2015-05-28 22:24:42 +00:00
|
|
|
struct bcm_sysport_cb *cb;
|
2014-04-25 01:08:57 +00:00
|
|
|
u32 reg;
|
|
|
|
int ret;
|
2015-05-28 22:24:42 +00:00
|
|
|
int i;
|
2014-04-25 01:08:57 +00:00
|
|
|
|
|
|
|
/* Initialize SW view of the RX ring */
|
2017-01-20 19:08:27 +00:00
|
|
|
priv->num_rx_bds = priv->num_rx_desc_words / WORDS_PER_DESC;
|
2014-04-25 01:08:57 +00:00
|
|
|
priv->rx_bds = priv->base + SYS_PORT_RDMA_OFFSET;
|
|
|
|
priv->rx_c_index = 0;
|
|
|
|
priv->rx_read_ptr = 0;
|
2014-07-10 00:36:47 +00:00
|
|
|
priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct bcm_sysport_cb),
|
|
|
|
GFP_KERNEL);
|
2014-04-25 01:08:57 +00:00
|
|
|
if (!priv->rx_cbs) {
|
|
|
|
netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
|
2015-05-28 22:24:42 +00:00
|
|
|
for (i = 0; i < priv->num_rx_bds; i++) {
|
|
|
|
cb = priv->rx_cbs + i;
|
|
|
|
cb->bd_addr = priv->rx_bds + i * DESC_SIZE;
|
|
|
|
}
|
|
|
|
|
2014-04-25 01:08:57 +00:00
|
|
|
ret = bcm_sysport_alloc_rx_bufs(priv);
|
|
|
|
if (ret) {
|
|
|
|
netif_err(priv, hw, priv->netdev, "SKB allocation failed\n");
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Initialize HW, ensure RDMA is disabled */
|
|
|
|
reg = rdma_readl(priv, RDMA_STATUS);
|
|
|
|
if (!(reg & RDMA_DISABLED))
|
|
|
|
rdma_enable_set(priv, 0);
|
|
|
|
|
|
|
|
rdma_writel(priv, 0, RDMA_WRITE_PTR_LO);
|
|
|
|
rdma_writel(priv, 0, RDMA_WRITE_PTR_HI);
|
|
|
|
rdma_writel(priv, 0, RDMA_PROD_INDEX);
|
|
|
|
rdma_writel(priv, 0, RDMA_CONS_INDEX);
|
|
|
|
rdma_writel(priv, priv->num_rx_bds << RDMA_RING_SIZE_SHIFT |
|
|
|
|
RX_BUF_LENGTH, RDMA_RING_BUF_SIZE);
|
|
|
|
/* Operate the queue in ring mode */
|
|
|
|
rdma_writel(priv, 0, RDMA_START_ADDR_HI);
|
|
|
|
rdma_writel(priv, 0, RDMA_START_ADDR_LO);
|
|
|
|
rdma_writel(priv, 0, RDMA_END_ADDR_HI);
|
2017-01-20 19:08:27 +00:00
|
|
|
rdma_writel(priv, priv->num_rx_desc_words - 1, RDMA_END_ADDR_LO);
|
2014-04-25 01:08:57 +00:00
|
|
|
|
|
|
|
netif_dbg(priv, hw, priv->netdev,
|
2014-07-10 00:36:46 +00:00
|
|
|
"RDMA cfg, num_rx_bds=%d, rx_bds=%p\n",
|
|
|
|
priv->num_rx_bds, priv->rx_bds);
|
2014-04-25 01:08:57 +00:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void bcm_sysport_fini_rx_ring(struct bcm_sysport_priv *priv)
|
|
|
|
{
|
|
|
|
struct bcm_sysport_cb *cb;
|
|
|
|
unsigned int i;
|
|
|
|
u32 reg;
|
|
|
|
|
|
|
|
/* Caller should ensure RDMA is disabled */
|
|
|
|
reg = rdma_readl(priv, RDMA_STATUS);
|
|
|
|
if (!(reg & RDMA_DISABLED))
|
|
|
|
netdev_warn(priv->netdev, "RDMA not stopped!\n");
|
|
|
|
|
|
|
|
for (i = 0; i < priv->num_rx_bds; i++) {
|
|
|
|
cb = &priv->rx_cbs[i];
|
|
|
|
if (dma_unmap_addr(cb, dma_addr))
|
|
|
|
dma_unmap_single(&priv->pdev->dev,
|
2014-07-10 00:36:46 +00:00
|
|
|
dma_unmap_addr(cb, dma_addr),
|
|
|
|
RX_BUF_LENGTH, DMA_FROM_DEVICE);
|
2014-04-25 01:08:57 +00:00
|
|
|
bcm_sysport_free_cb(cb);
|
|
|
|
}
|
|
|
|
|
|
|
|
kfree(priv->rx_cbs);
|
|
|
|
priv->rx_cbs = NULL;
|
|
|
|
|
|
|
|
netif_dbg(priv, hw, priv->netdev, "RDMA fini done\n");
|
|
|
|
}
|
|
|
|
|
|
|
|
static void bcm_sysport_set_rx_mode(struct net_device *dev)
|
|
|
|
{
|
|
|
|
struct bcm_sysport_priv *priv = netdev_priv(dev);
|
|
|
|
u32 reg;
|
|
|
|
|
2017-01-20 19:08:27 +00:00
|
|
|
if (priv->is_lite)
|
|
|
|
return;
|
|
|
|
|
2014-04-25 01:08:57 +00:00
|
|
|
reg = umac_readl(priv, UMAC_CMD);
|
|
|
|
if (dev->flags & IFF_PROMISC)
|
|
|
|
reg |= CMD_PROMISC;
|
|
|
|
else
|
|
|
|
reg &= ~CMD_PROMISC;
|
|
|
|
umac_writel(priv, reg, UMAC_CMD);
|
|
|
|
|
|
|
|
/* No support for ALLMULTI */
|
|
|
|
if (dev->flags & IFF_ALLMULTI)
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
static inline void umac_enable_set(struct bcm_sysport_priv *priv,
|
2014-07-10 00:36:46 +00:00
|
|
|
u32 mask, unsigned int enable)
|
2014-04-25 01:08:57 +00:00
|
|
|
{
|
|
|
|
u32 reg;
|
|
|
|
|
2017-01-20 19:08:27 +00:00
|
|
|
if (!priv->is_lite) {
|
|
|
|
reg = umac_readl(priv, UMAC_CMD);
|
|
|
|
if (enable)
|
|
|
|
reg |= mask;
|
|
|
|
else
|
|
|
|
reg &= ~mask;
|
|
|
|
umac_writel(priv, reg, UMAC_CMD);
|
|
|
|
} else {
|
|
|
|
reg = gib_readl(priv, GIB_CONTROL);
|
|
|
|
if (enable)
|
|
|
|
reg |= mask;
|
|
|
|
else
|
|
|
|
reg &= ~mask;
|
|
|
|
gib_writel(priv, reg, GIB_CONTROL);
|
|
|
|
}
|
2014-05-15 21:33:53 +00:00
|
|
|
|
|
|
|
/* UniMAC stops on a packet boundary, wait for a full-sized packet
|
|
|
|
* to be processed (1 msec).
|
|
|
|
*/
|
|
|
|
if (enable == 0)
|
|
|
|
usleep_range(1000, 2000);
|
2014-04-25 01:08:57 +00:00
|
|
|
}
|
|
|
|
|
2014-06-26 17:06:45 +00:00
|
|
|
static inline void umac_reset(struct bcm_sysport_priv *priv)
|
2014-04-25 01:08:57 +00:00
|
|
|
{
|
|
|
|
u32 reg;
|
|
|
|
|
2017-01-20 19:08:27 +00:00
|
|
|
if (priv->is_lite)
|
|
|
|
return;
|
|
|
|
|
2014-06-26 17:06:45 +00:00
|
|
|
reg = umac_readl(priv, UMAC_CMD);
|
|
|
|
reg |= CMD_SW_RESET;
|
|
|
|
umac_writel(priv, reg, UMAC_CMD);
|
|
|
|
udelay(10);
|
|
|
|
reg = umac_readl(priv, UMAC_CMD);
|
|
|
|
reg &= ~CMD_SW_RESET;
|
|
|
|
umac_writel(priv, reg, UMAC_CMD);
|
2014-04-25 01:08:57 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void umac_set_hw_addr(struct bcm_sysport_priv *priv,
|
2014-07-10 00:36:46 +00:00
|
|
|
unsigned char *addr)
|
2014-04-25 01:08:57 +00:00
|
|
|
{
|
2017-01-20 19:08:27 +00:00
|
|
|
u32 mac0 = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) |
|
|
|
|
addr[3];
|
|
|
|
u32 mac1 = (addr[4] << 8) | addr[5];
|
|
|
|
|
|
|
|
if (!priv->is_lite) {
|
|
|
|
umac_writel(priv, mac0, UMAC_MAC0);
|
|
|
|
umac_writel(priv, mac1, UMAC_MAC1);
|
|
|
|
} else {
|
|
|
|
gib_writel(priv, mac0, GIB_MAC0);
|
|
|
|
gib_writel(priv, mac1, GIB_MAC1);
|
|
|
|
}
|
2014-04-25 01:08:57 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static void topctrl_flush(struct bcm_sysport_priv *priv)
|
|
|
|
{
|
|
|
|
topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);
|
|
|
|
topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);
|
|
|
|
mdelay(1);
|
|
|
|
topctrl_writel(priv, 0, RX_FLUSH_CNTL);
|
|
|
|
topctrl_writel(priv, 0, TX_FLUSH_CNTL);
|
|
|
|
}
|
|
|
|
|
2014-12-08 23:59:18 +00:00
|
|
|
static int bcm_sysport_change_mac(struct net_device *dev, void *p)
|
|
|
|
{
|
|
|
|
struct bcm_sysport_priv *priv = netdev_priv(dev);
|
|
|
|
struct sockaddr *addr = p;
|
|
|
|
|
|
|
|
if (!is_valid_ether_addr(addr->sa_data))
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
|
|
|
|
|
|
|
|
/* interface is disabled, changes to MAC will be reflected on next
|
|
|
|
* open call
|
|
|
|
*/
|
|
|
|
if (!netif_running(dev))
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
umac_set_hw_addr(priv, dev->dev_addr);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2017-08-03 23:07:45 +00:00
|
|
|
static void bcm_sysport_get_stats64(struct net_device *dev,
|
|
|
|
struct rtnl_link_stats64 *stats)
|
2017-03-23 17:36:46 +00:00
|
|
|
{
|
|
|
|
struct bcm_sysport_priv *priv = netdev_priv(dev);
|
2017-08-03 23:07:45 +00:00
|
|
|
struct bcm_sysport_stats64 *stats64 = &priv->stats64;
|
|
|
|
unsigned int start;
|
2017-03-23 17:36:46 +00:00
|
|
|
|
2017-08-03 23:07:45 +00:00
|
|
|
netdev_stats_to_stats64(stats, &dev->stats);
|
|
|
|
|
2017-09-18 23:31:30 +00:00
|
|
|
bcm_sysport_update_tx_stats(priv, &stats->tx_bytes,
|
|
|
|
&stats->tx_packets);
|
2017-08-03 23:07:45 +00:00
|
|
|
|
|
|
|
do {
|
|
|
|
start = u64_stats_fetch_begin_irq(&priv->syncp);
|
|
|
|
stats->rx_packets = stats64->rx_packets;
|
|
|
|
stats->rx_bytes = stats64->rx_bytes;
|
|
|
|
} while (u64_stats_fetch_retry_irq(&priv->syncp, start));
|
2017-03-23 17:36:46 +00:00
|
|
|
}
|
|
|
|
|
2014-07-02 04:08:37 +00:00
|
|
|
static void bcm_sysport_netif_start(struct net_device *dev)
|
|
|
|
{
|
|
|
|
struct bcm_sysport_priv *priv = netdev_priv(dev);
|
|
|
|
|
|
|
|
/* Enable NAPI */
|
2018-03-28 22:15:37 +00:00
|
|
|
bcm_sysport_init_dim(priv, bcm_sysport_dim_work);
|
|
|
|
bcm_sysport_init_rx_coalesce(priv);
|
2014-07-02 04:08:37 +00:00
|
|
|
napi_enable(&priv->napi);
|
|
|
|
|
2014-10-28 18:12:00 +00:00
|
|
|
/* Enable RX interrupt and TX ring full interrupt */
|
|
|
|
intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
|
|
|
|
|
2016-06-19 18:39:08 +00:00
|
|
|
phy_start(dev->phydev);
|
2014-07-02 04:08:37 +00:00
|
|
|
|
2017-01-20 19:08:27 +00:00
|
|
|
/* Enable TX interrupts for the TXQs */
|
|
|
|
if (!priv->is_lite)
|
|
|
|
intrl2_1_mask_clear(priv, 0xffffffff);
|
|
|
|
else
|
|
|
|
intrl2_0_mask_clear(priv, INTRL2_0_TDMA_MBDONE_MASK);
|
2014-07-02 04:08:37 +00:00
|
|
|
}
|
|
|
|
|
2014-07-02 04:08:38 +00:00
|
|
|
static void rbuf_init(struct bcm_sysport_priv *priv)
{
        u32 reg;

        reg = rbuf_readl(priv, RBUF_CONTROL);
        reg |= RBUF_4B_ALGN | RBUF_RSB_EN;
        /* Set a correct RSB format on SYSTEMPORT Lite */
        if (priv->is_lite)
                reg &= ~RBUF_RSB_SWAP1;

        /* Set a correct RSB format based on host endian */
        if (!IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
                reg |= RBUF_RSB_SWAP0;
        else
                reg &= ~RBUF_RSB_SWAP0;
        rbuf_writel(priv, reg, RBUF_CONTROL);
}

static inline void bcm_sysport_mask_all_intrs(struct bcm_sysport_priv *priv)
{
        intrl2_0_mask_set(priv, 0xffffffff);
        intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
        if (!priv->is_lite) {
                intrl2_1_mask_set(priv, 0xffffffff);
                intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
        }
}

static inline void gib_set_pad_extension(struct bcm_sysport_priv *priv)
{
        u32 reg;

        reg = gib_readl(priv, GIB_CONTROL);
        /* Include Broadcom tag in pad extension and fix up IPG_LENGTH */
        if (netdev_uses_dsa(priv->netdev)) {
                reg &= ~(GIB_PAD_EXTENSION_MASK << GIB_PAD_EXTENSION_SHIFT);
                reg |= ENET_BRCM_TAG_LEN << GIB_PAD_EXTENSION_SHIFT;
        }
        reg &= ~(GIB_IPG_LEN_MASK << GIB_IPG_LEN_SHIFT);
        reg |= 12 << GIB_IPG_LEN_SHIFT;
        gib_writel(priv, reg, GIB_CONTROL);
}

static int bcm_sysport_open(struct net_device *dev)
{
        struct bcm_sysport_priv *priv = netdev_priv(dev);
        struct phy_device *phydev;
        unsigned int i;
        int ret;

        /* Reset UniMAC */
        umac_reset(priv);

        /* Flush TX and RX FIFOs at TOPCTRL level */
        topctrl_flush(priv);

        /* Disable the UniMAC RX/TX */
        umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 0);

        /* Enable RBUF 2bytes alignment and Receive Status Block */
        rbuf_init(priv);

        /* Set maximum frame length */
        if (!priv->is_lite)
                umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
        else
                gib_set_pad_extension(priv);

        /* Apply features again in case we changed them while interface was
         * down
         */
        bcm_sysport_set_features(dev, dev->features);

        /* Set MAC address */
        umac_set_hw_addr(priv, dev->dev_addr);

        phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link,
                                0, priv->phy_interface);
        if (!phydev) {
                netdev_err(dev, "could not attach to PHY\n");
                return -ENODEV;
        }

        /* Reset house keeping link status */
        priv->old_duplex = -1;
        priv->old_link = -1;
        priv->old_pause = -1;

        /* mask all interrupts and request them */
        bcm_sysport_mask_all_intrs(priv);

        ret = request_irq(priv->irq0, bcm_sysport_rx_isr, 0, dev->name, dev);
        if (ret) {
                netdev_err(dev, "failed to request RX interrupt\n");
                goto out_phy_disconnect;
        }

        if (!priv->is_lite) {
                ret = request_irq(priv->irq1, bcm_sysport_tx_isr, 0,
                                  dev->name, dev);
                if (ret) {
                        netdev_err(dev, "failed to request TX interrupt\n");
                        goto out_free_irq0;
                }
        }

        /* Initialize both hardware and software ring */
        for (i = 0; i < dev->num_tx_queues; i++) {
                ret = bcm_sysport_init_tx_ring(priv, i);
                if (ret) {
                        netdev_err(dev, "failed to initialize TX ring %d\n",
                                   i);
                        goto out_free_tx_ring;
                }
        }

        /* Initialize linked-list */
        tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);

        /* Initialize RX ring */
        ret = bcm_sysport_init_rx_ring(priv);
        if (ret) {
                netdev_err(dev, "failed to initialize RX ring\n");
                goto out_free_rx_ring;
        }

        /* Turn on RDMA */
        ret = rdma_enable_set(priv, 1);
        if (ret)
                goto out_free_rx_ring;

        /* Turn on TDMA */
        ret = tdma_enable_set(priv, 1);
        if (ret)
                goto out_clear_rx_int;

        /* Turn on UniMAC TX/RX */
        umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 1);

        bcm_sysport_netif_start(dev);

        netif_tx_start_all_queues(dev);

        return 0;

out_clear_rx_int:
        intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
out_free_rx_ring:
        bcm_sysport_fini_rx_ring(priv);
out_free_tx_ring:
        for (i = 0; i < dev->num_tx_queues; i++)
                bcm_sysport_fini_tx_ring(priv, i);
        if (!priv->is_lite)
                free_irq(priv->irq1, dev);
out_free_irq0:
        free_irq(priv->irq0, dev);
out_phy_disconnect:
        phy_disconnect(phydev);
        return ret;
}

static void bcm_sysport_netif_stop(struct net_device *dev)
{
        struct bcm_sysport_priv *priv = netdev_priv(dev);

        /* stop all software from updating hardware */
        netif_tx_disable(dev);
        napi_disable(&priv->napi);
        cancel_work_sync(&priv->dim.dim.work);
        phy_stop(dev->phydev);

        /* mask all interrupts */
        bcm_sysport_mask_all_intrs(priv);
}

static int bcm_sysport_stop(struct net_device *dev)
{
        struct bcm_sysport_priv *priv = netdev_priv(dev);
        unsigned int i;
        int ret;

        bcm_sysport_netif_stop(dev);

        /* Disable UniMAC RX */
        umac_enable_set(priv, CMD_RX_EN, 0);

        ret = tdma_enable_set(priv, 0);
        if (ret) {
                netdev_err(dev, "timeout disabling TDMA\n");
                return ret;
        }

        /* Wait for a maximum packet size to be drained */
        usleep_range(2000, 3000);

        ret = rdma_enable_set(priv, 0);
        if (ret) {
                netdev_err(dev, "timeout disabling RDMA\n");
                return ret;
        }

        /* Disable UniMAC TX */
        umac_enable_set(priv, CMD_TX_EN, 0);

        /* Free RX/TX rings SW structures */
        for (i = 0; i < dev->num_tx_queues; i++)
                bcm_sysport_fini_tx_ring(priv, i);
        bcm_sysport_fini_rx_ring(priv);

        free_irq(priv->irq0, dev);
        if (!priv->is_lite)
                free_irq(priv->irq1, dev);

        /* Disconnect from PHY */
        phy_disconnect(dev->phydev);

        return 0;
}

static int bcm_sysport_rule_find(struct bcm_sysport_priv *priv,
                                 u64 location)
{
        unsigned int index;
        u32 reg;

        for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) {
                reg = rxchk_readl(priv, RXCHK_BRCM_TAG(index));
                reg >>= RXCHK_BRCM_TAG_CID_SHIFT;
                reg &= RXCHK_BRCM_TAG_CID_MASK;
                if (reg == location)
                        return index;
        }

        return -EINVAL;
}

static int bcm_sysport_rule_get(struct bcm_sysport_priv *priv,
                                struct ethtool_rxnfc *nfc)
{
        int index;

        /* This is not a rule that we know about */
        index = bcm_sysport_rule_find(priv, nfc->fs.location);
        if (index < 0)
                return -EOPNOTSUPP;

        nfc->fs.ring_cookie = RX_CLS_FLOW_WAKE;

        return 0;
}

static int bcm_sysport_rule_set(struct bcm_sysport_priv *priv,
                                struct ethtool_rxnfc *nfc)
{
        unsigned int index;
        u32 reg;

        /* We cannot match locations greater than what the classification ID
         * permits (256 entries)
         */
        if (nfc->fs.location > RXCHK_BRCM_TAG_CID_MASK)
                return -E2BIG;

        /* We cannot support flows that are not destined for a wake-up */
        if (nfc->fs.ring_cookie != RX_CLS_FLOW_WAKE)
                return -EOPNOTSUPP;

        /* All filters are already in use, we cannot match more rules */
        if (bitmap_weight(priv->filters, RXCHK_BRCM_TAG_MAX) ==
            RXCHK_BRCM_TAG_MAX)
                return -ENOSPC;

        index = find_first_zero_bit(priv->filters, RXCHK_BRCM_TAG_MAX);
        if (index >= RXCHK_BRCM_TAG_MAX)
                return -ENOSPC;

        /* Location is the classification ID, and index is the position
         * within one of our 8 possible filters to be programmed
         */
        reg = rxchk_readl(priv, RXCHK_BRCM_TAG(index));
        reg &= ~(RXCHK_BRCM_TAG_CID_MASK << RXCHK_BRCM_TAG_CID_SHIFT);
        reg |= nfc->fs.location << RXCHK_BRCM_TAG_CID_SHIFT;
        rxchk_writel(priv, reg, RXCHK_BRCM_TAG(index));
        rxchk_writel(priv, 0xff00ffff, RXCHK_BRCM_TAG_MASK(index));

        priv->filters_loc[index] = nfc->fs.location;
        set_bit(index, priv->filters);

        return 0;
}
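/* Illustrative example (values chosen arbitrarily): inserting a rule with
 * nfc->fs.location = 0x10 while filter index 2 is the first free slot
 * programs the CID field of RXCHK_BRCM_TAG(2) to 0x10, sets the per-filter
 * mask register to 0xff00ffff, records 0x10 in priv->filters_loc[2] and
 * marks bit 2 in priv->filters. bcm_sysport_rule_find() later recovers the
 * filter index by comparing that CID field against the requested location.
 */
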
static int bcm_sysport_rule_del(struct bcm_sysport_priv *priv,
                                u64 location)
{
        int index;

        /* This is not a rule that we know about */
        index = bcm_sysport_rule_find(priv, location);
        if (index < 0)
                return -EOPNOTSUPP;

        /* No need to disable this filter if it was enabled, this will
         * be taken care of during suspend time by bcm_sysport_suspend_to_wol
         */
        clear_bit(index, priv->filters);
        priv->filters_loc[index] = 0;

        return 0;
}

static int bcm_sysport_get_rxnfc(struct net_device *dev,
                                 struct ethtool_rxnfc *nfc, u32 *rule_locs)
{
        struct bcm_sysport_priv *priv = netdev_priv(dev);
        int ret = -EOPNOTSUPP;

        switch (nfc->cmd) {
        case ETHTOOL_GRXCLSRULE:
                ret = bcm_sysport_rule_get(priv, nfc);
                break;
        default:
                break;
        }

        return ret;
}

static int bcm_sysport_set_rxnfc(struct net_device *dev,
                                 struct ethtool_rxnfc *nfc)
{
        struct bcm_sysport_priv *priv = netdev_priv(dev);
        int ret = -EOPNOTSUPP;

        switch (nfc->cmd) {
        case ETHTOOL_SRXCLSRLINS:
                ret = bcm_sysport_rule_set(priv, nfc);
                break;
        case ETHTOOL_SRXCLSRLDEL:
                ret = bcm_sysport_rule_del(priv, nfc->fs.location);
                break;
        default:
                break;
        }

        return ret;
}

static const struct ethtool_ops bcm_sysport_ethtool_ops = {
        .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
                                     ETHTOOL_COALESCE_MAX_FRAMES |
                                     ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
        .get_drvinfo = bcm_sysport_get_drvinfo,
        .get_msglevel = bcm_sysport_get_msglvl,
        .set_msglevel = bcm_sysport_set_msglvl,
        .get_link = ethtool_op_get_link,
        .get_strings = bcm_sysport_get_strings,
        .get_ethtool_stats = bcm_sysport_get_stats,
        .get_sset_count = bcm_sysport_get_sset_count,
        .get_wol = bcm_sysport_get_wol,
        .set_wol = bcm_sysport_set_wol,
        .get_coalesce = bcm_sysport_get_coalesce,
        .set_coalesce = bcm_sysport_set_coalesce,
        .get_link_ksettings = phy_ethtool_get_link_ksettings,
        .set_link_ksettings = phy_ethtool_set_link_ksettings,
        .get_rxnfc = bcm_sysport_get_rxnfc,
        .set_rxnfc = bcm_sysport_set_rxnfc,
};

/* Queue mapping between DSA slave network devices and SYSTEMPORT TX queues
 * (from the commit "net: systemport: Establish lower/upper queue mapping"):
 *
 * A queue mapping is established between the DSA slave network device
 * queues that correspond to switch port queues and the transmit queues
 * that SYSTEMPORT manages. The SYSTEMPORT transmit queue must be
 * configured with the switch port number and switch port queue number so
 * that the switch and SYSTEMPORT hardware can use out-of-band congestion
 * notification: the hardware looks at the switch port egress queue and
 * determines whether there are enough buffers for this queue, with that
 * class of service, for a successful transmission, and if not,
 * backpressures the SYSTEMPORT queue being used.
 *
 * For this to work, a notifier watches the DSA_PORT_REGISTER event. When
 * DSA network devices are registered, the framework calls the DSA
 * notifiers; the driver extracts the number of queues for these devices
 * and their associated port number, remembers that in the driver private
 * structure, and linearly maps those queues to the TX rings/queues it
 * manages.
 *
 * This scheme works because DSA slave network devices always transmit
 * through SYSTEMPORT, so when DSA slave network devices are destroyed or
 * brought down, the corresponding SYSTEMPORT queues are no longer used.
 * Also, by design of the DSA framework, the master network device
 * (SYSTEMPORT) is registered first.
 *
 * For faster lookups, an array of up to DSA_MAX_PORTS * number of queues
 * per port holds pointers to bcm_sysport_tx_ring, so the
 * ndo_select_queue() implementation can simply index into that array to
 * locate the corresponding ring.
 */

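/* Illustrative example (numbers chosen arbitrarily): with four TX queues
 * per switch port (priv->per_port_num_tx_queues == 4), a packet whose
 * Broadcom tag decodes to switch port 2, queue 1 is looked up at
 * priv->ring_map[1 + 2 * 4], i.e. entry 9, which was populated by
 * bcm_sysport_map_queues() below.
 */
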
static u16 bcm_sysport_select_queue(struct net_device *dev, struct sk_buff *skb,
                                    struct net_device *sb_dev)
{
        struct bcm_sysport_priv *priv = netdev_priv(dev);
        u16 queue = skb_get_queue_mapping(skb);
        struct bcm_sysport_tx_ring *tx_ring;
        unsigned int q, port;

        if (!netdev_uses_dsa(dev))
                return netdev_pick_tx(dev, skb, NULL);

        /* DSA tagging layer will have configured the correct queue */
        q = BRCM_TAG_GET_QUEUE(queue);
        port = BRCM_TAG_GET_PORT(queue);
        tx_ring = priv->ring_map[q + port * priv->per_port_num_tx_queues];

        if (unlikely(!tx_ring))
                return netdev_pick_tx(dev, skb, NULL);

        return tx_ring->index;
}

static const struct net_device_ops bcm_sysport_netdev_ops = {
        .ndo_start_xmit = bcm_sysport_xmit,
        .ndo_tx_timeout = bcm_sysport_tx_timeout,
        .ndo_open = bcm_sysport_open,
        .ndo_stop = bcm_sysport_stop,
        .ndo_set_features = bcm_sysport_set_features,
        .ndo_set_rx_mode = bcm_sysport_set_rx_mode,
        .ndo_set_mac_address = bcm_sysport_change_mac,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = bcm_sysport_poll_controller,
#endif
        .ndo_get_stats64 = bcm_sysport_get_stats64,
        .ndo_select_queue = bcm_sysport_select_queue,
};

static int bcm_sysport_map_queues(struct notifier_block *nb,
                                  struct dsa_notifier_register_info *info)
{
        struct bcm_sysport_tx_ring *ring;
        struct bcm_sysport_priv *priv;
        struct net_device *slave_dev;
        unsigned int num_tx_queues;
        unsigned int q, qp, port;
        struct net_device *dev;

        priv = container_of(nb, struct bcm_sysport_priv, dsa_notifier);
        if (priv->netdev != info->master)
                return 0;

        dev = info->master;

        /* We can't be setting up queue inspection for non directly attached
         * switches
         */
        if (info->switch_number)
                return 0;

        if (dev->netdev_ops != &bcm_sysport_netdev_ops)
                return 0;

        port = info->port_number;
        slave_dev = info->info.dev;

        /* On SYSTEMPORT Lite we have half as many queues, so we cannot do a
         * 1:1 mapping, only a 2:1 mapping. By reducing the number of
         * per-port (slave_dev) network device queues, we achieve just that.
         * This needs to happen now, before any slave network device is used,
         * so that it accurately reflects the number of real TX queues.
         */
        if (priv->is_lite)
                netif_set_real_num_tx_queues(slave_dev,
                                             slave_dev->num_tx_queues / 2);

        num_tx_queues = slave_dev->real_num_tx_queues;

        if (priv->per_port_num_tx_queues &&
            priv->per_port_num_tx_queues != num_tx_queues)
                netdev_warn(slave_dev, "asymmetric number of per-port queues\n");

        priv->per_port_num_tx_queues = num_tx_queues;

        for (q = 0, qp = 0; q < dev->num_tx_queues && qp < num_tx_queues;
             q++) {
                ring = &priv->tx_rings[q];

                if (ring->inspect)
                        continue;

                /* Just remember the mapping; the actual programming is done
                 * during bcm_sysport_init_tx_ring()
                 */
                ring->switch_queue = qp;
                ring->switch_port = port;
                ring->inspect = true;
                priv->ring_map[qp + port * num_tx_queues] = ring;
                qp++;
        }

        return 0;
}

static int bcm_sysport_unmap_queues(struct notifier_block *nb,
                                    struct dsa_notifier_register_info *info)
{
        struct bcm_sysport_tx_ring *ring;
        struct bcm_sysport_priv *priv;
        struct net_device *slave_dev;
        unsigned int num_tx_queues;
        struct net_device *dev;
        unsigned int q, qp, port;

        priv = container_of(nb, struct bcm_sysport_priv, dsa_notifier);
        if (priv->netdev != info->master)
                return 0;

        dev = info->master;

        if (dev->netdev_ops != &bcm_sysport_netdev_ops)
                return 0;

        port = info->port_number;
        slave_dev = info->info.dev;

        num_tx_queues = slave_dev->real_num_tx_queues;

        for (q = 0; q < dev->num_tx_queues; q++) {
                ring = &priv->tx_rings[q];

                if (ring->switch_port != port)
                        continue;

                if (!ring->inspect)
                        continue;

                ring->inspect = false;
                qp = ring->switch_queue;
                priv->ring_map[qp + port * num_tx_queues] = NULL;
        }

        return 0;
}

static int bcm_sysport_dsa_notifier(struct notifier_block *nb,
                                    unsigned long event, void *ptr)
{
        int ret = NOTIFY_DONE;

        switch (event) {
        case DSA_PORT_REGISTER:
                ret = bcm_sysport_map_queues(nb, ptr);
                break;
        case DSA_PORT_UNREGISTER:
                ret = bcm_sysport_unmap_queues(nb, ptr);
                break;
        }

        return notifier_from_errno(ret);
}

#define REV_FMT "v%2x.%02x"

static const struct bcm_sysport_hw_params bcm_sysport_params[] = {
        [SYSTEMPORT] = {
                .is_lite = false,
                .num_rx_desc_words = SP_NUM_HW_RX_DESC_WORDS,
        },
        [SYSTEMPORT_LITE] = {
                .is_lite = true,
                .num_rx_desc_words = SP_LT_NUM_HW_RX_DESC_WORDS,
        },
};

static const struct of_device_id bcm_sysport_of_match[] = {
        { .compatible = "brcm,systemportlite-v1.00",
          .data = &bcm_sysport_params[SYSTEMPORT_LITE] },
        { .compatible = "brcm,systemport-v1.00",
          .data = &bcm_sysport_params[SYSTEMPORT] },
        { .compatible = "brcm,systemport",
          .data = &bcm_sysport_params[SYSTEMPORT] },
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, bcm_sysport_of_match);

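/* Hypothetical device tree node sketch (the unit address, register size and
 * interrupt specifiers below are made up purely for illustration; the exact
 * binding is described in the DT binding documentation):
 *
 *        ethernet@f04a0000 {
 *                compatible = "brcm,systemport-v1.00";
 *                reg = <0xf04a0000 0x4650>;
 *                interrupts = <0x0 0x18 0x0>, <0x0 0x19 0x0>;
 *                systemport,num-txq = <32>;
 *                systemport,num-rxq = <1>;
 *        };
 *
 * bcm_sysport_probe() below falls back to TDMA_NUM_RINGS TX queues and a
 * single RX queue when the optional systemport,num-txq/num-rxq properties
 * are absent.
 */
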
static int bcm_sysport_probe(struct platform_device *pdev)
|
|
|
|
{
|
2017-01-20 19:08:27 +00:00
|
|
|
const struct bcm_sysport_hw_params *params;
|
|
|
|
const struct of_device_id *of_id = NULL;
|
2014-04-25 01:08:57 +00:00
|
|
|
struct bcm_sysport_priv *priv;
|
|
|
|
struct device_node *dn;
|
|
|
|
struct net_device *dev;
|
|
|
|
const void *macaddr;
|
|
|
|
u32 txq, rxq;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
dn = pdev->dev.of_node;
|
2017-01-20 19:08:27 +00:00
|
|
|
of_id = of_match_node(bcm_sysport_of_match, dn);
|
|
|
|
if (!of_id || !of_id->data)
|
|
|
|
return -EINVAL;
|
|
|
|
|
2019-12-18 00:29:50 +00:00
|
|
|
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
|
|
|
|
if (ret)
|
|
|
|
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
|
|
|
|
if (ret) {
|
|
|
|
dev_err(&pdev->dev, "unable to set DMA mask: %d\n", ret);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2017-01-20 19:08:27 +00:00
|
|
|
/* Fairly quickly we need to know the type of adapter we have */
|
|
|
|
params = of_id->data;
|
2014-04-25 01:08:57 +00:00
|
|
|
|
|
|
|
/* Read the Transmit/Receive Queue properties */
|
|
|
|
if (of_property_read_u32(dn, "systemport,num-txq", &txq))
|
|
|
|
txq = TDMA_NUM_RINGS;
|
|
|
|
if (of_property_read_u32(dn, "systemport,num-rxq", &rxq))
|
|
|
|
rxq = 1;
|
|
|
|
|
2017-01-20 19:08:26 +00:00
|
|
|
/* Sanity check the number of transmit queues */
|
|
|
|
if (!txq || txq > TDMA_NUM_RINGS)
|
|
|
|
return -EINVAL;
|
|
|
|
|
2014-04-25 01:08:57 +00:00
|
|
|
dev = alloc_etherdev_mqs(sizeof(*priv), txq, rxq);
|
|
|
|
if (!dev)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
/* Initialize private members */
|
|
|
|
priv = netdev_priv(dev);
|
|
|
|
|
2017-01-20 19:08:26 +00:00
|
|
|
/* Allocate number of TX rings */
|
|
|
|
priv->tx_rings = devm_kcalloc(&pdev->dev, txq,
|
|
|
|
sizeof(struct bcm_sysport_tx_ring),
|
|
|
|
GFP_KERNEL);
|
|
|
|
if (!priv->tx_rings)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
2017-01-20 19:08:27 +00:00
|
|
|
priv->is_lite = params->is_lite;
|
|
|
|
priv->num_rx_desc_words = params->num_rx_desc_words;
|
|
|
|
|
2014-04-25 01:08:57 +00:00
|
|
|
priv->irq0 = platform_get_irq(pdev, 0);
|
2017-06-02 01:02:39 +00:00
|
|
|
if (!priv->is_lite) {
|
2017-01-20 19:08:27 +00:00
|
|
|
priv->irq1 = platform_get_irq(pdev, 1);
|
2017-06-02 01:02:39 +00:00
|
|
|
priv->wol_irq = platform_get_irq(pdev, 2);
|
|
|
|
} else {
|
|
|
|
priv->wol_irq = platform_get_irq(pdev, 1);
|
|
|
|
}
|
2017-01-20 19:08:27 +00:00
|
|
|
if (priv->irq0 <= 0 || (priv->irq1 <= 0 && !priv->is_lite)) {
|
2014-04-25 01:08:57 +00:00
|
|
|
ret = -EINVAL;
|
2016-11-28 18:24:58 +00:00
|
|
|
goto err_free_netdev;
|
2014-04-25 01:08:57 +00:00
|
|
|
}
|
|
|
|
|
2019-08-21 13:46:13 +00:00
|
|
|
priv->base = devm_platform_ioremap_resource(pdev, 0);
|
2014-05-14 03:15:42 +00:00
|
|
|
if (IS_ERR(priv->base)) {
|
|
|
|
ret = PTR_ERR(priv->base);
|
2016-11-28 18:24:58 +00:00
|
|
|
goto err_free_netdev;
|
2014-04-25 01:08:57 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
priv->netdev = dev;
|
|
|
|
priv->pdev = pdev;
|
|
|
|
|
net: of_get_phy_mode: Change API to solve int/unit warnings
Before this change of_get_phy_mode() returned an enum,
phy_interface_t. On error, -ENODEV etc, is returned. If the result of
the function is stored in a variable of type phy_interface_t, and the
compiler has decided to represent this as an unsigned int, comparision
with -ENODEV etc, is a signed vs unsigned comparision.
Fix this problem by changing the API. Make the function return an
error, or 0 on success, and pass a pointer, of type phy_interface_t,
where the phy mode should be stored.
v2:
Return with *interface set to PHY_INTERFACE_MODE_NA on error.
Add error checks to all users of of_get_phy_mode()
Fixup a few reverse christmas tree errors
Fixup a few slightly malformed reverse christmas trees
v3:
Fix 0-day reported errors.
Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
Signed-off-by: Andrew Lunn <andrew@lunn.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-11-04 01:40:33 +00:00
|
|
|
ret = of_get_phy_mode(dn, &priv->phy_interface);
|
2014-04-25 01:08:57 +00:00
|
|
|
/* Default to GMII interface mode */
|
net: of_get_phy_mode: Change API to solve int/unit warnings
Before this change of_get_phy_mode() returned an enum,
phy_interface_t. On error, -ENODEV etc, is returned. If the result of
the function is stored in a variable of type phy_interface_t, and the
compiler has decided to represent this as an unsigned int, comparision
with -ENODEV etc, is a signed vs unsigned comparision.
Fix this problem by changing the API. Make the function return an
error, or 0 on success, and pass a pointer, of type phy_interface_t,
where the phy mode should be stored.
v2:
Return with *interface set to PHY_INTERFACE_MODE_NA on error.
Add error checks to all users of of_get_phy_mode()
Fixup a few reverse christmas tree errors
Fixup a few slightly malformed reverse christmas trees
v3:
Fix 0-day reported errors.
Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
Signed-off-by: Andrew Lunn <andrew@lunn.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>
2019-11-04 01:40:33 +00:00
|
|
|
if (ret)
|
2014-04-25 01:08:57 +00:00
|
|
|
priv->phy_interface = PHY_INTERFACE_MODE_GMII;
|
|
|
|
|
2014-05-22 16:47:46 +00:00
|
|
|
/* In the case of a fixed PHY, the DT node associated
|
|
|
|
* to the PHY is the Ethernet MAC DT node.
|
|
|
|
*/
|
|
|
|
if (of_phy_is_fixed_link(dn)) {
|
|
|
|
ret = of_phy_register_fixed_link(dn);
|
|
|
|
if (ret) {
|
|
|
|
dev_err(&pdev->dev, "failed to register fixed PHY\n");
|
2016-11-28 18:24:58 +00:00
|
|
|
goto err_free_netdev;
|
2014-05-22 16:47:46 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
priv->phy_dn = dn;
|
|
|
|
}
|
|
|
|
|
2014-04-25 01:08:57 +00:00
|
|
|
/* Initialize netdevice members */
|
|
|
|
macaddr = of_get_mac_address(dn);
|
2019-05-06 21:27:04 +00:00
|
|
|
if (IS_ERR(macaddr)) {
|
2014-04-25 01:08:57 +00:00
|
|
|
dev_warn(&pdev->dev, "using random Ethernet MAC\n");
|
2015-07-08 05:19:30 +00:00
|
|
|
eth_hw_addr_random(dev);
|
2014-04-25 01:08:57 +00:00
|
|
|
} else {
|
|
|
|
ether_addr_copy(dev->dev_addr, macaddr);
|
|
|
|
}
|
|
|
|
|
|
|
|
SET_NETDEV_DEV(dev, &pdev->dev);
|
|
|
|
dev_set_drvdata(&pdev->dev, dev);
|
2014-05-11 00:12:32 +00:00
|
|
|
dev->ethtool_ops = &bcm_sysport_ethtool_ops;
|
2014-04-25 01:08:57 +00:00
|
|
|
dev->netdev_ops = &bcm_sysport_netdev_ops;
|
|
|
|
netif_napi_add(dev, &priv->napi, bcm_sysport_poll, 64);
|
|
|
|
|
2018-09-27 22:36:12 +00:00
|
|
|
dev->features |= NETIF_F_RXCSUM | NETIF_F_HIGHDMA |
|
|
|
|
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
|
|
|
|
dev->hw_features |= dev->features;
|
|
|
|
dev->vlan_features |= dev->features;
|
2014-04-25 01:08:57 +00:00
|
|
|
|
2014-07-02 04:08:40 +00:00
|
|
|
/* Request the WOL interrupt and advertise suspend if available */
|
|
|
|
priv->wol_irq_disabled = 1;
|
|
|
|
ret = devm_request_irq(&pdev->dev, priv->wol_irq,
|
2014-07-10 00:36:46 +00:00
|
|
|
bcm_sysport_wol_isr, 0, dev->name, priv);
|
2014-07-02 04:08:40 +00:00
|
|
|
if (!ret)
|
|
|
|
device_set_wakeup_capable(&pdev->dev, 1);
|
|
|
|
|
2014-04-25 01:08:57 +00:00
|
|
|
/* Set the needed headroom once and for all */
|
2014-05-30 19:39:30 +00:00
|
|
|
BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8);
|
|
|
|
dev->needed_headroom += sizeof(struct bcm_tsb);
|
2014-04-25 01:08:57 +00:00
|
|
|
|
2014-06-05 17:22:18 +00:00
|
|
|
/* libphy will adjust the link state accordingly */
|
|
|
|
netif_carrier_off(dev);
|
|
|
|
|
2018-03-28 22:15:37 +00:00
|
|
|
priv->rx_max_coalesced_frames = 1;
|
2017-08-03 23:07:45 +00:00
|
|
|
u64_stats_init(&priv->syncp);
|
|
|
|
|
net: systemport: Establish lower/upper queue mapping
Establish a queue mapping between the DSA slave network device queues
that correspond to switch port queues and the transmit queues that
SYSTEMPORT manages.
We need to configure each SYSTEMPORT transmit queue with the switch port
number and switch port queue number so that the switch and SYSTEMPORT
hardware can utilize the out-of-band congestion notification. This
hardware mechanism works by looking at the switch port egress queue and
determining whether there are enough buffers for this queue, with that
class of service, for a successful transmission; if not, it backpressures
the SYSTEMPORT queue that is being used.
For this to work, we implement a notifier which looks at the
DSA_PORT_REGISTER event. When DSA network devices are registered, the
framework calls the DSA notifiers; we extract the number of queues for
these devices and their associated port number, remember that in the
driver private structure, and linearly map those queues to the TX
rings/queues that we manage.
This scheme works because DSA slave network devices always transmit
through SYSTEMPORT, so when DSA slave network devices are
destroyed/brought down, the corresponding SYSTEMPORT queues are no
longer used. Also, by design of the DSA framework, the master network
device (SYSTEMPORT) is registered first.
For faster lookups we use an array of up to DSA_MAX_PORTS * the number
of queues per port, and map pointers to bcm_sysport_tx_ring into it so
that our ndo_select_queue() implementation can simply index into that
array to locate the corresponding ring.
Signed-off-by: Florian Fainelli <f.fainelli@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
2017-10-11 17:57:50 +00:00
|
|
|
priv->dsa_notifier.notifier_call = bcm_sysport_dsa_notifier;
|
|
|
|
|
|
|
|
ret = register_dsa_notifier(&priv->dsa_notifier);
|
|
|
|
if (ret) {
|
|
|
|
dev_err(&pdev->dev, "failed to register DSA notifier\n");
|
|
|
|
goto err_deregister_fixed_link;
|
|
|
|
}
|
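As a side note, a minimal sketch of the flat (switch port, switch queue) to TX ring lookup that the commit message above describes for ndo_select_queue(). The ring_map table, queues_per_port value and helper name are hypothetical stand-ins for illustration, not necessarily the driver's actual fields.

/* Illustrative only: map a (switch port, switch queue) pair to the
 * SYSTEMPORT TX ring remembered at DSA_PORT_REGISTER time, falling back
 * to a default queue when no mapping exists.
 */
static u16 example_pick_tx_queue(struct bcm_sysport_tx_ring **ring_map,
				 unsigned int queues_per_port,
				 unsigned int port, unsigned int queue,
				 u16 fallback)
{
	struct bcm_sysport_tx_ring *ring;

	ring = ring_map[port * queues_per_port + queue];
	if (!ring)
		return fallback;

	return (u16)ring->index;
}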
|
|
|
|
2014-04-25 01:08:57 +00:00
|
|
|
ret = register_netdev(dev);
|
|
|
|
if (ret) {
|
|
|
|
dev_err(&pdev->dev, "failed to register net_device\n");
|
net: systemport: Establish lower/upper queue mapping
2017-10-11 17:57:50 +00:00
|
|
|
goto err_deregister_notifier;
|
2014-04-25 01:08:57 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
|
|
|
|
dev_info(&pdev->dev,
|
2019-03-20 16:45:17 +00:00
|
|
|
"Broadcom SYSTEMPORT%s " REV_FMT
|
|
|
|
" (irqs: %d, %d, TXQs: %d, RXQs: %d)\n",
|
2017-01-20 19:08:27 +00:00
|
|
|
priv->is_lite ? " Lite" : "",
|
2014-07-10 00:36:46 +00:00
|
|
|
(priv->rev >> 8) & 0xff, priv->rev & 0xff,
|
2019-03-20 16:45:17 +00:00
|
|
|
priv->irq0, priv->irq1, txq, rxq);
|
2014-04-25 01:08:57 +00:00
|
|
|
|
|
|
|
return 0;
|
2016-11-28 18:24:58 +00:00
|
|
|
|
net: systemport: Establish lower/upper queue mapping
2017-10-11 17:57:50 +00:00
|
|
|
err_deregister_notifier:
|
|
|
|
unregister_dsa_notifier(&priv->dsa_notifier);
|
2016-11-28 18:24:58 +00:00
|
|
|
err_deregister_fixed_link:
|
|
|
|
if (of_phy_is_fixed_link(dn))
|
|
|
|
of_phy_deregister_fixed_link(dn);
|
|
|
|
err_free_netdev:
|
2014-04-25 01:08:57 +00:00
|
|
|
free_netdev(dev);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int bcm_sysport_remove(struct platform_device *pdev)
|
|
|
|
{
|
|
|
|
struct net_device *dev = dev_get_drvdata(&pdev->dev);
|
net: systemport: Establish lower/upper queue mapping
2017-10-11 17:57:50 +00:00
|
|
|
struct bcm_sysport_priv *priv = netdev_priv(dev);
|
2016-11-28 18:24:58 +00:00
|
|
|
struct device_node *dn = pdev->dev.of_node;
|
2014-04-25 01:08:57 +00:00
|
|
|
|
|
|
|
/* Not much to do, ndo_close has been called
|
|
|
|
* and we use managed allocations
|
|
|
|
*/
|
net: systemport: Establish lower/upper queue mapping
2017-10-11 17:57:50 +00:00
|
|
|
unregister_dsa_notifier(&priv->dsa_notifier);
|
2014-04-25 01:08:57 +00:00
|
|
|
unregister_netdev(dev);
|
2016-11-28 18:24:58 +00:00
|
|
|
if (of_phy_is_fixed_link(dn))
|
|
|
|
of_phy_deregister_fixed_link(dn);
|
2014-04-25 01:08:57 +00:00
|
|
|
free_netdev(dev);
|
|
|
|
dev_set_drvdata(&pdev->dev, NULL);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2014-07-02 04:08:40 +00:00
|
|
|
static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
|
|
|
|
{
|
|
|
|
struct net_device *ndev = priv->netdev;
|
|
|
|
unsigned int timeout = 1000;
|
2018-08-07 17:50:23 +00:00
|
|
|
unsigned int index, i = 0;
|
2014-07-02 04:08:40 +00:00
|
|
|
u32 reg;
|
|
|
|
|
|
|
|
reg = umac_readl(priv, UMAC_MPD_CTRL);
|
2018-08-07 17:50:23 +00:00
|
|
|
if (priv->wolopts & (WAKE_MAGIC | WAKE_MAGICSECURE))
|
|
|
|
reg |= MPD_EN;
|
2014-07-02 04:08:40 +00:00
|
|
|
reg &= ~PSW_EN;
|
2019-02-01 21:23:38 +00:00
|
|
|
if (priv->wolopts & WAKE_MAGICSECURE) {
|
|
|
|
/* Program the SecureOn password */
|
|
|
|
umac_writel(priv, get_unaligned_be16(&priv->sopass[0]),
|
|
|
|
UMAC_PSW_MS);
|
|
|
|
umac_writel(priv, get_unaligned_be32(&priv->sopass[2]),
|
|
|
|
UMAC_PSW_LS);
|
2014-07-02 04:08:40 +00:00
|
|
|
reg |= PSW_EN;
|
2019-02-01 21:23:38 +00:00
|
|
|
}
|
2014-07-02 04:08:40 +00:00
|
|
|
umac_writel(priv, reg, UMAC_MPD_CTRL);
|
|
|
|
|
2018-08-07 17:50:23 +00:00
|
|
|
if (priv->wolopts & WAKE_FILTER) {
|
|
|
|
/* Turn on ACPI matching to steal packets from RBUF */
|
|
|
|
reg = rbuf_readl(priv, RBUF_CONTROL);
|
|
|
|
if (priv->is_lite)
|
|
|
|
reg |= RBUF_ACPI_EN_LITE;
|
|
|
|
else
|
|
|
|
reg |= RBUF_ACPI_EN;
|
|
|
|
rbuf_writel(priv, reg, RBUF_CONTROL);
|
|
|
|
|
|
|
|
/* Enable RXCHK, active filters and Broadcom tag matching */
|
|
|
|
reg = rxchk_readl(priv, RXCHK_CONTROL);
|
|
|
|
reg &= ~(RXCHK_BRCM_TAG_MATCH_MASK <<
|
|
|
|
RXCHK_BRCM_TAG_MATCH_SHIFT);
|
|
|
|
for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) {
|
|
|
|
reg |= BIT(RXCHK_BRCM_TAG_MATCH_SHIFT + i);
|
|
|
|
i++;
|
|
|
|
}
|
|
|
|
reg |= RXCHK_EN | RXCHK_BRCM_TAG_EN;
|
|
|
|
rxchk_writel(priv, reg, RXCHK_CONTROL);
|
|
|
|
}
|
|
|
|
|
2014-07-02 04:08:40 +00:00
|
|
|
/* Make sure RBUF entered WoL mode as a result */
|
|
|
|
do {
|
|
|
|
reg = rbuf_readl(priv, RBUF_STATUS);
|
|
|
|
if (reg & RBUF_WOL_MODE)
|
|
|
|
break;
|
|
|
|
|
|
|
|
udelay(10);
|
|
|
|
} while (timeout-- > 0);
|
|
|
|
|
|
|
|
/* Do not leave the UniMAC RBUF matching only MPD packets */
|
|
|
|
if (!timeout) {
|
2018-08-03 18:08:44 +00:00
|
|
|
mpd_enable_set(priv, false);
|
2014-07-02 04:08:40 +00:00
|
|
|
netif_err(priv, wol, ndev, "failed to enter WOL mode\n");
|
|
|
|
return -ETIMEDOUT;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* UniMAC receive needs to be turned on */
|
|
|
|
umac_enable_set(priv, CMD_RX_EN, 1);
|
|
|
|
|
|
|
|
netif_dbg(priv, wol, ndev, "entered WOL mode\n");
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-08-13 22:10:34 +00:00
|
|
|
static int __maybe_unused bcm_sysport_suspend(struct device *d)
|
2014-07-02 04:08:38 +00:00
|
|
|
{
|
|
|
|
struct net_device *dev = dev_get_drvdata(d);
|
|
|
|
struct bcm_sysport_priv *priv = netdev_priv(dev);
|
|
|
|
unsigned int i;
|
2014-07-02 04:08:40 +00:00
|
|
|
int ret = 0;
|
2014-07-02 04:08:38 +00:00
|
|
|
u32 reg;
|
|
|
|
|
|
|
|
if (!netif_running(dev))
|
|
|
|
return 0;
|
|
|
|
|
2018-11-01 22:55:38 +00:00
|
|
|
netif_device_detach(dev);
|
|
|
|
|
2014-07-02 04:08:38 +00:00
|
|
|
bcm_sysport_netif_stop(dev);
|
|
|
|
|
2016-06-19 18:39:08 +00:00
|
|
|
phy_suspend(dev->phydev);
|
2014-07-02 04:08:38 +00:00
|
|
|
|
|
|
|
/* Disable UniMAC RX */
|
|
|
|
umac_enable_set(priv, CMD_RX_EN, 0);
|
|
|
|
|
|
|
|
ret = rdma_enable_set(priv, 0);
|
|
|
|
if (ret) {
|
|
|
|
netdev_err(dev, "RDMA timeout!\n");
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Disable RXCHK if enabled */
|
2014-07-02 04:08:39 +00:00
|
|
|
if (priv->rx_chk_en) {
|
2014-07-02 04:08:38 +00:00
|
|
|
reg = rxchk_readl(priv, RXCHK_CONTROL);
|
|
|
|
reg &= ~RXCHK_EN;
|
|
|
|
rxchk_writel(priv, reg, RXCHK_CONTROL);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Flush RX pipe */
|
2014-07-02 04:08:40 +00:00
|
|
|
if (!priv->wolopts)
|
|
|
|
topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);
|
2014-07-02 04:08:38 +00:00
|
|
|
|
|
|
|
ret = tdma_enable_set(priv, 0);
|
|
|
|
if (ret) {
|
|
|
|
netdev_err(dev, "TDMA timeout!\n");
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Wait for a packet boundary */
|
|
|
|
usleep_range(2000, 3000);
|
|
|
|
|
|
|
|
umac_enable_set(priv, CMD_TX_EN, 0);
|
|
|
|
|
|
|
|
topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);
|
|
|
|
|
|
|
|
/* Free RX/TX rings SW structures */
|
|
|
|
for (i = 0; i < dev->num_tx_queues; i++)
|
|
|
|
bcm_sysport_fini_tx_ring(priv, i);
|
|
|
|
bcm_sysport_fini_rx_ring(priv);
|
|
|
|
|
2014-07-02 04:08:40 +00:00
|
|
|
/* Get prepared for Wake-on-LAN */
|
|
|
|
if (device_may_wakeup(d) && priv->wolopts)
|
|
|
|
ret = bcm_sysport_suspend_to_wol(priv);
|
|
|
|
|
|
|
|
return ret;
|
2014-07-02 04:08:38 +00:00
|
|
|
}
|
|
|
|
|
2018-08-13 22:10:34 +00:00
|
|
|
static int __maybe_unused bcm_sysport_resume(struct device *d)
|
2014-07-02 04:08:38 +00:00
|
|
|
{
|
|
|
|
struct net_device *dev = dev_get_drvdata(d);
|
|
|
|
struct bcm_sysport_priv *priv = netdev_priv(dev);
|
|
|
|
unsigned int i;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
if (!netif_running(dev))
|
|
|
|
return 0;
|
|
|
|
|
2014-10-28 18:12:01 +00:00
|
|
|
umac_reset(priv);
|
|
|
|
|
2020-02-05 20:32:04 +00:00
|
|
|
/* Disable the UniMAC RX/TX */
|
|
|
|
umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 0);
|
|
|
|
|
2014-07-02 04:08:40 +00:00
|
|
|
/* We may have been suspended and never received a WOL event that
|
|
|
|
* would turn off MPD detection, take care of that now
|
|
|
|
*/
|
|
|
|
bcm_sysport_resume_from_wol(priv);
|
|
|
|
|
2014-07-02 04:08:38 +00:00
|
|
|
/* Initialize both hardware and software ring */
|
|
|
|
for (i = 0; i < dev->num_tx_queues; i++) {
|
|
|
|
ret = bcm_sysport_init_tx_ring(priv, i);
|
|
|
|
if (ret) {
|
|
|
|
netdev_err(dev, "failed to initialize TX ring %d\n",
|
2014-07-10 00:36:46 +00:00
|
|
|
i);
|
2014-07-02 04:08:38 +00:00
|
|
|
goto out_free_tx_rings;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Initialize linked-list */
|
|
|
|
tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);
|
|
|
|
|
|
|
|
/* Initialize RX ring */
|
|
|
|
ret = bcm_sysport_init_rx_ring(priv);
|
|
|
|
if (ret) {
|
|
|
|
netdev_err(dev, "failed to initialize RX ring\n");
|
|
|
|
goto out_free_rx_ring;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* RX pipe enable */
|
|
|
|
topctrl_writel(priv, 0, RX_FLUSH_CNTL);
|
|
|
|
|
|
|
|
ret = rdma_enable_set(priv, 1);
|
|
|
|
if (ret) {
|
|
|
|
netdev_err(dev, "failed to enable RDMA\n");
|
|
|
|
goto out_free_rx_ring;
|
|
|
|
}
|
|
|
|
|
2018-09-27 22:36:11 +00:00
|
|
|
/* Restore enabled features */
|
|
|
|
bcm_sysport_set_features(dev, dev->features);
|
2014-07-02 04:08:38 +00:00
|
|
|
|
|
|
|
rbuf_init(priv);
|
|
|
|
|
|
|
|
/* Set maximum frame length */
|
2017-01-20 19:08:27 +00:00
|
|
|
if (!priv->is_lite)
|
|
|
|
umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
|
|
|
|
else
|
|
|
|
gib_set_pad_extension(priv);
|
2014-07-02 04:08:38 +00:00
|
|
|
|
|
|
|
/* Set MAC address */
|
|
|
|
umac_set_hw_addr(priv, dev->dev_addr);
|
|
|
|
|
|
|
|
umac_enable_set(priv, CMD_RX_EN, 1);
|
|
|
|
|
|
|
|
/* TX pipe enable */
|
|
|
|
topctrl_writel(priv, 0, TX_FLUSH_CNTL);
|
|
|
|
|
|
|
|
umac_enable_set(priv, CMD_TX_EN, 1);
|
|
|
|
|
|
|
|
ret = tdma_enable_set(priv, 1);
|
|
|
|
if (ret) {
|
|
|
|
netdev_err(dev, "TDMA timeout!\n");
|
|
|
|
goto out_free_rx_ring;
|
|
|
|
}
|
|
|
|
|
2016-06-19 18:39:08 +00:00
|
|
|
phy_resume(dev->phydev);
|
2014-07-02 04:08:38 +00:00
|
|
|
|
|
|
|
bcm_sysport_netif_start(dev);
|
|
|
|
|
2018-11-01 22:55:38 +00:00
|
|
|
netif_device_attach(dev);
|
|
|
|
|
2014-07-02 04:08:38 +00:00
|
|
|
return 0;
|
|
|
|
|
|
|
|
out_free_rx_ring:
|
|
|
|
bcm_sysport_fini_rx_ring(priv);
|
|
|
|
out_free_tx_rings:
|
|
|
|
for (i = 0; i < dev->num_tx_queues; i++)
|
|
|
|
bcm_sysport_fini_tx_ring(priv, i);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
static SIMPLE_DEV_PM_OPS(bcm_sysport_pm_ops,
|
|
|
|
bcm_sysport_suspend, bcm_sysport_resume);
|
|
|
|
|
2014-04-25 01:08:57 +00:00
|
|
|
static struct platform_driver bcm_sysport_driver = {
|
|
|
|
.probe = bcm_sysport_probe,
|
|
|
|
.remove = bcm_sysport_remove,
|
|
|
|
.driver = {
|
|
|
|
.name = "brcm-systemport",
|
|
|
|
.of_match_table = bcm_sysport_of_match,
|
2014-07-02 04:08:38 +00:00
|
|
|
.pm = &bcm_sysport_pm_ops,
|
2014-04-25 01:08:57 +00:00
|
|
|
},
|
|
|
|
};
|
|
|
|
module_platform_driver(bcm_sysport_driver);
|
|
|
|
|
|
|
|
MODULE_AUTHOR("Broadcom Corporation");
|
|
|
|
MODULE_DESCRIPTION("Broadcom System Port Ethernet MAC driver");
|
|
|
|
MODULE_ALIAS("platform:brcm-systemport");
|
|
|
|
MODULE_LICENSE("GPL");
|