Merge branch 'mlxsw-cleanups'

Jiri Pirko says:

====================
mlxsw: Driver update, cleanups

This patchset contains various cleanups and improvements in the mlxsw driver.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 2015-10-15 23:28:03 -07:00
commit 181e4246b4
6 changed files with 103 additions and 134 deletions

drivers/net/ethernet/mellanox/mlxsw/cmd.h

@@ -464,6 +464,8 @@ MLXSW_ITEM32(cmd_mbox, query_aq_cap, max_sg_rq, 0x10, 0, 8);
* passed in this command must be pinned.
*/
#define MLXSW_CMD_MAP_FA_VPM_ENTRIES_MAX 32
static inline int mlxsw_cmd_map_fa(struct mlxsw_core *mlxsw_core,
char *in_mbox, u32 vpm_entries_count)
{
@@ -568,7 +570,7 @@ MLXSW_ITEM32(cmd_mbox, config_profile, set_max_vlan_groups, 0x0C, 6, 1);
*/
MLXSW_ITEM32(cmd_mbox, config_profile, set_max_regions, 0x0C, 7, 1);
/* cmd_mbox_config_profile_set_fid_based
/* cmd_mbox_config_profile_set_flood_mode
* Capability bit. Setting a bit to 1 configures the profile
* according to the mailbox contents.
*/
@@ -649,12 +651,8 @@ MLXSW_ITEM32(cmd_mbox, config_profile, max_vlan_groups, 0x28, 0, 12);
MLXSW_ITEM32(cmd_mbox, config_profile, max_regions, 0x2C, 0, 16);
/* cmd_mbox_config_profile_max_flood_tables
* Maximum number of Flooding Tables. Flooding Tables are associated with
* the different packet types for the different switch partitions.
* Note that the table size depends on the fid_based mode.
* In SwitchX silicon, tables are split equally between the switch
* partitions. e.g. for 2 swids and 8 tables, the first 4 are associated
* with swid-1 and the last 4 are associated with swid-2.
* Maximum number of single-entry flooding tables. Different flooding tables
* can be associated with different packet types.
*/
MLXSW_ITEM32(cmd_mbox, config_profile, max_flood_tables, 0x30, 16, 4);
@@ -665,12 +663,14 @@ MLXSW_ITEM32(cmd_mbox, config_profile, max_flood_tables, 0x30, 16, 4);
*/
MLXSW_ITEM32(cmd_mbox, config_profile, max_vid_flood_tables, 0x30, 8, 4);
/* cmd_mbox_config_profile_fid_based
* FID Based Flood Mode
* 00 Do not use FID to offset the index into the Port Group Table/Multicast ID
* 01 Use FID to offset the index to the Port Group Table (pgi)
* 10 Use FID to offset the index to the Port Group Table (pgi) and
* the Multicast ID
/* cmd_mbox_config_profile_flood_mode
* Flooding mode to use.
* 0-2 - Backward compatible modes for SwitchX devices.
* 3 - Mixed mode, where:
* max_flood_tables indicates the number of single-entry tables.
* max_vid_flood_tables indicates the number of per-VID tables.
* max_fid_offset_flood_tables indicates the number of FID-offset tables.
* max_fid_flood_tables indicates the number of per-FID tables.
*/
MLXSW_ITEM32(cmd_mbox, config_profile, flood_mode, 0x30, 0, 2);
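
In mixed mode (3) the driver carves the flood-table space into all four table
types at once. As a rough illustration of how a profile could request it (the
values are invented, and only fields appearing in this diff are used; the
MLXSW_ITEM32() macro generates the corresponding *_set() helpers):

static void example_pack_mixed_flood_mode(char *mbox)
{
	/* Illustrative values: mixed flood mode with two single-entry
	 * tables and one per-VID table.
	 */
	mlxsw_cmd_mbox_config_profile_set_flood_mode_set(mbox, 1);
	mlxsw_cmd_mbox_config_profile_flood_mode_set(mbox, 3);
	mlxsw_cmd_mbox_config_profile_max_flood_tables_set(mbox, 2);
	mlxsw_cmd_mbox_config_profile_max_vid_flood_tables_set(mbox, 1);
}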

drivers/net/ethernet/mellanox/mlxsw/core.c

@@ -506,7 +506,6 @@ static int mlxsw_emad_traps_set(struct mlxsw_core *mlxsw_core)
return err;
mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
MLXSW_TRAP_ID_ETHEMAD);
return mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
}
@@ -551,8 +550,8 @@ static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core)
{
char hpkt_pl[MLXSW_REG_HPKT_LEN];
mlxsw_core->emad.use_emad = false;
mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD,
MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
MLXSW_TRAP_ID_ETHEMAD);
mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);

drivers/net/ethernet/mellanox/mlxsw/pci.c

@@ -171,8 +171,8 @@ struct mlxsw_pci {
struct msix_entry msix_entry;
struct mlxsw_core *core;
struct {
u16 num_pages;
struct mlxsw_pci_mem_item *items;
unsigned int count;
} fw_area;
struct {
struct mlxsw_pci_mem_item out_mbox;
@@ -431,8 +431,7 @@ static int mlxsw_pci_wqe_frag_map(struct mlxsw_pci *mlxsw_pci, char *wqe,
mapaddr = pci_map_single(pdev, frag_data, frag_len, direction);
if (unlikely(pci_dma_mapping_error(pdev, mapaddr))) {
if (net_ratelimit())
dev_err(&pdev->dev, "failed to dma map tx frag\n");
dev_err_ratelimited(&pdev->dev, "failed to dma map tx frag\n");
return -EIO;
}
mlxsw_pci_wqe_address_set(wqe, index, mapaddr);
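
The open-coded guard collapses into one helper. One nuance of the conversion:
net_ratelimit() consults the global net_ratelimit_state shared across the
networking stack, while dev_err_ratelimited() keeps its own static
per-callsite struct ratelimit_state, so a noisy message elsewhere no longer
suppresses this one. The pattern, side by side:

	/* before: shared global rate limit */
	if (net_ratelimit())
		dev_err(&pdev->dev, "failed to dma map tx frag\n");

	/* after: same message, per-callsite rate limit */
	dev_err_ratelimited(&pdev->dev, "failed to dma map tx frag\n");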
@@ -497,6 +496,7 @@ static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
struct mlxsw_pci_queue *q)
{
struct mlxsw_pci_queue_elem_info *elem_info;
u8 sdq_count = mlxsw_pci_sdq_count(mlxsw_pci);
int i;
int err;
@@ -504,9 +504,9 @@ static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
q->consumer_counter = 0;
/* Use the CQ of the same number as this RDQ, with a base
* above MLXSW_PCI_SDQS_MAX as the lower ones are assigned to SDQs.
* above SDQ count as the lower ones are assigned to SDQs.
*/
mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, q->num + MLXSW_PCI_SDQS_COUNT);
mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, sdq_count + q->num);
mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
@@ -699,8 +699,8 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
put_new_skb:
memset(wqe, 0, q->elem_size);
err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
if (err && net_ratelimit())
dev_dbg(&pdev->dev, "Failed to alloc skb for RDQ\n");
if (err)
dev_dbg_ratelimited(&pdev->dev, "Failed to alloc skb for RDQ\n");
/* Everything is set up, ring doorbell to pass elem to HW */
q->producer_counter++;
mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
@@ -830,7 +830,8 @@ static void mlxsw_pci_eq_tasklet(unsigned long data)
{
struct mlxsw_pci_queue *q = (struct mlxsw_pci_queue *) data;
struct mlxsw_pci *mlxsw_pci = q->pci;
unsigned long active_cqns[BITS_TO_LONGS(MLXSW_PCI_CQS_COUNT)];
u8 cq_count = mlxsw_pci_cq_count(mlxsw_pci);
unsigned long active_cqns[BITS_TO_LONGS(MLXSW_PCI_CQS_MAX)];
char *eqe;
u8 cqn;
bool cq_handle = false;
@@ -866,7 +867,7 @@ static void mlxsw_pci_eq_tasklet(unsigned long data)
if (!cq_handle)
return;
for_each_set_bit(cqn, active_cqns, MLXSW_PCI_CQS_COUNT) {
for_each_set_bit(cqn, active_cqns, cq_count) {
q = mlxsw_pci_cq_get(mlxsw_pci, cqn);
mlxsw_pci_queue_tasklet_schedule(q);
}
@@ -1067,10 +1068,8 @@ static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox)
num_eqs = mlxsw_cmd_mbox_query_aq_cap_max_num_eqs_get(mbox);
eq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_eq_sz_get(mbox);
if ((num_sdqs != MLXSW_PCI_SDQS_COUNT) ||
(num_rdqs != MLXSW_PCI_RDQS_COUNT) ||
(num_cqs != MLXSW_PCI_CQS_COUNT) ||
(num_eqs != MLXSW_PCI_EQS_COUNT)) {
if (num_sdqs + num_rdqs > num_cqs ||
num_cqs > MLXSW_PCI_CQS_MAX || num_eqs != MLXSW_PCI_EQS_COUNT) {
dev_err(&pdev->dev, "Unsupported number of queues\n");
return -EINVAL;
}
@@ -1272,6 +1271,7 @@ static int mlxsw_pci_fw_area_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
u16 num_pages)
{
struct mlxsw_pci_mem_item *mem_item;
int nent = 0;
int i;
int err;
@@ -1279,7 +1279,7 @@ static int mlxsw_pci_fw_area_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
GFP_KERNEL);
if (!mlxsw_pci->fw_area.items)
return -ENOMEM;
mlxsw_pci->fw_area.num_pages = num_pages;
mlxsw_pci->fw_area.count = num_pages;
mlxsw_cmd_mbox_zero(mbox);
for (i = 0; i < num_pages; i++) {
@@ -1293,13 +1293,22 @@ static int mlxsw_pci_fw_area_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
err = -ENOMEM;
goto err_alloc;
}
mlxsw_cmd_mbox_map_fa_pa_set(mbox, i, mem_item->mapaddr);
mlxsw_cmd_mbox_map_fa_log2size_set(mbox, i, 0); /* 1 page */
mlxsw_cmd_mbox_map_fa_pa_set(mbox, nent, mem_item->mapaddr);
mlxsw_cmd_mbox_map_fa_log2size_set(mbox, nent, 0); /* 1 page */
if (++nent == MLXSW_CMD_MAP_FA_VPM_ENTRIES_MAX) {
err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, nent);
if (err)
goto err_cmd_map_fa;
nent = 0;
mlxsw_cmd_mbox_zero(mbox);
}
}
err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, num_pages);
if (err)
goto err_cmd_map_fa;
if (nent) {
err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, nent);
if (err)
goto err_cmd_map_fa;
}
return 0;
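
The rewritten loop ensures no single MAP_FA command carries more than
MLXSW_CMD_MAP_FA_VPM_ENTRIES_MAX (32) VPM entries: it fills mailbox slots,
issues the command whenever the mailbox fills up, and flushes the remainder
at the end. A minimal standalone sketch of the same batching pattern, where
flush() is a hypothetical stand-in for mlxsw_cmd_map_fa() plus the mailbox
reset:

#include <stdio.h>

#define ENTRIES_MAX 32	/* mirrors MLXSW_CMD_MAP_FA_VPM_ENTRIES_MAX */

static int flush(int nent)
{
	printf("MAP_FA: %d entries\n", nent);	/* issue one command */
	return 0;
}

int main(void)
{
	int num_pages = 70;	/* say firmware asked for 70 pages */
	int nent = 0, i, err;

	for (i = 0; i < num_pages; i++) {
		/* ...fill mailbox slot 'nent' for page 'i'... */
		if (++nent == ENTRIES_MAX) {
			err = flush(nent);	/* mailbox is full */
			if (err)
				return err;
			nent = 0;		/* start a fresh mailbox */
		}
	}
	if (nent) {
		err = flush(nent);		/* final partial chunk */
		if (err)
			return err;
	}
	return 0;	/* prints 32, 32, 6 */
}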
@@ -1322,7 +1331,7 @@ static void mlxsw_pci_fw_area_fini(struct mlxsw_pci *mlxsw_pci)
mlxsw_cmd_unmap_fa(mlxsw_pci->core);
for (i = 0; i < mlxsw_pci->fw_area.num_pages; i++) {
for (i = 0; i < mlxsw_pci->fw_area.count; i++) {
mem_item = &mlxsw_pci->fw_area.items[i];
pci_free_consistent(mlxsw_pci->pdev, mem_item->size,

drivers/net/ethernet/mellanox/mlxsw/pci.h

@@ -71,9 +71,7 @@
#define MLXSW_PCI_DOORBELL(offset, type_offset, num) \
((offset) + (type_offset) + (num) * 4)
#define MLXSW_PCI_RDQS_COUNT 24
#define MLXSW_PCI_SDQS_COUNT 24
#define MLXSW_PCI_CQS_COUNT (MLXSW_PCI_RDQS_COUNT + MLXSW_PCI_SDQS_COUNT)
#define MLXSW_PCI_CQS_MAX 96
#define MLXSW_PCI_EQS_COUNT 2
#define MLXSW_PCI_EQ_ASYNC_NUM 0
#define MLXSW_PCI_EQ_COMP_NUM 1

drivers/net/ethernet/mellanox/mlxsw/reg.h

@@ -99,57 +99,6 @@ static const struct mlxsw_reg_info mlxsw_reg_spad = {
*/
MLXSW_ITEM_BUF(reg, spad, base_mac, 0x02, 6);
/* SMID - Switch Multicast ID
* --------------------------
* In multi-chip configuration, each device should maintain a mapping from
* Multicast ID (MID) to a list of local ports. This mapping is used in all
* the devices other than the ingress device, and is implemented as part of the
* FDB. The MID record maps from a MID, which is a unique identifier of the
* multicast group within the stacking domain, into a list of local ports into
* which the packet is replicated.
*/
#define MLXSW_REG_SMID_ID 0x2007
#define MLXSW_REG_SMID_LEN 0x420
static const struct mlxsw_reg_info mlxsw_reg_smid = {
.id = MLXSW_REG_SMID_ID,
.len = MLXSW_REG_SMID_LEN,
};
/* reg_smid_swid
* Switch partition ID.
* Access: Index
*/
MLXSW_ITEM32(reg, smid, swid, 0x00, 24, 8);
/* reg_smid_mid
* Multicast identifier - global identifier that represents the multicast group
* across all devices
* Access: Index
*/
MLXSW_ITEM32(reg, smid, mid, 0x00, 0, 16);
/* reg_smid_port
* Local port membership (1 bit per port).
* Access: RW
*/
MLXSW_ITEM_BIT_ARRAY(reg, smid, port, 0x20, 0x20, 1);
/* reg_smid_port_mask
* Local port mask (1 bit per port).
* Access: W
*/
MLXSW_ITEM_BIT_ARRAY(reg, smid, port_mask, 0x220, 0x20, 1);
static inline void mlxsw_reg_smid_pack(char *payload, u16 mid)
{
MLXSW_REG_ZERO(smid, payload);
mlxsw_reg_smid_swid_set(payload, 0);
mlxsw_reg_smid_mid_set(payload, mid);
mlxsw_reg_smid_port_set(payload, MLXSW_PORT_CPU_PORT, 1);
mlxsw_reg_smid_port_mask_set(payload, MLXSW_PORT_CPU_PORT, 1);
}
/* SSPR - Switch System Port Record Register
* -----------------------------------------
* Configures the system port to local port mapping.
@@ -212,7 +161,7 @@ static inline void mlxsw_reg_sspr_pack(char *payload, u8 local_port)
* -------------------------------------------
* Configures the spanning tree state of a physical port.
*/
#define MLXSW_REG_SPMS_ID 0x200d
#define MLXSW_REG_SPMS_ID 0x200D
#define MLXSW_REG_SPMS_LEN 0x404
static const struct mlxsw_reg_info mlxsw_reg_spms = {
@@ -243,11 +192,15 @@ enum mlxsw_reg_spms_state {
*/
MLXSW_ITEM_BIT_ARRAY(reg, spms, state, 0x04, 0x400, 2);
static inline void mlxsw_reg_spms_pack(char *payload, u8 local_port, u16 vid,
enum mlxsw_reg_spms_state state)
static inline void mlxsw_reg_spms_pack(char *payload, u8 local_port)
{
MLXSW_REG_ZERO(spms, payload);
mlxsw_reg_spms_local_port_set(payload, local_port);
}
static inline void mlxsw_reg_spms_vid_pack(char *payload, u16 vid,
enum mlxsw_reg_spms_state state)
{
mlxsw_reg_spms_state_set(payload, vid, state);
}
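
Splitting the pack helper in two lets a caller set the port once and then
batch many VID states into a single SPMS write. A hedged sketch of the
intended calling pattern (vid_begin/vid_end are hypothetical; compare the
single-VID call site in switchx2.c below):

	char *spms_pl;
	u16 vid;
	int err;

	spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
	if (!spms_pl)
		return -ENOMEM;
	mlxsw_reg_spms_pack(spms_pl, local_port);
	for (vid = vid_begin; vid <= vid_end; vid++)
		mlxsw_reg_spms_vid_pack(spms_pl, vid,
					MLXSW_REG_SPMS_STATE_FORWARDING);
	err = mlxsw_reg_write(core, MLXSW_REG(spms), spms_pl);
	kfree(spms_pl);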
@@ -256,7 +209,7 @@ static inline void mlxsw_reg_spms_pack(char *payload, u8 local_port, u16 vid,
* The following register controls the association of flooding tables and MIDs
* to packet types used for flooding.
*/
#define MLXSW_REG_SFGC_ID 0x2011
#define MLXSW_REG_SFGC_ID 0x2011
#define MLXSW_REG_SFGC_LEN 0x10
static const struct mlxsw_reg_info mlxsw_reg_sfgc = {
@@ -265,13 +218,15 @@ static const struct mlxsw_reg_info mlxsw_reg_sfgc = {
};
enum mlxsw_reg_sfgc_type {
MLXSW_REG_SFGC_TYPE_BROADCAST = 0,
MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST = 1,
MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4 = 2,
MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6 = 3,
MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP = 5,
MLXSW_REG_SFGC_TYPE_IPV4_LINK_LOCAL = 6,
MLXSW_REG_SFGC_TYPE_IPV6_ALL_HOST = 7,
MLXSW_REG_SFGC_TYPE_BROADCAST,
MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST,
MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV4,
MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_IPV6,
MLXSW_REG_SFGC_TYPE_RESERVED,
MLXSW_REG_SFGC_TYPE_UNREGISTERED_MULTICAST_NON_IP,
MLXSW_REG_SFGC_TYPE_IPV4_LINK_LOCAL,
MLXSW_REG_SFGC_TYPE_IPV6_ALL_HOST,
MLXSW_REG_SFGC_TYPE_MAX,
};
/* reg_sfgc_type
@@ -1013,7 +968,7 @@ static inline void mlxsw_reg_ppcnt_pack(char *payload, u8 local_port)
* Controls the association of a port with a switch partition and enables
* configuring ports as stacking ports.
*/
#define MLXSW_REG_PSPA_ID 0x500d
#define MLXSW_REG_PSPA_ID 0x500D
#define MLXSW_REG_PSPA_LEN 0x8
static const struct mlxsw_reg_info mlxsw_reg_pspa = {
@@ -1074,8 +1029,11 @@ MLXSW_ITEM32(reg, htgt, swid, 0x00, 24, 8);
*/
MLXSW_ITEM32(reg, htgt, type, 0x00, 8, 4);
#define MLXSW_REG_HTGT_TRAP_GROUP_EMAD 0x0
#define MLXSW_REG_HTGT_TRAP_GROUP_RX 0x1
enum mlxsw_reg_htgt_trap_group {
MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
MLXSW_REG_HTGT_TRAP_GROUP_RX,
MLXSW_REG_HTGT_TRAP_GROUP_CTRL,
};
/* reg_htgt_trap_group
* Trap group number. User defined number specifying which trap groups
@@ -1142,6 +1100,7 @@ MLXSW_ITEM32(reg, htgt, local_path_cpu_tclass, 0x10, 16, 6);
#define MLXSW_REG_HTGT_LOCAL_PATH_RDQ_EMAD 0x15
#define MLXSW_REG_HTGT_LOCAL_PATH_RDQ_RX 0x14
#define MLXSW_REG_HTGT_LOCAL_PATH_RDQ_CTRL 0x13
/* reg_htgt_local_path_rdq
* Receive descriptor queue (RDQ) to use for the trap group.
@@ -1149,21 +1108,29 @@ MLXSW_ITEM32(reg, htgt, local_path_cpu_tclass, 0x10, 16, 6);
*/
MLXSW_ITEM32(reg, htgt, local_path_rdq, 0x10, 0, 6);
static inline void mlxsw_reg_htgt_pack(char *payload, u8 trap_group)
static inline void mlxsw_reg_htgt_pack(char *payload,
enum mlxsw_reg_htgt_trap_group group)
{
u8 swid, rdq;
MLXSW_REG_ZERO(htgt, payload);
if (MLXSW_REG_HTGT_TRAP_GROUP_EMAD == trap_group) {
switch (group) {
case MLXSW_REG_HTGT_TRAP_GROUP_EMAD:
swid = MLXSW_PORT_SWID_ALL_SWIDS;
rdq = MLXSW_REG_HTGT_LOCAL_PATH_RDQ_EMAD;
} else {
break;
case MLXSW_REG_HTGT_TRAP_GROUP_RX:
swid = 0;
rdq = MLXSW_REG_HTGT_LOCAL_PATH_RDQ_RX;
break;
case MLXSW_REG_HTGT_TRAP_GROUP_CTRL:
swid = 0;
rdq = MLXSW_REG_HTGT_LOCAL_PATH_RDQ_CTRL;
break;
}
mlxsw_reg_htgt_swid_set(payload, swid);
mlxsw_reg_htgt_type_set(payload, MLXSW_REG_HTGT_PATH_TYPE_LOCAL);
mlxsw_reg_htgt_trap_group_set(payload, trap_group);
mlxsw_reg_htgt_trap_group_set(payload, group);
mlxsw_reg_htgt_pide_set(payload, MLXSW_REG_HTGT_POLICER_DISABLE);
mlxsw_reg_htgt_pid_set(payload, 0);
mlxsw_reg_htgt_mirror_action_set(payload, MLXSW_REG_HTGT_TRAP_TO_CPU);
@@ -1254,12 +1221,22 @@ enum {
*/
MLXSW_ITEM32(reg, hpkt, ctrl, 0x04, 16, 2);
static inline void mlxsw_reg_hpkt_pack(char *payload, u8 action,
u8 trap_group, u16 trap_id)
static inline void mlxsw_reg_hpkt_pack(char *payload, u8 action, u16 trap_id)
{
enum mlxsw_reg_htgt_trap_group trap_group;
MLXSW_REG_ZERO(hpkt, payload);
mlxsw_reg_hpkt_ack_set(payload, MLXSW_REG_HPKT_ACK_NOT_REQUIRED);
mlxsw_reg_hpkt_action_set(payload, action);
switch (trap_id) {
case MLXSW_TRAP_ID_ETHEMAD:
case MLXSW_TRAP_ID_PUDE:
trap_group = MLXSW_REG_HTGT_TRAP_GROUP_EMAD;
break;
default:
trap_group = MLXSW_REG_HTGT_TRAP_GROUP_RX;
break;
}
mlxsw_reg_hpkt_trap_group_set(payload, trap_group);
mlxsw_reg_hpkt_trap_id_set(payload, trap_id);
mlxsw_reg_hpkt_ctrl_set(payload, MLXSW_REG_HPKT_CTRL_PACKET_DEFAULT);
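
With the group inferred from the trap ID, callers now pass just the action
and trap ID, as the updated call sites later in this commit show. For
example, registering a trap-to-CPU rule for the PUDE event (which resolves
to the EMAD group per the switch above) reduces to:

	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int err;

	mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
			    MLXSW_TRAP_ID_PUDE);
	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);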
@@ -1272,8 +1249,6 @@ static inline const char *mlxsw_reg_id_str(u16 reg_id)
return "SGCR";
case MLXSW_REG_SPAD_ID:
return "SPAD";
case MLXSW_REG_SMID_ID:
return "SMID";
case MLXSW_REG_SSPR_ID:
return "SSPR";
case MLXSW_REG_SPMS_ID:

drivers/net/ethernet/mellanox/mlxsw/switchx2.c

@@ -57,13 +57,11 @@ static const char mlxsw_sx_driver_version[] = "1.0";
struct mlxsw_sx_port;
#define MLXSW_SW_HW_ID_LEN 6
struct mlxsw_sx {
struct mlxsw_sx_port **ports;
struct mlxsw_core *core;
const struct mlxsw_bus_info *bus_info;
u8 hw_id[MLXSW_SW_HW_ID_LEN];
u8 hw_id[ETH_ALEN];
};
struct mlxsw_sx_port_pcpu_stats {
@@ -925,7 +923,8 @@ static int mlxsw_sx_port_stp_state_set(struct mlxsw_sx_port *mlxsw_sx_port,
spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
if (!spms_pl)
return -ENOMEM;
mlxsw_reg_spms_pack(spms_pl, mlxsw_sx_port->local_port, vid, state);
mlxsw_reg_spms_pack(spms_pl, mlxsw_sx_port->local_port);
mlxsw_reg_spms_vid_pack(spms_pl, vid, state);
err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(spms), spms_pl);
kfree(spms_pl);
return err;
@@ -1178,8 +1177,7 @@ static int mlxsw_sx_event_register(struct mlxsw_sx *mlxsw_sx,
if (err)
return err;
mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
MLXSW_REG_HTGT_TRAP_GROUP_EMAD, trap_id);
mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD, trap_id);
err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);
if (err)
goto err_event_trap_set;
@@ -1212,9 +1210,8 @@ static void mlxsw_sx_rx_listener_func(struct sk_buff *skb, u8 local_port,
struct mlxsw_sx_port_pcpu_stats *pcpu_stats;
if (unlikely(!mlxsw_sx_port)) {
if (net_ratelimit())
dev_warn(mlxsw_sx->bus_info->dev, "Port %d: skb received for non-existent port\n",
local_port);
dev_warn_ratelimited(mlxsw_sx->bus_info->dev, "Port %d: skb received for non-existent port\n",
local_port);
return;
}
@@ -1316,6 +1313,11 @@ static int mlxsw_sx_traps_init(struct mlxsw_sx *mlxsw_sx)
if (err)
return err;
mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_CTRL);
err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(htgt), htgt_pl);
if (err)
return err;
for (i = 0; i < ARRAY_SIZE(mlxsw_sx_rx_listener); i++) {
err = mlxsw_core_rx_listener_register(mlxsw_sx->core,
&mlxsw_sx_rx_listener[i],
@@ -1324,7 +1326,6 @@ static int mlxsw_sx_traps_init(struct mlxsw_sx *mlxsw_sx)
goto err_rx_listener_register;
mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
MLXSW_REG_HTGT_TRAP_GROUP_RX,
mlxsw_sx_rx_listener[i].trap_id);
err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);
if (err)
@@ -1339,7 +1340,6 @@ err_rx_trap_set:
err_rx_listener_register:
for (i--; i >= 0; i--) {
mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
MLXSW_REG_HTGT_TRAP_GROUP_RX,
mlxsw_sx_rx_listener[i].trap_id);
mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);
@@ -1357,7 +1357,6 @@ static void mlxsw_sx_traps_fini(struct mlxsw_sx *mlxsw_sx)
for (i = 0; i < ARRAY_SIZE(mlxsw_sx_rx_listener); i++) {
mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_FORWARD,
MLXSW_REG_HTGT_TRAP_GROUP_RX,
mlxsw_sx_rx_listener[i].trap_id);
mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(hpkt), hpkt_pl);
@@ -1371,20 +1370,9 @@ static int mlxsw_sx_flood_init(struct mlxsw_sx *mlxsw_sx)
{
char sfgc_pl[MLXSW_REG_SFGC_LEN];
char sgcr_pl[MLXSW_REG_SGCR_LEN];
char *smid_pl;
char *sftr_pl;
int err;
/* Due to FW bug, we must configure SMID. */
smid_pl = kmalloc(MLXSW_REG_SMID_LEN, GFP_KERNEL);
if (!smid_pl)
return -ENOMEM;
mlxsw_reg_smid_pack(smid_pl, MLXSW_PORT_MID);
err = mlxsw_reg_write(mlxsw_sx->core, MLXSW_REG(smid), smid_pl);
kfree(smid_pl);
if (err)
return err;
/* Configure a flooding table, which includes only CPU port. */
sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
if (!sftr_pl)