mirror of
https://github.com/torvalds/linux.git
synced 2024-12-27 13:22:23 +00:00
Merge branch 'move-siena-into-a-separate-subdirectory'
Martin Habets says: ==================== Move Siena into a separate subdirectory The Siena NICs (SFN5000 and SFN6000 series) went EOL in November 2021. Most of these adapters have been removed from our test labs, and testing has been reduced to a minimum. This patch series creates a separate kernel module for the Siena architecture, analogous to what was done for Falcon some years ago. This reduces our maintenance for the sfc.ko module, and allows us to enhance the EF10 and EF100 drivers without the risk of breaking Siena NICs. After this series further enhancements are needed to differentiate the new kernel module from sfc.ko, and the Siena code can be removed from sfc.ko. These will be posted as a small follow-up series. The Siena module is not built by default, but can be enabled using Kconfig option SFC_SIENA. This will create module sfc-siena.ko. Patches: Patches 1-3 establish the code base for the Siena driver. Patches 4-10 ensure the allyesconfig build succeeds. Patch 11 adds the basic Siena module. I do not expect patches 1 through 3 to be reviewed, they are FYI only. No checkpatch issues were resolved as part of these, but they were fixed in the subsequent patches. Testing: Various build tests were done such as allyesconfig, W=1 and sparse. The new sfc-siena.ko and sfc.ko modules were tested on a machine with both these NICs in them, and several tests were run on both drivers. ==================== Link: https://lore.kernel.org/r/165211018297.5289.9658523545298485394.stgit@palantir17.mph.net Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
commit
bca56ea684
@ -65,5 +65,6 @@ config SFC_MCDI_LOGGING
|
||||
a sysfs file 'mcdi_logging' under the PCI device.
|
||||
|
||||
source "drivers/net/ethernet/sfc/falcon/Kconfig"
|
||||
source "drivers/net/ethernet/sfc/siena/Kconfig"
|
||||
|
||||
endif # NET_VENDOR_SOLARFLARE
|
||||
|
@ -13,3 +13,4 @@ sfc-$(CONFIG_SFC_SRIOV) += sriov.o ef10_sriov.o ef100_sriov.o
|
||||
obj-$(CONFIG_SFC) += sfc.o
|
||||
|
||||
obj-$(CONFIG_SFC_FALCON) += falcon/
|
||||
obj-$(CONFIG_SFC_SIENA) += siena/
|
||||
|
12
drivers/net/ethernet/sfc/siena/Kconfig
Normal file
12
drivers/net/ethernet/sfc/siena/Kconfig
Normal file
@ -0,0 +1,12 @@
|
||||
# SPDX-License-Identifier: GPL-2.0-only
|
||||
config SFC_SIENA
|
||||
tristate "Solarflare SFC9000 support"
|
||||
depends on PCI
|
||||
select MDIO
|
||||
select CRC32
|
||||
help
|
||||
This driver supports 10-gigabit Ethernet cards based on
|
||||
the Solarflare SFC9000 controller.
|
||||
|
||||
To compile this driver as a module, choose M here. The module
|
||||
will be called sfc-siena.
|
11
drivers/net/ethernet/sfc/siena/Makefile
Normal file
11
drivers/net/ethernet/sfc/siena/Makefile
Normal file
@ -0,0 +1,11 @@
|
||||
# SPDX-License-Identifier: GPL-2.0
|
||||
sfc-siena-y += farch.o siena.o \
|
||||
efx.o efx_common.o efx_channels.o nic.o \
|
||||
tx.o tx_common.o rx.o rx_common.o \
|
||||
selftest.o ethtool.o ethtool_common.o ptp.o \
|
||||
mcdi.o mcdi_port.o mcdi_port_common.o \
|
||||
mcdi_mon.o
|
||||
sfc-siena-$(CONFIG_SFC_MTD) += mtd.o
|
||||
sfc-siena-$(CONFIG_SFC_SRIOV) += siena_sriov.o
|
||||
|
||||
obj-$(CONFIG_SFC_SIENA) += sfc-siena.o
|
614
drivers/net/ethernet/sfc/siena/bitfield.h
Normal file
614
drivers/net/ethernet/sfc/siena/bitfield.h
Normal file
@ -0,0 +1,614 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/****************************************************************************
|
||||
* Driver for Solarflare network controllers and boards
|
||||
* Copyright 2005-2006 Fen Systems Ltd.
|
||||
* Copyright 2006-2013 Solarflare Communications Inc.
|
||||
*/
|
||||
|
||||
#ifndef EFX_BITFIELD_H
|
||||
#define EFX_BITFIELD_H
|
||||
|
||||
/*
|
||||
* Efx bitfield access
|
||||
*
|
||||
* Efx NICs make extensive use of bitfields up to 128 bits
|
||||
* wide. Since there is no native 128-bit datatype on most systems,
|
||||
* and since 64-bit datatypes are inefficient on 32-bit systems and
|
||||
* vice versa, we wrap accesses in a way that uses the most efficient
|
||||
* datatype.
|
||||
*
|
||||
* The NICs are PCI devices and therefore little-endian. Since most
|
||||
* of the quantities that we deal with are DMAed to/from host memory,
|
||||
* we define our datatypes (efx_oword_t, efx_qword_t and
|
||||
* efx_dword_t) to be little-endian.
|
||||
*/
|
||||
|
||||
/* Lowest bit numbers and widths */
|
||||
#define EFX_DUMMY_FIELD_LBN 0
|
||||
#define EFX_DUMMY_FIELD_WIDTH 0
|
||||
#define EFX_WORD_0_LBN 0
|
||||
#define EFX_WORD_0_WIDTH 16
|
||||
#define EFX_WORD_1_LBN 16
|
||||
#define EFX_WORD_1_WIDTH 16
|
||||
#define EFX_DWORD_0_LBN 0
|
||||
#define EFX_DWORD_0_WIDTH 32
|
||||
#define EFX_DWORD_1_LBN 32
|
||||
#define EFX_DWORD_1_WIDTH 32
|
||||
#define EFX_DWORD_2_LBN 64
|
||||
#define EFX_DWORD_2_WIDTH 32
|
||||
#define EFX_DWORD_3_LBN 96
|
||||
#define EFX_DWORD_3_WIDTH 32
|
||||
#define EFX_QWORD_0_LBN 0
|
||||
#define EFX_QWORD_0_WIDTH 64
|
||||
|
||||
/* Specified attribute (e.g. LBN) of the specified field */
|
||||
#define EFX_VAL(field, attribute) field ## _ ## attribute
|
||||
/* Low bit number of the specified field */
|
||||
#define EFX_LOW_BIT(field) EFX_VAL(field, LBN)
|
||||
/* Bit width of the specified field */
|
||||
#define EFX_WIDTH(field) EFX_VAL(field, WIDTH)
|
||||
/* High bit number of the specified field */
|
||||
#define EFX_HIGH_BIT(field) (EFX_LOW_BIT(field) + EFX_WIDTH(field) - 1)
|
||||
/* Mask equal in width to the specified field.
|
||||
*
|
||||
* For example, a field with width 5 would have a mask of 0x1f.
|
||||
*
|
||||
* The maximum width mask that can be generated is 64 bits.
|
||||
*/
|
||||
#define EFX_MASK64(width) \
|
||||
((width) == 64 ? ~((u64) 0) : \
|
||||
(((((u64) 1) << (width))) - 1))
|
||||
|
||||
/* Mask equal in width to the specified field.
|
||||
*
|
||||
* For example, a field with width 5 would have a mask of 0x1f.
|
||||
*
|
||||
* The maximum width mask that can be generated is 32 bits. Use
|
||||
* EFX_MASK64 for higher width fields.
|
||||
*/
|
||||
#define EFX_MASK32(width) \
|
||||
((width) == 32 ? ~((u32) 0) : \
|
||||
(((((u32) 1) << (width))) - 1))
|
||||
|
||||
/* A doubleword (i.e. 4 byte) datatype - little-endian in HW */
|
||||
typedef union efx_dword {
|
||||
__le32 u32[1];
|
||||
} efx_dword_t;
|
||||
|
||||
/* A quadword (i.e. 8 byte) datatype - little-endian in HW */
|
||||
typedef union efx_qword {
|
||||
__le64 u64[1];
|
||||
__le32 u32[2];
|
||||
efx_dword_t dword[2];
|
||||
} efx_qword_t;
|
||||
|
||||
/* An octword (eight-word, i.e. 16 byte) datatype - little-endian in HW */
|
||||
typedef union efx_oword {
|
||||
__le64 u64[2];
|
||||
efx_qword_t qword[2];
|
||||
__le32 u32[4];
|
||||
efx_dword_t dword[4];
|
||||
} efx_oword_t;
|
||||
|
||||
/* Format string and value expanders for printk */
|
||||
#define EFX_DWORD_FMT "%08x"
|
||||
#define EFX_QWORD_FMT "%08x:%08x"
|
||||
#define EFX_OWORD_FMT "%08x:%08x:%08x:%08x"
|
||||
#define EFX_DWORD_VAL(dword) \
|
||||
((unsigned int) le32_to_cpu((dword).u32[0]))
|
||||
#define EFX_QWORD_VAL(qword) \
|
||||
((unsigned int) le32_to_cpu((qword).u32[1])), \
|
||||
((unsigned int) le32_to_cpu((qword).u32[0]))
|
||||
#define EFX_OWORD_VAL(oword) \
|
||||
((unsigned int) le32_to_cpu((oword).u32[3])), \
|
||||
((unsigned int) le32_to_cpu((oword).u32[2])), \
|
||||
((unsigned int) le32_to_cpu((oword).u32[1])), \
|
||||
((unsigned int) le32_to_cpu((oword).u32[0]))
|
||||
|
||||
/*
|
||||
* Extract bit field portion [low,high) from the native-endian element
|
||||
* which contains bits [min,max).
|
||||
*
|
||||
* For example, suppose "element" represents the high 32 bits of a
|
||||
* 64-bit value, and we wish to extract the bits belonging to the bit
|
||||
* field occupying bits 28-45 of this 64-bit value.
|
||||
*
|
||||
* Then EFX_EXTRACT ( element, 32, 63, 28, 45 ) would give
|
||||
*
|
||||
* ( element ) << 4
|
||||
*
|
||||
* The result will contain the relevant bits filled in in the range
|
||||
* [0,high-low), with garbage in bits [high-low+1,...).
|
||||
*/
|
||||
#define EFX_EXTRACT_NATIVE(native_element, min, max, low, high) \
|
||||
((low) > (max) || (high) < (min) ? 0 : \
|
||||
(low) > (min) ? \
|
||||
(native_element) >> ((low) - (min)) : \
|
||||
(native_element) << ((min) - (low)))
|
||||
|
||||
/*
|
||||
* Extract bit field portion [low,high) from the 64-bit little-endian
|
||||
* element which contains bits [min,max)
|
||||
*/
|
||||
#define EFX_EXTRACT64(element, min, max, low, high) \
|
||||
EFX_EXTRACT_NATIVE(le64_to_cpu(element), min, max, low, high)
|
||||
|
||||
/*
|
||||
* Extract bit field portion [low,high) from the 32-bit little-endian
|
||||
* element which contains bits [min,max)
|
||||
*/
|
||||
#define EFX_EXTRACT32(element, min, max, low, high) \
|
||||
EFX_EXTRACT_NATIVE(le32_to_cpu(element), min, max, low, high)
|
||||
|
||||
#define EFX_EXTRACT_OWORD64(oword, low, high) \
|
||||
((EFX_EXTRACT64((oword).u64[0], 0, 63, low, high) | \
|
||||
EFX_EXTRACT64((oword).u64[1], 64, 127, low, high)) & \
|
||||
EFX_MASK64((high) + 1 - (low)))
|
||||
|
||||
#define EFX_EXTRACT_QWORD64(qword, low, high) \
|
||||
(EFX_EXTRACT64((qword).u64[0], 0, 63, low, high) & \
|
||||
EFX_MASK64((high) + 1 - (low)))
|
||||
|
||||
#define EFX_EXTRACT_OWORD32(oword, low, high) \
|
||||
((EFX_EXTRACT32((oword).u32[0], 0, 31, low, high) | \
|
||||
EFX_EXTRACT32((oword).u32[1], 32, 63, low, high) | \
|
||||
EFX_EXTRACT32((oword).u32[2], 64, 95, low, high) | \
|
||||
EFX_EXTRACT32((oword).u32[3], 96, 127, low, high)) & \
|
||||
EFX_MASK32((high) + 1 - (low)))
|
||||
|
||||
#define EFX_EXTRACT_QWORD32(qword, low, high) \
|
||||
((EFX_EXTRACT32((qword).u32[0], 0, 31, low, high) | \
|
||||
EFX_EXTRACT32((qword).u32[1], 32, 63, low, high)) & \
|
||||
EFX_MASK32((high) + 1 - (low)))
|
||||
|
||||
#define EFX_EXTRACT_DWORD(dword, low, high) \
|
||||
(EFX_EXTRACT32((dword).u32[0], 0, 31, low, high) & \
|
||||
EFX_MASK32((high) + 1 - (low)))
|
||||
|
||||
#define EFX_OWORD_FIELD64(oword, field) \
|
||||
EFX_EXTRACT_OWORD64(oword, EFX_LOW_BIT(field), \
|
||||
EFX_HIGH_BIT(field))
|
||||
|
||||
#define EFX_QWORD_FIELD64(qword, field) \
|
||||
EFX_EXTRACT_QWORD64(qword, EFX_LOW_BIT(field), \
|
||||
EFX_HIGH_BIT(field))
|
||||
|
||||
#define EFX_OWORD_FIELD32(oword, field) \
|
||||
EFX_EXTRACT_OWORD32(oword, EFX_LOW_BIT(field), \
|
||||
EFX_HIGH_BIT(field))
|
||||
|
||||
#define EFX_QWORD_FIELD32(qword, field) \
|
||||
EFX_EXTRACT_QWORD32(qword, EFX_LOW_BIT(field), \
|
||||
EFX_HIGH_BIT(field))
|
||||
|
||||
#define EFX_DWORD_FIELD(dword, field) \
|
||||
EFX_EXTRACT_DWORD(dword, EFX_LOW_BIT(field), \
|
||||
EFX_HIGH_BIT(field))
|
||||
|
||||
#define EFX_OWORD_IS_ZERO64(oword) \
|
||||
(((oword).u64[0] | (oword).u64[1]) == (__force __le64) 0)
|
||||
|
||||
#define EFX_QWORD_IS_ZERO64(qword) \
|
||||
(((qword).u64[0]) == (__force __le64) 0)
|
||||
|
||||
#define EFX_OWORD_IS_ZERO32(oword) \
|
||||
(((oword).u32[0] | (oword).u32[1] | (oword).u32[2] | (oword).u32[3]) \
|
||||
== (__force __le32) 0)
|
||||
|
||||
#define EFX_QWORD_IS_ZERO32(qword) \
|
||||
(((qword).u32[0] | (qword).u32[1]) == (__force __le32) 0)
|
||||
|
||||
#define EFX_DWORD_IS_ZERO(dword) \
|
||||
(((dword).u32[0]) == (__force __le32) 0)
|
||||
|
||||
#define EFX_OWORD_IS_ALL_ONES64(oword) \
|
||||
(((oword).u64[0] & (oword).u64[1]) == ~((__force __le64) 0))
|
||||
|
||||
#define EFX_QWORD_IS_ALL_ONES64(qword) \
|
||||
((qword).u64[0] == ~((__force __le64) 0))
|
||||
|
||||
#define EFX_OWORD_IS_ALL_ONES32(oword) \
|
||||
(((oword).u32[0] & (oword).u32[1] & (oword).u32[2] & (oword).u32[3]) \
|
||||
== ~((__force __le32) 0))
|
||||
|
||||
#define EFX_QWORD_IS_ALL_ONES32(qword) \
|
||||
(((qword).u32[0] & (qword).u32[1]) == ~((__force __le32) 0))
|
||||
|
||||
#define EFX_DWORD_IS_ALL_ONES(dword) \
|
||||
((dword).u32[0] == ~((__force __le32) 0))
|
||||
|
||||
#if BITS_PER_LONG == 64
|
||||
#define EFX_OWORD_FIELD EFX_OWORD_FIELD64
|
||||
#define EFX_QWORD_FIELD EFX_QWORD_FIELD64
|
||||
#define EFX_OWORD_IS_ZERO EFX_OWORD_IS_ZERO64
|
||||
#define EFX_QWORD_IS_ZERO EFX_QWORD_IS_ZERO64
|
||||
#define EFX_OWORD_IS_ALL_ONES EFX_OWORD_IS_ALL_ONES64
|
||||
#define EFX_QWORD_IS_ALL_ONES EFX_QWORD_IS_ALL_ONES64
|
||||
#else
|
||||
#define EFX_OWORD_FIELD EFX_OWORD_FIELD32
|
||||
#define EFX_QWORD_FIELD EFX_QWORD_FIELD32
|
||||
#define EFX_OWORD_IS_ZERO EFX_OWORD_IS_ZERO32
|
||||
#define EFX_QWORD_IS_ZERO EFX_QWORD_IS_ZERO32
|
||||
#define EFX_OWORD_IS_ALL_ONES EFX_OWORD_IS_ALL_ONES32
|
||||
#define EFX_QWORD_IS_ALL_ONES EFX_QWORD_IS_ALL_ONES32
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Construct bit field portion
|
||||
*
|
||||
* Creates the portion of the bit field [low,high) that lies within
|
||||
* the range [min,max).
|
||||
*/
|
||||
#define EFX_INSERT_NATIVE64(min, max, low, high, value) \
|
||||
(((low > max) || (high < min)) ? 0 : \
|
||||
((low > min) ? \
|
||||
(((u64) (value)) << (low - min)) : \
|
||||
(((u64) (value)) >> (min - low))))
|
||||
|
||||
#define EFX_INSERT_NATIVE32(min, max, low, high, value) \
|
||||
(((low > max) || (high < min)) ? 0 : \
|
||||
((low > min) ? \
|
||||
(((u32) (value)) << (low - min)) : \
|
||||
(((u32) (value)) >> (min - low))))
|
||||
|
||||
#define EFX_INSERT_NATIVE(min, max, low, high, value) \
|
||||
((((max - min) >= 32) || ((high - low) >= 32)) ? \
|
||||
EFX_INSERT_NATIVE64(min, max, low, high, value) : \
|
||||
EFX_INSERT_NATIVE32(min, max, low, high, value))
|
||||
|
||||
/*
|
||||
* Construct bit field portion
|
||||
*
|
||||
* Creates the portion of the named bit field that lies within the
|
||||
* range [min,max).
|
||||
*/
|
||||
#define EFX_INSERT_FIELD_NATIVE(min, max, field, value) \
|
||||
EFX_INSERT_NATIVE(min, max, EFX_LOW_BIT(field), \
|
||||
EFX_HIGH_BIT(field), value)
|
||||
|
||||
/*
|
||||
* Construct bit field
|
||||
*
|
||||
* Creates the portion of the named bit fields that lie within the
|
||||
* range [min,max).
|
||||
*/
|
||||
#define EFX_INSERT_FIELDS_NATIVE(min, max, \
|
||||
field1, value1, \
|
||||
field2, value2, \
|
||||
field3, value3, \
|
||||
field4, value4, \
|
||||
field5, value5, \
|
||||
field6, value6, \
|
||||
field7, value7, \
|
||||
field8, value8, \
|
||||
field9, value9, \
|
||||
field10, value10, \
|
||||
field11, value11, \
|
||||
field12, value12, \
|
||||
field13, value13, \
|
||||
field14, value14, \
|
||||
field15, value15, \
|
||||
field16, value16, \
|
||||
field17, value17, \
|
||||
field18, value18, \
|
||||
field19, value19) \
|
||||
(EFX_INSERT_FIELD_NATIVE((min), (max), field1, (value1)) | \
|
||||
EFX_INSERT_FIELD_NATIVE((min), (max), field2, (value2)) | \
|
||||
EFX_INSERT_FIELD_NATIVE((min), (max), field3, (value3)) | \
|
||||
EFX_INSERT_FIELD_NATIVE((min), (max), field4, (value4)) | \
|
||||
EFX_INSERT_FIELD_NATIVE((min), (max), field5, (value5)) | \
|
||||
EFX_INSERT_FIELD_NATIVE((min), (max), field6, (value6)) | \
|
||||
EFX_INSERT_FIELD_NATIVE((min), (max), field7, (value7)) | \
|
||||
EFX_INSERT_FIELD_NATIVE((min), (max), field8, (value8)) | \
|
||||
EFX_INSERT_FIELD_NATIVE((min), (max), field9, (value9)) | \
|
||||
EFX_INSERT_FIELD_NATIVE((min), (max), field10, (value10)) | \
|
||||
EFX_INSERT_FIELD_NATIVE((min), (max), field11, (value11)) | \
|
||||
EFX_INSERT_FIELD_NATIVE((min), (max), field12, (value12)) | \
|
||||
EFX_INSERT_FIELD_NATIVE((min), (max), field13, (value13)) | \
|
||||
EFX_INSERT_FIELD_NATIVE((min), (max), field14, (value14)) | \
|
||||
EFX_INSERT_FIELD_NATIVE((min), (max), field15, (value15)) | \
|
||||
EFX_INSERT_FIELD_NATIVE((min), (max), field16, (value16)) | \
|
||||
EFX_INSERT_FIELD_NATIVE((min), (max), field17, (value17)) | \
|
||||
EFX_INSERT_FIELD_NATIVE((min), (max), field18, (value18)) | \
|
||||
EFX_INSERT_FIELD_NATIVE((min), (max), field19, (value19)))
|
||||
|
||||
#define EFX_INSERT_FIELDS64(...) \
|
||||
cpu_to_le64(EFX_INSERT_FIELDS_NATIVE(__VA_ARGS__))
|
||||
|
||||
#define EFX_INSERT_FIELDS32(...) \
|
||||
cpu_to_le32(EFX_INSERT_FIELDS_NATIVE(__VA_ARGS__))
|
||||
|
||||
#define EFX_POPULATE_OWORD64(oword, ...) do { \
|
||||
(oword).u64[0] = EFX_INSERT_FIELDS64(0, 63, __VA_ARGS__); \
|
||||
(oword).u64[1] = EFX_INSERT_FIELDS64(64, 127, __VA_ARGS__); \
|
||||
} while (0)
|
||||
|
||||
#define EFX_POPULATE_QWORD64(qword, ...) do { \
|
||||
(qword).u64[0] = EFX_INSERT_FIELDS64(0, 63, __VA_ARGS__); \
|
||||
} while (0)
|
||||
|
||||
#define EFX_POPULATE_OWORD32(oword, ...) do { \
|
||||
(oword).u32[0] = EFX_INSERT_FIELDS32(0, 31, __VA_ARGS__); \
|
||||
(oword).u32[1] = EFX_INSERT_FIELDS32(32, 63, __VA_ARGS__); \
|
||||
(oword).u32[2] = EFX_INSERT_FIELDS32(64, 95, __VA_ARGS__); \
|
||||
(oword).u32[3] = EFX_INSERT_FIELDS32(96, 127, __VA_ARGS__); \
|
||||
} while (0)
|
||||
|
||||
#define EFX_POPULATE_QWORD32(qword, ...) do { \
|
||||
(qword).u32[0] = EFX_INSERT_FIELDS32(0, 31, __VA_ARGS__); \
|
||||
(qword).u32[1] = EFX_INSERT_FIELDS32(32, 63, __VA_ARGS__); \
|
||||
} while (0)
|
||||
|
||||
#define EFX_POPULATE_DWORD(dword, ...) do { \
|
||||
(dword).u32[0] = EFX_INSERT_FIELDS32(0, 31, __VA_ARGS__); \
|
||||
} while (0)
|
||||
|
||||
#if BITS_PER_LONG == 64
|
||||
#define EFX_POPULATE_OWORD EFX_POPULATE_OWORD64
|
||||
#define EFX_POPULATE_QWORD EFX_POPULATE_QWORD64
|
||||
#else
|
||||
#define EFX_POPULATE_OWORD EFX_POPULATE_OWORD32
|
||||
#define EFX_POPULATE_QWORD EFX_POPULATE_QWORD32
|
||||
#endif
|
||||
|
||||
/* Populate an octword field with various numbers of arguments */
|
||||
#define EFX_POPULATE_OWORD_19 EFX_POPULATE_OWORD
|
||||
#define EFX_POPULATE_OWORD_18(oword, ...) \
|
||||
EFX_POPULATE_OWORD_19(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_POPULATE_OWORD_17(oword, ...) \
|
||||
EFX_POPULATE_OWORD_18(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_POPULATE_OWORD_16(oword, ...) \
|
||||
EFX_POPULATE_OWORD_17(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_POPULATE_OWORD_15(oword, ...) \
|
||||
EFX_POPULATE_OWORD_16(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_POPULATE_OWORD_14(oword, ...) \
|
||||
EFX_POPULATE_OWORD_15(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_POPULATE_OWORD_13(oword, ...) \
|
||||
EFX_POPULATE_OWORD_14(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_POPULATE_OWORD_12(oword, ...) \
|
||||
EFX_POPULATE_OWORD_13(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_POPULATE_OWORD_11(oword, ...) \
|
||||
EFX_POPULATE_OWORD_12(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_POPULATE_OWORD_10(oword, ...) \
|
||||
EFX_POPULATE_OWORD_11(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_POPULATE_OWORD_9(oword, ...) \
|
||||
EFX_POPULATE_OWORD_10(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_POPULATE_OWORD_8(oword, ...) \
|
||||
EFX_POPULATE_OWORD_9(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_POPULATE_OWORD_7(oword, ...) \
|
||||
EFX_POPULATE_OWORD_8(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_POPULATE_OWORD_6(oword, ...) \
|
||||
EFX_POPULATE_OWORD_7(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_POPULATE_OWORD_5(oword, ...) \
|
||||
EFX_POPULATE_OWORD_6(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_POPULATE_OWORD_4(oword, ...) \
|
||||
EFX_POPULATE_OWORD_5(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_POPULATE_OWORD_3(oword, ...) \
|
||||
EFX_POPULATE_OWORD_4(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_POPULATE_OWORD_2(oword, ...) \
|
||||
EFX_POPULATE_OWORD_3(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_POPULATE_OWORD_1(oword, ...) \
|
||||
EFX_POPULATE_OWORD_2(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_ZERO_OWORD(oword) \
|
||||
EFX_POPULATE_OWORD_1(oword, EFX_DUMMY_FIELD, 0)
|
||||
#define EFX_SET_OWORD(oword) \
|
||||
EFX_POPULATE_OWORD_4(oword, \
|
||||
EFX_DWORD_0, 0xffffffff, \
|
||||
EFX_DWORD_1, 0xffffffff, \
|
||||
EFX_DWORD_2, 0xffffffff, \
|
||||
EFX_DWORD_3, 0xffffffff)
|
||||
|
||||
/* Populate a quadword field with various numbers of arguments */
|
||||
#define EFX_POPULATE_QWORD_19 EFX_POPULATE_QWORD
|
||||
#define EFX_POPULATE_QWORD_18(qword, ...) \
|
||||
EFX_POPULATE_QWORD_19(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_POPULATE_QWORD_17(qword, ...) \
|
||||
EFX_POPULATE_QWORD_18(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_POPULATE_QWORD_16(qword, ...) \
|
||||
EFX_POPULATE_QWORD_17(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_POPULATE_QWORD_15(qword, ...) \
|
||||
EFX_POPULATE_QWORD_16(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_POPULATE_QWORD_14(qword, ...) \
|
||||
EFX_POPULATE_QWORD_15(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_POPULATE_QWORD_13(qword, ...) \
|
||||
EFX_POPULATE_QWORD_14(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_POPULATE_QWORD_12(qword, ...) \
|
||||
EFX_POPULATE_QWORD_13(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_POPULATE_QWORD_11(qword, ...) \
|
||||
EFX_POPULATE_QWORD_12(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_POPULATE_QWORD_10(qword, ...) \
|
||||
EFX_POPULATE_QWORD_11(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_POPULATE_QWORD_9(qword, ...) \
|
||||
EFX_POPULATE_QWORD_10(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_POPULATE_QWORD_8(qword, ...) \
|
||||
EFX_POPULATE_QWORD_9(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_POPULATE_QWORD_7(qword, ...) \
|
||||
EFX_POPULATE_QWORD_8(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_POPULATE_QWORD_6(qword, ...) \
|
||||
EFX_POPULATE_QWORD_7(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_POPULATE_QWORD_5(qword, ...) \
|
||||
EFX_POPULATE_QWORD_6(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_POPULATE_QWORD_4(qword, ...) \
|
||||
EFX_POPULATE_QWORD_5(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_POPULATE_QWORD_3(qword, ...) \
|
||||
EFX_POPULATE_QWORD_4(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_POPULATE_QWORD_2(qword, ...) \
|
||||
EFX_POPULATE_QWORD_3(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_POPULATE_QWORD_1(qword, ...) \
|
||||
EFX_POPULATE_QWORD_2(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_ZERO_QWORD(qword) \
|
||||
EFX_POPULATE_QWORD_1(qword, EFX_DUMMY_FIELD, 0)
|
||||
#define EFX_SET_QWORD(qword) \
|
||||
EFX_POPULATE_QWORD_2(qword, \
|
||||
EFX_DWORD_0, 0xffffffff, \
|
||||
EFX_DWORD_1, 0xffffffff)
|
||||
|
||||
/* Populate a dword field with various numbers of arguments */
|
||||
#define EFX_POPULATE_DWORD_19 EFX_POPULATE_DWORD
|
||||
#define EFX_POPULATE_DWORD_18(dword, ...) \
|
||||
EFX_POPULATE_DWORD_19(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_POPULATE_DWORD_17(dword, ...) \
|
||||
EFX_POPULATE_DWORD_18(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_POPULATE_DWORD_16(dword, ...) \
|
||||
EFX_POPULATE_DWORD_17(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_POPULATE_DWORD_15(dword, ...) \
|
||||
EFX_POPULATE_DWORD_16(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_POPULATE_DWORD_14(dword, ...) \
|
||||
EFX_POPULATE_DWORD_15(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_POPULATE_DWORD_13(dword, ...) \
|
||||
EFX_POPULATE_DWORD_14(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_POPULATE_DWORD_12(dword, ...) \
|
||||
EFX_POPULATE_DWORD_13(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_POPULATE_DWORD_11(dword, ...) \
|
||||
EFX_POPULATE_DWORD_12(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_POPULATE_DWORD_10(dword, ...) \
|
||||
EFX_POPULATE_DWORD_11(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_POPULATE_DWORD_9(dword, ...) \
|
||||
EFX_POPULATE_DWORD_10(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_POPULATE_DWORD_8(dword, ...) \
|
||||
EFX_POPULATE_DWORD_9(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_POPULATE_DWORD_7(dword, ...) \
|
||||
EFX_POPULATE_DWORD_8(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_POPULATE_DWORD_6(dword, ...) \
|
||||
EFX_POPULATE_DWORD_7(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_POPULATE_DWORD_5(dword, ...) \
|
||||
EFX_POPULATE_DWORD_6(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_POPULATE_DWORD_4(dword, ...) \
|
||||
EFX_POPULATE_DWORD_5(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_POPULATE_DWORD_3(dword, ...) \
|
||||
EFX_POPULATE_DWORD_4(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_POPULATE_DWORD_2(dword, ...) \
|
||||
EFX_POPULATE_DWORD_3(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_POPULATE_DWORD_1(dword, ...) \
|
||||
EFX_POPULATE_DWORD_2(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__)
|
||||
#define EFX_ZERO_DWORD(dword) \
|
||||
EFX_POPULATE_DWORD_1(dword, EFX_DUMMY_FIELD, 0)
|
||||
#define EFX_SET_DWORD(dword) \
|
||||
EFX_POPULATE_DWORD_1(dword, EFX_DWORD_0, 0xffffffff)
|
||||
|
||||
/*
|
||||
* Modify a named field within an already-populated structure. Used
|
||||
* for read-modify-write operations.
|
||||
*
|
||||
*/
|
||||
#define EFX_INVERT_OWORD(oword) do { \
|
||||
(oword).u64[0] = ~((oword).u64[0]); \
|
||||
(oword).u64[1] = ~((oword).u64[1]); \
|
||||
} while (0)
|
||||
|
||||
#define EFX_AND_OWORD(oword, from, mask) \
|
||||
do { \
|
||||
(oword).u64[0] = (from).u64[0] & (mask).u64[0]; \
|
||||
(oword).u64[1] = (from).u64[1] & (mask).u64[1]; \
|
||||
} while (0)
|
||||
|
||||
#define EFX_AND_QWORD(qword, from, mask) \
|
||||
(qword).u64[0] = (from).u64[0] & (mask).u64[0]
|
||||
|
||||
#define EFX_OR_OWORD(oword, from, mask) \
|
||||
do { \
|
||||
(oword).u64[0] = (from).u64[0] | (mask).u64[0]; \
|
||||
(oword).u64[1] = (from).u64[1] | (mask).u64[1]; \
|
||||
} while (0)
|
||||
|
||||
#define EFX_INSERT64(min, max, low, high, value) \
|
||||
cpu_to_le64(EFX_INSERT_NATIVE(min, max, low, high, value))
|
||||
|
||||
#define EFX_INSERT32(min, max, low, high, value) \
|
||||
cpu_to_le32(EFX_INSERT_NATIVE(min, max, low, high, value))
|
||||
|
||||
#define EFX_INPLACE_MASK64(min, max, low, high) \
|
||||
EFX_INSERT64(min, max, low, high, EFX_MASK64((high) + 1 - (low)))
|
||||
|
||||
#define EFX_INPLACE_MASK32(min, max, low, high) \
|
||||
EFX_INSERT32(min, max, low, high, EFX_MASK32((high) + 1 - (low)))
|
||||
|
||||
#define EFX_SET_OWORD64(oword, low, high, value) do { \
|
||||
(oword).u64[0] = (((oword).u64[0] \
|
||||
& ~EFX_INPLACE_MASK64(0, 63, low, high)) \
|
||||
| EFX_INSERT64(0, 63, low, high, value)); \
|
||||
(oword).u64[1] = (((oword).u64[1] \
|
||||
& ~EFX_INPLACE_MASK64(64, 127, low, high)) \
|
||||
| EFX_INSERT64(64, 127, low, high, value)); \
|
||||
} while (0)
|
||||
|
||||
#define EFX_SET_QWORD64(qword, low, high, value) do { \
|
||||
(qword).u64[0] = (((qword).u64[0] \
|
||||
& ~EFX_INPLACE_MASK64(0, 63, low, high)) \
|
||||
| EFX_INSERT64(0, 63, low, high, value)); \
|
||||
} while (0)
|
||||
|
||||
#define EFX_SET_OWORD32(oword, low, high, value) do { \
|
||||
(oword).u32[0] = (((oword).u32[0] \
|
||||
& ~EFX_INPLACE_MASK32(0, 31, low, high)) \
|
||||
| EFX_INSERT32(0, 31, low, high, value)); \
|
||||
(oword).u32[1] = (((oword).u32[1] \
|
||||
& ~EFX_INPLACE_MASK32(32, 63, low, high)) \
|
||||
| EFX_INSERT32(32, 63, low, high, value)); \
|
||||
(oword).u32[2] = (((oword).u32[2] \
|
||||
& ~EFX_INPLACE_MASK32(64, 95, low, high)) \
|
||||
| EFX_INSERT32(64, 95, low, high, value)); \
|
||||
(oword).u32[3] = (((oword).u32[3] \
|
||||
& ~EFX_INPLACE_MASK32(96, 127, low, high)) \
|
||||
| EFX_INSERT32(96, 127, low, high, value)); \
|
||||
} while (0)
|
||||
|
||||
#define EFX_SET_QWORD32(qword, low, high, value) do { \
|
||||
(qword).u32[0] = (((qword).u32[0] \
|
||||
& ~EFX_INPLACE_MASK32(0, 31, low, high)) \
|
||||
| EFX_INSERT32(0, 31, low, high, value)); \
|
||||
(qword).u32[1] = (((qword).u32[1] \
|
||||
& ~EFX_INPLACE_MASK32(32, 63, low, high)) \
|
||||
| EFX_INSERT32(32, 63, low, high, value)); \
|
||||
} while (0)
|
||||
|
||||
#define EFX_SET_DWORD32(dword, low, high, value) do { \
|
||||
(dword).u32[0] = (((dword).u32[0] \
|
||||
& ~EFX_INPLACE_MASK32(0, 31, low, high)) \
|
||||
| EFX_INSERT32(0, 31, low, high, value)); \
|
||||
} while (0)
|
||||
|
||||
#define EFX_SET_OWORD_FIELD64(oword, field, value) \
|
||||
EFX_SET_OWORD64(oword, EFX_LOW_BIT(field), \
|
||||
EFX_HIGH_BIT(field), value)
|
||||
|
||||
#define EFX_SET_QWORD_FIELD64(qword, field, value) \
|
||||
EFX_SET_QWORD64(qword, EFX_LOW_BIT(field), \
|
||||
EFX_HIGH_BIT(field), value)
|
||||
|
||||
#define EFX_SET_OWORD_FIELD32(oword, field, value) \
|
||||
EFX_SET_OWORD32(oword, EFX_LOW_BIT(field), \
|
||||
EFX_HIGH_BIT(field), value)
|
||||
|
||||
#define EFX_SET_QWORD_FIELD32(qword, field, value) \
|
||||
EFX_SET_QWORD32(qword, EFX_LOW_BIT(field), \
|
||||
EFX_HIGH_BIT(field), value)
|
||||
|
||||
#define EFX_SET_DWORD_FIELD(dword, field, value) \
|
||||
EFX_SET_DWORD32(dword, EFX_LOW_BIT(field), \
|
||||
EFX_HIGH_BIT(field), value)
|
||||
|
||||
|
||||
|
||||
#if BITS_PER_LONG == 64
|
||||
#define EFX_SET_OWORD_FIELD EFX_SET_OWORD_FIELD64
|
||||
#define EFX_SET_QWORD_FIELD EFX_SET_QWORD_FIELD64
|
||||
#else
|
||||
#define EFX_SET_OWORD_FIELD EFX_SET_OWORD_FIELD32
|
||||
#define EFX_SET_QWORD_FIELD EFX_SET_QWORD_FIELD32
|
||||
#endif
|
||||
|
||||
/* Used to avoid compiler warnings about shift range exceeding width
|
||||
* of the data types when dma_addr_t is only 32 bits wide.
|
||||
*/
|
||||
#define DMA_ADDR_T_WIDTH (8 * sizeof(dma_addr_t))
|
||||
#define EFX_DMA_TYPE_WIDTH(width) \
|
||||
(((width) < DMA_ADDR_T_WIDTH) ? (width) : DMA_ADDR_T_WIDTH)
|
||||
|
||||
|
||||
/* Static initialiser */
|
||||
#define EFX_OWORD32(a, b, c, d) \
|
||||
{ .u32 = { cpu_to_le32(a), cpu_to_le32(b), \
|
||||
cpu_to_le32(c), cpu_to_le32(d) } }
|
||||
|
||||
#endif /* EFX_BITFIELD_H */
|
1309
drivers/net/ethernet/sfc/siena/efx.c
Normal file
1309
drivers/net/ethernet/sfc/siena/efx.c
Normal file
File diff suppressed because it is too large
Load Diff
218
drivers/net/ethernet/sfc/siena/efx.h
Normal file
218
drivers/net/ethernet/sfc/siena/efx.h
Normal file
@ -0,0 +1,218 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/****************************************************************************
|
||||
* Driver for Solarflare network controllers and boards
|
||||
* Copyright 2005-2006 Fen Systems Ltd.
|
||||
* Copyright 2006-2013 Solarflare Communications Inc.
|
||||
*/
|
||||
|
||||
#ifndef EFX_EFX_H
|
||||
#define EFX_EFX_H
|
||||
|
||||
#include <linux/indirect_call_wrapper.h>
|
||||
#include "net_driver.h"
|
||||
#include "filter.h"
|
||||
|
||||
/* TX */
|
||||
void efx_siena_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue);
|
||||
netdev_tx_t efx_siena_hard_start_xmit(struct sk_buff *skb,
|
||||
struct net_device *net_dev);
|
||||
netdev_tx_t __efx_siena_enqueue_skb(struct efx_tx_queue *tx_queue,
|
||||
struct sk_buff *skb);
|
||||
/* Queue @skb for transmission on @tx_queue via the NIC-type tx_enqueue hook.
 * INDIRECT_CALL_1 allows the compiler to turn the indirect call through
 * efx->type->tx_enqueue into a direct call when the target is
 * __efx_siena_enqueue_skb (the only candidate named here), avoiding the
 * cost of an indirect branch on the hot TX path.
 */
static inline netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
{
	return INDIRECT_CALL_1(tx_queue->efx->type->tx_enqueue,
			       __efx_siena_enqueue_skb, tx_queue, skb);
}
|
||||
int efx_siena_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
|
||||
void *type_data);
|
||||
|
||||
/* RX */
|
||||
void __efx_siena_rx_packet(struct efx_channel *channel);
|
||||
void efx_siena_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
|
||||
unsigned int n_frags, unsigned int len, u16 flags);
|
||||
static inline void efx_rx_flush_packet(struct efx_channel *channel)
|
||||
{
|
||||
if (channel->rx_pkt_n_frags)
|
||||
__efx_siena_rx_packet(channel);
|
||||
}
|
||||
|
||||
/* Maximum number of TCP segments we support for soft-TSO */
|
||||
#define EFX_TSO_MAX_SEGS 100
|
||||
|
||||
/* The smallest [rt]xq_entries that the driver supports. RX minimum
|
||||
* is a bit arbitrary. For TX, we must have space for at least 2
|
||||
* TSO skbs.
|
||||
*/
|
||||
#define EFX_RXQ_MIN_ENT 128U
|
||||
#define EFX_TXQ_MIN_ENT(efx) (2 * efx_siena_tx_max_skb_descs(efx))
|
||||
|
||||
/* All EF10 architecture NICs steal one bit of the DMAQ size for various
|
||||
* other purposes when counting TxQ entries, so we halve the queue size.
|
||||
*/
|
||||
#define EFX_TXQ_MAX_ENT(efx) (EFX_WORKAROUND_EF10(efx) ? \
|
||||
EFX_MAX_DMAQ_SIZE / 2 : EFX_MAX_DMAQ_SIZE)
|
||||
|
||||
static inline bool efx_rss_enabled(struct efx_nic *efx)
|
||||
{
|
||||
return efx->rss_spread > 1;
|
||||
}
|
||||
|
||||
/* Filters */
|
||||
|
||||
/**
|
||||
* efx_filter_insert_filter - add or replace a filter
|
||||
* @efx: NIC in which to insert the filter
|
||||
* @spec: Specification for the filter
|
||||
* @replace_equal: Flag for whether the specified filter may replace an
|
||||
* existing filter with equal priority
|
||||
*
|
||||
* On success, return the filter ID.
|
||||
* On failure, return a negative error code.
|
||||
*
|
||||
* If existing filters have equal match values to the new filter spec,
|
||||
* then the new filter might replace them or the function might fail,
|
||||
* as follows.
|
||||
*
|
||||
* 1. If the existing filters have lower priority, or @replace_equal
|
||||
* is set and they have equal priority, replace them.
|
||||
*
|
||||
* 2. If the existing filters have higher priority, return -%EPERM.
|
||||
*
|
||||
* 3. If !efx_siena_filter_is_mc_recipient(@spec), or the NIC does not
|
||||
* support delivery to multiple recipients, return -%EEXIST.
|
||||
*
|
||||
* This implies that filters for multiple multicast recipients must
|
||||
* all be inserted with the same priority and @replace_equal = %false.
|
||||
*/
|
||||
static inline s32 efx_filter_insert_filter(struct efx_nic *efx,
|
||||
struct efx_filter_spec *spec,
|
||||
bool replace_equal)
|
||||
{
|
||||
return efx->type->filter_insert(efx, spec, replace_equal);
|
||||
}
|
||||
|
||||
/**
|
||||
* efx_filter_remove_id_safe - remove a filter by ID, carefully
|
||||
* @efx: NIC from which to remove the filter
|
||||
* @priority: Priority of filter, as passed to @efx_filter_insert_filter
|
||||
* @filter_id: ID of filter, as returned by @efx_filter_insert_filter
|
||||
*
|
||||
* This function will range-check @filter_id, so it is safe to call
|
||||
* with a value passed from userland.
|
||||
*/
|
||||
static inline int efx_filter_remove_id_safe(struct efx_nic *efx,
|
||||
enum efx_filter_priority priority,
|
||||
u32 filter_id)
|
||||
{
|
||||
return efx->type->filter_remove_safe(efx, priority, filter_id);
|
||||
}
|
||||
|
||||
/**
|
||||
* efx_filter_get_filter_safe - retrieve a filter by ID, carefully
|
||||
* @efx: NIC from which to remove the filter
|
||||
* @priority: Priority of filter, as passed to @efx_filter_insert_filter
|
||||
* @filter_id: ID of filter, as returned by @efx_filter_insert_filter
|
||||
* @spec: Buffer in which to store filter specification
|
||||
*
|
||||
* This function will range-check @filter_id, so it is safe to call
|
||||
* with a value passed from userland.
|
||||
*/
|
||||
static inline int
|
||||
efx_filter_get_filter_safe(struct efx_nic *efx,
|
||||
enum efx_filter_priority priority,
|
||||
u32 filter_id, struct efx_filter_spec *spec)
|
||||
{
|
||||
return efx->type->filter_get_safe(efx, priority, filter_id, spec);
|
||||
}
|
||||
|
||||
static inline u32 efx_filter_count_rx_used(struct efx_nic *efx,
|
||||
enum efx_filter_priority priority)
|
||||
{
|
||||
return efx->type->filter_count_rx_used(efx, priority);
|
||||
}
|
||||
static inline u32 efx_filter_get_rx_id_limit(struct efx_nic *efx)
|
||||
{
|
||||
return efx->type->filter_get_rx_id_limit(efx);
|
||||
}
|
||||
static inline s32 efx_filter_get_rx_ids(struct efx_nic *efx,
|
||||
enum efx_filter_priority priority,
|
||||
u32 *buf, u32 size)
|
||||
{
|
||||
return efx->type->filter_get_rx_ids(efx, priority, buf, size);
|
||||
}
|
||||
|
||||
/* RSS contexts */
|
||||
static inline bool efx_rss_active(struct efx_rss_context *ctx)
|
||||
{
|
||||
return ctx->context_id != EFX_MCDI_RSS_CONTEXT_INVALID;
|
||||
}
|
||||
|
||||
/* Ethtool support */
|
||||
extern const struct ethtool_ops efx_siena_ethtool_ops;
|
||||
|
||||
/* Global */
|
||||
unsigned int efx_siena_usecs_to_ticks(struct efx_nic *efx, unsigned int usecs);
|
||||
int efx_siena_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
|
||||
unsigned int rx_usecs, bool rx_adaptive,
|
||||
bool rx_may_override_tx);
|
||||
void efx_siena_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
|
||||
unsigned int *rx_usecs, bool *rx_adaptive);
|
||||
|
||||
/* Update the generic software stats in the passed stats array */
|
||||
void efx_siena_update_sw_stats(struct efx_nic *efx, u64 *stats);
|
||||
|
||||
/* MTD */
|
||||
#ifdef CONFIG_SFC_MTD
|
||||
int efx_siena_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts,
|
||||
size_t n_parts, size_t sizeof_part);
|
||||
static inline int efx_mtd_probe(struct efx_nic *efx)
|
||||
{
|
||||
return efx->type->mtd_probe(efx);
|
||||
}
|
||||
void efx_siena_mtd_rename(struct efx_nic *efx);
|
||||
void efx_siena_mtd_remove(struct efx_nic *efx);
|
||||
#else
|
||||
static inline int efx_mtd_probe(struct efx_nic *efx) { return 0; }
|
||||
static inline void efx_siena_mtd_rename(struct efx_nic *efx) {}
|
||||
static inline void efx_siena_mtd_remove(struct efx_nic *efx) {}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_SFC_SRIOV
|
||||
static inline unsigned int efx_vf_size(struct efx_nic *efx)
|
||||
{
|
||||
return 1 << efx->vi_scale;
|
||||
}
|
||||
#endif
|
||||
|
||||
static inline void efx_device_detach_sync(struct efx_nic *efx)
|
||||
{
|
||||
struct net_device *dev = efx->net_dev;
|
||||
|
||||
/* Lock/freeze all TX queues so that we can be sure the
|
||||
* TX scheduler is stopped when we're done and before
|
||||
* netif_device_present() becomes false.
|
||||
*/
|
||||
netif_tx_lock_bh(dev);
|
||||
netif_device_detach(dev);
|
||||
netif_tx_unlock_bh(dev);
|
||||
}
|
||||
|
||||
static inline void efx_device_attach_if_not_resetting(struct efx_nic *efx)
|
||||
{
|
||||
if ((efx->state != STATE_DISABLED) && !efx->reset_pending)
|
||||
netif_device_attach(efx->net_dev);
|
||||
}
|
||||
|
||||
static inline bool efx_rwsem_assert_write_locked(struct rw_semaphore *sem)
|
||||
{
|
||||
if (WARN_ON(down_read_trylock(sem))) {
|
||||
up_read(sem);
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
int efx_siena_xdp_tx_buffers(struct efx_nic *efx, int n,
|
||||
struct xdp_frame **xdpfs, bool flush);
|
||||
|
||||
#endif /* EFX_EFX_H */
|
1376
drivers/net/ethernet/sfc/siena/efx_channels.c
Normal file
1376
drivers/net/ethernet/sfc/siena/efx_channels.c
Normal file
File diff suppressed because it is too large
Load Diff
45
drivers/net/ethernet/sfc/siena/efx_channels.h
Normal file
45
drivers/net/ethernet/sfc/siena/efx_channels.h
Normal file
@ -0,0 +1,45 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/****************************************************************************
|
||||
* Driver for Solarflare network controllers and boards
|
||||
* Copyright 2018 Solarflare Communications Inc.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 as published
|
||||
* by the Free Software Foundation, incorporated herein by reference.
|
||||
*/
|
||||
|
||||
#ifndef EFX_CHANNELS_H
|
||||
#define EFX_CHANNELS_H
|
||||
|
||||
extern unsigned int efx_siena_interrupt_mode;
|
||||
extern unsigned int efx_siena_rss_cpus;
|
||||
|
||||
int efx_siena_probe_interrupts(struct efx_nic *efx);
|
||||
void efx_siena_remove_interrupts(struct efx_nic *efx);
|
||||
int efx_siena_enable_interrupts(struct efx_nic *efx);
|
||||
void efx_siena_disable_interrupts(struct efx_nic *efx);
|
||||
|
||||
void efx_siena_set_interrupt_affinity(struct efx_nic *efx);
|
||||
void efx_siena_clear_interrupt_affinity(struct efx_nic *efx);
|
||||
|
||||
void efx_siena_start_eventq(struct efx_channel *channel);
|
||||
void efx_siena_stop_eventq(struct efx_channel *channel);
|
||||
|
||||
int efx_siena_realloc_channels(struct efx_nic *efx, u32 rxq_entries,
|
||||
u32 txq_entries);
|
||||
void efx_siena_set_channel_names(struct efx_nic *efx);
|
||||
int efx_siena_init_channels(struct efx_nic *efx);
|
||||
int efx_siena_probe_channels(struct efx_nic *efx);
|
||||
int efx_siena_set_channels(struct efx_nic *efx);
|
||||
void efx_siena_remove_channel(struct efx_channel *channel);
|
||||
void efx_siena_remove_channels(struct efx_nic *efx);
|
||||
void efx_siena_fini_channels(struct efx_nic *efx);
|
||||
void efx_siena_start_channels(struct efx_nic *efx);
|
||||
void efx_siena_stop_channels(struct efx_nic *efx);
|
||||
|
||||
void efx_siena_init_napi(struct efx_nic *efx);
|
||||
void efx_siena_fini_napi(struct efx_nic *efx);
|
||||
|
||||
void efx_siena_channel_dummy_op_void(struct efx_channel *channel);
|
||||
|
||||
#endif
|
1408
drivers/net/ethernet/sfc/siena/efx_common.c
Normal file
1408
drivers/net/ethernet/sfc/siena/efx_common.c
Normal file
File diff suppressed because it is too large
Load Diff
118
drivers/net/ethernet/sfc/siena/efx_common.h
Normal file
118
drivers/net/ethernet/sfc/siena/efx_common.h
Normal file
@ -0,0 +1,118 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/****************************************************************************
|
||||
* Driver for Solarflare network controllers and boards
|
||||
* Copyright 2018 Solarflare Communications Inc.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 as published
|
||||
* by the Free Software Foundation, incorporated herein by reference.
|
||||
*/
|
||||
|
||||
#ifndef EFX_COMMON_H
|
||||
#define EFX_COMMON_H
|
||||
|
||||
int efx_siena_init_io(struct efx_nic *efx, int bar, dma_addr_t dma_mask,
|
||||
unsigned int mem_map_size);
|
||||
void efx_siena_fini_io(struct efx_nic *efx);
|
||||
int efx_siena_init_struct(struct efx_nic *efx, struct pci_dev *pci_dev,
|
||||
struct net_device *net_dev);
|
||||
void efx_siena_fini_struct(struct efx_nic *efx);
|
||||
|
||||
#define EFX_MAX_DMAQ_SIZE 4096UL
|
||||
#define EFX_DEFAULT_DMAQ_SIZE 1024UL
|
||||
#define EFX_MIN_DMAQ_SIZE 512UL
|
||||
|
||||
#define EFX_MAX_EVQ_SIZE 16384UL
|
||||
#define EFX_MIN_EVQ_SIZE 512UL
|
||||
|
||||
void efx_siena_link_clear_advertising(struct efx_nic *efx);
|
||||
void efx_siena_link_set_wanted_fc(struct efx_nic *efx, u8 wanted_fc);
|
||||
|
||||
void efx_siena_start_all(struct efx_nic *efx);
|
||||
void efx_siena_stop_all(struct efx_nic *efx);
|
||||
|
||||
void efx_siena_net_stats(struct net_device *net_dev,
|
||||
struct rtnl_link_stats64 *stats);
|
||||
|
||||
int efx_siena_create_reset_workqueue(void);
|
||||
void efx_siena_queue_reset_work(struct efx_nic *efx);
|
||||
void efx_siena_flush_reset_workqueue(struct efx_nic *efx);
|
||||
void efx_siena_destroy_reset_workqueue(void);
|
||||
|
||||
void efx_siena_start_monitor(struct efx_nic *efx);
|
||||
|
||||
int __efx_siena_reconfigure_port(struct efx_nic *efx);
|
||||
int efx_siena_reconfigure_port(struct efx_nic *efx);
|
||||
|
||||
#define EFX_ASSERT_RESET_SERIALISED(efx) \
|
||||
do { \
|
||||
if ((efx->state == STATE_READY) || \
|
||||
(efx->state == STATE_RECOVERY) || \
|
||||
(efx->state == STATE_DISABLED)) \
|
||||
ASSERT_RTNL(); \
|
||||
} while (0)
|
||||
|
||||
int efx_siena_try_recovery(struct efx_nic *efx);
|
||||
void efx_siena_reset_down(struct efx_nic *efx, enum reset_type method);
|
||||
void efx_siena_watchdog(struct net_device *net_dev, unsigned int txqueue);
|
||||
int efx_siena_reset_up(struct efx_nic *efx, enum reset_type method, bool ok);
|
||||
int efx_siena_reset(struct efx_nic *efx, enum reset_type method);
|
||||
void efx_siena_schedule_reset(struct efx_nic *efx, enum reset_type type);
|
||||
|
||||
/* Dummy PHY ops for PHY drivers */
|
||||
int efx_siena_port_dummy_op_int(struct efx_nic *efx);
|
||||
void efx_siena_port_dummy_op_void(struct efx_nic *efx);
|
||||
|
||||
static inline int efx_check_disabled(struct efx_nic *efx)
|
||||
{
|
||||
if (efx->state == STATE_DISABLED || efx->state == STATE_RECOVERY) {
|
||||
netif_err(efx, drv, efx->net_dev,
|
||||
"device is disabled due to earlier errors\n");
|
||||
return -EIO;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void efx_schedule_channel(struct efx_channel *channel)
|
||||
{
|
||||
netif_vdbg(channel->efx, intr, channel->efx->net_dev,
|
||||
"channel %d scheduling NAPI poll on CPU%d\n",
|
||||
channel->channel, raw_smp_processor_id());
|
||||
|
||||
napi_schedule(&channel->napi_str);
|
||||
}
|
||||
|
||||
static inline void efx_schedule_channel_irq(struct efx_channel *channel)
|
||||
{
|
||||
channel->event_test_cpu = raw_smp_processor_id();
|
||||
efx_schedule_channel(channel);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SFC_MCDI_LOGGING
|
||||
void efx_siena_init_mcdi_logging(struct efx_nic *efx);
|
||||
void efx_siena_fini_mcdi_logging(struct efx_nic *efx);
|
||||
#else
|
||||
static inline void efx_siena_init_mcdi_logging(struct efx_nic *efx) {}
|
||||
static inline void efx_siena_fini_mcdi_logging(struct efx_nic *efx) {}
|
||||
#endif
|
||||
|
||||
void efx_siena_mac_reconfigure(struct efx_nic *efx, bool mtu_only);
|
||||
int efx_siena_set_mac_address(struct net_device *net_dev, void *data);
|
||||
void efx_siena_set_rx_mode(struct net_device *net_dev);
|
||||
int efx_siena_set_features(struct net_device *net_dev, netdev_features_t data);
|
||||
void efx_siena_link_status_changed(struct efx_nic *efx);
|
||||
unsigned int efx_siena_xdp_max_mtu(struct efx_nic *efx);
|
||||
int efx_siena_change_mtu(struct net_device *net_dev, int new_mtu);
|
||||
|
||||
extern const struct pci_error_handlers efx_siena_err_handlers;
|
||||
|
||||
netdev_features_t efx_siena_features_check(struct sk_buff *skb,
|
||||
struct net_device *dev,
|
||||
netdev_features_t features);
|
||||
|
||||
int efx_siena_get_phys_port_id(struct net_device *net_dev,
|
||||
struct netdev_phys_item_id *ppid);
|
||||
|
||||
int efx_siena_get_phys_port_name(struct net_device *net_dev,
|
||||
char *name, size_t len);
|
||||
#endif
|
176
drivers/net/ethernet/sfc/siena/enum.h
Normal file
176
drivers/net/ethernet/sfc/siena/enum.h
Normal file
@ -0,0 +1,176 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/****************************************************************************
|
||||
* Driver for Solarflare network controllers and boards
|
||||
* Copyright 2007-2013 Solarflare Communications Inc.
|
||||
*/
|
||||
|
||||
#ifndef EFX_ENUM_H
|
||||
#define EFX_ENUM_H
|
||||
|
||||
/**
|
||||
* enum efx_loopback_mode - loopback modes
|
||||
* @LOOPBACK_NONE: no loopback
|
||||
* @LOOPBACK_DATA: data path loopback
|
||||
* @LOOPBACK_GMAC: loopback within GMAC
|
||||
* @LOOPBACK_XGMII: loopback after XMAC
|
||||
* @LOOPBACK_XGXS: loopback within BPX after XGXS
|
||||
* @LOOPBACK_XAUI: loopback within BPX before XAUI serdes
|
||||
* @LOOPBACK_GMII: loopback within BPX after GMAC
|
||||
* @LOOPBACK_SGMII: loopback within BPX within SGMII
|
||||
* @LOOPBACK_XGBR: loopback within BPX within XGBR
|
||||
* @LOOPBACK_XFI: loopback within BPX before XFI serdes
|
||||
* @LOOPBACK_XAUI_FAR: loopback within BPX after XAUI serdes
|
||||
* @LOOPBACK_GMII_FAR: loopback within BPX before SGMII
|
||||
* @LOOPBACK_SGMII_FAR: loopback within BPX after SGMII
|
||||
* @LOOPBACK_XFI_FAR: loopback after XFI serdes
|
||||
* @LOOPBACK_GPHY: loopback within 1G PHY at unspecified level
|
||||
* @LOOPBACK_PHYXS: loopback within 10G PHY at PHYXS level
|
||||
* @LOOPBACK_PCS: loopback within 10G PHY at PCS level
|
||||
* @LOOPBACK_PMAPMD: loopback within 10G PHY at PMAPMD level
|
||||
* @LOOPBACK_XPORT: cross port loopback
|
||||
* @LOOPBACK_XGMII_WS: wireside loopback excluding XMAC
|
||||
* @LOOPBACK_XAUI_WS: wireside loopback within BPX within XAUI serdes
|
||||
* @LOOPBACK_XAUI_WS_FAR: wireside loopback within BPX including XAUI serdes
|
||||
* @LOOPBACK_XAUI_WS_NEAR: wireside loopback within BPX excluding XAUI serdes
|
||||
* @LOOPBACK_GMII_WS: wireside loopback excluding GMAC
|
||||
* @LOOPBACK_XFI_WS: wireside loopback excluding XFI serdes
|
||||
* @LOOPBACK_XFI_WS_FAR: wireside loopback including XFI serdes
|
||||
* @LOOPBACK_PHYXS_WS: wireside loopback within 10G PHY at PHYXS level
|
||||
*/
|
||||
/* Please keep up-to-date w.r.t the following two #defines */
|
||||
enum efx_loopback_mode {
|
||||
LOOPBACK_NONE = 0,
|
||||
LOOPBACK_DATA = 1,
|
||||
LOOPBACK_GMAC = 2,
|
||||
LOOPBACK_XGMII = 3,
|
||||
LOOPBACK_XGXS = 4,
|
||||
LOOPBACK_XAUI = 5,
|
||||
LOOPBACK_GMII = 6,
|
||||
LOOPBACK_SGMII = 7,
|
||||
LOOPBACK_XGBR = 8,
|
||||
LOOPBACK_XFI = 9,
|
||||
LOOPBACK_XAUI_FAR = 10,
|
||||
LOOPBACK_GMII_FAR = 11,
|
||||
LOOPBACK_SGMII_FAR = 12,
|
||||
LOOPBACK_XFI_FAR = 13,
|
||||
LOOPBACK_GPHY = 14,
|
||||
LOOPBACK_PHYXS = 15,
|
||||
LOOPBACK_PCS = 16,
|
||||
LOOPBACK_PMAPMD = 17,
|
||||
LOOPBACK_XPORT = 18,
|
||||
LOOPBACK_XGMII_WS = 19,
|
||||
LOOPBACK_XAUI_WS = 20,
|
||||
LOOPBACK_XAUI_WS_FAR = 21,
|
||||
LOOPBACK_XAUI_WS_NEAR = 22,
|
||||
LOOPBACK_GMII_WS = 23,
|
||||
LOOPBACK_XFI_WS = 24,
|
||||
LOOPBACK_XFI_WS_FAR = 25,
|
||||
LOOPBACK_PHYXS_WS = 26,
|
||||
LOOPBACK_MAX
|
||||
};
|
||||
#define LOOPBACK_TEST_MAX LOOPBACK_PMAPMD
|
||||
|
||||
/* These loopbacks occur within the controller */
|
||||
#define LOOPBACKS_INTERNAL ((1 << LOOPBACK_DATA) | \
|
||||
(1 << LOOPBACK_GMAC) | \
|
||||
(1 << LOOPBACK_XGMII)| \
|
||||
(1 << LOOPBACK_XGXS) | \
|
||||
(1 << LOOPBACK_XAUI) | \
|
||||
(1 << LOOPBACK_GMII) | \
|
||||
(1 << LOOPBACK_SGMII) | \
|
||||
(1 << LOOPBACK_XGBR) | \
|
||||
(1 << LOOPBACK_XFI) | \
|
||||
(1 << LOOPBACK_XAUI_FAR) | \
|
||||
(1 << LOOPBACK_GMII_FAR) | \
|
||||
(1 << LOOPBACK_SGMII_FAR) | \
|
||||
(1 << LOOPBACK_XFI_FAR) | \
|
||||
(1 << LOOPBACK_XGMII_WS) | \
|
||||
(1 << LOOPBACK_XAUI_WS) | \
|
||||
(1 << LOOPBACK_XAUI_WS_FAR) | \
|
||||
(1 << LOOPBACK_XAUI_WS_NEAR) | \
|
||||
(1 << LOOPBACK_GMII_WS) | \
|
||||
(1 << LOOPBACK_XFI_WS) | \
|
||||
(1 << LOOPBACK_XFI_WS_FAR))
|
||||
|
||||
#define LOOPBACKS_WS ((1 << LOOPBACK_XGMII_WS) | \
|
||||
(1 << LOOPBACK_XAUI_WS) | \
|
||||
(1 << LOOPBACK_XAUI_WS_FAR) | \
|
||||
(1 << LOOPBACK_XAUI_WS_NEAR) | \
|
||||
(1 << LOOPBACK_GMII_WS) | \
|
||||
(1 << LOOPBACK_XFI_WS) | \
|
||||
(1 << LOOPBACK_XFI_WS_FAR) | \
|
||||
(1 << LOOPBACK_PHYXS_WS))
|
||||
|
||||
#define LOOPBACKS_EXTERNAL(_efx) \
|
||||
((_efx)->loopback_modes & ~LOOPBACKS_INTERNAL & \
|
||||
~(1 << LOOPBACK_NONE))
|
||||
|
||||
#define LOOPBACK_MASK(_efx) \
|
||||
(1 << (_efx)->loopback_mode)
|
||||
|
||||
#define LOOPBACK_INTERNAL(_efx) \
|
||||
(!!(LOOPBACKS_INTERNAL & LOOPBACK_MASK(_efx)))
|
||||
|
||||
#define LOOPBACK_EXTERNAL(_efx) \
|
||||
(!!(LOOPBACK_MASK(_efx) & LOOPBACKS_EXTERNAL(_efx)))
|
||||
|
||||
#define LOOPBACK_CHANGED(_from, _to, _mask) \
|
||||
(!!((LOOPBACK_MASK(_from) ^ LOOPBACK_MASK(_to)) & (_mask)))
|
||||
|
||||
#define LOOPBACK_OUT_OF(_from, _to, _mask) \
|
||||
((LOOPBACK_MASK(_from) & (_mask)) && !(LOOPBACK_MASK(_to) & (_mask)))
|
||||
|
||||
/*****************************************************************************/
|
||||
|
||||
/**
|
||||
* enum reset_type - reset types
|
||||
*
|
||||
* %RESET_TYPE_INVSIBLE, %RESET_TYPE_ALL, %RESET_TYPE_WORLD and
|
||||
* %RESET_TYPE_DISABLE specify the method/scope of the reset. The
|
||||
* other valuesspecify reasons, which efx_siena_schedule_reset() will choose
|
||||
* a method for.
|
||||
*
|
||||
* Reset methods are numbered in order of increasing scope.
|
||||
*
|
||||
* @RESET_TYPE_INVISIBLE: Reset datapath and MAC (Falcon only)
|
||||
* @RESET_TYPE_RECOVER_OR_ALL: Try to recover. Apply RESET_TYPE_ALL
|
||||
* if unsuccessful.
|
||||
* @RESET_TYPE_ALL: Reset datapath, MAC and PHY
|
||||
* @RESET_TYPE_WORLD: Reset as much as possible
|
||||
* @RESET_TYPE_RECOVER_OR_DISABLE: Try to recover. Apply RESET_TYPE_DISABLE if
|
||||
* unsuccessful.
|
||||
* @RESET_TYPE_DATAPATH: Reset datapath only.
|
||||
* @RESET_TYPE_MC_BIST: MC entering BIST mode.
|
||||
* @RESET_TYPE_DISABLE: Reset datapath, MAC and PHY; leave NIC disabled
|
||||
* @RESET_TYPE_TX_WATCHDOG: reset due to TX watchdog
|
||||
* @RESET_TYPE_INT_ERROR: reset due to internal error
|
||||
* @RESET_TYPE_DMA_ERROR: DMA error
|
||||
* @RESET_TYPE_TX_SKIP: hardware completed empty tx descriptors
|
||||
* @RESET_TYPE_MC_FAILURE: MC reboot/assertion
|
||||
* @RESET_TYPE_MCDI_TIMEOUT: MCDI timeout.
|
||||
*/
|
||||
enum reset_type {
|
||||
RESET_TYPE_INVISIBLE,
|
||||
RESET_TYPE_RECOVER_OR_ALL,
|
||||
RESET_TYPE_ALL,
|
||||
RESET_TYPE_WORLD,
|
||||
RESET_TYPE_RECOVER_OR_DISABLE,
|
||||
RESET_TYPE_DATAPATH,
|
||||
RESET_TYPE_MC_BIST,
|
||||
RESET_TYPE_DISABLE,
|
||||
RESET_TYPE_MAX_METHOD,
|
||||
RESET_TYPE_TX_WATCHDOG,
|
||||
RESET_TYPE_INT_ERROR,
|
||||
RESET_TYPE_DMA_ERROR,
|
||||
RESET_TYPE_TX_SKIP,
|
||||
RESET_TYPE_MC_FAILURE,
|
||||
/* RESET_TYPE_MCDI_TIMEOUT is actually a method, not just a reason, but
|
||||
* it doesn't fit the scope hierarchy (not well-ordered by inclusion).
|
||||
* We encode this by having its enum value be greater than
|
||||
* RESET_TYPE_MAX_METHOD.
|
||||
*/
|
||||
RESET_TYPE_MCDI_TIMEOUT,
|
||||
RESET_TYPE_MAX,
|
||||
};
|
||||
|
||||
#endif /* EFX_ENUM_H */
|
282
drivers/net/ethernet/sfc/siena/ethtool.c
Normal file
282
drivers/net/ethernet/sfc/siena/ethtool.c
Normal file
@ -0,0 +1,282 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/****************************************************************************
|
||||
* Driver for Solarflare network controllers and boards
|
||||
* Copyright 2005-2006 Fen Systems Ltd.
|
||||
* Copyright 2006-2013 Solarflare Communications Inc.
|
||||
*/
|
||||
|
||||
#include <linux/netdevice.h>
|
||||
#include <linux/ethtool.h>
|
||||
#include <linux/rtnetlink.h>
|
||||
#include <linux/in.h>
|
||||
#include "net_driver.h"
|
||||
#include "workarounds.h"
|
||||
#include "selftest.h"
|
||||
#include "efx.h"
|
||||
#include "efx_channels.h"
|
||||
#include "rx_common.h"
|
||||
#include "tx_common.h"
|
||||
#include "ethtool_common.h"
|
||||
#include "filter.h"
|
||||
#include "nic.h"
|
||||
|
||||
#define EFX_ETHTOOL_EEPROM_MAGIC 0xEFAB
|
||||
|
||||
/**************************************************************************
|
||||
*
|
||||
* Ethtool operations
|
||||
*
|
||||
**************************************************************************
|
||||
*/
|
||||
|
||||
/* Identify device by flashing LEDs */
|
||||
static int efx_ethtool_phys_id(struct net_device *net_dev,
|
||||
enum ethtool_phys_id_state state)
|
||||
{
|
||||
struct efx_nic *efx = netdev_priv(net_dev);
|
||||
enum efx_led_mode mode = EFX_LED_DEFAULT;
|
||||
|
||||
switch (state) {
|
||||
case ETHTOOL_ID_ON:
|
||||
mode = EFX_LED_ON;
|
||||
break;
|
||||
case ETHTOOL_ID_OFF:
|
||||
mode = EFX_LED_OFF;
|
||||
break;
|
||||
case ETHTOOL_ID_INACTIVE:
|
||||
mode = EFX_LED_DEFAULT;
|
||||
break;
|
||||
case ETHTOOL_ID_ACTIVE:
|
||||
return 1; /* cycle on/off once per second */
|
||||
}
|
||||
|
||||
return efx_siena_mcdi_set_id_led(efx, mode);
|
||||
}
|
||||
|
||||
static int efx_ethtool_get_regs_len(struct net_device *net_dev)
|
||||
{
|
||||
return efx_siena_get_regs_len(netdev_priv(net_dev));
|
||||
}
|
||||
|
||||
static void efx_ethtool_get_regs(struct net_device *net_dev,
|
||||
struct ethtool_regs *regs, void *buf)
|
||||
{
|
||||
struct efx_nic *efx = netdev_priv(net_dev);
|
||||
|
||||
regs->version = efx->type->revision;
|
||||
efx_siena_get_regs(efx, buf);
|
||||
}
|
||||
|
||||
/*
|
||||
* Each channel has a single IRQ and moderation timer, started by any
|
||||
* completion (or other event). Unless the module parameter
|
||||
* separate_tx_channels is set, IRQs and moderation are therefore
|
||||
* shared between RX and TX completions. In this case, when RX IRQ
|
||||
* moderation is explicitly changed then TX IRQ moderation is
|
||||
* automatically changed too, but otherwise we fail if the two values
|
||||
* are requested to be different.
|
||||
*
|
||||
* The hardware does not support a limit on the number of completions
|
||||
* before an IRQ, so we do not use the max_frames fields. We should
|
||||
* report and require that max_frames == (usecs != 0), but this would
|
||||
* invalidate existing user documentation.
|
||||
*
|
||||
* The hardware does not have distinct settings for interrupt
|
||||
* moderation while the previous IRQ is being handled, so we should
|
||||
* not use the 'irq' fields. However, an earlier developer
|
||||
* misunderstood the meaning of the 'irq' fields and the driver did
|
||||
* not support the standard fields. To avoid invalidating existing
|
||||
* user documentation, we report and accept changes through either the
|
||||
* standard or 'irq' fields. If both are changed at the same time, we
|
||||
* prefer the standard field.
|
||||
*
|
||||
* We implement adaptive IRQ moderation, but use a different algorithm
|
||||
* from that assumed in the definition of struct ethtool_coalesce.
|
||||
* Therefore we do not use any of the adaptive moderation parameters
|
||||
* in it.
|
||||
*/
|
||||
|
||||
static int efx_ethtool_get_coalesce(struct net_device *net_dev,
|
||||
struct ethtool_coalesce *coalesce,
|
||||
struct kernel_ethtool_coalesce *kernel_coal,
|
||||
struct netlink_ext_ack *extack)
|
||||
{
|
||||
struct efx_nic *efx = netdev_priv(net_dev);
|
||||
unsigned int tx_usecs, rx_usecs;
|
||||
bool rx_adaptive;
|
||||
|
||||
efx_siena_get_irq_moderation(efx, &tx_usecs, &rx_usecs, &rx_adaptive);
|
||||
|
||||
coalesce->tx_coalesce_usecs = tx_usecs;
|
||||
coalesce->tx_coalesce_usecs_irq = tx_usecs;
|
||||
coalesce->rx_coalesce_usecs = rx_usecs;
|
||||
coalesce->rx_coalesce_usecs_irq = rx_usecs;
|
||||
coalesce->use_adaptive_rx_coalesce = rx_adaptive;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int efx_ethtool_set_coalesce(struct net_device *net_dev,
|
||||
struct ethtool_coalesce *coalesce,
|
||||
struct kernel_ethtool_coalesce *kernel_coal,
|
||||
struct netlink_ext_ack *extack)
|
||||
{
|
||||
struct efx_nic *efx = netdev_priv(net_dev);
|
||||
struct efx_channel *channel;
|
||||
unsigned int tx_usecs, rx_usecs;
|
||||
bool adaptive, rx_may_override_tx;
|
||||
int rc;
|
||||
|
||||
efx_siena_get_irq_moderation(efx, &tx_usecs, &rx_usecs, &adaptive);
|
||||
|
||||
if (coalesce->rx_coalesce_usecs != rx_usecs)
|
||||
rx_usecs = coalesce->rx_coalesce_usecs;
|
||||
else
|
||||
rx_usecs = coalesce->rx_coalesce_usecs_irq;
|
||||
|
||||
adaptive = coalesce->use_adaptive_rx_coalesce;
|
||||
|
||||
/* If channels are shared, TX IRQ moderation can be quietly
|
||||
* overridden unless it is changed from its old value.
|
||||
*/
|
||||
rx_may_override_tx = (coalesce->tx_coalesce_usecs == tx_usecs &&
|
||||
coalesce->tx_coalesce_usecs_irq == tx_usecs);
|
||||
if (coalesce->tx_coalesce_usecs != tx_usecs)
|
||||
tx_usecs = coalesce->tx_coalesce_usecs;
|
||||
else
|
||||
tx_usecs = coalesce->tx_coalesce_usecs_irq;
|
||||
|
||||
rc = efx_siena_init_irq_moderation(efx, tx_usecs, rx_usecs, adaptive,
|
||||
rx_may_override_tx);
|
||||
if (rc != 0)
|
||||
return rc;
|
||||
|
||||
efx_for_each_channel(channel, efx)
|
||||
efx->type->push_irq_moderation(channel);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void
|
||||
efx_ethtool_get_ringparam(struct net_device *net_dev,
|
||||
struct ethtool_ringparam *ring,
|
||||
struct kernel_ethtool_ringparam *kernel_ring,
|
||||
struct netlink_ext_ack *extack)
|
||||
{
|
||||
struct efx_nic *efx = netdev_priv(net_dev);
|
||||
|
||||
ring->rx_max_pending = EFX_MAX_DMAQ_SIZE;
|
||||
ring->tx_max_pending = EFX_TXQ_MAX_ENT(efx);
|
||||
ring->rx_pending = efx->rxq_entries;
|
||||
ring->tx_pending = efx->txq_entries;
|
||||
}
|
||||
|
||||
static int
|
||||
efx_ethtool_set_ringparam(struct net_device *net_dev,
|
||||
struct ethtool_ringparam *ring,
|
||||
struct kernel_ethtool_ringparam *kernel_ring,
|
||||
struct netlink_ext_ack *extack)
|
||||
{
|
||||
struct efx_nic *efx = netdev_priv(net_dev);
|
||||
u32 txq_entries;
|
||||
|
||||
if (ring->rx_mini_pending || ring->rx_jumbo_pending ||
|
||||
ring->rx_pending > EFX_MAX_DMAQ_SIZE ||
|
||||
ring->tx_pending > EFX_TXQ_MAX_ENT(efx))
|
||||
return -EINVAL;
|
||||
|
||||
if (ring->rx_pending < EFX_RXQ_MIN_ENT) {
|
||||
netif_err(efx, drv, efx->net_dev,
|
||||
"RX queues cannot be smaller than %u\n",
|
||||
EFX_RXQ_MIN_ENT);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
txq_entries = max(ring->tx_pending, EFX_TXQ_MIN_ENT(efx));
|
||||
if (txq_entries != ring->tx_pending)
|
||||
netif_warn(efx, drv, efx->net_dev,
|
||||
"increasing TX queue size to minimum of %u\n",
|
||||
txq_entries);
|
||||
|
||||
return efx_siena_realloc_channels(efx, ring->rx_pending, txq_entries);
|
||||
}
|
||||
|
||||
static void efx_ethtool_get_wol(struct net_device *net_dev,
|
||||
struct ethtool_wolinfo *wol)
|
||||
{
|
||||
struct efx_nic *efx = netdev_priv(net_dev);
|
||||
return efx->type->get_wol(efx, wol);
|
||||
}
|
||||
|
||||
|
||||
static int efx_ethtool_set_wol(struct net_device *net_dev,
|
||||
struct ethtool_wolinfo *wol)
|
||||
{
|
||||
struct efx_nic *efx = netdev_priv(net_dev);
|
||||
return efx->type->set_wol(efx, wol->wolopts);
|
||||
}
|
||||
|
||||
static void efx_ethtool_get_fec_stats(struct net_device *net_dev,
|
||||
struct ethtool_fec_stats *fec_stats)
|
||||
{
|
||||
struct efx_nic *efx = netdev_priv(net_dev);
|
||||
|
||||
if (efx->type->get_fec_stats)
|
||||
efx->type->get_fec_stats(efx, fec_stats);
|
||||
}
|
||||
|
||||
static int efx_ethtool_get_ts_info(struct net_device *net_dev,
|
||||
struct ethtool_ts_info *ts_info)
|
||||
{
|
||||
struct efx_nic *efx = netdev_priv(net_dev);
|
||||
|
||||
/* Software capabilities */
|
||||
ts_info->so_timestamping = (SOF_TIMESTAMPING_RX_SOFTWARE |
|
||||
SOF_TIMESTAMPING_SOFTWARE);
|
||||
ts_info->phc_index = -1;
|
||||
|
||||
efx_siena_ptp_get_ts_info(efx, ts_info);
|
||||
return 0;
|
||||
}
|
||||
|
||||
const struct ethtool_ops efx_siena_ethtool_ops = {
|
||||
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
|
||||
ETHTOOL_COALESCE_USECS_IRQ |
|
||||
ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
|
||||
.get_drvinfo = efx_siena_ethtool_get_drvinfo,
|
||||
.get_regs_len = efx_ethtool_get_regs_len,
|
||||
.get_regs = efx_ethtool_get_regs,
|
||||
.get_msglevel = efx_siena_ethtool_get_msglevel,
|
||||
.set_msglevel = efx_siena_ethtool_set_msglevel,
|
||||
.get_link = ethtool_op_get_link,
|
||||
.get_coalesce = efx_ethtool_get_coalesce,
|
||||
.set_coalesce = efx_ethtool_set_coalesce,
|
||||
.get_ringparam = efx_ethtool_get_ringparam,
|
||||
.set_ringparam = efx_ethtool_set_ringparam,
|
||||
.get_pauseparam = efx_siena_ethtool_get_pauseparam,
|
||||
.set_pauseparam = efx_siena_ethtool_set_pauseparam,
|
||||
.get_sset_count = efx_siena_ethtool_get_sset_count,
|
||||
.self_test = efx_siena_ethtool_self_test,
|
||||
.get_strings = efx_siena_ethtool_get_strings,
|
||||
.set_phys_id = efx_ethtool_phys_id,
|
||||
.get_ethtool_stats = efx_siena_ethtool_get_stats,
|
||||
.get_wol = efx_ethtool_get_wol,
|
||||
.set_wol = efx_ethtool_set_wol,
|
||||
.reset = efx_siena_ethtool_reset,
|
||||
.get_rxnfc = efx_siena_ethtool_get_rxnfc,
|
||||
.set_rxnfc = efx_siena_ethtool_set_rxnfc,
|
||||
.get_rxfh_indir_size = efx_siena_ethtool_get_rxfh_indir_size,
|
||||
.get_rxfh_key_size = efx_siena_ethtool_get_rxfh_key_size,
|
||||
.get_rxfh = efx_siena_ethtool_get_rxfh,
|
||||
.set_rxfh = efx_siena_ethtool_set_rxfh,
|
||||
.get_rxfh_context = efx_siena_ethtool_get_rxfh_context,
|
||||
.set_rxfh_context = efx_siena_ethtool_set_rxfh_context,
|
||||
.get_ts_info = efx_ethtool_get_ts_info,
|
||||
.get_module_info = efx_siena_ethtool_get_module_info,
|
||||
.get_module_eeprom = efx_siena_ethtool_get_module_eeprom,
|
||||
.get_link_ksettings = efx_siena_ethtool_get_link_ksettings,
|
||||
.set_link_ksettings = efx_siena_ethtool_set_link_ksettings,
|
||||
.get_fec_stats = efx_ethtool_get_fec_stats,
|
||||
.get_fecparam = efx_siena_ethtool_get_fecparam,
|
||||
.set_fecparam = efx_siena_ethtool_set_fecparam,
|
||||
};
|
1340
drivers/net/ethernet/sfc/siena/ethtool_common.c
Normal file
1340
drivers/net/ethernet/sfc/siena/ethtool_common.c
Normal file
File diff suppressed because it is too large
Load Diff
60
drivers/net/ethernet/sfc/siena/ethtool_common.h
Normal file
60
drivers/net/ethernet/sfc/siena/ethtool_common.h
Normal file
@ -0,0 +1,60 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/****************************************************************************
|
||||
* Driver for Solarflare network controllers and boards
|
||||
* Copyright 2019 Solarflare Communications Inc.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 as published
|
||||
* by the Free Software Foundation, incorporated herein by reference.
|
||||
*/
|
||||
|
||||
#ifndef EFX_ETHTOOL_COMMON_H
|
||||
#define EFX_ETHTOOL_COMMON_H
|
||||
|
||||
void efx_siena_ethtool_get_drvinfo(struct net_device *net_dev,
|
||||
struct ethtool_drvinfo *info);
|
||||
u32 efx_siena_ethtool_get_msglevel(struct net_device *net_dev);
|
||||
void efx_siena_ethtool_set_msglevel(struct net_device *net_dev, u32 msg_enable);
|
||||
void efx_siena_ethtool_self_test(struct net_device *net_dev,
|
||||
struct ethtool_test *test, u64 *data);
|
||||
void efx_siena_ethtool_get_pauseparam(struct net_device *net_dev,
|
||||
struct ethtool_pauseparam *pause);
|
||||
int efx_siena_ethtool_set_pauseparam(struct net_device *net_dev,
|
||||
struct ethtool_pauseparam *pause);
|
||||
int efx_siena_ethtool_get_sset_count(struct net_device *net_dev, int string_set);
|
||||
void efx_siena_ethtool_get_strings(struct net_device *net_dev, u32 string_set,
|
||||
u8 *strings);
|
||||
void efx_siena_ethtool_get_stats(struct net_device *net_dev,
|
||||
struct ethtool_stats *stats __always_unused,
|
||||
u64 *data);
|
||||
int efx_siena_ethtool_get_link_ksettings(struct net_device *net_dev,
|
||||
struct ethtool_link_ksettings *out);
|
||||
int efx_siena_ethtool_set_link_ksettings(struct net_device *net_dev,
|
||||
const struct ethtool_link_ksettings *settings);
|
||||
int efx_siena_ethtool_get_fecparam(struct net_device *net_dev,
|
||||
struct ethtool_fecparam *fecparam);
|
||||
int efx_siena_ethtool_set_fecparam(struct net_device *net_dev,
|
||||
struct ethtool_fecparam *fecparam);
|
||||
int efx_siena_ethtool_get_rxnfc(struct net_device *net_dev,
|
||||
struct ethtool_rxnfc *info, u32 *rule_locs);
|
||||
int efx_siena_ethtool_set_rxnfc(struct net_device *net_dev,
|
||||
struct ethtool_rxnfc *info);
|
||||
u32 efx_siena_ethtool_get_rxfh_indir_size(struct net_device *net_dev);
|
||||
u32 efx_siena_ethtool_get_rxfh_key_size(struct net_device *net_dev);
|
||||
int efx_siena_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key,
|
||||
u8 *hfunc);
|
||||
int efx_siena_ethtool_set_rxfh(struct net_device *net_dev,
|
||||
const u32 *indir, const u8 *key, const u8 hfunc);
|
||||
int efx_siena_ethtool_get_rxfh_context(struct net_device *net_dev, u32 *indir,
|
||||
u8 *key, u8 *hfunc, u32 rss_context);
|
||||
int efx_siena_ethtool_set_rxfh_context(struct net_device *net_dev,
|
||||
const u32 *indir, const u8 *key,
|
||||
const u8 hfunc, u32 *rss_context,
|
||||
bool delete);
|
||||
int efx_siena_ethtool_reset(struct net_device *net_dev, u32 *flags);
|
||||
int efx_siena_ethtool_get_module_eeprom(struct net_device *net_dev,
|
||||
struct ethtool_eeprom *ee,
|
||||
u8 *data);
|
||||
int efx_siena_ethtool_get_module_info(struct net_device *net_dev,
|
||||
struct ethtool_modinfo *modinfo);
|
||||
#endif
|
@ -233,7 +233,7 @@ static int efx_alloc_special_buffer(struct efx_nic *efx,
|
||||
#endif
|
||||
len = ALIGN(len, EFX_BUF_SIZE);
|
||||
|
||||
if (efx_nic_alloc_buffer(efx, &buffer->buf, len, GFP_KERNEL))
|
||||
if (efx_siena_alloc_buffer(efx, &buffer->buf, len, GFP_KERNEL))
|
||||
return -ENOMEM;
|
||||
buffer->entries = len / EFX_BUF_SIZE;
|
||||
BUG_ON(buffer->buf.dma_addr & (EFX_BUF_SIZE - 1));
|
||||
@ -269,7 +269,7 @@ efx_free_special_buffer(struct efx_nic *efx, struct efx_special_buffer *buffer)
|
||||
(u64)buffer->buf.dma_addr, buffer->buf.len,
|
||||
buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr));
|
||||
|
||||
efx_nic_free_buffer(efx, &buffer->buf);
|
||||
efx_siena_free_buffer(efx, &buffer->buf);
|
||||
buffer->entries = 0;
|
||||
}
|
||||
|
||||
@ -667,7 +667,7 @@ static int efx_farch_do_flush(struct efx_nic *efx)
|
||||
* completion). If that fails, fall back to the old scheme.
|
||||
*/
|
||||
if (efx_siena_sriov_enabled(efx)) {
|
||||
rc = efx_mcdi_flush_rxqs(efx);
|
||||
rc = efx_siena_mcdi_flush_rxqs(efx);
|
||||
if (!rc)
|
||||
goto wait;
|
||||
}
|
||||
@ -747,12 +747,13 @@ int efx_farch_fini_dmaq(struct efx_nic *efx)
|
||||
* completion events. This means that efx->rxq_flush_outstanding remained at 4
|
||||
* after the FLR; also, efx->active_queues was non-zero (as no flush completion
|
||||
* events were received, and we didn't go through efx_check_tx_flush_complete())
|
||||
* If we don't fix this up, on the next call to efx_realloc_channels() we won't
|
||||
* flush any RX queues because efx->rxq_flush_outstanding is at the limit of 4
|
||||
* for batched flush requests; and the efx->active_queues gets messed up because
|
||||
* we keep incrementing for the newly initialised queues, but it never went to
|
||||
* zero previously. Then we get a timeout every time we try to restart the
|
||||
* queues, as it doesn't go back to zero when we should be flushing the queues.
|
||||
* If we don't fix this up, on the next call to efx_siena_realloc_channels() we
|
||||
* won't flush any RX queues because efx->rxq_flush_outstanding is at the limit
|
||||
* of 4 for batched flush requests; and the efx->active_queues gets messed up
|
||||
* because we keep incrementing for the newly initialised queues, but it never
|
||||
* went to zero previously. Then we get a timeout every time we try to restart
|
||||
* the queues, as it doesn't go back to zero when we should be flushing the
|
||||
* queues.
|
||||
*/
|
||||
void efx_farch_finish_flr(struct efx_nic *efx)
|
||||
{
|
||||
@ -838,7 +839,7 @@ efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
|
||||
tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
|
||||
tx_queue = channel->tx_queue +
|
||||
(tx_ev_q_label % EFX_MAX_TXQ_PER_CHANNEL);
|
||||
efx_xmit_done(tx_queue, tx_ev_desc_ptr);
|
||||
efx_siena_xmit_done(tx_queue, tx_ev_desc_ptr);
|
||||
} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
|
||||
/* Rewrite the FIFO write pointer */
|
||||
tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
|
||||
@ -849,7 +850,7 @@ efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
|
||||
efx_farch_notify_tx_desc(tx_queue);
|
||||
netif_tx_unlock(efx->net_dev);
|
||||
} else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR)) {
|
||||
efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
|
||||
efx_siena_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
|
||||
} else {
|
||||
netif_err(efx, tx_err, efx->net_dev,
|
||||
"channel %d unexpected TX event "
|
||||
@ -956,7 +957,7 @@ efx_farch_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
|
||||
"dropped %d events (index=%d expected=%d)\n",
|
||||
dropped, index, expected);
|
||||
|
||||
efx_schedule_reset(efx, RESET_TYPE_DISABLE);
|
||||
efx_siena_schedule_reset(efx, RESET_TYPE_DISABLE);
|
||||
return false;
|
||||
}
|
||||
|
||||
@ -1001,7 +1002,7 @@ efx_farch_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
|
||||
|
||||
/* Discard all pending fragments */
|
||||
if (rx_queue->scatter_n) {
|
||||
efx_rx_packet(
|
||||
efx_siena_rx_packet(
|
||||
rx_queue,
|
||||
rx_queue->removed_count & rx_queue->ptr_mask,
|
||||
rx_queue->scatter_n, 0, EFX_RX_PKT_DISCARD);
|
||||
@ -1015,7 +1016,7 @@ efx_farch_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
|
||||
|
||||
/* Discard new fragment if not SOP */
|
||||
if (!rx_ev_sop) {
|
||||
efx_rx_packet(
|
||||
efx_siena_rx_packet(
|
||||
rx_queue,
|
||||
rx_queue->removed_count & rx_queue->ptr_mask,
|
||||
1, 0, EFX_RX_PKT_DISCARD);
|
||||
@ -1067,9 +1068,9 @@ efx_farch_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
|
||||
channel->irq_mod_score += 2;
|
||||
|
||||
/* Handle received packet */
|
||||
efx_rx_packet(rx_queue,
|
||||
rx_queue->removed_count & rx_queue->ptr_mask,
|
||||
rx_queue->scatter_n, rx_ev_byte_cnt, flags);
|
||||
efx_siena_rx_packet(rx_queue,
|
||||
rx_queue->removed_count & rx_queue->ptr_mask,
|
||||
rx_queue->scatter_n, rx_ev_byte_cnt, flags);
|
||||
rx_queue->removed_count += rx_queue->scatter_n;
|
||||
rx_queue->scatter_n = 0;
|
||||
}
|
||||
@ -1159,7 +1160,7 @@ static void efx_farch_handle_generated_event(struct efx_channel *channel,
|
||||
/* The queue must be empty, so we won't receive any rx
|
||||
* events, so efx_process_channel() won't refill the
|
||||
* queue. Refill it here */
|
||||
efx_fast_push_rx_descriptors(rx_queue, true);
|
||||
efx_siena_fast_push_rx_descriptors(rx_queue, true);
|
||||
} else if (rx_queue && magic == EFX_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) {
|
||||
efx_farch_handle_drain_event(channel);
|
||||
} else if (code == _EFX_CHANNEL_MAGIC_TX_DRAIN) {
|
||||
@ -1222,7 +1223,7 @@ efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
|
||||
"channel %d seen DRIVER RX_RESET event. "
|
||||
"Resetting.\n", channel->channel);
|
||||
atomic_inc(&efx->rx_reset);
|
||||
efx_schedule_reset(efx, RESET_TYPE_DISABLE);
|
||||
efx_siena_schedule_reset(efx, RESET_TYPE_DISABLE);
|
||||
break;
|
||||
case FSE_BZ_RX_DSC_ERROR_EV:
|
||||
if (ev_sub_data < EFX_VI_BASE) {
|
||||
@ -1230,7 +1231,7 @@ efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
|
||||
"RX DMA Q %d reports descriptor fetch error."
|
||||
" RX Q %d is disabled.\n", ev_sub_data,
|
||||
ev_sub_data);
|
||||
efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
|
||||
efx_siena_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
|
||||
}
|
||||
#ifdef CONFIG_SFC_SRIOV
|
||||
else
|
||||
@ -1243,7 +1244,7 @@ efx_farch_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
|
||||
"TX DMA Q %d reports descriptor fetch error."
|
||||
" TX Q %d is disabled.\n", ev_sub_data,
|
||||
ev_sub_data);
|
||||
efx_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
|
||||
efx_siena_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
|
||||
}
|
||||
#ifdef CONFIG_SFC_SRIOV
|
||||
else
|
||||
@ -1312,7 +1313,7 @@ int efx_farch_ev_process(struct efx_channel *channel, int budget)
|
||||
break;
|
||||
#endif
|
||||
case FSE_CZ_EV_CODE_MCDI_EV:
|
||||
efx_mcdi_process_event(channel, &event);
|
||||
efx_siena_mcdi_process_event(channel, &event);
|
||||
break;
|
||||
case FSE_AZ_EV_CODE_GLOBAL_EV:
|
||||
if (efx->type->handle_global_event &&
|
||||
@ -1496,12 +1497,12 @@ irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx)
|
||||
if (++efx->int_error_count < EFX_MAX_INT_ERRORS) {
|
||||
netif_err(efx, hw, efx->net_dev,
|
||||
"SYSTEM ERROR - reset scheduled\n");
|
||||
efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
|
||||
efx_siena_schedule_reset(efx, RESET_TYPE_INT_ERROR);
|
||||
} else {
|
||||
netif_err(efx, hw, efx->net_dev,
|
||||
"SYSTEM ERROR - max number of errors seen."
|
||||
"NIC will be disabled\n");
|
||||
efx_schedule_reset(efx, RESET_TYPE_DISABLE);
|
||||
efx_siena_schedule_reset(efx, RESET_TYPE_DISABLE);
|
||||
}
|
||||
|
||||
return IRQ_HANDLED;
|
||||
@ -1529,7 +1530,7 @@ irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id)
|
||||
* code. Disable them earlier.
|
||||
* If an EEH error occurred, the read will have returned all ones.
|
||||
*/
|
||||
if (EFX_DWORD_IS_ALL_ONES(reg) && efx_try_recovery(efx) &&
|
||||
if (EFX_DWORD_IS_ALL_ONES(reg) && efx_siena_try_recovery(efx) &&
|
||||
!efx->eeh_disabled_legacy_irq) {
|
||||
disable_irq_nosync(efx->legacy_irq);
|
||||
efx->eeh_disabled_legacy_irq = true;
|
||||
@ -2924,13 +2925,14 @@ bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
|
||||
*/
|
||||
arfs_id = 0;
|
||||
} else {
|
||||
rule = efx_rps_hash_find(efx, &spec);
|
||||
rule = efx_siena_rps_hash_find(efx, &spec);
|
||||
if (!rule) {
|
||||
/* ARFS table doesn't know of this filter, remove it */
|
||||
force = true;
|
||||
} else {
|
||||
arfs_id = rule->arfs_id;
|
||||
if (!efx_rps_check_rule(rule, index, &force))
|
||||
if (!efx_siena_rps_check_rule(rule, index,
|
||||
&force))
|
||||
goto out_unlock;
|
||||
}
|
||||
}
|
||||
@ -2938,7 +2940,7 @@ bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
|
||||
flow_id, arfs_id)) {
|
||||
if (rule)
|
||||
rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING;
|
||||
efx_rps_hash_del(efx, &spec);
|
||||
efx_siena_rps_hash_del(efx, &spec);
|
||||
efx_farch_filter_table_clear_entry(efx, table, index);
|
||||
ret = true;
|
||||
}
|
2929
drivers/net/ethernet/sfc/siena/farch_regs.h
Normal file
2929
drivers/net/ethernet/sfc/siena/farch_regs.h
Normal file
File diff suppressed because it is too large
Load Diff
309
drivers/net/ethernet/sfc/siena/filter.h
Normal file
309
drivers/net/ethernet/sfc/siena/filter.h
Normal file
@ -0,0 +1,309 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/****************************************************************************
|
||||
* Driver for Solarflare network controllers and boards
|
||||
* Copyright 2005-2013 Solarflare Communications Inc.
|
||||
*/
|
||||
|
||||
#ifndef EFX_FILTER_H
|
||||
#define EFX_FILTER_H
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/if_ether.h>
|
||||
#include <asm/byteorder.h>
|
||||
|
||||
/**
|
||||
* enum efx_filter_match_flags - Flags for hardware filter match type
|
||||
* @EFX_FILTER_MATCH_REM_HOST: Match by remote IP host address
|
||||
* @EFX_FILTER_MATCH_LOC_HOST: Match by local IP host address
|
||||
* @EFX_FILTER_MATCH_REM_MAC: Match by remote MAC address
|
||||
* @EFX_FILTER_MATCH_REM_PORT: Match by remote TCP/UDP port
|
||||
* @EFX_FILTER_MATCH_LOC_MAC: Match by local MAC address
|
||||
* @EFX_FILTER_MATCH_LOC_PORT: Match by local TCP/UDP port
|
||||
* @EFX_FILTER_MATCH_ETHER_TYPE: Match by Ether-type
|
||||
* @EFX_FILTER_MATCH_INNER_VID: Match by inner VLAN ID
|
||||
* @EFX_FILTER_MATCH_OUTER_VID: Match by outer VLAN ID
|
||||
* @EFX_FILTER_MATCH_IP_PROTO: Match by IP transport protocol
|
||||
* @EFX_FILTER_MATCH_LOC_MAC_IG: Match by local MAC address I/G bit.
|
||||
* @EFX_FILTER_MATCH_ENCAP_TYPE: Match by encapsulation type.
|
||||
* Used for RX default unicast and multicast/broadcast filters.
|
||||
*
|
||||
* Only some combinations are supported, depending on NIC type:
|
||||
*
|
||||
* - Falcon supports RX filters matching by {TCP,UDP}/IPv4 4-tuple or
|
||||
* local 2-tuple (only implemented for Falcon B0)
|
||||
*
|
||||
* - Siena supports RX and TX filters matching by {TCP,UDP}/IPv4 4-tuple
|
||||
* or local 2-tuple, or local MAC with or without outer VID, and RX
|
||||
* default filters
|
||||
*
|
||||
* - Huntington supports filter matching controlled by firmware, potentially
|
||||
* using {TCP,UDP}/IPv{4,6} 4-tuple or local 2-tuple, local MAC or I/G bit,
|
||||
* with or without outer and inner VID
|
||||
*/
|
||||
enum efx_filter_match_flags {
|
||||
EFX_FILTER_MATCH_REM_HOST = 0x0001,
|
||||
EFX_FILTER_MATCH_LOC_HOST = 0x0002,
|
||||
EFX_FILTER_MATCH_REM_MAC = 0x0004,
|
||||
EFX_FILTER_MATCH_REM_PORT = 0x0008,
|
||||
EFX_FILTER_MATCH_LOC_MAC = 0x0010,
|
||||
EFX_FILTER_MATCH_LOC_PORT = 0x0020,
|
||||
EFX_FILTER_MATCH_ETHER_TYPE = 0x0040,
|
||||
EFX_FILTER_MATCH_INNER_VID = 0x0080,
|
||||
EFX_FILTER_MATCH_OUTER_VID = 0x0100,
|
||||
EFX_FILTER_MATCH_IP_PROTO = 0x0200,
|
||||
EFX_FILTER_MATCH_LOC_MAC_IG = 0x0400,
|
||||
EFX_FILTER_MATCH_ENCAP_TYPE = 0x0800,
|
||||
};
|
||||
|
||||
/**
|
||||
* enum efx_filter_priority - priority of a hardware filter specification
|
||||
* @EFX_FILTER_PRI_HINT: Performance hint
|
||||
* @EFX_FILTER_PRI_AUTO: Automatic filter based on device address list
|
||||
* or hardware requirements. This may only be used by the filter
|
||||
* implementation for each NIC type.
|
||||
* @EFX_FILTER_PRI_MANUAL: Manually configured filter
|
||||
* @EFX_FILTER_PRI_REQUIRED: Required for correct behaviour (user-level
|
||||
* networking and SR-IOV)
|
||||
*/
|
||||
enum efx_filter_priority {
|
||||
EFX_FILTER_PRI_HINT = 0,
|
||||
EFX_FILTER_PRI_AUTO,
|
||||
EFX_FILTER_PRI_MANUAL,
|
||||
EFX_FILTER_PRI_REQUIRED,
|
||||
};
|
||||
|
||||
/**
|
||||
* enum efx_filter_flags - flags for hardware filter specifications
|
||||
* @EFX_FILTER_FLAG_RX_RSS: Use RSS to spread across multiple queues.
|
||||
* By default, matching packets will be delivered only to the
|
||||
* specified queue. If this flag is set, they will be delivered
|
||||
* to a range of queues offset from the specified queue number
|
||||
* according to the indirection table.
|
||||
* @EFX_FILTER_FLAG_RX_SCATTER: Enable DMA scatter on the receiving
|
||||
* queue.
|
||||
* @EFX_FILTER_FLAG_RX_OVER_AUTO: Indicates a filter that is
|
||||
* overriding an automatic filter (priority
|
||||
* %EFX_FILTER_PRI_AUTO). This may only be set by the filter
|
||||
* implementation for each type. A removal request will restore
|
||||
* the automatic filter in its place.
|
||||
* @EFX_FILTER_FLAG_RX: Filter is for RX
|
||||
* @EFX_FILTER_FLAG_TX: Filter is for TX
|
||||
*/
|
||||
enum efx_filter_flags {
|
||||
EFX_FILTER_FLAG_RX_RSS = 0x01,
|
||||
EFX_FILTER_FLAG_RX_SCATTER = 0x02,
|
||||
EFX_FILTER_FLAG_RX_OVER_AUTO = 0x04,
|
||||
EFX_FILTER_FLAG_RX = 0x08,
|
||||
EFX_FILTER_FLAG_TX = 0x10,
|
||||
};
|
||||
|
||||
/** enum efx_encap_type - types of encapsulation
|
||||
* @EFX_ENCAP_TYPE_NONE: no encapsulation
|
||||
* @EFX_ENCAP_TYPE_VXLAN: VXLAN encapsulation
|
||||
* @EFX_ENCAP_TYPE_NVGRE: NVGRE encapsulation
|
||||
* @EFX_ENCAP_TYPE_GENEVE: GENEVE encapsulation
|
||||
* @EFX_ENCAP_FLAG_IPV6: indicates IPv6 outer frame
|
||||
*
|
||||
* Contains both enumerated types and flags.
|
||||
* To get just the type, OR with @EFX_ENCAP_TYPES_MASK.
|
||||
*/
|
||||
enum efx_encap_type {
|
||||
EFX_ENCAP_TYPE_NONE = 0,
|
||||
EFX_ENCAP_TYPE_VXLAN = 1,
|
||||
EFX_ENCAP_TYPE_NVGRE = 2,
|
||||
EFX_ENCAP_TYPE_GENEVE = 3,
|
||||
|
||||
EFX_ENCAP_TYPES_MASK = 7,
|
||||
EFX_ENCAP_FLAG_IPV6 = 8,
|
||||
};
|
||||
|
||||
/**
|
||||
* struct efx_filter_spec - specification for a hardware filter
|
||||
* @match_flags: Match type flags, from &enum efx_filter_match_flags
|
||||
* @priority: Priority of the filter, from &enum efx_filter_priority
|
||||
* @flags: Miscellaneous flags, from &enum efx_filter_flags
|
||||
* @rss_context: RSS context to use, if %EFX_FILTER_FLAG_RX_RSS is set. This
|
||||
* is a user_id (with 0 meaning the driver/default RSS context), not an
|
||||
* MCFW context_id.
|
||||
* @dmaq_id: Source/target queue index, or %EFX_FILTER_RX_DMAQ_ID_DROP for
|
||||
* an RX drop filter
|
||||
* @outer_vid: Outer VLAN ID to match, if %EFX_FILTER_MATCH_OUTER_VID is set
|
||||
* @inner_vid: Inner VLAN ID to match, if %EFX_FILTER_MATCH_INNER_VID is set
|
||||
* @loc_mac: Local MAC address to match, if %EFX_FILTER_MATCH_LOC_MAC or
|
||||
* %EFX_FILTER_MATCH_LOC_MAC_IG is set
|
||||
* @rem_mac: Remote MAC address to match, if %EFX_FILTER_MATCH_REM_MAC is set
|
||||
* @ether_type: Ether-type to match, if %EFX_FILTER_MATCH_ETHER_TYPE is set
|
||||
* @ip_proto: IP transport protocol to match, if %EFX_FILTER_MATCH_IP_PROTO
|
||||
* is set
|
||||
* @loc_host: Local IP host to match, if %EFX_FILTER_MATCH_LOC_HOST is set
|
||||
* @rem_host: Remote IP host to match, if %EFX_FILTER_MATCH_REM_HOST is set
|
||||
* @loc_port: Local TCP/UDP port to match, if %EFX_FILTER_MATCH_LOC_PORT is set
|
||||
* @rem_port: Remote TCP/UDP port to match, if %EFX_FILTER_MATCH_REM_PORT is set
|
||||
* @encap_type: Encapsulation type to match (from &enum efx_encap_type), if
|
||||
* %EFX_FILTER_MATCH_ENCAP_TYPE is set
|
||||
*
|
||||
* The efx_filter_init_rx() or efx_filter_init_tx() function *must* be
|
||||
* used to initialise the structure. The efx_filter_set_*() functions
|
||||
* may then be used to set @rss_context, @match_flags and related
|
||||
* fields.
|
||||
*
|
||||
* The @priority field is used by software to determine whether a new
|
||||
* filter may replace an old one. The hardware priority of a filter
|
||||
* depends on which fields are matched.
|
||||
*/
|
||||
struct efx_filter_spec {
|
||||
u32 match_flags:12;
|
||||
u32 priority:2;
|
||||
u32 flags:6;
|
||||
u32 dmaq_id:12;
|
||||
u32 rss_context;
|
||||
__be16 outer_vid __aligned(4); /* allow jhash2() of match values */
|
||||
__be16 inner_vid;
|
||||
u8 loc_mac[ETH_ALEN];
|
||||
u8 rem_mac[ETH_ALEN];
|
||||
__be16 ether_type;
|
||||
u8 ip_proto;
|
||||
__be32 loc_host[4];
|
||||
__be32 rem_host[4];
|
||||
__be16 loc_port;
|
||||
__be16 rem_port;
|
||||
u32 encap_type:4;
|
||||
/* total 65 bytes */
|
||||
};
|
||||
|
||||
enum {
|
||||
EFX_FILTER_RX_DMAQ_ID_DROP = 0xfff
|
||||
};
|
||||
|
||||
static inline void efx_filter_init_rx(struct efx_filter_spec *spec,
|
||||
enum efx_filter_priority priority,
|
||||
enum efx_filter_flags flags,
|
||||
unsigned rxq_id)
|
||||
{
|
||||
memset(spec, 0, sizeof(*spec));
|
||||
spec->priority = priority;
|
||||
spec->flags = EFX_FILTER_FLAG_RX | flags;
|
||||
spec->rss_context = 0;
|
||||
spec->dmaq_id = rxq_id;
|
||||
}
|
||||
|
||||
static inline void efx_filter_init_tx(struct efx_filter_spec *spec,
|
||||
unsigned txq_id)
|
||||
{
|
||||
memset(spec, 0, sizeof(*spec));
|
||||
spec->priority = EFX_FILTER_PRI_REQUIRED;
|
||||
spec->flags = EFX_FILTER_FLAG_TX;
|
||||
spec->dmaq_id = txq_id;
|
||||
}
|
||||
|
||||
/**
|
||||
* efx_filter_set_ipv4_local - specify IPv4 host, transport protocol and port
|
||||
* @spec: Specification to initialise
|
||||
* @proto: Transport layer protocol number
|
||||
* @host: Local host address (network byte order)
|
||||
* @port: Local port (network byte order)
|
||||
*/
|
||||
static inline int
|
||||
efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto,
|
||||
__be32 host, __be16 port)
|
||||
{
|
||||
spec->match_flags |=
|
||||
EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
|
||||
EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT;
|
||||
spec->ether_type = htons(ETH_P_IP);
|
||||
spec->ip_proto = proto;
|
||||
spec->loc_host[0] = host;
|
||||
spec->loc_port = port;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* efx_filter_set_ipv4_full - specify IPv4 hosts, transport protocol and ports
|
||||
* @spec: Specification to initialise
|
||||
* @proto: Transport layer protocol number
|
||||
* @lhost: Local host address (network byte order)
|
||||
* @lport: Local port (network byte order)
|
||||
* @rhost: Remote host address (network byte order)
|
||||
* @rport: Remote port (network byte order)
|
||||
*/
|
||||
static inline int
|
||||
efx_filter_set_ipv4_full(struct efx_filter_spec *spec, u8 proto,
|
||||
__be32 lhost, __be16 lport,
|
||||
__be32 rhost, __be16 rport)
|
||||
{
|
||||
spec->match_flags |=
|
||||
EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_IP_PROTO |
|
||||
EFX_FILTER_MATCH_LOC_HOST | EFX_FILTER_MATCH_LOC_PORT |
|
||||
EFX_FILTER_MATCH_REM_HOST | EFX_FILTER_MATCH_REM_PORT;
|
||||
spec->ether_type = htons(ETH_P_IP);
|
||||
spec->ip_proto = proto;
|
||||
spec->loc_host[0] = lhost;
|
||||
spec->loc_port = lport;
|
||||
spec->rem_host[0] = rhost;
|
||||
spec->rem_port = rport;
|
||||
return 0;
|
||||
}
|
||||
|
||||
enum {
|
||||
EFX_FILTER_VID_UNSPEC = 0xffff,
|
||||
};
|
||||
|
||||
/**
|
||||
* efx_filter_set_eth_local - specify local Ethernet address and/or VID
|
||||
* @spec: Specification to initialise
|
||||
* @vid: Outer VLAN ID to match, or %EFX_FILTER_VID_UNSPEC
|
||||
* @addr: Local Ethernet MAC address, or %NULL
|
||||
*/
|
||||
static inline int efx_filter_set_eth_local(struct efx_filter_spec *spec,
|
||||
u16 vid, const u8 *addr)
|
||||
{
|
||||
if (vid == EFX_FILTER_VID_UNSPEC && addr == NULL)
|
||||
return -EINVAL;
|
||||
|
||||
if (vid != EFX_FILTER_VID_UNSPEC) {
|
||||
spec->match_flags |= EFX_FILTER_MATCH_OUTER_VID;
|
||||
spec->outer_vid = htons(vid);
|
||||
}
|
||||
if (addr != NULL) {
|
||||
spec->match_flags |= EFX_FILTER_MATCH_LOC_MAC;
|
||||
ether_addr_copy(spec->loc_mac, addr);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* efx_filter_set_uc_def - specify matching otherwise-unmatched unicast
|
||||
* @spec: Specification to initialise
|
||||
*/
|
||||
static inline int efx_filter_set_uc_def(struct efx_filter_spec *spec)
|
||||
{
|
||||
spec->match_flags |= EFX_FILTER_MATCH_LOC_MAC_IG;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* efx_filter_set_mc_def - specify matching otherwise-unmatched multicast
|
||||
* @spec: Specification to initialise
|
||||
*/
|
||||
static inline int efx_filter_set_mc_def(struct efx_filter_spec *spec)
|
||||
{
|
||||
spec->match_flags |= EFX_FILTER_MATCH_LOC_MAC_IG;
|
||||
spec->loc_mac[0] = 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void efx_filter_set_encap_type(struct efx_filter_spec *spec,
|
||||
enum efx_encap_type encap_type)
|
||||
{
|
||||
spec->match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
|
||||
spec->encap_type = encap_type;
|
||||
}
|
||||
|
||||
static inline enum efx_encap_type efx_filter_get_encap_type(
|
||||
const struct efx_filter_spec *spec)
|
||||
{
|
||||
if (spec->match_flags & EFX_FILTER_MATCH_ENCAP_TYPE)
|
||||
return spec->encap_type;
|
||||
return EFX_ENCAP_TYPE_NONE;
|
||||
}
|
||||
#endif /* EFX_FILTER_H */
|
310
drivers/net/ethernet/sfc/siena/io.h
Normal file
310
drivers/net/ethernet/sfc/siena/io.h
Normal file
@ -0,0 +1,310 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/****************************************************************************
|
||||
* Driver for Solarflare network controllers and boards
|
||||
* Copyright 2005-2006 Fen Systems Ltd.
|
||||
* Copyright 2006-2013 Solarflare Communications Inc.
|
||||
*/
|
||||
|
||||
#ifndef EFX_IO_H
|
||||
#define EFX_IO_H
|
||||
|
||||
#include <linux/io.h>
|
||||
#include <linux/spinlock.h>
|
||||
|
||||
/**************************************************************************
|
||||
*
|
||||
* NIC register I/O
|
||||
*
|
||||
**************************************************************************
|
||||
*
|
||||
* Notes on locking strategy for the Falcon architecture:
|
||||
*
|
||||
* Many CSRs are very wide and cannot be read or written atomically.
|
||||
* Writes from the host are buffered by the Bus Interface Unit (BIU)
|
||||
* up to 128 bits. Whenever the host writes part of such a register,
|
||||
* the BIU collects the written value and does not write to the
|
||||
* underlying register until all 4 dwords have been written. A
|
||||
* similar buffering scheme applies to host access to the NIC's 64-bit
|
||||
* SRAM.
|
||||
*
|
||||
* Writes to different CSRs and 64-bit SRAM words must be serialised,
|
||||
* since interleaved access can result in lost writes. We use
|
||||
* efx_nic::biu_lock for this.
|
||||
*
|
||||
* We also serialise reads from 128-bit CSRs and SRAM with the same
|
||||
* spinlock. This may not be necessary, but it doesn't really matter
|
||||
* as there are no such reads on the fast path.
|
||||
*
|
||||
* The DMA descriptor pointers (RX_DESC_UPD and TX_DESC_UPD) are
|
||||
* 128-bit but are special-cased in the BIU to avoid the need for
|
||||
* locking in the host:
|
||||
*
|
||||
* - They are write-only.
|
||||
* - The semantics of writing to these registers are such that
|
||||
* replacing the low 96 bits with zero does not affect functionality.
|
||||
* - If the host writes to the last dword address of such a register
|
||||
* (i.e. the high 32 bits) the underlying register will always be
|
||||
* written. If the collector and the current write together do not
|
||||
* provide values for all 128 bits of the register, the low 96 bits
|
||||
* will be written as zero.
|
||||
* - If the host writes to the address of any other part of such a
|
||||
* register while the collector already holds values for some other
|
||||
* register, the write is discarded and the collector maintains its
|
||||
* current state.
|
||||
*
|
||||
* The EF10 architecture exposes very few registers to the host and
|
||||
* most of them are only 32 bits wide. The only exceptions are the MC
|
||||
* doorbell register pair, which has its own latching, and
|
||||
* TX_DESC_UPD, which works in a similar way to the Falcon
|
||||
* architecture.
|
||||
*/
|
||||
|
||||
#if BITS_PER_LONG == 64
|
||||
#define EFX_USE_QWORD_IO 1
|
||||
#endif
|
||||
|
||||
/* Hardware issue requires that only 64-bit naturally aligned writes
|
||||
* are seen by hardware. Its not strictly necessary to restrict to
|
||||
* x86_64 arch, but done for safety since unusual write combining behaviour
|
||||
* can break PIO.
|
||||
*/
|
||||
#ifdef CONFIG_X86_64
|
||||
/* PIO is a win only if write-combining is possible */
|
||||
#ifdef ARCH_HAS_IOREMAP_WC
|
||||
#define EFX_USE_PIO 1
|
||||
#endif
|
||||
#endif
|
||||
|
||||
static inline u32 efx_reg(struct efx_nic *efx, unsigned int reg)
|
||||
{
|
||||
return efx->reg_base + reg;
|
||||
}
|
||||
|
||||
#ifdef EFX_USE_QWORD_IO
|
||||
static inline void _efx_writeq(struct efx_nic *efx, __le64 value,
|
||||
unsigned int reg)
|
||||
{
|
||||
__raw_writeq((__force u64)value, efx->membase + reg);
|
||||
}
|
||||
static inline __le64 _efx_readq(struct efx_nic *efx, unsigned int reg)
|
||||
{
|
||||
return (__force __le64)__raw_readq(efx->membase + reg);
|
||||
}
|
||||
#endif
|
||||
|
||||
static inline void _efx_writed(struct efx_nic *efx, __le32 value,
|
||||
unsigned int reg)
|
||||
{
|
||||
__raw_writel((__force u32)value, efx->membase + reg);
|
||||
}
|
||||
static inline __le32 _efx_readd(struct efx_nic *efx, unsigned int reg)
|
||||
{
|
||||
return (__force __le32)__raw_readl(efx->membase + reg);
|
||||
}
|
||||
|
||||
/* Write a normal 128-bit CSR, locking as appropriate. */
|
||||
static inline void efx_writeo(struct efx_nic *efx, const efx_oword_t *value,
|
||||
unsigned int reg)
|
||||
{
|
||||
unsigned long flags __attribute__ ((unused));
|
||||
|
||||
netif_vdbg(efx, hw, efx->net_dev,
|
||||
"writing register %x with " EFX_OWORD_FMT "\n", reg,
|
||||
EFX_OWORD_VAL(*value));
|
||||
|
||||
spin_lock_irqsave(&efx->biu_lock, flags);
|
||||
#ifdef EFX_USE_QWORD_IO
|
||||
_efx_writeq(efx, value->u64[0], reg + 0);
|
||||
_efx_writeq(efx, value->u64[1], reg + 8);
|
||||
#else
|
||||
_efx_writed(efx, value->u32[0], reg + 0);
|
||||
_efx_writed(efx, value->u32[1], reg + 4);
|
||||
_efx_writed(efx, value->u32[2], reg + 8);
|
||||
_efx_writed(efx, value->u32[3], reg + 12);
|
||||
#endif
|
||||
spin_unlock_irqrestore(&efx->biu_lock, flags);
|
||||
}
|
||||
|
||||
/* Write 64-bit SRAM through the supplied mapping, locking as appropriate. */
|
||||
static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase,
|
||||
const efx_qword_t *value, unsigned int index)
|
||||
{
|
||||
unsigned int addr = index * sizeof(*value);
|
||||
unsigned long flags __attribute__ ((unused));
|
||||
|
||||
netif_vdbg(efx, hw, efx->net_dev,
|
||||
"writing SRAM address %x with " EFX_QWORD_FMT "\n",
|
||||
addr, EFX_QWORD_VAL(*value));
|
||||
|
||||
spin_lock_irqsave(&efx->biu_lock, flags);
|
||||
#ifdef EFX_USE_QWORD_IO
|
||||
__raw_writeq((__force u64)value->u64[0], membase + addr);
|
||||
#else
|
||||
__raw_writel((__force u32)value->u32[0], membase + addr);
|
||||
__raw_writel((__force u32)value->u32[1], membase + addr + 4);
|
||||
#endif
|
||||
spin_unlock_irqrestore(&efx->biu_lock, flags);
|
||||
}
|
||||
|
||||
/* Write a 32-bit CSR or the last dword of a special 128-bit CSR */
|
||||
static inline void efx_writed(struct efx_nic *efx, const efx_dword_t *value,
|
||||
unsigned int reg)
|
||||
{
|
||||
netif_vdbg(efx, hw, efx->net_dev,
|
||||
"writing register %x with "EFX_DWORD_FMT"\n",
|
||||
reg, EFX_DWORD_VAL(*value));
|
||||
|
||||
/* No lock required */
|
||||
_efx_writed(efx, value->u32[0], reg);
|
||||
}
|
||||
|
||||
/* Read a 128-bit CSR, locking as appropriate. */
static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value,
			     unsigned int reg)
{
	unsigned long flags __attribute__ ((unused));

	/* The four dword reads are performed under biu_lock so the
	 * 128-bit value is consistent w.r.t. other BIU accesses.
	 */
	spin_lock_irqsave(&efx->biu_lock, flags);
	value->u32[0] = _efx_readd(efx, reg + 0);
	value->u32[1] = _efx_readd(efx, reg + 4);
	value->u32[2] = _efx_readd(efx, reg + 8);
	value->u32[3] = _efx_readd(efx, reg + 12);
	spin_unlock_irqrestore(&efx->biu_lock, flags);

	netif_vdbg(efx, hw, efx->net_dev,
		   "read from register %x, got " EFX_OWORD_FMT "\n", reg,
		   EFX_OWORD_VAL(*value));
}
|
||||
|
||||
/* Read 64-bit SRAM through the supplied mapping, locking as appropriate. */
static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase,
				  efx_qword_t *value, unsigned int index)
{
	unsigned int addr = index * sizeof(*value);
	unsigned long flags __attribute__ ((unused));

	/* Lock so the two dword reads (when qword I/O is unavailable)
	 * observe a consistent 64-bit value.
	 */
	spin_lock_irqsave(&efx->biu_lock, flags);
#ifdef EFX_USE_QWORD_IO
	value->u64[0] = (__force __le64)__raw_readq(membase + addr);
#else
	value->u32[0] = (__force __le32)__raw_readl(membase + addr);
	value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4);
#endif
	spin_unlock_irqrestore(&efx->biu_lock, flags);

	netif_vdbg(efx, hw, efx->net_dev,
		   "read from SRAM address %x, got "EFX_QWORD_FMT"\n",
		   addr, EFX_QWORD_VAL(*value));
}
|
||||
|
||||
/* Read a 32-bit CSR or SRAM */
static inline void efx_readd(struct efx_nic *efx, efx_dword_t *value,
			     unsigned int reg)
{
	/* Single dword read; no locking needed */
	value->u32[0] = _efx_readd(efx, reg);
	netif_vdbg(efx, hw, efx->net_dev,
		   "read from register %x, got "EFX_DWORD_FMT"\n",
		   reg, EFX_DWORD_VAL(*value));
}
|
||||
|
||||
/* Write a 128-bit CSR forming part of a table */
static inline void
efx_writeo_table(struct efx_nic *efx, const efx_oword_t *value,
		 unsigned int reg, unsigned int index)
{
	/* Table entries are laid out contiguously, one oword per index */
	efx_writeo(efx, value, reg + index * sizeof(efx_oword_t));
}
|
||||
|
||||
/* Read a 128-bit CSR forming part of a table */
static inline void efx_reado_table(struct efx_nic *efx, efx_oword_t *value,
				   unsigned int reg, unsigned int index)
{
	/* Mirror of efx_writeo_table(): one oword per table index */
	efx_reado(efx, value, reg + index * sizeof(efx_oword_t));
}
|
||||
|
||||
/* default VI stride (step between per-VI registers) is 8K on EF10 and
 * 64K on EF100
 */
#define EFX_DEFAULT_VI_STRIDE 0x2000
#define EF100_DEFAULT_VI_STRIDE 0x10000
|
||||
|
||||
/* Calculate offset to page-mapped register */
static inline unsigned int efx_paged_reg(struct efx_nic *efx, unsigned int page,
					 unsigned int reg)
{
	/* Each page of registers is vi_stride bytes apart (see
	 * EFX_DEFAULT_VI_STRIDE / EF100_DEFAULT_VI_STRIDE above).
	 */
	return page * efx->vi_stride + reg;
}
|
||||
|
||||
/* Write the whole of RX_DESC_UPD or TX_DESC_UPD */
static inline void _efx_writeo_page(struct efx_nic *efx, efx_oword_t *value,
				    unsigned int reg, unsigned int page)
{
	reg = efx_paged_reg(efx, page, reg);

	netif_vdbg(efx, hw, efx->net_dev,
		   "writing register %x with " EFX_OWORD_FMT "\n", reg,
		   EFX_OWORD_VAL(*value));

	/* NOTE(review): unlike efx_writeo()-style accessors above, this
	 * path takes no biu_lock — presumably safe for these paged
	 * registers; confirm against the hardware documentation.
	 */
#ifdef EFX_USE_QWORD_IO
	_efx_writeq(efx, value->u64[0], reg + 0);
	_efx_writeq(efx, value->u64[1], reg + 8);
#else
	_efx_writed(efx, value->u32[0], reg + 0);
	_efx_writed(efx, value->u32[1], reg + 4);
	_efx_writed(efx, value->u32[2], reg + 8);
	_efx_writed(efx, value->u32[3], reg + 12);
#endif
}
/* Compile-time check that @reg is one of the two registers this path
 * supports (the BUILD_BUG_ON_ZERO term evaluates to 0 when valid).
 */
#define efx_writeo_page(efx, value, reg, page)				\
	_efx_writeo_page(efx, value,					\
			 reg +						\
			 BUILD_BUG_ON_ZERO((reg) != 0x830 && (reg) != 0xa10), \
			 page)
|
||||
|
||||
/* Write a page-mapped 32-bit CSR (EVQ_RPTR, EVQ_TMR (EF10), or the
 * high bits of RX_DESC_UPD or TX_DESC_UPD)
 */
static inline void
_efx_writed_page(struct efx_nic *efx, const efx_dword_t *value,
		 unsigned int reg, unsigned int page)
{
	efx_writed(efx, value, efx_paged_reg(efx, page, reg));
}
/* The BUILD_BUG_ON_ZERO term rejects, at compile time, any @reg that is
 * not in the known set of page-mapped dword registers.
 */
#define efx_writed_page(efx, value, reg, page)				\
	_efx_writed_page(efx, value,					\
			 reg +						\
			 BUILD_BUG_ON_ZERO((reg) != 0x180 &&		\
					   (reg) != 0x200 &&		\
					   (reg) != 0x400 &&		\
					   (reg) != 0x420 &&		\
					   (reg) != 0x830 &&		\
					   (reg) != 0x83c &&		\
					   (reg) != 0xa18 &&		\
					   (reg) != 0xa1c),		\
			 page)
|
||||
|
||||
/* Write TIMER_COMMAND. This is a page-mapped 32-bit CSR, but a bug
 * in the BIU means that writes to TIMER_COMMAND[0] invalidate the
 * collector register.
 */
static inline void _efx_writed_page_locked(struct efx_nic *efx,
					   const efx_dword_t *value,
					   unsigned int reg,
					   unsigned int page)
{
	unsigned long flags __attribute__ ((unused));

	/* Only page 0 is affected by the BIU bug described above, so
	 * only that page needs biu_lock held around the write.
	 */
	if (page == 0) {
		spin_lock_irqsave(&efx->biu_lock, flags);
		efx_writed(efx, value, efx_paged_reg(efx, page, reg));
		spin_unlock_irqrestore(&efx->biu_lock, flags);
	} else {
		efx_writed(efx, value, efx_paged_reg(efx, page, reg));
	}
}
/* Compile-time check that @reg is the TIMER_COMMAND register */
#define efx_writed_page_locked(efx, value, reg, page)			\
	_efx_writed_page_locked(efx, value,				\
				reg + BUILD_BUG_ON_ZERO((reg) != 0x420), \
				page)
|
||||
|
||||
#endif /* EFX_IO_H */
|
2259
drivers/net/ethernet/sfc/siena/mcdi.c
Normal file
2259
drivers/net/ethernet/sfc/siena/mcdi.c
Normal file
File diff suppressed because it is too large
Load Diff
386
drivers/net/ethernet/sfc/siena/mcdi.h
Normal file
386
drivers/net/ethernet/sfc/siena/mcdi.h
Normal file
@ -0,0 +1,386 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/****************************************************************************
|
||||
* Driver for Solarflare network controllers and boards
|
||||
* Copyright 2008-2013 Solarflare Communications Inc.
|
||||
*/
|
||||
|
||||
#ifndef EFX_MCDI_H
|
||||
#define EFX_MCDI_H
|
||||
|
||||
/**
 * enum efx_mcdi_state - MCDI request handling state
 * @MCDI_STATE_QUIESCENT: No pending MCDI requests. If the caller holds the
 *	mcdi @iface_lock then they are able to move to %MCDI_STATE_RUNNING
 * @MCDI_STATE_RUNNING_SYNC: There is a synchronous MCDI request pending.
 *	Only the thread that moved into this state is allowed to move out of it.
 * @MCDI_STATE_RUNNING_ASYNC: There is an asynchronous MCDI request pending.
 * @MCDI_STATE_PROXY_WAIT: An MCDI request has completed with a response that
 *	indicates we must wait for a proxy try again message.
 * @MCDI_STATE_COMPLETED: An MCDI request has completed, but the owning thread
 *	has not yet consumed the result. For all other threads, equivalent to
 *	%MCDI_STATE_RUNNING.
 */
enum efx_mcdi_state {
	MCDI_STATE_QUIESCENT,
	MCDI_STATE_RUNNING_SYNC,
	MCDI_STATE_RUNNING_ASYNC,
	MCDI_STATE_PROXY_WAIT,
	MCDI_STATE_COMPLETED,
};
|
||||
|
||||
/**
 * enum efx_mcdi_mode - MCDI transaction mode
 * @MCDI_MODE_POLL: poll for MCDI completion, until timeout
 * @MCDI_MODE_EVENTS: wait for an mcdi_event. On timeout, poll once
 * @MCDI_MODE_FAIL: we think MCDI is dead, so fail-fast all calls
 */
enum efx_mcdi_mode {
	MCDI_MODE_POLL,
	MCDI_MODE_EVENTS,
	MCDI_MODE_FAIL,
};
|
||||
|
||||
/**
 * struct efx_mcdi_iface - MCDI protocol context
 * @efx: The associated NIC.
 * @state: Request handling state. Waited for by @wq.
 * @mode: Poll for mcdi completion, or wait for an mcdi_event.
 * @wq: Wait queue for threads waiting for @state != %MCDI_STATE_RUNNING
 * @new_epoch: Indicates start of day or start of MC reboot recovery
 * @iface_lock: Serialises access to @seqno, @credits and response metadata
 * @seqno: The next sequence number to use for mcdi requests.
 * @credits: Number of spurious MCDI completion events allowed before we
 *	trigger a fatal error
 * @resprc: Response error/success code (Linux numbering)
 * @resprc_raw: Response error/success code (MCDI numbering)
 * @resp_hdr_len: Response header length
 * @resp_data_len: Response data (SDU or error) length
 * @async_lock: Serialises access to @async_list while event processing is
 *	enabled
 * @async_list: Queue of asynchronous requests
 * @async_timer: Timer for asynchronous request timeout
 * @logging_buffer: buffer that may be used to build MCDI tracing messages
 * @logging_enabled: whether to trace MCDI
 * @proxy_rx_handle: Most recently received proxy authorisation handle
 * @proxy_rx_status: Status of most recent proxy authorisation
 * @proxy_rx_wq: Wait queue for updates to proxy_rx_handle
 */
struct efx_mcdi_iface {
	struct efx_nic *efx;
	enum efx_mcdi_state state;
	enum efx_mcdi_mode mode;
	wait_queue_head_t wq;
	spinlock_t iface_lock;
	bool new_epoch;
	unsigned int credits;
	unsigned int seqno;
	int resprc;
	int resprc_raw;
	size_t resp_hdr_len;
	size_t resp_data_len;
	spinlock_t async_lock;
	struct list_head async_list;
	struct timer_list async_timer;
#ifdef CONFIG_SFC_MCDI_LOGGING
	char *logging_buffer;
	bool logging_enabled;
#endif
	unsigned int proxy_rx_handle;
	int proxy_rx_status;
	wait_queue_head_t proxy_rx_wq;
};
|
||||
|
||||
/* Hardware monitor (hwmon) state for a NIC: sensor readings are DMAed
 * by the MC into @dma_buf and exposed through sysfs attributes.
 */
struct efx_mcdi_mon {
	struct efx_buffer dma_buf;	/* DMA buffer the MC writes sensor entries into */
	struct mutex update_lock;	/* serialises refreshes of @dma_buf */
	unsigned long last_update;	/* jiffies of last successful refresh */
	struct device *device;		/* hwmon class device */
	struct efx_mcdi_mon_attribute *attrs;	/* array of sysfs attributes */
	struct attribute_group group;
	const struct attribute_group *groups[2];	/* @group + NULL terminator */
	unsigned int n_attrs;		/* number of entries used in @attrs */
};
|
||||
|
||||
/* Per-partition MTD state for NVRAM exposed over MCDI */
struct efx_mcdi_mtd_partition {
	struct efx_mtd_partition common;
	bool updating;		/* an update (erase/write) is in progress */
	u16 nvram_type;		/* MC_CMD_NVRAM_* partition type */
	u16 fw_subtype;		/* firmware subtype, used when naming the partition */
};

/* Recover the efx_mcdi_mtd_partition from an embedded mtd_info */
#define to_efx_mcdi_mtd_partition(mtd)				\
	container_of(mtd, struct efx_mcdi_mtd_partition, common.mtd)
|
||||
|
||||
/**
 * struct efx_mcdi_data - extra state for NICs that implement MCDI
 * @iface: Interface/protocol state
 * @hwmon: Hardware monitor state
 * @fn_flags: Flags for this function, as returned by %MC_CMD_DRV_ATTACH.
 */
struct efx_mcdi_data {
	struct efx_mcdi_iface iface;
#ifdef CONFIG_SFC_MCDI_MON
	struct efx_mcdi_mon hwmon;
#endif
	u32 fn_flags;
};
|
||||
|
||||
/* Return the MCDI interface state for @efx; the NIC must have MCDI
 * state attached (efx->mcdi != NULL).
 */
static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)
{
	EFX_WARN_ON_PARANOID(!efx->mcdi);
	return &efx->mcdi->iface;
}
|
||||
|
||||
#ifdef CONFIG_SFC_MCDI_MON
|
||||
static inline struct efx_mcdi_mon *efx_mcdi_mon(struct efx_nic *efx)
|
||||
{
|
||||
EFX_WARN_ON_PARANOID(!efx->mcdi);
|
||||
return &efx->mcdi->hwmon;
|
||||
}
|
||||
#endif
|
||||
|
||||
int efx_siena_mcdi_init(struct efx_nic *efx);
|
||||
void efx_siena_mcdi_detach(struct efx_nic *efx);
|
||||
void efx_siena_mcdi_fini(struct efx_nic *efx);
|
||||
|
||||
int efx_siena_mcdi_rpc(struct efx_nic *efx, unsigned int cmd,
|
||||
const efx_dword_t *inbuf, size_t inlen,
|
||||
efx_dword_t *outbuf, size_t outlen,
|
||||
size_t *outlen_actual);
|
||||
int efx_siena_mcdi_rpc_quiet(struct efx_nic *efx, unsigned int cmd,
|
||||
const efx_dword_t *inbuf, size_t inlen,
|
||||
efx_dword_t *outbuf, size_t outlen,
|
||||
size_t *outlen_actual);
|
||||
|
||||
int efx_siena_mcdi_rpc_start(struct efx_nic *efx, unsigned int cmd,
|
||||
const efx_dword_t *inbuf, size_t inlen);
|
||||
int efx_siena_mcdi_rpc_finish(struct efx_nic *efx, unsigned int cmd,
|
||||
size_t inlen, efx_dword_t *outbuf, size_t outlen,
|
||||
size_t *outlen_actual);
|
||||
int efx_siena_mcdi_rpc_finish_quiet(struct efx_nic *efx, unsigned int cmd,
|
||||
size_t inlen, efx_dword_t *outbuf,
|
||||
size_t outlen, size_t *outlen_actual);
|
||||
|
||||
typedef void efx_mcdi_async_completer(struct efx_nic *efx,
|
||||
unsigned long cookie, int rc,
|
||||
efx_dword_t *outbuf,
|
||||
size_t outlen_actual);
|
||||
int efx_siena_mcdi_rpc_async(struct efx_nic *efx, unsigned int cmd,
|
||||
const efx_dword_t *inbuf, size_t inlen,
|
||||
size_t outlen,
|
||||
efx_mcdi_async_completer *complete,
|
||||
unsigned long cookie);
|
||||
int efx_siena_mcdi_rpc_async_quiet(struct efx_nic *efx, unsigned int cmd,
|
||||
const efx_dword_t *inbuf, size_t inlen,
|
||||
size_t outlen,
|
||||
efx_mcdi_async_completer *complete,
|
||||
unsigned long cookie);
|
||||
|
||||
void efx_siena_mcdi_display_error(struct efx_nic *efx, unsigned int cmd,
|
||||
size_t inlen, efx_dword_t *outbuf,
|
||||
size_t outlen, int rc);
|
||||
|
||||
int efx_siena_mcdi_poll_reboot(struct efx_nic *efx);
|
||||
void efx_siena_mcdi_mode_poll(struct efx_nic *efx);
|
||||
void efx_siena_mcdi_mode_event(struct efx_nic *efx);
|
||||
void efx_siena_mcdi_flush_async(struct efx_nic *efx);
|
||||
|
||||
void efx_siena_mcdi_process_event(struct efx_channel *channel, efx_qword_t *event);
|
||||
void efx_siena_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev);
|
||||
|
||||
/* We expect that 16- and 32-bit fields in MCDI requests and responses
|
||||
* are appropriately aligned, but 64-bit fields are only
|
||||
* 32-bit-aligned. Also, on Siena we must copy to the MC shared
|
||||
* memory strictly 32 bits at a time, so add any necessary padding.
|
||||
*/
|
||||
#define MCDI_TX_BUF_LEN(_len) DIV_ROUND_UP((_len), 4)
|
||||
#define _MCDI_DECLARE_BUF(_name, _len) \
|
||||
efx_dword_t _name[DIV_ROUND_UP(_len, 4)]
|
||||
#define MCDI_DECLARE_BUF(_name, _len) \
|
||||
_MCDI_DECLARE_BUF(_name, _len) = {{{0}}}
|
||||
#define MCDI_DECLARE_BUF_ERR(_name) \
|
||||
MCDI_DECLARE_BUF(_name, 8)
|
||||
#define _MCDI_PTR(_buf, _offset) \
|
||||
((u8 *)(_buf) + (_offset))
|
||||
#define MCDI_PTR(_buf, _field) \
|
||||
_MCDI_PTR(_buf, MC_CMD_ ## _field ## _OFST)
|
||||
#define _MCDI_CHECK_ALIGN(_ofst, _align) \
|
||||
((_ofst) + BUILD_BUG_ON_ZERO((_ofst) & (_align - 1)))
|
||||
#define _MCDI_DWORD(_buf, _field) \
|
||||
((_buf) + (_MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _OFST, 4) >> 2))
|
||||
|
||||
#define MCDI_BYTE(_buf, _field) \
|
||||
((void)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 1), \
|
||||
*MCDI_PTR(_buf, _field))
|
||||
#define MCDI_WORD(_buf, _field) \
|
||||
((u16)BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 2) + \
|
||||
le16_to_cpu(*(__force const __le16 *)MCDI_PTR(_buf, _field)))
|
||||
#define MCDI_SET_DWORD(_buf, _field, _value) \
|
||||
EFX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), EFX_DWORD_0, _value)
|
||||
#define MCDI_DWORD(_buf, _field) \
|
||||
EFX_DWORD_FIELD(*_MCDI_DWORD(_buf, _field), EFX_DWORD_0)
|
||||
#define MCDI_POPULATE_DWORD_1(_buf, _field, _name1, _value1) \
|
||||
EFX_POPULATE_DWORD_1(*_MCDI_DWORD(_buf, _field), \
|
||||
MC_CMD_ ## _name1, _value1)
|
||||
#define MCDI_POPULATE_DWORD_2(_buf, _field, _name1, _value1, \
|
||||
_name2, _value2) \
|
||||
EFX_POPULATE_DWORD_2(*_MCDI_DWORD(_buf, _field), \
|
||||
MC_CMD_ ## _name1, _value1, \
|
||||
MC_CMD_ ## _name2, _value2)
|
||||
#define MCDI_POPULATE_DWORD_3(_buf, _field, _name1, _value1, \
|
||||
_name2, _value2, _name3, _value3) \
|
||||
EFX_POPULATE_DWORD_3(*_MCDI_DWORD(_buf, _field), \
|
||||
MC_CMD_ ## _name1, _value1, \
|
||||
MC_CMD_ ## _name2, _value2, \
|
||||
MC_CMD_ ## _name3, _value3)
|
||||
#define MCDI_POPULATE_DWORD_4(_buf, _field, _name1, _value1, \
|
||||
_name2, _value2, _name3, _value3, \
|
||||
_name4, _value4) \
|
||||
EFX_POPULATE_DWORD_4(*_MCDI_DWORD(_buf, _field), \
|
||||
MC_CMD_ ## _name1, _value1, \
|
||||
MC_CMD_ ## _name2, _value2, \
|
||||
MC_CMD_ ## _name3, _value3, \
|
||||
MC_CMD_ ## _name4, _value4)
|
||||
#define MCDI_POPULATE_DWORD_5(_buf, _field, _name1, _value1, \
|
||||
_name2, _value2, _name3, _value3, \
|
||||
_name4, _value4, _name5, _value5) \
|
||||
EFX_POPULATE_DWORD_5(*_MCDI_DWORD(_buf, _field), \
|
||||
MC_CMD_ ## _name1, _value1, \
|
||||
MC_CMD_ ## _name2, _value2, \
|
||||
MC_CMD_ ## _name3, _value3, \
|
||||
MC_CMD_ ## _name4, _value4, \
|
||||
MC_CMD_ ## _name5, _value5)
|
||||
#define MCDI_POPULATE_DWORD_6(_buf, _field, _name1, _value1, \
|
||||
_name2, _value2, _name3, _value3, \
|
||||
_name4, _value4, _name5, _value5, \
|
||||
_name6, _value6) \
|
||||
EFX_POPULATE_DWORD_6(*_MCDI_DWORD(_buf, _field), \
|
||||
MC_CMD_ ## _name1, _value1, \
|
||||
MC_CMD_ ## _name2, _value2, \
|
||||
MC_CMD_ ## _name3, _value3, \
|
||||
MC_CMD_ ## _name4, _value4, \
|
||||
MC_CMD_ ## _name5, _value5, \
|
||||
MC_CMD_ ## _name6, _value6)
|
||||
#define MCDI_POPULATE_DWORD_7(_buf, _field, _name1, _value1, \
|
||||
_name2, _value2, _name3, _value3, \
|
||||
_name4, _value4, _name5, _value5, \
|
||||
_name6, _value6, _name7, _value7) \
|
||||
EFX_POPULATE_DWORD_7(*_MCDI_DWORD(_buf, _field), \
|
||||
MC_CMD_ ## _name1, _value1, \
|
||||
MC_CMD_ ## _name2, _value2, \
|
||||
MC_CMD_ ## _name3, _value3, \
|
||||
MC_CMD_ ## _name4, _value4, \
|
||||
MC_CMD_ ## _name5, _value5, \
|
||||
MC_CMD_ ## _name6, _value6, \
|
||||
MC_CMD_ ## _name7, _value7)
|
||||
#define MCDI_SET_QWORD(_buf, _field, _value) \
|
||||
do { \
|
||||
EFX_POPULATE_DWORD_1(_MCDI_DWORD(_buf, _field)[0], \
|
||||
EFX_DWORD_0, (u32)(_value)); \
|
||||
EFX_POPULATE_DWORD_1(_MCDI_DWORD(_buf, _field)[1], \
|
||||
EFX_DWORD_0, (u64)(_value) >> 32); \
|
||||
} while (0)
|
||||
#define MCDI_QWORD(_buf, _field) \
|
||||
(EFX_DWORD_FIELD(_MCDI_DWORD(_buf, _field)[0], EFX_DWORD_0) | \
|
||||
(u64)EFX_DWORD_FIELD(_MCDI_DWORD(_buf, _field)[1], EFX_DWORD_0) << 32)
|
||||
#define MCDI_FIELD(_ptr, _type, _field) \
|
||||
EFX_EXTRACT_DWORD( \
|
||||
*(efx_dword_t *) \
|
||||
_MCDI_PTR(_ptr, MC_CMD_ ## _type ## _ ## _field ## _OFST & ~3),\
|
||||
MC_CMD_ ## _type ## _ ## _field ## _LBN & 0x1f, \
|
||||
(MC_CMD_ ## _type ## _ ## _field ## _LBN & 0x1f) + \
|
||||
MC_CMD_ ## _type ## _ ## _field ## _WIDTH - 1)
|
||||
|
||||
#define _MCDI_ARRAY_PTR(_buf, _field, _index, _align) \
|
||||
(_MCDI_PTR(_buf, _MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _OFST, _align))\
|
||||
+ (_index) * _MCDI_CHECK_ALIGN(MC_CMD_ ## _field ## _LEN, _align))
|
||||
#define MCDI_DECLARE_STRUCT_PTR(_name) \
|
||||
efx_dword_t *_name
|
||||
#define MCDI_ARRAY_STRUCT_PTR(_buf, _field, _index) \
|
||||
((efx_dword_t *)_MCDI_ARRAY_PTR(_buf, _field, _index, 4))
|
||||
#define MCDI_VAR_ARRAY_LEN(_len, _field) \
|
||||
min_t(size_t, MC_CMD_ ## _field ## _MAXNUM, \
|
||||
((_len) - MC_CMD_ ## _field ## _OFST) / MC_CMD_ ## _field ## _LEN)
|
||||
#define MCDI_ARRAY_WORD(_buf, _field, _index) \
|
||||
(BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 2) + \
|
||||
le16_to_cpu(*(__force const __le16 *) \
|
||||
_MCDI_ARRAY_PTR(_buf, _field, _index, 2)))
|
||||
#define _MCDI_ARRAY_DWORD(_buf, _field, _index) \
|
||||
(BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 4) + \
|
||||
(efx_dword_t *)_MCDI_ARRAY_PTR(_buf, _field, _index, 4))
|
||||
#define MCDI_SET_ARRAY_DWORD(_buf, _field, _index, _value) \
|
||||
EFX_SET_DWORD_FIELD(*_MCDI_ARRAY_DWORD(_buf, _field, _index), \
|
||||
EFX_DWORD_0, _value)
|
||||
#define MCDI_ARRAY_DWORD(_buf, _field, _index) \
|
||||
EFX_DWORD_FIELD(*_MCDI_ARRAY_DWORD(_buf, _field, _index), EFX_DWORD_0)
|
||||
#define _MCDI_ARRAY_QWORD(_buf, _field, _index) \
|
||||
(BUILD_BUG_ON_ZERO(MC_CMD_ ## _field ## _LEN != 8) + \
|
||||
(efx_dword_t *)_MCDI_ARRAY_PTR(_buf, _field, _index, 4))
|
||||
#define MCDI_SET_ARRAY_QWORD(_buf, _field, _index, _value) \
|
||||
do { \
|
||||
EFX_SET_DWORD_FIELD(_MCDI_ARRAY_QWORD(_buf, _field, _index)[0],\
|
||||
EFX_DWORD_0, (u32)(_value)); \
|
||||
EFX_SET_DWORD_FIELD(_MCDI_ARRAY_QWORD(_buf, _field, _index)[1],\
|
||||
EFX_DWORD_0, (u64)(_value) >> 32); \
|
||||
} while (0)
|
||||
#define MCDI_ARRAY_FIELD(_buf, _field1, _type, _index, _field2) \
|
||||
MCDI_FIELD(MCDI_ARRAY_STRUCT_PTR(_buf, _field1, _index), \
|
||||
_type ## _TYPEDEF, _field2)
|
||||
|
||||
#define MCDI_EVENT_FIELD(_ev, _field) \
|
||||
EFX_QWORD_FIELD(_ev, MCDI_EVENT_ ## _field)
|
||||
|
||||
#define MCDI_CAPABILITY(field) \
|
||||
MC_CMD_GET_CAPABILITIES_V8_OUT_ ## field ## _LBN
|
||||
|
||||
#define MCDI_CAPABILITY_OFST(field) \
|
||||
MC_CMD_GET_CAPABILITIES_V8_OUT_ ## field ## _OFST
|
||||
|
||||
#define efx_has_cap(efx, field) \
|
||||
efx->type->check_caps(efx, \
|
||||
MCDI_CAPABILITY(field), \
|
||||
MCDI_CAPABILITY_OFST(field))
|
||||
|
||||
void efx_siena_mcdi_print_fwver(struct efx_nic *efx, char *buf, size_t len);
|
||||
int efx_siena_mcdi_get_board_cfg(struct efx_nic *efx, u8 *mac_address,
|
||||
u16 *fw_subtype_list, u32 *capabilities);
|
||||
int efx_siena_mcdi_log_ctrl(struct efx_nic *efx, bool evq, bool uart,
|
||||
u32 dest_evq);
|
||||
int efx_siena_mcdi_nvram_types(struct efx_nic *efx, u32 *nvram_types_out);
|
||||
int efx_siena_mcdi_nvram_info(struct efx_nic *efx, unsigned int type,
|
||||
size_t *size_out, size_t *erase_size_out,
|
||||
bool *protected_out);
|
||||
int efx_siena_mcdi_nvram_test_all(struct efx_nic *efx);
|
||||
int efx_siena_mcdi_handle_assertion(struct efx_nic *efx);
|
||||
int efx_siena_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
|
||||
int efx_siena_mcdi_wol_filter_set_magic(struct efx_nic *efx, const u8 *mac,
|
||||
int *id_out);
|
||||
int efx_siena_mcdi_wol_filter_get_magic(struct efx_nic *efx, int *id_out);
|
||||
int efx_siena_mcdi_wol_filter_remove(struct efx_nic *efx, int id);
|
||||
int efx_siena_mcdi_wol_filter_reset(struct efx_nic *efx);
|
||||
int efx_siena_mcdi_flush_rxqs(struct efx_nic *efx);
|
||||
void efx_siena_mcdi_process_link_change(struct efx_nic *efx, efx_qword_t *ev);
|
||||
void efx_siena_mcdi_mac_start_stats(struct efx_nic *efx);
|
||||
void efx_siena_mcdi_mac_stop_stats(struct efx_nic *efx);
|
||||
void efx_siena_mcdi_mac_pull_stats(struct efx_nic *efx);
|
||||
enum reset_type efx_siena_mcdi_map_reset_reason(enum reset_type reason);
|
||||
int efx_siena_mcdi_reset(struct efx_nic *efx, enum reset_type method);
|
||||
|
||||
#ifdef CONFIG_SFC_MCDI_MON
|
||||
int efx_siena_mcdi_mon_probe(struct efx_nic *efx);
|
||||
void efx_siena_mcdi_mon_remove(struct efx_nic *efx);
|
||||
#else
|
||||
static inline int efx_siena_mcdi_mon_probe(struct efx_nic *efx) { return 0; }
|
||||
static inline void efx_siena_mcdi_mon_remove(struct efx_nic *efx) {}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_SFC_MTD
|
||||
int efx_siena_mcdi_mtd_read(struct mtd_info *mtd, loff_t start, size_t len,
|
||||
size_t *retlen, u8 *buffer);
|
||||
int efx_siena_mcdi_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len);
|
||||
int efx_siena_mcdi_mtd_write(struct mtd_info *mtd, loff_t start, size_t len,
|
||||
size_t *retlen, const u8 *buffer);
|
||||
int efx_siena_mcdi_mtd_sync(struct mtd_info *mtd);
|
||||
void efx_siena_mcdi_mtd_rename(struct efx_mtd_partition *part);
|
||||
#endif
|
||||
|
||||
#endif /* EFX_MCDI_H */
|
531
drivers/net/ethernet/sfc/siena/mcdi_mon.c
Normal file
531
drivers/net/ethernet/sfc/siena/mcdi_mon.c
Normal file
@ -0,0 +1,531 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/****************************************************************************
|
||||
* Driver for Solarflare network controllers and boards
|
||||
* Copyright 2011-2013 Solarflare Communications Inc.
|
||||
*/
|
||||
|
||||
#include <linux/bitops.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/hwmon.h>
|
||||
#include <linux/stat.h>
|
||||
|
||||
#include "net_driver.h"
|
||||
#include "mcdi.h"
|
||||
#include "mcdi_pcol.h"
|
||||
#include "nic.h"
|
||||
|
||||
/* Sensor classes as exposed to hwmon; indexes efx_hwmon_unit[] below */
enum efx_hwmon_type {
	EFX_HWMON_UNKNOWN,
	EFX_HWMON_TEMP,		/* temperature */
	EFX_HWMON_COOL,		/* cooling device, probably a heatsink */
	EFX_HWMON_IN,		/* voltage */
	EFX_HWMON_CURR,		/* current */
	EFX_HWMON_POWER,	/* power */
	EFX_HWMON_TYPES_COUNT
};
|
||||
|
||||
/* Unit suffix per sensor class, used when logging sensor events.
 * Entries not listed (e.g. EFX_HWMON_UNKNOWN) are NULL.
 */
static const char *const efx_hwmon_unit[EFX_HWMON_TYPES_COUNT] = {
	[EFX_HWMON_TEMP]  = " degC",
	[EFX_HWMON_COOL]  = " rpm", /* though nonsense for a heatsink */
	[EFX_HWMON_IN]    = " mV",
	[EFX_HWMON_CURR]  = " mA",
	[EFX_HWMON_POWER] = " W",
};
|
||||
|
||||
static const struct {
|
||||
const char *label;
|
||||
enum efx_hwmon_type hwmon_type;
|
||||
int port;
|
||||
} efx_mcdi_sensor_type[] = {
|
||||
#define SENSOR(name, label, hwmon_type, port) \
|
||||
[MC_CMD_SENSOR_##name] = { label, EFX_HWMON_ ## hwmon_type, port }
|
||||
SENSOR(CONTROLLER_TEMP, "Controller board temp.", TEMP, -1),
|
||||
SENSOR(PHY_COMMON_TEMP, "PHY temp.", TEMP, -1),
|
||||
SENSOR(CONTROLLER_COOLING, "Controller heat sink", COOL, -1),
|
||||
SENSOR(PHY0_TEMP, "PHY temp.", TEMP, 0),
|
||||
SENSOR(PHY0_COOLING, "PHY heat sink", COOL, 0),
|
||||
SENSOR(PHY1_TEMP, "PHY temp.", TEMP, 1),
|
||||
SENSOR(PHY1_COOLING, "PHY heat sink", COOL, 1),
|
||||
SENSOR(IN_1V0, "1.0V supply", IN, -1),
|
||||
SENSOR(IN_1V2, "1.2V supply", IN, -1),
|
||||
SENSOR(IN_1V8, "1.8V supply", IN, -1),
|
||||
SENSOR(IN_2V5, "2.5V supply", IN, -1),
|
||||
SENSOR(IN_3V3, "3.3V supply", IN, -1),
|
||||
SENSOR(IN_12V0, "12.0V supply", IN, -1),
|
||||
SENSOR(IN_1V2A, "1.2V analogue supply", IN, -1),
|
||||
SENSOR(IN_VREF, "Ref. voltage", IN, -1),
|
||||
SENSOR(OUT_VAOE, "AOE FPGA supply", IN, -1),
|
||||
SENSOR(AOE_TEMP, "AOE FPGA temp.", TEMP, -1),
|
||||
SENSOR(PSU_AOE_TEMP, "AOE regulator temp.", TEMP, -1),
|
||||
SENSOR(PSU_TEMP, "Controller regulator temp.",
|
||||
TEMP, -1),
|
||||
SENSOR(FAN_0, "Fan 0", COOL, -1),
|
||||
SENSOR(FAN_1, "Fan 1", COOL, -1),
|
||||
SENSOR(FAN_2, "Fan 2", COOL, -1),
|
||||
SENSOR(FAN_3, "Fan 3", COOL, -1),
|
||||
SENSOR(FAN_4, "Fan 4", COOL, -1),
|
||||
SENSOR(IN_VAOE, "AOE input supply", IN, -1),
|
||||
SENSOR(OUT_IAOE, "AOE output current", CURR, -1),
|
||||
SENSOR(IN_IAOE, "AOE input current", CURR, -1),
|
||||
SENSOR(NIC_POWER, "Board power use", POWER, -1),
|
||||
SENSOR(IN_0V9, "0.9V supply", IN, -1),
|
||||
SENSOR(IN_I0V9, "0.9V supply current", CURR, -1),
|
||||
SENSOR(IN_I1V2, "1.2V supply current", CURR, -1),
|
||||
SENSOR(IN_0V9_ADC, "0.9V supply (ext. ADC)", IN, -1),
|
||||
SENSOR(CONTROLLER_2_TEMP, "Controller board temp. 2", TEMP, -1),
|
||||
SENSOR(VREG_INTERNAL_TEMP, "Regulator die temp.", TEMP, -1),
|
||||
SENSOR(VREG_0V9_TEMP, "0.9V regulator temp.", TEMP, -1),
|
||||
SENSOR(VREG_1V2_TEMP, "1.2V regulator temp.", TEMP, -1),
|
||||
SENSOR(CONTROLLER_VPTAT,
|
||||
"Controller PTAT voltage (int. ADC)", IN, -1),
|
||||
SENSOR(CONTROLLER_INTERNAL_TEMP,
|
||||
"Controller die temp. (int. ADC)", TEMP, -1),
|
||||
SENSOR(CONTROLLER_VPTAT_EXTADC,
|
||||
"Controller PTAT voltage (ext. ADC)", IN, -1),
|
||||
SENSOR(CONTROLLER_INTERNAL_TEMP_EXTADC,
|
||||
"Controller die temp. (ext. ADC)", TEMP, -1),
|
||||
SENSOR(AMBIENT_TEMP, "Ambient temp.", TEMP, -1),
|
||||
SENSOR(AIRFLOW, "Air flow raw", IN, -1),
|
||||
SENSOR(VDD08D_VSS08D_CSR, "0.9V die (int. ADC)", IN, -1),
|
||||
SENSOR(VDD08D_VSS08D_CSR_EXTADC, "0.9V die (ext. ADC)", IN, -1),
|
||||
SENSOR(HOTPOINT_TEMP, "Controller board temp. (hotpoint)", TEMP, -1),
|
||||
#undef SENSOR
|
||||
};
|
||||
|
||||
/* Human-readable names for MC_CMD_SENSOR_STATE_* values, indexed by state */
static const char *const sensor_status_names[] = {
	[MC_CMD_SENSOR_STATE_OK] = "OK",
	[MC_CMD_SENSOR_STATE_WARNING] = "Warning",
	[MC_CMD_SENSOR_STATE_FATAL] = "Fatal",
	[MC_CMD_SENSOR_STATE_BROKEN] = "Device failure",
	[MC_CMD_SENSOR_STATE_NO_READING] = "No reading",
};
|
||||
|
||||
/* Log an MCDI sensor event from the MC.  Tolerates sensor types and
 * states the driver does not know about: unknown types get a placeholder
 * name and unknown states a placeholder description, rather than reading
 * past the end of the lookup tables.
 */
void efx_siena_mcdi_sensor_event(struct efx_nic *efx, efx_qword_t *ev)
{
	unsigned int type, state, value;
	enum efx_hwmon_type hwmon_type = EFX_HWMON_UNKNOWN;
	const char *name = NULL, *state_txt, *unit;

	type = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_MONITOR);
	state = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_STATE);
	value = EFX_QWORD_FIELD(*ev, MCDI_EVENT_SENSOREVT_VALUE);

	/* Deal gracefully with the board having more drivers than we
	 * know about, but do not expect new sensor states. */
	if (type < ARRAY_SIZE(efx_mcdi_sensor_type)) {
		name = efx_mcdi_sensor_type[type].label;
		hwmon_type = efx_mcdi_sensor_type[type].hwmon_type;
	}
	if (!name)
		name = "No sensor name available";
	EFX_WARN_ON_PARANOID(state >= ARRAY_SIZE(sensor_status_names));
	/* Guard the table lookup: the WARN above does not stop execution,
	 * so a bogus state from firmware must not index out of bounds.
	 */
	if (state < ARRAY_SIZE(sensor_status_names))
		state_txt = sensor_status_names[state];
	else
		state_txt = "Unknown state";
	EFX_WARN_ON_PARANOID(hwmon_type >= EFX_HWMON_TYPES_COUNT);
	unit = efx_hwmon_unit[hwmon_type];
	if (!unit)
		unit = "";

	netif_err(efx, hw, efx->net_dev,
		  "Sensor %d (%s) reports condition '%s' for value %d%s\n",
		  type, name, state_txt, value, unit);
}
|
||||
|
||||
#ifdef CONFIG_SFC_MCDI_MON
|
||||
|
||||
/* One sysfs attribute (value, limit, alarm or label) for one sensor */
struct efx_mcdi_mon_attribute {
	struct device_attribute dev_attr;
	unsigned int index;		/* index into the sensor DMA buffer */
	unsigned int type;		/* MC_CMD_SENSOR_* type */
	enum efx_hwmon_type hwmon_type;	/* hwmon class, for unit scaling */
	unsigned int limit_value;	/* static limit reported by the MC */
	char name[12];			/* sysfs file name, e.g. "temp1_input" */
};
|
||||
|
||||
/* Ask the MC to DMA a fresh snapshot of all sensor readings into the
 * hwmon DMA buffer.  On success, records the refresh time so callers
 * can rate-limit updates.  Returns 0 or a negative error code.
 * Caller must hold hwmon->update_lock.
 */
static int efx_mcdi_mon_update(struct efx_nic *efx)
{
	struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
	MCDI_DECLARE_BUF(inbuf, MC_CMD_READ_SENSORS_EXT_IN_LEN);
	int rc;

	MCDI_SET_QWORD(inbuf, READ_SENSORS_EXT_IN_DMA_ADDR,
		       hwmon->dma_buf.dma_addr);
	MCDI_SET_DWORD(inbuf, READ_SENSORS_EXT_IN_LENGTH, hwmon->dma_buf.len);

	rc = efx_siena_mcdi_rpc(efx, MC_CMD_READ_SENSORS,
				inbuf, sizeof(inbuf), NULL, 0, NULL);
	if (rc == 0)
		hwmon->last_update = jiffies;
	return rc;
}
|
||||
|
||||
/* Fetch one sensor entry from the hwmon DMA buffer, refreshing the
 * buffer first if the cached snapshot is more than a second old.
 * Note: *entry is filled in even when the refresh fails (it then holds
 * the stale cached value); the error code tells the caller.
 */
static int efx_mcdi_mon_get_entry(struct device *dev, unsigned int index,
				  efx_dword_t *entry)
{
	struct efx_nic *efx = dev_get_drvdata(dev->parent);
	struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
	int rc;

	BUILD_BUG_ON(MC_CMD_READ_SENSORS_OUT_LEN != 0);

	mutex_lock(&hwmon->update_lock);

	/* Use cached value if last update was < 1 s ago */
	if (time_before(jiffies, hwmon->last_update + HZ))
		rc = 0;
	else
		rc = efx_mcdi_mon_update(efx);

	/* Copy out the requested entry */
	*entry = ((efx_dword_t *)hwmon->dma_buf.addr)[index];

	mutex_unlock(&hwmon->update_lock);

	return rc;
}
|
||||
|
||||
static ssize_t efx_mcdi_mon_show_value(struct device *dev,
|
||||
struct device_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
struct efx_mcdi_mon_attribute *mon_attr =
|
||||
container_of(attr, struct efx_mcdi_mon_attribute, dev_attr);
|
||||
efx_dword_t entry;
|
||||
unsigned int value, state;
|
||||
int rc;
|
||||
|
||||
rc = efx_mcdi_mon_get_entry(dev, mon_attr->index, &entry);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
state = EFX_DWORD_FIELD(entry, MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE);
|
||||
if (state == MC_CMD_SENSOR_STATE_NO_READING)
|
||||
return -EBUSY;
|
||||
|
||||
value = EFX_DWORD_FIELD(entry, MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_VALUE);
|
||||
|
||||
switch (mon_attr->hwmon_type) {
|
||||
case EFX_HWMON_TEMP:
|
||||
/* Convert temperature from degrees to milli-degrees Celsius */
|
||||
value *= 1000;
|
||||
break;
|
||||
case EFX_HWMON_POWER:
|
||||
/* Convert power from watts to microwatts */
|
||||
value *= 1000000;
|
||||
break;
|
||||
default:
|
||||
/* No conversion needed */
|
||||
break;
|
||||
}
|
||||
|
||||
return sprintf(buf, "%u\n", value);
|
||||
}
|
||||
|
||||
static ssize_t efx_mcdi_mon_show_limit(struct device *dev,
|
||||
struct device_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
struct efx_mcdi_mon_attribute *mon_attr =
|
||||
container_of(attr, struct efx_mcdi_mon_attribute, dev_attr);
|
||||
unsigned int value;
|
||||
|
||||
value = mon_attr->limit_value;
|
||||
|
||||
switch (mon_attr->hwmon_type) {
|
||||
case EFX_HWMON_TEMP:
|
||||
/* Convert temperature from degrees to milli-degrees Celsius */
|
||||
value *= 1000;
|
||||
break;
|
||||
case EFX_HWMON_POWER:
|
||||
/* Convert power from watts to microwatts */
|
||||
value *= 1000000;
|
||||
break;
|
||||
default:
|
||||
/* No conversion needed */
|
||||
break;
|
||||
}
|
||||
|
||||
return sprintf(buf, "%u\n", value);
|
||||
}
|
||||
|
||||
/* sysfs show handler for a sensor's *_alarm attribute.
 * Prints 1 if the sensor is in any state other than OK, 0 otherwise.
 */
static ssize_t efx_mcdi_mon_show_alarm(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct efx_mcdi_mon_attribute *mon_attr =
		container_of(attr, struct efx_mcdi_mon_attribute, dev_attr);
	efx_dword_t entry;
	int state;
	int rc;

	rc = efx_mcdi_mon_get_entry(dev, mon_attr->index, &entry);
	if (rc)
		return rc;

	state = EFX_DWORD_FIELD(entry, MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_STATE);
	return sprintf(buf, "%d\n", state != MC_CMD_SENSOR_STATE_OK);
}
|
||||
|
||||
static ssize_t efx_mcdi_mon_show_label(struct device *dev,
|
||||
struct device_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
struct efx_mcdi_mon_attribute *mon_attr =
|
||||
container_of(attr, struct efx_mcdi_mon_attribute, dev_attr);
|
||||
return sprintf(buf, "%s\n",
|
||||
efx_mcdi_sensor_type[mon_attr->type].label);
|
||||
}
|
||||
|
||||
static void
|
||||
efx_mcdi_mon_add_attr(struct efx_nic *efx, const char *name,
|
||||
ssize_t (*reader)(struct device *,
|
||||
struct device_attribute *, char *),
|
||||
unsigned int index, unsigned int type,
|
||||
unsigned int limit_value)
|
||||
{
|
||||
struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
|
||||
struct efx_mcdi_mon_attribute *attr = &hwmon->attrs[hwmon->n_attrs];
|
||||
|
||||
strlcpy(attr->name, name, sizeof(attr->name));
|
||||
attr->index = index;
|
||||
attr->type = type;
|
||||
if (type < ARRAY_SIZE(efx_mcdi_sensor_type))
|
||||
attr->hwmon_type = efx_mcdi_sensor_type[type].hwmon_type;
|
||||
else
|
||||
attr->hwmon_type = EFX_HWMON_UNKNOWN;
|
||||
attr->limit_value = limit_value;
|
||||
sysfs_attr_init(&attr->dev_attr.attr);
|
||||
attr->dev_attr.attr.name = attr->name;
|
||||
attr->dev_attr.attr.mode = 0444;
|
||||
attr->dev_attr.show = reader;
|
||||
hwmon->group.attrs[hwmon->n_attrs++] = &attr->dev_attr.attr;
|
||||
}
|
||||
|
||||
/* Discover the MC's sensors and register a hwmon device exposing them.
 *
 * Sensor types are reported 32 per "page" via MC_CMD_SENSOR_INFO; a
 * first pass counts the sensors, then a second pass walks each page
 * again and creates up to six sysfs attributes per sensor (input, min,
 * max, crit, alarm, label).
 *
 * Returns 0 on success (including the no-sensors case, where no hwmon
 * device is created) or a negative error; on failure all partially
 * allocated state is torn down via efx_siena_mcdi_mon_remove().
 */
int efx_siena_mcdi_mon_probe(struct efx_nic *efx)
{
	unsigned int n_temp = 0, n_cool = 0, n_in = 0, n_curr = 0, n_power = 0;
	struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
	MCDI_DECLARE_BUF(inbuf, MC_CMD_SENSOR_INFO_EXT_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_SENSOR_INFO_OUT_LENMAX);
	unsigned int n_pages, n_sensors, n_attrs, page;
	size_t outlen;
	char name[12];
	u32 mask;
	int rc, i, j, type;

	/* Find out how many sensors are present */
	n_sensors = 0;
	page = 0;
	do {
		MCDI_SET_DWORD(inbuf, SENSOR_INFO_EXT_IN_PAGE, page);

		rc = efx_siena_mcdi_rpc(efx, MC_CMD_SENSOR_INFO, inbuf,
					sizeof(inbuf), outbuf, sizeof(outbuf),
					&outlen);
		if (rc)
			return rc;
		if (outlen < MC_CMD_SENSOR_INFO_OUT_LENMIN)
			return -EIO;

		/* The PAGE0_NEXT bit flags another page, not a sensor */
		mask = MCDI_DWORD(outbuf, SENSOR_INFO_OUT_MASK);
		n_sensors += hweight32(mask & ~(1 << MC_CMD_SENSOR_PAGE0_NEXT));
		++page;
	} while (mask & (1 << MC_CMD_SENSOR_PAGE0_NEXT));
	n_pages = page;

	/* Don't create a device if there are none */
	if (n_sensors == 0)
		return 0;

	/* DMA buffer the MC periodically fills with per-sensor entries */
	rc = efx_siena_alloc_buffer(efx, &hwmon->dma_buf,
				    n_sensors * MC_CMD_SENSOR_VALUE_ENTRY_TYPEDEF_LEN,
				    GFP_KERNEL);
	if (rc)
		return rc;

	mutex_init(&hwmon->update_lock);
	efx_mcdi_mon_update(efx);

	/* Allocate space for the maximum possible number of
	 * attributes for this set of sensors:
	 * value, min, max, crit, alarm and label for each sensor.
	 */
	n_attrs = 6 * n_sensors;
	hwmon->attrs = kcalloc(n_attrs, sizeof(*hwmon->attrs), GFP_KERNEL);
	if (!hwmon->attrs) {
		rc = -ENOMEM;
		goto fail;
	}
	hwmon->group.attrs = kcalloc(n_attrs + 1, sizeof(struct attribute *),
				     GFP_KERNEL);
	if (!hwmon->group.attrs) {
		rc = -ENOMEM;
		goto fail;
	}

	/* i = DMA entry index, j = index within current page, type =
	 * global sensor type number (32 per page).
	 */
	for (i = 0, j = -1, type = -1; ; i++) {
		enum efx_hwmon_type hwmon_type;
		const char *hwmon_prefix;
		unsigned hwmon_index;
		u16 min1, max1, min2, max2;

		/* Find next sensor type or exit if there is none */
		do {
			type++;

			/* Crossed into a new page: re-fetch its mask */
			if ((type % 32) == 0) {
				page = type / 32;
				j = -1;
				if (page == n_pages)
					goto hwmon_register;

				MCDI_SET_DWORD(inbuf, SENSOR_INFO_EXT_IN_PAGE,
					       page);
				rc = efx_siena_mcdi_rpc(efx, MC_CMD_SENSOR_INFO,
							inbuf, sizeof(inbuf),
							outbuf, sizeof(outbuf),
							&outlen);
				if (rc)
					goto fail;
				if (outlen < MC_CMD_SENSOR_INFO_OUT_LENMIN) {
					rc = -EIO;
					goto fail;
				}

				mask = (MCDI_DWORD(outbuf,
						   SENSOR_INFO_OUT_MASK) &
					~(1 << MC_CMD_SENSOR_PAGE0_NEXT));

				/* Check again for short response */
				if (outlen <
				    MC_CMD_SENSOR_INFO_OUT_LEN(hweight32(mask))) {
					rc = -EIO;
					goto fail;
				}
			}
		} while (!(mask & (1 << type % 32)));
		j++;

		if (type < ARRAY_SIZE(efx_mcdi_sensor_type)) {
			hwmon_type = efx_mcdi_sensor_type[type].hwmon_type;

			/* Skip sensors specific to a different port */
			if (hwmon_type != EFX_HWMON_UNKNOWN &&
			    efx_mcdi_sensor_type[type].port >= 0 &&
			    efx_mcdi_sensor_type[type].port !=
			    efx_port_num(efx))
				continue;
		} else {
			hwmon_type = EFX_HWMON_UNKNOWN;
		}

		/* NOTE: the default label sits mid-switch so that unknown
		 * sensors fall into the "in" (voltage) namespace; this is
		 * deliberate, not a misplaced case.
		 */
		switch (hwmon_type) {
		case EFX_HWMON_TEMP:
			hwmon_prefix = "temp";
			hwmon_index = ++n_temp; /* 1-based */
			break;
		case EFX_HWMON_COOL:
			/* This is likely to be a heatsink, but there
			 * is no convention for representing cooling
			 * devices other than fans.
			 */
			hwmon_prefix = "fan";
			hwmon_index = ++n_cool; /* 1-based */
			break;
		default:
			hwmon_prefix = "in";
			hwmon_index = n_in++; /* 0-based */
			break;
		case EFX_HWMON_CURR:
			hwmon_prefix = "curr";
			hwmon_index = ++n_curr; /* 1-based */
			break;
		case EFX_HWMON_POWER:
			hwmon_prefix = "power";
			hwmon_index = ++n_power; /* 1-based */
			break;
		}

		min1 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY,
					SENSOR_INFO_ENTRY, j, MIN1);
		max1 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY,
					SENSOR_INFO_ENTRY, j, MAX1);
		min2 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY,
					SENSOR_INFO_ENTRY, j, MIN2);
		max2 = MCDI_ARRAY_FIELD(outbuf, SENSOR_ENTRY,
					SENSOR_INFO_ENTRY, j, MAX2);

		/* min1 == max1 means the sensor has no numeric reading;
		 * only the alarm (and maybe label) files are created.
		 */
		if (min1 != max1) {
			snprintf(name, sizeof(name), "%s%u_input",
				 hwmon_prefix, hwmon_index);
			efx_mcdi_mon_add_attr(
				efx, name, efx_mcdi_mon_show_value, i, type, 0);

			if (hwmon_type != EFX_HWMON_POWER) {
				snprintf(name, sizeof(name), "%s%u_min",
					 hwmon_prefix, hwmon_index);
				efx_mcdi_mon_add_attr(
					efx, name, efx_mcdi_mon_show_limit,
					i, type, min1);
			}

			snprintf(name, sizeof(name), "%s%u_max",
				 hwmon_prefix, hwmon_index);
			efx_mcdi_mon_add_attr(
				efx, name, efx_mcdi_mon_show_limit,
				i, type, max1);

			if (min2 != max2) {
				/* Assume max2 is critical value.
				 * But we have no good way to expose min2.
				 */
				snprintf(name, sizeof(name), "%s%u_crit",
					 hwmon_prefix, hwmon_index);
				efx_mcdi_mon_add_attr(
					efx, name, efx_mcdi_mon_show_limit,
					i, type, max2);
			}
		}

		snprintf(name, sizeof(name), "%s%u_alarm",
			 hwmon_prefix, hwmon_index);
		efx_mcdi_mon_add_attr(
			efx, name, efx_mcdi_mon_show_alarm, i, type, 0);

		if (type < ARRAY_SIZE(efx_mcdi_sensor_type) &&
		    efx_mcdi_sensor_type[type].label) {
			snprintf(name, sizeof(name), "%s%u_label",
				 hwmon_prefix, hwmon_index);
			efx_mcdi_mon_add_attr(
				efx, name, efx_mcdi_mon_show_label, i, type, 0);
		}
	}

hwmon_register:
	hwmon->groups[0] = &hwmon->group;
	hwmon->device = hwmon_device_register_with_groups(&efx->pci_dev->dev,
							  KBUILD_MODNAME, NULL,
							  hwmon->groups);
	if (IS_ERR(hwmon->device)) {
		rc = PTR_ERR(hwmon->device);
		goto fail;
	}

	return 0;

fail:
	efx_siena_mcdi_mon_remove(efx);
	return rc;
}
|
||||
|
||||
void efx_siena_mcdi_mon_remove(struct efx_nic *efx)
|
||||
{
|
||||
struct efx_mcdi_mon *hwmon = efx_mcdi_mon(efx);
|
||||
|
||||
if (hwmon->device)
|
||||
hwmon_device_unregister(hwmon->device);
|
||||
kfree(hwmon->attrs);
|
||||
kfree(hwmon->group.attrs);
|
||||
efx_siena_free_buffer(efx, &hwmon->dma_buf);
|
||||
}
|
||||
|
||||
#endif /* CONFIG_SFC_MCDI_MON */
|
110
drivers/net/ethernet/sfc/siena/mcdi_port.c
Normal file
110
drivers/net/ethernet/sfc/siena/mcdi_port.c
Normal file
@ -0,0 +1,110 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/****************************************************************************
|
||||
* Driver for Solarflare network controllers and boards
|
||||
* Copyright 2009-2013 Solarflare Communications Inc.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Driver for PHY related operations via MCDI.
|
||||
*/
|
||||
|
||||
#include <linux/slab.h>
|
||||
#include "efx.h"
|
||||
#include "mcdi_port.h"
|
||||
#include "mcdi.h"
|
||||
#include "mcdi_pcol.h"
|
||||
#include "nic.h"
|
||||
#include "selftest.h"
|
||||
#include "mcdi_port_common.h"
|
||||
|
||||
/* MDIO register read tunnelled over MCDI.
 * Returns the 16-bit register value on success, -EIO if the firmware
 * reports a bad MDIO status word, or a negative MCDI transport error.
 */
static int efx_mcdi_mdio_read(struct net_device *net_dev,
			      int prtad, int devad, u16 addr)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	MCDI_DECLARE_BUF(inbuf, MC_CMD_MDIO_READ_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_MDIO_READ_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, MDIO_READ_IN_BUS, efx->mdio_bus);
	MCDI_SET_DWORD(inbuf, MDIO_READ_IN_PRTAD, prtad);
	MCDI_SET_DWORD(inbuf, MDIO_READ_IN_DEVAD, devad);
	MCDI_SET_DWORD(inbuf, MDIO_READ_IN_ADDR, addr);

	rc = efx_siena_mcdi_rpc(efx, MC_CMD_MDIO_READ, inbuf, sizeof(inbuf),
				outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;

	/* The RPC can succeed while the MDIO transaction itself failed */
	if (MCDI_DWORD(outbuf, MDIO_READ_OUT_STATUS) != MC_CMD_MDIO_STATUS_GOOD)
		return -EIO;

	return (u16)MCDI_DWORD(outbuf, MDIO_READ_OUT_VALUE);
}
|
||||
|
||||
/* MDIO register write tunnelled over MCDI.
 * Returns 0 on success, -EIO if the firmware reports a bad MDIO
 * status word, or a negative MCDI transport error.
 */
static int efx_mcdi_mdio_write(struct net_device *net_dev,
			       int prtad, int devad, u16 addr, u16 value)
{
	struct efx_nic *efx = netdev_priv(net_dev);
	MCDI_DECLARE_BUF(inbuf, MC_CMD_MDIO_WRITE_IN_LEN);
	MCDI_DECLARE_BUF(outbuf, MC_CMD_MDIO_WRITE_OUT_LEN);
	size_t outlen;
	int rc;

	MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_BUS, efx->mdio_bus);
	MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_PRTAD, prtad);
	MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_DEVAD, devad);
	MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_ADDR, addr);
	MCDI_SET_DWORD(inbuf, MDIO_WRITE_IN_VALUE, value);

	rc = efx_siena_mcdi_rpc(efx, MC_CMD_MDIO_WRITE, inbuf, sizeof(inbuf),
				outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;

	/* The RPC can succeed while the MDIO transaction itself failed */
	if (MCDI_DWORD(outbuf, MDIO_WRITE_OUT_STATUS) !=
	    MC_CMD_MDIO_STATUS_GOOD)
		return -EIO;

	return 0;
}
|
||||
|
||||
/* Query MC_CMD_GET_LINK and report whether the MAC is faulted.
 *
 * Fails safe: if the MCDI query itself fails we report a fault (true)
 * rather than claim a healthy MAC we could not actually check.
 */
bool efx_siena_mcdi_mac_check_fault(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_LINK_OUT_LEN);
	size_t outlength;
	int rc;

	/* GET_LINK takes no input payload */
	BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0);

	rc = efx_siena_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0,
				outbuf, sizeof(outbuf), &outlength);
	if (rc)
		return true;

	return MCDI_DWORD(outbuf, GET_LINK_OUT_MAC_FAULT) != 0;
}
|
||||
|
||||
/* Set up PHY access (MDIO tunnelled over MCDI) and MAC statistics for
 * a port.  Returns 0 on success or a negative error; nothing needs to
 * be unwound here on PHY probe failure.
 */
int efx_siena_mcdi_port_probe(struct efx_nic *efx)
{
	int rc;

	/* Set up MDIO structure for PHY */
	efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	efx->mdio.mdio_read = efx_mcdi_mdio_read;
	efx->mdio.mdio_write = efx_mcdi_mdio_write;

	/* Fill out MDIO structure, loopback modes, and initial link state */
	rc = efx_siena_mcdi_phy_probe(efx);
	if (rc != 0)
		return rc;

	return efx_siena_mcdi_mac_init_stats(efx);
}
|
||||
|
||||
/* Undo efx_siena_mcdi_port_probe(): release the PHY data and the MAC
 * statistics buffer.
 */
void efx_siena_mcdi_port_remove(struct efx_nic *efx)
{
	efx_siena_mcdi_phy_remove(efx);
	efx_siena_mcdi_mac_fini_stats(efx);
}
|
17
drivers/net/ethernet/sfc/siena/mcdi_port.h
Normal file
17
drivers/net/ethernet/sfc/siena/mcdi_port.h
Normal file
@ -0,0 +1,17 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/****************************************************************************
|
||||
* Driver for Solarflare network controllers and boards
|
||||
* Copyright 2008-2013 Solarflare Communications Inc.
|
||||
* Copyright 2019-2020 Xilinx Inc.
|
||||
*/
|
||||
|
||||
#ifndef EFX_MCDI_PORT_H
|
||||
#define EFX_MCDI_PORT_H
|
||||
|
||||
#include "net_driver.h"
|
||||
|
||||
bool efx_siena_mcdi_mac_check_fault(struct efx_nic *efx);
|
||||
int efx_siena_mcdi_port_probe(struct efx_nic *efx);
|
||||
void efx_siena_mcdi_port_remove(struct efx_nic *efx);
|
||||
|
||||
#endif /* EFX_MCDI_PORT_H */
|
1282
drivers/net/ethernet/sfc/siena/mcdi_port_common.c
Normal file
1282
drivers/net/ethernet/sfc/siena/mcdi_port_common.c
Normal file
File diff suppressed because it is too large
Load Diff
58
drivers/net/ethernet/sfc/siena/mcdi_port_common.h
Normal file
58
drivers/net/ethernet/sfc/siena/mcdi_port_common.h
Normal file
@ -0,0 +1,58 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/****************************************************************************
|
||||
* Driver for Solarflare network controllers and boards
|
||||
* Copyright 2018 Solarflare Communications Inc.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 as published
|
||||
* by the Free Software Foundation, incorporated herein by reference.
|
||||
*/
|
||||
#ifndef EFX_MCDI_PORT_COMMON_H
|
||||
#define EFX_MCDI_PORT_COMMON_H
|
||||
|
||||
#include "net_driver.h"
|
||||
#include "mcdi.h"
|
||||
#include "mcdi_pcol.h"
|
||||
|
||||
/* PHY information cached at probe time.
 * NOTE(review): field names mirror the MC_CMD_GET_PHY_CFG_OUT_* fields
 * in mcdi_pcol.h — confirm individual semantics there.
 */
struct efx_mcdi_phy_data {
	u32 flags;		/* PHY capability/feature flag bits */
	u32 type;		/* PHY type identifier */
	u32 supported_cap;	/* supported link capability bitmask */
	u32 channel;		/* channel/lane number reported by the MC */
	u32 port;		/* physical port number */
	u32 stats_mask;		/* mask of statistics the PHY provides */
	u8 name[20];		/* PHY name string */
	u32 media;		/* media type (e.g. fibre/BASE-T) */
	u32 mmd_mask;		/* mask of MDIO MMDs present */
	u8 revision[20];	/* PHY firmware/hardware revision string */
	u32 forced_cap;		/* capabilities forced rather than advertised */
};
|
||||
|
||||
void efx_siena_link_set_advertising(struct efx_nic *efx,
|
||||
const unsigned long *advertising);
|
||||
bool efx_siena_mcdi_phy_poll(struct efx_nic *efx);
|
||||
int efx_siena_mcdi_phy_probe(struct efx_nic *efx);
|
||||
void efx_siena_mcdi_phy_remove(struct efx_nic *efx);
|
||||
void efx_siena_mcdi_phy_get_link_ksettings(struct efx_nic *efx,
|
||||
struct ethtool_link_ksettings *cmd);
|
||||
int efx_siena_mcdi_phy_set_link_ksettings(struct efx_nic *efx,
|
||||
const struct ethtool_link_ksettings *cmd);
|
||||
int efx_siena_mcdi_phy_get_fecparam(struct efx_nic *efx,
|
||||
struct ethtool_fecparam *fec);
|
||||
int efx_siena_mcdi_phy_set_fecparam(struct efx_nic *efx,
|
||||
const struct ethtool_fecparam *fec);
|
||||
int efx_siena_mcdi_phy_test_alive(struct efx_nic *efx);
|
||||
int efx_siena_mcdi_port_reconfigure(struct efx_nic *efx);
|
||||
int efx_siena_mcdi_phy_run_tests(struct efx_nic *efx, int *results,
|
||||
unsigned int flags);
|
||||
const char *efx_siena_mcdi_phy_test_name(struct efx_nic *efx,
|
||||
unsigned int index);
|
||||
int efx_siena_mcdi_phy_get_module_eeprom(struct efx_nic *efx,
|
||||
struct ethtool_eeprom *ee, u8 *data);
|
||||
int efx_siena_mcdi_phy_get_module_info(struct efx_nic *efx,
|
||||
struct ethtool_modinfo *modinfo);
|
||||
int efx_siena_mcdi_set_mac(struct efx_nic *efx);
|
||||
int efx_siena_mcdi_mac_init_stats(struct efx_nic *efx);
|
||||
void efx_siena_mcdi_mac_fini_stats(struct efx_nic *efx);
|
||||
|
||||
#endif
|
124
drivers/net/ethernet/sfc/siena/mtd.c
Normal file
124
drivers/net/ethernet/sfc/siena/mtd.c
Normal file
@ -0,0 +1,124 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/****************************************************************************
|
||||
* Driver for Solarflare network controllers and boards
|
||||
* Copyright 2005-2006 Fen Systems Ltd.
|
||||
* Copyright 2006-2013 Solarflare Communications Inc.
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/mtd/mtd.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/rtnetlink.h>
|
||||
|
||||
#include "net_driver.h"
|
||||
#include "efx.h"
|
||||
|
||||
#define to_efx_mtd_partition(mtd) \
|
||||
container_of(mtd, struct efx_mtd_partition, mtd)
|
||||
|
||||
/* MTD interface */
|
||||
|
||||
/* MTD core erase hook: delegate to the NIC-type-specific erase
 * implementation for the region described by @erase.
 */
static int efx_mtd_erase(struct mtd_info *mtd, struct erase_info *erase)
{
	struct efx_nic *efx = mtd->priv;

	return efx->type->mtd_erase(mtd, erase->addr, erase->len);
}
|
||||
|
||||
/* MTD core sync hook: flush pending writes via the NIC-type hook.
 * The MTD ->_sync() API returns void, so failures can only be logged.
 */
static void efx_mtd_sync(struct mtd_info *mtd)
{
	struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
	struct efx_nic *efx = mtd->priv;
	int rc;

	rc = efx->type->mtd_sync(mtd);
	if (rc)
		pr_err("%s: %s sync failed (%d)\n",
		       part->name, part->dev_type_name, rc);
}
|
||||
|
||||
/* Unregister one MTD partition and unlink it from efx->mtd_list.
 * mtd_device_unregister() returns -EBUSY while userspace still holds
 * the device open, so retry once a second until it succeeds.
 */
static void efx_siena_mtd_remove_partition(struct efx_mtd_partition *part)
{
	int rc;

	for (;;) {
		rc = mtd_device_unregister(&part->mtd);
		if (rc != -EBUSY)
			break;
		ssleep(1);	/* give userspace time to close the device */
	}
	WARN_ON(rc);	/* any error other than -EBUSY is unexpected */
	list_del(&part->node);
}
|
||||
|
||||
/* Register @n_parts MTD partitions with the MTD core.
 *
 * @parts:       first element of a caller-allocated array (one block);
 *               once registered it is owned by the driver until
 *               efx_siena_mtd_remove() frees it.
 * @sizeof_part: size of each (possibly type-extended) element, used to
 *               step through the array without knowing its exact type.
 *
 * On failure every partition registered so far is unregistered again.
 * Returns 0 on success or the error from mtd_device_register()
 * (previously this path always returned -ENOMEM, discarding the real
 * error code).
 */
int efx_siena_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts,
		      size_t n_parts, size_t sizeof_part)
{
	struct efx_mtd_partition *part;
	size_t i;
	int rc;

	for (i = 0; i < n_parts; i++) {
		part = (struct efx_mtd_partition *)((char *)parts +
						    i * sizeof_part);

		part->mtd.writesize = 1;

		if (!(part->mtd.flags & MTD_NO_ERASE))
			part->mtd.flags |= MTD_WRITEABLE;

		part->mtd.owner = THIS_MODULE;
		part->mtd.priv = efx;
		part->mtd.name = part->name;
		part->mtd._erase = efx_mtd_erase;
		part->mtd._read = efx->type->mtd_read;
		part->mtd._write = efx->type->mtd_write;
		part->mtd._sync = efx_mtd_sync;

		efx->type->mtd_rename(part);

		rc = mtd_device_register(&part->mtd, NULL, 0);
		if (rc)
			goto fail;

		/* Add to list in order - efx_siena_mtd_remove() depends on this */
		list_add_tail(&part->node, &efx->mtd_list);
	}

	return 0;

fail:
	/* Unwind only the partitions that were registered */
	while (i--) {
		part = (struct efx_mtd_partition *)((char *)parts +
						    i * sizeof_part);
		efx_siena_mtd_remove_partition(part);
	}
	/* Propagate the real registration error instead of assuming -ENOMEM */
	return rc;
}
|
||||
|
||||
/* Unregister and free all MTD partitions for this NIC.
 *
 * The partitions were allocated as a single array and registered in
 * array order, so the first list entry is also the start of the
 * allocation; a single kfree() releases all of them after each has
 * been unregistered.
 */
void efx_siena_mtd_remove(struct efx_nic *efx)
{
	struct efx_mtd_partition *parts, *part, *next;

	/* Must not still be visible to userspace via the net device */
	WARN_ON(efx_dev_registered(efx));

	if (list_empty(&efx->mtd_list))
		return;

	parts = list_first_entry(&efx->mtd_list, struct efx_mtd_partition,
				 node);

	list_for_each_entry_safe(part, next, &efx->mtd_list, node)
		efx_siena_mtd_remove_partition(part);

	kfree(parts);	/* frees the whole partition array in one go */
}
|
||||
|
||||
/* Re-derive each partition's name via the NIC-type hook (e.g. after
 * the net device was renamed).  Caller must hold the RTNL.
 */
void efx_siena_mtd_rename(struct efx_nic *efx)
{
	struct efx_mtd_partition *part;

	ASSERT_RTNL();

	list_for_each_entry(part, &efx->mtd_list, node)
		efx->type->mtd_rename(part);
}
|
1715
drivers/net/ethernet/sfc/siena/net_driver.h
Normal file
1715
drivers/net/ethernet/sfc/siena/net_driver.h
Normal file
File diff suppressed because it is too large
Load Diff
530
drivers/net/ethernet/sfc/siena/nic.c
Normal file
530
drivers/net/ethernet/sfc/siena/nic.c
Normal file
@ -0,0 +1,530 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/****************************************************************************
|
||||
* Driver for Solarflare network controllers and boards
|
||||
* Copyright 2005-2006 Fen Systems Ltd.
|
||||
* Copyright 2006-2013 Solarflare Communications Inc.
|
||||
*/
|
||||
|
||||
#include <linux/bitops.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/seq_file.h>
|
||||
#include <linux/cpu_rmap.h>
|
||||
#include "net_driver.h"
|
||||
#include "bitfield.h"
|
||||
#include "efx.h"
|
||||
#include "nic.h"
|
||||
#include "farch_regs.h"
|
||||
#include "io.h"
|
||||
#include "workarounds.h"
|
||||
#include "mcdi_pcol.h"
|
||||
|
||||
/**************************************************************************
|
||||
*
|
||||
* Generic buffer handling
|
||||
* These buffers are used for interrupt status, MAC stats, etc.
|
||||
*
|
||||
**************************************************************************/
|
||||
|
||||
/* Allocate a DMA-coherent buffer of @len bytes for @efx.
 * On success buffer->addr, ->dma_addr and ->len are filled in; returns
 * -ENOMEM on failure.  Free with efx_siena_free_buffer().
 */
int efx_siena_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
			   unsigned int len, gfp_t gfp_flags)
{
	buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
					  &buffer->dma_addr, gfp_flags);
	if (!buffer->addr)
		return -ENOMEM;
	buffer->len = len;
	return 0;
}
|
||||
|
||||
void efx_siena_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer)
|
||||
{
|
||||
if (buffer->addr) {
|
||||
dma_free_coherent(&efx->pci_dev->dev, buffer->len,
|
||||
buffer->addr, buffer->dma_addr);
|
||||
buffer->addr = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
/* Check whether an event is present in the eventq at the current
 * read pointer.  Only useful for self-test.
 */
bool efx_siena_event_present(struct efx_channel *channel)
{
	return efx_event_present(efx_event(channel, channel->eventq_read_ptr));
}
|
||||
|
||||
/* Kick off an event-queue self-test on @channel.
 * event_test_cpu is reset to -1 before the test event is generated;
 * smp_wmb() orders the reset against the handler that later records
 * which CPU received the event.
 */
void efx_siena_event_test_start(struct efx_channel *channel)
{
	channel->event_test_cpu = -1;
	smp_wmb();	/* reset must be visible before the event can fire */
	channel->efx->type->ev_test_generate(channel);
}
|
||||
|
||||
/* Kick off an interrupt self-test: reset the last-IRQ-CPU marker,
 * then ask the NIC type to generate a test interrupt.
 * smp_wmb() orders the reset against the IRQ handler's store.
 */
int efx_siena_irq_test_start(struct efx_nic *efx)
{
	efx->last_irq_cpu = -1;
	smp_wmb();	/* reset must be visible before the IRQ can fire */
	return efx->type->irq_test_generate(efx);
}
|
||||
|
||||
/* Hook interrupt handler(s)
 * Try MSI and then legacy interrupts.
 *
 * On failure every IRQ hooked so far is released again (and the RFS
 * CPU rmap freed) before returning the error.
 */
int efx_siena_init_interrupt(struct efx_nic *efx)
{
	struct efx_channel *channel;
	unsigned int n_irqs;
	int rc;

	if (!EFX_INT_MODE_USE_MSI(efx)) {
		/* Legacy (line) interrupt: one shared IRQ for the NIC */
		rc = request_irq(efx->legacy_irq,
				 efx->type->irq_handle_legacy, IRQF_SHARED,
				 efx->name, efx);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed to hook legacy IRQ %d\n",
				  efx->pci_dev->irq);
			goto fail1;
		}
		efx->irqs_hooked = true;
		return 0;
	}

#ifdef CONFIG_RFS_ACCEL
	if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
		/* IRQ -> CPU reverse map used by accelerated RFS */
		efx->net_dev->rx_cpu_rmap =
			alloc_irq_cpu_rmap(efx->n_rx_channels);
		if (!efx->net_dev->rx_cpu_rmap) {
			rc = -ENOMEM;
			goto fail1;
		}
	}
#endif

	/* Hook MSI or MSI-X interrupt */
	n_irqs = 0;
	efx_for_each_channel(channel, efx) {
		rc = request_irq(channel->irq, efx->type->irq_handle_msi,
				 IRQF_PROBE_SHARED, /* Not shared */
				 efx->msi_context[channel->channel].name,
				 &efx->msi_context[channel->channel]);
		if (rc) {
			netif_err(efx, drv, efx->net_dev,
				  "failed to hook IRQ %d\n", channel->irq);
			goto fail2;
		}
		++n_irqs;	/* count hooked IRQs for partial unwind */

#ifdef CONFIG_RFS_ACCEL
		if (efx->interrupt_mode == EFX_INT_MODE_MSIX &&
		    channel->channel < efx->n_rx_channels) {
			rc = irq_cpu_rmap_add(efx->net_dev->rx_cpu_rmap,
					      channel->irq);
			if (rc)
				goto fail2;
		}
#endif
	}

	efx->irqs_hooked = true;
	return 0;

fail2:
	/* Free the rmap, then only the IRQs actually requested */
#ifdef CONFIG_RFS_ACCEL
	free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
	efx->net_dev->rx_cpu_rmap = NULL;
#endif
	efx_for_each_channel(channel, efx) {
		if (n_irqs-- == 0)
			break;
		free_irq(channel->irq, &efx->msi_context[channel->channel]);
	}
fail1:
	return rc;
}
|
||||
|
||||
/* Release all IRQs hooked by efx_siena_init_interrupt().
 * Safe to call if init failed part-way: the RFS rmap is freed
 * unconditionally and irqs_hooked guards the free_irq() calls.
 */
void efx_siena_fini_interrupt(struct efx_nic *efx)
{
	struct efx_channel *channel;

#ifdef CONFIG_RFS_ACCEL
	free_irq_cpu_rmap(efx->net_dev->rx_cpu_rmap);
	efx->net_dev->rx_cpu_rmap = NULL;
#endif

	if (!efx->irqs_hooked)
		return;
	if (EFX_INT_MODE_USE_MSI(efx)) {
		/* Disable MSI/MSI-X interrupts */
		efx_for_each_channel(channel, efx)
			free_irq(channel->irq,
				 &efx->msi_context[channel->channel]);
	} else {
		/* Disable legacy interrupt */
		free_irq(efx->legacy_irq, efx);
	}
	efx->irqs_hooked = false;
}
|
||||
|
||||
/* Register dump */
|
||||
|
||||
/* Hardware revision ranges used to decide whether a register exists on
 * a given NIC.  The F* values cover the Falcon architecture, E* the
 * EF10 architecture.
 */
#define REGISTER_REVISION_FA 1
#define REGISTER_REVISION_FB 2
#define REGISTER_REVISION_FC 3
#define REGISTER_REVISION_FZ 3	/* last Falcon arch revision */
#define REGISTER_REVISION_ED 4
#define REGISTER_REVISION_EZ 4	/* latest EF10 revision */

/* One readable register in the ethtool register dump; packed into
 * bitfields to keep the table small.
 */
struct efx_nic_reg {
	u32 offset:24;
	u32 min_revision:3, max_revision:3;
};

/* Build an efx_nic_reg entry from the register-offset macro naming
 * convention <arch>R_<minrev><maxrev>_<name> (see farch_regs.h).
 */
#define REGISTER(name, arch, min_rev, max_rev) { \
	arch ## R_ ## min_rev ## max_rev ## _ ## name, \
	REGISTER_REVISION_ ## arch ## min_rev, \
	REGISTER_REVISION_ ## arch ## max_rev \
}
#define REGISTER_AA(name) REGISTER(name, F, A, A)
#define REGISTER_AB(name) REGISTER(name, F, A, B)
#define REGISTER_AZ(name) REGISTER(name, F, A, Z)
#define REGISTER_BB(name) REGISTER(name, F, B, B)
#define REGISTER_BZ(name) REGISTER(name, F, B, Z)
#define REGISTER_CZ(name) REGISTER(name, F, C, Z)

/* Registers included in the dump.  Comments record registers that are
 * deliberately excluded: WO = write-only, RC = read-to-clear (reading
 * them would disturb hardware state).
 */
static const struct efx_nic_reg efx_nic_regs[] = {
	REGISTER_AZ(ADR_REGION),
	REGISTER_AZ(INT_EN_KER),
	REGISTER_BZ(INT_EN_CHAR),
	REGISTER_AZ(INT_ADR_KER),
	REGISTER_BZ(INT_ADR_CHAR),
	/* INT_ACK_KER is WO */
	/* INT_ISR0 is RC */
	REGISTER_AZ(HW_INIT),
	REGISTER_CZ(USR_EV_CFG),
	REGISTER_AB(EE_SPI_HCMD),
	REGISTER_AB(EE_SPI_HADR),
	REGISTER_AB(EE_SPI_HDATA),
	REGISTER_AB(EE_BASE_PAGE),
	REGISTER_AB(EE_VPD_CFG0),
	/* EE_VPD_SW_CNTL and EE_VPD_SW_DATA are not used */
	/* PMBX_DBG_IADDR and PBMX_DBG_IDATA are indirect */
	/* PCIE_CORE_INDIRECT is indirect */
	REGISTER_AB(NIC_STAT),
	REGISTER_AB(GPIO_CTL),
	REGISTER_AB(GLB_CTL),
	/* FATAL_INTR_KER and FATAL_INTR_CHAR are partly RC */
	REGISTER_BZ(DP_CTRL),
	REGISTER_AZ(MEM_STAT),
	REGISTER_AZ(CS_DEBUG),
	REGISTER_AZ(ALTERA_BUILD),
	REGISTER_AZ(CSR_SPARE),
	REGISTER_AB(PCIE_SD_CTL0123),
	REGISTER_AB(PCIE_SD_CTL45),
	REGISTER_AB(PCIE_PCS_CTL_STAT),
	/* DEBUG_DATA_OUT is not used */
	/* DRV_EV is WO */
	REGISTER_AZ(EVQ_CTL),
	REGISTER_AZ(EVQ_CNT1),
	REGISTER_AZ(EVQ_CNT2),
	REGISTER_AZ(BUF_TBL_CFG),
	REGISTER_AZ(SRM_RX_DC_CFG),
	REGISTER_AZ(SRM_TX_DC_CFG),
	REGISTER_AZ(SRM_CFG),
	/* BUF_TBL_UPD is WO */
	REGISTER_AZ(SRM_UPD_EVQ),
	REGISTER_AZ(SRAM_PARITY),
	REGISTER_AZ(RX_CFG),
	REGISTER_BZ(RX_FILTER_CTL),
	/* RX_FLUSH_DESCQ is WO */
	REGISTER_AZ(RX_DC_CFG),
	REGISTER_AZ(RX_DC_PF_WM),
	REGISTER_BZ(RX_RSS_TKEY),
	/* RX_NODESC_DROP is RC */
	REGISTER_AA(RX_SELF_RST),
	/* RX_DEBUG, RX_PUSH_DROP are not used */
	REGISTER_CZ(RX_RSS_IPV6_REG1),
	REGISTER_CZ(RX_RSS_IPV6_REG2),
	REGISTER_CZ(RX_RSS_IPV6_REG3),
	/* TX_FLUSH_DESCQ is WO */
	REGISTER_AZ(TX_DC_CFG),
	REGISTER_AA(TX_CHKSM_CFG),
	REGISTER_AZ(TX_CFG),
	/* TX_PUSH_DROP is not used */
	REGISTER_AZ(TX_RESERVED),
	REGISTER_BZ(TX_PACE),
	/* TX_PACE_DROP_QID is RC */
	REGISTER_BB(TX_VLAN),
	REGISTER_BZ(TX_IPFIL_PORTEN),
	REGISTER_AB(MD_TXD),
	REGISTER_AB(MD_RXD),
	REGISTER_AB(MD_CS),
	REGISTER_AB(MD_PHY_ADR),
	REGISTER_AB(MD_ID),
	/* MD_STAT is RC */
	REGISTER_AB(MAC_STAT_DMA),
	REGISTER_AB(MAC_CTRL),
	REGISTER_BB(GEN_MODE),
	REGISTER_AB(MAC_MC_HASH_REG0),
	REGISTER_AB(MAC_MC_HASH_REG1),
	REGISTER_AB(GM_CFG1),
	REGISTER_AB(GM_CFG2),
	/* GM_IPG and GM_HD are not used */
	REGISTER_AB(GM_MAX_FLEN),
	/* GM_TEST is not used */
	REGISTER_AB(GM_ADR1),
	REGISTER_AB(GM_ADR2),
	REGISTER_AB(GMF_CFG0),
	REGISTER_AB(GMF_CFG1),
	REGISTER_AB(GMF_CFG2),
	REGISTER_AB(GMF_CFG3),
	REGISTER_AB(GMF_CFG4),
	REGISTER_AB(GMF_CFG5),
	REGISTER_BB(TX_SRC_MAC_CTL),
	REGISTER_AB(XM_ADR_LO),
	REGISTER_AB(XM_ADR_HI),
	REGISTER_AB(XM_GLB_CFG),
	REGISTER_AB(XM_TX_CFG),
	REGISTER_AB(XM_RX_CFG),
	REGISTER_AB(XM_MGT_INT_MASK),
	REGISTER_AB(XM_FC),
	REGISTER_AB(XM_PAUSE_TIME),
	REGISTER_AB(XM_TX_PARAM),
	REGISTER_AB(XM_RX_PARAM),
	/* XM_MGT_INT_MSK (note no 'A') is RC */
	REGISTER_AB(XX_PWR_RST),
	REGISTER_AB(XX_SD_CTL),
	REGISTER_AB(XX_TXDRV_CTL),
	/* XX_PRBS_CTL, XX_PRBS_CHK and XX_PRBS_ERR are not used */
	/* XX_CORE_STAT is partly RC */
};
|
||||
|
||||
struct efx_nic_reg_table {
|
||||
u32 offset:24;
|
||||
u32 min_revision:3, max_revision:3;
|
||||
u32 step:6, rows:21;
|
||||
};
|
||||
|
||||
#define REGISTER_TABLE_DIMENSIONS(_, offset, arch, min_rev, max_rev, step, rows) { \
|
||||
offset, \
|
||||
REGISTER_REVISION_ ## arch ## min_rev, \
|
||||
REGISTER_REVISION_ ## arch ## max_rev, \
|
||||
step, rows \
|
||||
}
|
||||
#define REGISTER_TABLE(name, arch, min_rev, max_rev) \
|
||||
REGISTER_TABLE_DIMENSIONS( \
|
||||
name, arch ## R_ ## min_rev ## max_rev ## _ ## name, \
|
||||
arch, min_rev, max_rev, \
|
||||
arch ## R_ ## min_rev ## max_rev ## _ ## name ## _STEP, \
|
||||
arch ## R_ ## min_rev ## max_rev ## _ ## name ## _ROWS)
|
||||
#define REGISTER_TABLE_AA(name) REGISTER_TABLE(name, F, A, A)
|
||||
#define REGISTER_TABLE_AZ(name) REGISTER_TABLE(name, F, A, Z)
|
||||
#define REGISTER_TABLE_BB(name) REGISTER_TABLE(name, F, B, B)
|
||||
#define REGISTER_TABLE_BZ(name) REGISTER_TABLE(name, F, B, Z)
|
||||
#define REGISTER_TABLE_BB_CZ(name) \
|
||||
REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, F, B, B, \
|
||||
FR_BZ_ ## name ## _STEP, \
|
||||
FR_BB_ ## name ## _ROWS), \
|
||||
REGISTER_TABLE_DIMENSIONS(name, FR_BZ_ ## name, F, C, Z, \
|
||||
FR_BZ_ ## name ## _STEP, \
|
||||
FR_CZ_ ## name ## _ROWS)
|
||||
#define REGISTER_TABLE_CZ(name) REGISTER_TABLE(name, F, C, Z)
|
||||
|
||||
/* Register tables included in the ethtool register dump. Tables that
 * are write-only, unused by the driver, read-to-clear (RC), or
 * impractically large are noted in comments instead of being dumped.
 */
static const struct efx_nic_reg_table efx_nic_reg_tables[] = {
	/* DRIVER is not used */
	/* EVQ_RPTR, TIMER_COMMAND, USR_EV and {RX,TX}_DESC_UPD are WO */
	REGISTER_TABLE_BB(TX_IPFIL_TBL),
	REGISTER_TABLE_BB(TX_SRC_MAC_TBL),
	REGISTER_TABLE_AA(RX_DESC_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(RX_DESC_PTR_TBL),
	REGISTER_TABLE_AA(TX_DESC_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(TX_DESC_PTR_TBL),
	REGISTER_TABLE_AA(EVQ_PTR_TBL_KER),
	REGISTER_TABLE_BB_CZ(EVQ_PTR_TBL),
	/* We can't reasonably read all of the buffer table (up to 8MB!).
	 * However this driver will only use a few entries. Reading
	 * 1K entries allows for some expansion of queue count and
	 * size before we need to change the version. */
	REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL_KER, FR_AA_BUF_FULL_TBL_KER,
				  F, A, A, 8, 1024),
	REGISTER_TABLE_DIMENSIONS(BUF_FULL_TBL, FR_BZ_BUF_FULL_TBL,
				  F, B, Z, 8, 1024),
	REGISTER_TABLE_CZ(RX_MAC_FILTER_TBL0),
	REGISTER_TABLE_BB_CZ(TIMER_TBL),
	REGISTER_TABLE_BB_CZ(TX_PACE_TBL),
	REGISTER_TABLE_BZ(RX_INDIRECTION_TBL),
	/* TX_FILTER_TBL0 is huge and not used by this driver */
	REGISTER_TABLE_CZ(TX_MAC_FILTER_TBL0),
	REGISTER_TABLE_CZ(MC_TREG_SMEM),
	/* MSIX_PBA_TABLE is not mapped */
	/* SRM_DBG is not mapped (and is redundant with BUF_FLL_TBL) */
	REGISTER_TABLE_BZ(RX_FILTER_TBL0),
};
|
||||
|
||||
size_t efx_siena_get_regs_len(struct efx_nic *efx)
|
||||
{
|
||||
const struct efx_nic_reg *reg;
|
||||
const struct efx_nic_reg_table *table;
|
||||
size_t len = 0;
|
||||
|
||||
for (reg = efx_nic_regs;
|
||||
reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
|
||||
reg++)
|
||||
if (efx->type->revision >= reg->min_revision &&
|
||||
efx->type->revision <= reg->max_revision)
|
||||
len += sizeof(efx_oword_t);
|
||||
|
||||
for (table = efx_nic_reg_tables;
|
||||
table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
|
||||
table++)
|
||||
if (efx->type->revision >= table->min_revision &&
|
||||
efx->type->revision <= table->max_revision)
|
||||
len += table->rows * min_t(size_t, table->step, 16);
|
||||
|
||||
return len;
|
||||
}
|
||||
|
||||
/* Dump all readable registers and register-table rows into @buf.
 * The layout and total length are driven by the same efx_nic_regs[]
 * and efx_nic_reg_tables[] descriptions as efx_siena_get_regs_len(),
 * so the two must stay in sync. @buf must be at least that long.
 */
void efx_siena_get_regs(struct efx_nic *efx, void *buf)
{
	const struct efx_nic_reg *reg;
	const struct efx_nic_reg_table *table;

	/* Single registers: one oword each */
	for (reg = efx_nic_regs;
	     reg < efx_nic_regs + ARRAY_SIZE(efx_nic_regs);
	     reg++) {
		if (efx->type->revision >= reg->min_revision &&
		    efx->type->revision <= reg->max_revision) {
			efx_reado(efx, (efx_oword_t *)buf, reg->offset);
			buf += sizeof(efx_oword_t);
		}
	}

	for (table = efx_nic_reg_tables;
	     table < efx_nic_reg_tables + ARRAY_SIZE(efx_nic_reg_tables);
	     table++) {
		size_t size, i;

		/* Skip tables that don't exist on this NIC revision */
		if (!(efx->type->revision >= table->min_revision &&
		      efx->type->revision <= table->max_revision))
			continue;

		/* At most 16 bytes of each row are stored in @buf,
		 * matching efx_siena_get_regs_len()
		 */
		size = min_t(size_t, table->step, 16);

		for (i = 0; i < table->rows; i++) {
			/* The read access width depends on the row stride */
			switch (table->step) {
			case 4: /* 32-bit SRAM */
				efx_readd(efx, buf, table->offset + 4 * i);
				break;
			case 8: /* 64-bit SRAM */
				efx_sram_readq(efx,
					       efx->membase + table->offset,
					       buf, i);
				break;
			case 16: /* 128-bit-readable register */
				efx_reado_table(efx, buf, table->offset, i);
				break;
			case 32: /* 128-bit register, interleaved */
				efx_reado_table(efx, buf, table->offset, 2 * i);
				break;
			default:
				/* Table descriptions only use the steps
				 * above; anything else is a driver bug.
				 */
				WARN_ON(1);
				return;
			}
			buf += size;
		}
	}
}
|
||||
|
||||
/**
|
||||
* efx_siena_describe_stats - Describe supported statistics for ethtool
|
||||
* @desc: Array of &struct efx_hw_stat_desc describing the statistics
|
||||
* @count: Length of the @desc array
|
||||
* @mask: Bitmask of which elements of @desc are enabled
|
||||
* @names: Buffer to copy names to, or %NULL. The names are copied
|
||||
* starting at intervals of %ETH_GSTRING_LEN bytes.
|
||||
*
|
||||
* Returns the number of visible statistics, i.e. the number of set
|
||||
* bits in the first @count bits of @mask for which a name is defined.
|
||||
*/
|
||||
size_t efx_siena_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
|
||||
const unsigned long *mask, u8 *names)
|
||||
{
|
||||
size_t visible = 0;
|
||||
size_t index;
|
||||
|
||||
for_each_set_bit(index, mask, count) {
|
||||
if (desc[index].name) {
|
||||
if (names) {
|
||||
strlcpy(names, desc[index].name,
|
||||
ETH_GSTRING_LEN);
|
||||
names += ETH_GSTRING_LEN;
|
||||
}
|
||||
++visible;
|
||||
}
|
||||
}
|
||||
|
||||
return visible;
|
||||
}
|
||||
|
||||
/**
|
||||
* efx_siena_update_stats - Convert statistics DMA buffer to array of u64
|
||||
* @desc: Array of &struct efx_hw_stat_desc describing the DMA buffer
|
||||
* layout. DMA widths of 0, 16, 32 and 64 are supported; where
|
||||
* the width is specified as 0 the corresponding element of
|
||||
* @stats is not updated.
|
||||
* @count: Length of the @desc array
|
||||
* @mask: Bitmask of which elements of @desc are enabled
|
||||
* @stats: Buffer to update with the converted statistics. The length
|
||||
* of this array must be at least @count.
|
||||
* @dma_buf: DMA buffer containing hardware statistics
|
||||
* @accumulate: If set, the converted values will be added rather than
|
||||
* directly stored to the corresponding elements of @stats
|
||||
*/
|
||||
void efx_siena_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
|
||||
const unsigned long *mask,
|
||||
u64 *stats, const void *dma_buf, bool accumulate)
|
||||
{
|
||||
size_t index;
|
||||
|
||||
for_each_set_bit(index, mask, count) {
|
||||
if (desc[index].dma_width) {
|
||||
const void *addr = dma_buf + desc[index].offset;
|
||||
u64 val;
|
||||
|
||||
switch (desc[index].dma_width) {
|
||||
case 16:
|
||||
val = le16_to_cpup((__le16 *)addr);
|
||||
break;
|
||||
case 32:
|
||||
val = le32_to_cpup((__le32 *)addr);
|
||||
break;
|
||||
case 64:
|
||||
val = le64_to_cpup((__le64 *)addr);
|
||||
break;
|
||||
default:
|
||||
WARN_ON(1);
|
||||
val = 0;
|
||||
break;
|
||||
}
|
||||
|
||||
if (accumulate)
|
||||
stats[index] += val;
|
||||
else
|
||||
stats[index] = val;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* Adjust the RX no-descriptor drop count so that drops which occurred
 * while the interface was down (or before the first update after it
 * came up) are excluded from the reported statistic.
 *
 * @rx_nodesc_drops: in: cumulative hardware drop count;
 *	out: drop count excluding periods when the interface was down.
 */
void efx_siena_fix_nodesc_drop_stat(struct efx_nic *efx, u64 *rx_nodesc_drops)
{
	/* if down, or this is the first update after coming up, credit
	 * the delta since the last update to the "while down" bucket
	 */
	if (!(efx->net_dev->flags & IFF_UP) || !efx->rx_nodesc_drops_prev_state)
		efx->rx_nodesc_drops_while_down +=
			*rx_nodesc_drops - efx->rx_nodesc_drops_total;
	efx->rx_nodesc_drops_total = *rx_nodesc_drops;
	efx->rx_nodesc_drops_prev_state = !!(efx->net_dev->flags & IFF_UP);
	/* Report only the drops that happened while the interface was up */
	*rx_nodesc_drops -= efx->rx_nodesc_drops_while_down;
}
|
206
drivers/net/ethernet/sfc/siena/nic.h
Normal file
206
drivers/net/ethernet/sfc/siena/nic.h
Normal file
@ -0,0 +1,206 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/****************************************************************************
|
||||
* Driver for Solarflare network controllers and boards
|
||||
* Copyright 2005-2006 Fen Systems Ltd.
|
||||
* Copyright 2006-2013 Solarflare Communications Inc.
|
||||
*/
|
||||
|
||||
#ifndef EFX_NIC_H
|
||||
#define EFX_NIC_H
|
||||
|
||||
#include "nic_common.h"
|
||||
#include "efx.h"
|
||||
|
||||
u32 efx_farch_fpga_ver(struct efx_nic *efx);
|
||||
|
||||
/* PHY hardware type identifiers. Note that values 5 and 7 are unused. */
enum {
	PHY_TYPE_NONE = 0,
	PHY_TYPE_TXC43128 = 1,
	PHY_TYPE_88E1111 = 2,
	PHY_TYPE_SFX7101 = 3,
	PHY_TYPE_QT2022C2 = 4,
	PHY_TYPE_PM8358 = 6,
	PHY_TYPE_SFT9001A = 8,
	PHY_TYPE_QT2025C = 9,
	PHY_TYPE_SFT9001B = 10,
};
|
||||
|
||||
/* Indices into struct siena_nic_data::stats. The first entries are
 * reserved for the NIC-generic software statistics (GENERIC_STAT_*),
 * so the hardware statistics start at GENERIC_STAT_COUNT.
 */
enum {
	SIENA_STAT_tx_bytes = GENERIC_STAT_COUNT,
	SIENA_STAT_tx_good_bytes,
	SIENA_STAT_tx_bad_bytes,
	SIENA_STAT_tx_packets,
	SIENA_STAT_tx_bad,
	SIENA_STAT_tx_pause,
	SIENA_STAT_tx_control,
	SIENA_STAT_tx_unicast,
	SIENA_STAT_tx_multicast,
	SIENA_STAT_tx_broadcast,
	SIENA_STAT_tx_lt64,
	SIENA_STAT_tx_64,
	SIENA_STAT_tx_65_to_127,
	SIENA_STAT_tx_128_to_255,
	SIENA_STAT_tx_256_to_511,
	SIENA_STAT_tx_512_to_1023,
	SIENA_STAT_tx_1024_to_15xx,
	SIENA_STAT_tx_15xx_to_jumbo,
	SIENA_STAT_tx_gtjumbo,
	SIENA_STAT_tx_collision,
	SIENA_STAT_tx_single_collision,
	SIENA_STAT_tx_multiple_collision,
	SIENA_STAT_tx_excessive_collision,
	SIENA_STAT_tx_deferred,
	SIENA_STAT_tx_late_collision,
	SIENA_STAT_tx_excessive_deferred,
	SIENA_STAT_tx_non_tcpudp,
	SIENA_STAT_tx_mac_src_error,
	SIENA_STAT_tx_ip_src_error,
	SIENA_STAT_rx_bytes,
	SIENA_STAT_rx_good_bytes,
	SIENA_STAT_rx_bad_bytes,
	SIENA_STAT_rx_packets,
	SIENA_STAT_rx_good,
	SIENA_STAT_rx_bad,
	SIENA_STAT_rx_pause,
	SIENA_STAT_rx_control,
	SIENA_STAT_rx_unicast,
	SIENA_STAT_rx_multicast,
	SIENA_STAT_rx_broadcast,
	SIENA_STAT_rx_lt64,
	SIENA_STAT_rx_64,
	SIENA_STAT_rx_65_to_127,
	SIENA_STAT_rx_128_to_255,
	SIENA_STAT_rx_256_to_511,
	SIENA_STAT_rx_512_to_1023,
	SIENA_STAT_rx_1024_to_15xx,
	SIENA_STAT_rx_15xx_to_jumbo,
	SIENA_STAT_rx_gtjumbo,
	SIENA_STAT_rx_bad_gtjumbo,
	SIENA_STAT_rx_overflow,
	SIENA_STAT_rx_false_carrier,
	SIENA_STAT_rx_symbol_error,
	SIENA_STAT_rx_align_error,
	SIENA_STAT_rx_length_error,
	SIENA_STAT_rx_internal_error,
	SIENA_STAT_rx_nodesc_drop_cnt,
	SIENA_STAT_COUNT	/* total length of the stats array */
};
|
||||
|
||||
/**
 * struct siena_nic_data - Siena NIC state
 * @efx: Pointer back to main interface structure
 * @wol_filter_id: Wake-on-LAN packet filter id
 * @stats: Hardware statistics
 * @vf: Array of &struct siena_vf objects
 * @vfdi_channel: Channel used for VFDI communication with VFs
 * @vf_buftbl_base: The zeroth buffer table index used to back VF queues.
 * @vfdi_status: Common VFDI status page to be dmad to VF address space.
 * @local_addr_list: List of local addresses. Protected by %local_lock.
 * @local_page_list: List of DMA addressable pages used to broadcast
 *	%local_addr_list. Protected by %local_lock.
 * @local_lock: Mutex protecting %local_addr_list and %local_page_list.
 * @peer_work: Work item to broadcast peer addresses to VMs.
 */
struct siena_nic_data {
	struct efx_nic *efx;
	int wol_filter_id;
	u64 stats[SIENA_STAT_COUNT];
#ifdef CONFIG_SFC_SRIOV
	/* SR-IOV state; only present when CONFIG_SFC_SRIOV is enabled */
	struct siena_vf *vf;
	struct efx_channel *vfdi_channel;
	unsigned vf_buftbl_base;
	struct efx_buffer vfdi_status;
	struct list_head local_addr_list;
	struct list_head local_page_list;
	struct mutex local_lock;
	struct work_struct peer_work;
#endif
};
|
||||
|
||||
extern const struct efx_nic_type siena_a0_nic_type;
|
||||
|
||||
int falcon_probe_board(struct efx_nic *efx, u16 revision_info);
|
||||
|
||||
/* Falcon/Siena queue operations */
|
||||
int efx_farch_tx_probe(struct efx_tx_queue *tx_queue);
|
||||
void efx_farch_tx_init(struct efx_tx_queue *tx_queue);
|
||||
void efx_farch_tx_fini(struct efx_tx_queue *tx_queue);
|
||||
void efx_farch_tx_remove(struct efx_tx_queue *tx_queue);
|
||||
void efx_farch_tx_write(struct efx_tx_queue *tx_queue);
|
||||
unsigned int efx_farch_tx_limit_len(struct efx_tx_queue *tx_queue,
|
||||
dma_addr_t dma_addr, unsigned int len);
|
||||
int efx_farch_rx_probe(struct efx_rx_queue *rx_queue);
|
||||
void efx_farch_rx_init(struct efx_rx_queue *rx_queue);
|
||||
void efx_farch_rx_fini(struct efx_rx_queue *rx_queue);
|
||||
void efx_farch_rx_remove(struct efx_rx_queue *rx_queue);
|
||||
void efx_farch_rx_write(struct efx_rx_queue *rx_queue);
|
||||
void efx_farch_rx_defer_refill(struct efx_rx_queue *rx_queue);
|
||||
int efx_farch_ev_probe(struct efx_channel *channel);
|
||||
int efx_farch_ev_init(struct efx_channel *channel);
|
||||
void efx_farch_ev_fini(struct efx_channel *channel);
|
||||
void efx_farch_ev_remove(struct efx_channel *channel);
|
||||
int efx_farch_ev_process(struct efx_channel *channel, int quota);
|
||||
void efx_farch_ev_read_ack(struct efx_channel *channel);
|
||||
void efx_farch_ev_test_generate(struct efx_channel *channel);
|
||||
|
||||
/* Falcon/Siena filter operations */
|
||||
int efx_farch_filter_table_probe(struct efx_nic *efx);
|
||||
void efx_farch_filter_table_restore(struct efx_nic *efx);
|
||||
void efx_farch_filter_table_remove(struct efx_nic *efx);
|
||||
void efx_farch_filter_update_rx_scatter(struct efx_nic *efx);
|
||||
s32 efx_farch_filter_insert(struct efx_nic *efx, struct efx_filter_spec *spec,
|
||||
bool replace);
|
||||
int efx_farch_filter_remove_safe(struct efx_nic *efx,
|
||||
enum efx_filter_priority priority,
|
||||
u32 filter_id);
|
||||
int efx_farch_filter_get_safe(struct efx_nic *efx,
|
||||
enum efx_filter_priority priority, u32 filter_id,
|
||||
struct efx_filter_spec *);
|
||||
int efx_farch_filter_clear_rx(struct efx_nic *efx,
|
||||
enum efx_filter_priority priority);
|
||||
u32 efx_farch_filter_count_rx_used(struct efx_nic *efx,
|
||||
enum efx_filter_priority priority);
|
||||
u32 efx_farch_filter_get_rx_id_limit(struct efx_nic *efx);
|
||||
s32 efx_farch_filter_get_rx_ids(struct efx_nic *efx,
|
||||
enum efx_filter_priority priority, u32 *buf,
|
||||
u32 size);
|
||||
#ifdef CONFIG_RFS_ACCEL
|
||||
bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
|
||||
unsigned int index);
|
||||
#endif
|
||||
void efx_farch_filter_sync_rx_mode(struct efx_nic *efx);
|
||||
|
||||
/* Falcon/Siena interrupts */
|
||||
void efx_farch_irq_enable_master(struct efx_nic *efx);
|
||||
int efx_farch_irq_test_generate(struct efx_nic *efx);
|
||||
void efx_farch_irq_disable_master(struct efx_nic *efx);
|
||||
irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id);
|
||||
irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id);
|
||||
irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx);
|
||||
|
||||
/* Global Resources */
|
||||
void efx_siena_prepare_flush(struct efx_nic *efx);
|
||||
int efx_farch_fini_dmaq(struct efx_nic *efx);
|
||||
void efx_farch_finish_flr(struct efx_nic *efx);
|
||||
void siena_finish_flush(struct efx_nic *efx);
|
||||
void falcon_start_nic_stats(struct efx_nic *efx);
|
||||
void falcon_stop_nic_stats(struct efx_nic *efx);
|
||||
int falcon_reset_xaui(struct efx_nic *efx);
|
||||
void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw);
|
||||
void efx_farch_init_common(struct efx_nic *efx);
|
||||
void efx_farch_rx_push_indir_table(struct efx_nic *efx);
|
||||
void efx_farch_rx_pull_indir_table(struct efx_nic *efx);
|
||||
|
||||
/* Tests */

/* One register exercised by the register self-test */
struct efx_farch_register_test {
	unsigned address;	/* register address within the BAR */
	efx_oword_t mask;	/* bits covered by the test — presumably the
				 * safely writable bits; confirm against
				 * efx_farch_test_registers() */
};

int efx_farch_test_registers(struct efx_nic *efx,
			     const struct efx_farch_register_test *regs,
			     size_t n_regs);

void efx_farch_generate_event(struct efx_nic *efx, unsigned int evq,
			      efx_qword_t *event);
|
||||
|
||||
#endif /* EFX_NIC_H */
|
251
drivers/net/ethernet/sfc/siena/nic_common.h
Normal file
251
drivers/net/ethernet/sfc/siena/nic_common.h
Normal file
@ -0,0 +1,251 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/****************************************************************************
|
||||
* Driver for Solarflare network controllers and boards
|
||||
* Copyright 2005-2006 Fen Systems Ltd.
|
||||
* Copyright 2006-2013 Solarflare Communications Inc.
|
||||
* Copyright 2019-2020 Xilinx Inc.
|
||||
*/
|
||||
|
||||
#ifndef EFX_NIC_COMMON_H
|
||||
#define EFX_NIC_COMMON_H
|
||||
|
||||
#include "net_driver.h"
|
||||
#include "efx_common.h"
|
||||
#include "mcdi.h"
|
||||
#include "ptp.h"
|
||||
|
||||
enum {
|
||||
/* Revisions 0-2 were Falcon A0, A1 and B0 respectively.
|
||||
* They are not supported by this driver but these revision numbers
|
||||
* form part of the ethtool API for register dumping.
|
||||
*/
|
||||
EFX_REV_SIENA_A0 = 3,
|
||||
EFX_REV_HUNT_A0 = 4,
|
||||
EFX_REV_EF100 = 5,
|
||||
};
|
||||
|
||||
/* Return the hardware revision (EFX_REV_*) of this interface */
static inline int efx_nic_rev(struct efx_nic *efx)
{
	return efx->type->revision;
}

/* Read the current event from the event queue; @index is masked to the
 * queue size, so callers may pass an unwrapped counter.
 */
static inline efx_qword_t *efx_event(struct efx_channel *channel,
				     unsigned int index)
{
	return ((efx_qword_t *) (channel->eventq.buf.addr)) +
		(index & channel->eventq_mask);
}

/* See if an event is present
 *
 * We check both the high and low dword of the event for all ones. We
 * wrote all ones when we cleared the event, and no valid event can
 * have all ones in either its high or low dwords. This approach is
 * robust against reordering.
 *
 * Note that using a single 64-bit comparison is incorrect; even
 * though the CPU read will be atomic, the DMA write may not be.
 */
static inline int efx_event_present(efx_qword_t *event)
{
	return !(EFX_DWORD_IS_ALL_ONES(event->dword[0]) |
		 EFX_DWORD_IS_ALL_ONES(event->dword[1]));
}
|
||||
|
||||
/* Returns a pointer to the specified transmit descriptor in the TX
 * descriptor queue belonging to the specified channel.
 */
static inline efx_qword_t *
efx_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index)
{
	return ((efx_qword_t *) (tx_queue->txd.buf.addr)) + index;
}

/* Report whether this TX queue would be empty for the given write_count.
 * May return false negative. A zero empty_read_count means "not known
 * to be empty" (see EFX_EMPTY_COUNT_VALID).
 */
static inline bool efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue, unsigned int write_count)
{
	unsigned int empty_read_count = READ_ONCE(tx_queue->empty_read_count);

	if (empty_read_count == 0)
		return false;

	return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0;
}

/* Decide whether to push a TX descriptor to the NIC vs merely writing
 * the doorbell. This can reduce latency when we are adding a single
 * descriptor to an empty queue, but is otherwise pointless. Further,
 * Falcon and Siena have hardware bugs (SF bug 33851) that may be
 * triggered if we don't check this.
 * We use the write_count used for the last doorbell push, to get the
 * NIC's view of the tx queue.
 */
static inline bool efx_nic_may_push_tx_desc(struct efx_tx_queue *tx_queue,
					    unsigned int write_count)
{
	bool was_empty = efx_nic_tx_is_empty(tx_queue, write_count);

	/* Invalidate the cached empty state before deciding */
	tx_queue->empty_read_count = 0;
	return was_empty && tx_queue->write_count - write_count == 1;
}

/* Returns a pointer to the specified descriptor in the RX descriptor queue */
static inline efx_qword_t *
efx_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
{
	return ((efx_qword_t *) (rx_queue->rxd.buf.addr)) + index;
}
|
||||
|
||||
/* Alignment of PCIe DMA boundaries (4KB) */
|
||||
#define EFX_PAGE_SIZE 4096
|
||||
/* Size and alignment of buffer table entries (same) */
|
||||
#define EFX_BUF_SIZE EFX_PAGE_SIZE
|
||||
|
||||
/* NIC-generic software stats */
|
||||
enum {
|
||||
GENERIC_STAT_rx_noskb_drops,
|
||||
GENERIC_STAT_rx_nodesc_trunc,
|
||||
GENERIC_STAT_COUNT
|
||||
};
|
||||
|
||||
#define EFX_GENERIC_SW_STAT(ext_name) \
|
||||
[GENERIC_STAT_ ## ext_name] = { #ext_name, 0, 0 }
|
||||
|
||||
/* TX data path */
|
||||
static inline int efx_nic_probe_tx(struct efx_tx_queue *tx_queue)
|
||||
{
|
||||
return tx_queue->efx->type->tx_probe(tx_queue);
|
||||
}
|
||||
static inline void efx_nic_init_tx(struct efx_tx_queue *tx_queue)
|
||||
{
|
||||
tx_queue->efx->type->tx_init(tx_queue);
|
||||
}
|
||||
static inline void efx_nic_remove_tx(struct efx_tx_queue *tx_queue)
|
||||
{
|
||||
if (tx_queue->efx->type->tx_remove)
|
||||
tx_queue->efx->type->tx_remove(tx_queue);
|
||||
}
|
||||
static inline void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
|
||||
{
|
||||
tx_queue->efx->type->tx_write(tx_queue);
|
||||
}
|
||||
|
||||
/* RX data path */
|
||||
static inline int efx_nic_probe_rx(struct efx_rx_queue *rx_queue)
|
||||
{
|
||||
return rx_queue->efx->type->rx_probe(rx_queue);
|
||||
}
|
||||
static inline void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
|
||||
{
|
||||
rx_queue->efx->type->rx_init(rx_queue);
|
||||
}
|
||||
static inline void efx_nic_remove_rx(struct efx_rx_queue *rx_queue)
|
||||
{
|
||||
rx_queue->efx->type->rx_remove(rx_queue);
|
||||
}
|
||||
static inline void efx_nic_notify_rx_desc(struct efx_rx_queue *rx_queue)
|
||||
{
|
||||
rx_queue->efx->type->rx_write(rx_queue);
|
||||
}
|
||||
static inline void efx_nic_generate_fill_event(struct efx_rx_queue *rx_queue)
|
||||
{
|
||||
rx_queue->efx->type->rx_defer_refill(rx_queue);
|
||||
}
|
||||
|
||||
/* Event data path */
|
||||
static inline int efx_nic_probe_eventq(struct efx_channel *channel)
|
||||
{
|
||||
return channel->efx->type->ev_probe(channel);
|
||||
}
|
||||
static inline int efx_nic_init_eventq(struct efx_channel *channel)
|
||||
{
|
||||
return channel->efx->type->ev_init(channel);
|
||||
}
|
||||
static inline void efx_nic_fini_eventq(struct efx_channel *channel)
|
||||
{
|
||||
channel->efx->type->ev_fini(channel);
|
||||
}
|
||||
static inline void efx_nic_remove_eventq(struct efx_channel *channel)
|
||||
{
|
||||
channel->efx->type->ev_remove(channel);
|
||||
}
|
||||
static inline int
|
||||
efx_nic_process_eventq(struct efx_channel *channel, int quota)
|
||||
{
|
||||
return channel->efx->type->ev_process(channel, quota);
|
||||
}
|
||||
static inline void efx_nic_eventq_read_ack(struct efx_channel *channel)
|
||||
{
|
||||
channel->efx->type->ev_read_ack(channel);
|
||||
}
|
||||
|
||||
void efx_siena_event_test_start(struct efx_channel *channel);
|
||||
|
||||
bool efx_siena_event_present(struct efx_channel *channel);
|
||||
|
||||
static inline void efx_sensor_event(struct efx_nic *efx, efx_qword_t *ev)
|
||||
{
|
||||
if (efx->type->sensor_event)
|
||||
efx->type->sensor_event(efx, ev);
|
||||
}
|
||||
|
||||
static inline unsigned int efx_rx_recycle_ring_size(const struct efx_nic *efx)
|
||||
{
|
||||
return efx->type->rx_recycle_ring_size(efx);
|
||||
}
|
||||
|
||||
/* Some statistics are computed as A - B where A and B each increase
|
||||
* linearly with some hardware counter(s) and the counters are read
|
||||
* asynchronously. If the counters contributing to B are always read
|
||||
* after those contributing to A, the computed value may be lower than
|
||||
* the true value by some variable amount, and may decrease between
|
||||
* subsequent computations.
|
||||
*
|
||||
* We should never allow statistics to decrease or to exceed the true
|
||||
* value. Since the computed value will never be greater than the
|
||||
* true value, we can achieve this by only storing the computed value
|
||||
* when it increases.
|
||||
*/
|
||||
static inline void efx_update_diff_stat(u64 *stat, u64 diff)
|
||||
{
|
||||
if ((s64)(diff - *stat) > 0)
|
||||
*stat = diff;
|
||||
}
|
||||
|
||||
/* Interrupts */
|
||||
int efx_siena_init_interrupt(struct efx_nic *efx);
|
||||
int efx_siena_irq_test_start(struct efx_nic *efx);
|
||||
void efx_siena_fini_interrupt(struct efx_nic *efx);
|
||||
|
||||
static inline int efx_nic_event_test_irq_cpu(struct efx_channel *channel)
|
||||
{
|
||||
return READ_ONCE(channel->event_test_cpu);
|
||||
}
|
||||
static inline int efx_nic_irq_test_irq_cpu(struct efx_nic *efx)
|
||||
{
|
||||
return READ_ONCE(efx->last_irq_cpu);
|
||||
}
|
||||
|
||||
/* Global Resources */
|
||||
int efx_siena_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
|
||||
unsigned int len, gfp_t gfp_flags);
|
||||
void efx_siena_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer);
|
||||
|
||||
size_t efx_siena_get_regs_len(struct efx_nic *efx);
|
||||
void efx_siena_get_regs(struct efx_nic *efx, void *buf);
|
||||
|
||||
#define EFX_MC_STATS_GENERATION_INVALID ((__force __le64)(-1))
|
||||
|
||||
size_t efx_siena_describe_stats(const struct efx_hw_stat_desc *desc, size_t count,
|
||||
const unsigned long *mask, u8 *names);
|
||||
void efx_siena_update_stats(const struct efx_hw_stat_desc *desc, size_t count,
|
||||
const unsigned long *mask, u64 *stats,
|
||||
const void *dma_buf, bool accumulate);
|
||||
void efx_siena_fix_nodesc_drop_stat(struct efx_nic *efx, u64 *stat);
|
||||
|
||||
#define EFX_MAX_FLUSH_TIME 5000
|
||||
|
||||
#endif /* EFX_NIC_COMMON_H */
|
2200
drivers/net/ethernet/sfc/siena/ptp.c
Normal file
2200
drivers/net/ethernet/sfc/siena/ptp.c
Normal file
File diff suppressed because it is too large
Load Diff
45
drivers/net/ethernet/sfc/siena/ptp.h
Normal file
45
drivers/net/ethernet/sfc/siena/ptp.h
Normal file
@ -0,0 +1,45 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/****************************************************************************
|
||||
* Driver for Solarflare network controllers and boards
|
||||
* Copyright 2005-2006 Fen Systems Ltd.
|
||||
* Copyright 2006-2013 Solarflare Communications Inc.
|
||||
* Copyright 2019-2020 Xilinx Inc.
|
||||
*/
|
||||
|
||||
#ifndef EFX_PTP_H
|
||||
#define EFX_PTP_H
|
||||
|
||||
#include <linux/net_tstamp.h>
|
||||
#include "net_driver.h"
|
||||
|
||||
struct ethtool_ts_info;
|
||||
void efx_siena_ptp_defer_probe_with_channel(struct efx_nic *efx);
|
||||
struct efx_channel *efx_siena_ptp_channel(struct efx_nic *efx);
|
||||
int efx_siena_ptp_set_ts_config(struct efx_nic *efx, struct ifreq *ifr);
|
||||
int efx_siena_ptp_get_ts_config(struct efx_nic *efx, struct ifreq *ifr);
|
||||
void efx_siena_ptp_get_ts_info(struct efx_nic *efx,
|
||||
struct ethtool_ts_info *ts_info);
|
||||
bool efx_siena_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
|
||||
int efx_siena_ptp_get_mode(struct efx_nic *efx);
|
||||
int efx_siena_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
|
||||
unsigned int new_mode);
|
||||
int efx_siena_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
|
||||
void efx_siena_ptp_event(struct efx_nic *efx, efx_qword_t *ev);
|
||||
size_t efx_siena_ptp_describe_stats(struct efx_nic *efx, u8 *strings);
|
||||
size_t efx_siena_ptp_update_stats(struct efx_nic *efx, u64 *stats);
|
||||
void efx_siena_time_sync_event(struct efx_channel *channel, efx_qword_t *ev);
|
||||
void __efx_siena_rx_skb_attach_timestamp(struct efx_channel *channel,
|
||||
struct sk_buff *skb);
|
||||
/* Attach a receive timestamp to @skb, but only while the channel's
 * sync events are valid; otherwise this is a no-op. The heavy
 * lifting is done out of line by __efx_siena_rx_skb_attach_timestamp().
 */
static inline void efx_rx_skb_attach_timestamp(struct efx_channel *channel,
					       struct sk_buff *skb)
{
	if (channel->sync_events_state == SYNC_EVENTS_VALID)
		__efx_siena_rx_skb_attach_timestamp(channel, skb);
}
|
||||
|
||||
void efx_siena_ptp_start_datapath(struct efx_nic *efx);
|
||||
void efx_siena_ptp_stop_datapath(struct efx_nic *efx);
|
||||
bool efx_siena_ptp_use_mac_tx_timestamps(struct efx_nic *efx);
|
||||
ktime_t efx_siena_ptp_nic_to_kernel_time(struct efx_tx_queue *tx_queue);
|
||||
|
||||
#endif /* EFX_PTP_H */
|
400
drivers/net/ethernet/sfc/siena/rx.c
Normal file
400
drivers/net/ethernet/sfc/siena/rx.c
Normal file
@ -0,0 +1,400 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/****************************************************************************
|
||||
* Driver for Solarflare network controllers and boards
|
||||
* Copyright 2005-2006 Fen Systems Ltd.
|
||||
* Copyright 2005-2013 Solarflare Communications Inc.
|
||||
*/
|
||||
|
||||
#include <linux/socket.h>
|
||||
#include <linux/in.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/ip.h>
|
||||
#include <linux/ipv6.h>
|
||||
#include <linux/tcp.h>
|
||||
#include <linux/udp.h>
|
||||
#include <linux/prefetch.h>
|
||||
#include <linux/moduleparam.h>
|
||||
#include <linux/iommu.h>
|
||||
#include <net/ip.h>
|
||||
#include <net/checksum.h>
|
||||
#include <net/xdp.h>
|
||||
#include <linux/bpf_trace.h>
|
||||
#include "net_driver.h"
|
||||
#include "efx.h"
|
||||
#include "rx_common.h"
|
||||
#include "filter.h"
|
||||
#include "nic.h"
|
||||
#include "selftest.h"
|
||||
#include "workarounds.h"
|
||||
|
||||
/* Preferred number of descriptors to fill at once */
|
||||
#define EFX_RX_PREFERRED_BATCH 8U
|
||||
|
||||
/* Maximum rx prefix used by any architecture. */
|
||||
#define EFX_MAX_RX_PREFIX_SIZE 16
|
||||
|
||||
/* Size of buffer allocated for skb header area. */
|
||||
#define EFX_SKB_HEADERS 128u
|
||||
|
||||
/* Each packet can consume up to ceil(max_frame_len / buffer_size) buffers */
|
||||
#define EFX_RX_MAX_FRAGS DIV_ROUND_UP(EFX_MAX_FRAME_LEN(EFX_MAX_MTU), \
|
||||
EFX_RX_USR_BUF_SIZE)
|
||||
|
||||
static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
|
||||
struct efx_rx_buffer *rx_buf,
|
||||
int len)
|
||||
{
|
||||
struct efx_nic *efx = rx_queue->efx;
|
||||
unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding;
|
||||
|
||||
if (likely(len <= max_len))
|
||||
return;
|
||||
|
||||
/* The packet must be discarded, but this is only a fatal error
|
||||
* if the caller indicated it was
|
||||
*/
|
||||
rx_buf->flags |= EFX_RX_PKT_DISCARD;
|
||||
|
||||
if (net_ratelimit())
|
||||
netif_err(efx, rx_err, efx->net_dev,
|
||||
"RX queue %d overlength RX event (%#x > %#x)\n",
|
||||
efx_rx_queue_index(rx_queue), len, max_len);
|
||||
|
||||
efx_rx_queue_channel(rx_queue)->n_rx_overlength++;
|
||||
}
|
||||
|
||||
/* Allocate and construct an SKB around page fragments.
 *
 * @rx_buf: first of @n_frags receive buffers making up the packet
 * @eh: pointer to the start of the Ethernet header in the first buffer
 * @hdr_len: number of header bytes to copy into the skb's linear area
 *
 * The first @hdr_len bytes are copied into the skb; any remaining
 * data stays in the pages, which are attached as fragments. Returns
 * %NULL (and counts an n_rx_noskb_drop) on allocation failure.
 */
static struct sk_buff *efx_rx_mk_skb(struct efx_channel *channel,
				     struct efx_rx_buffer *rx_buf,
				     unsigned int n_frags,
				     u8 *eh, int hdr_len)
{
	struct efx_nic *efx = channel->efx;
	struct sk_buff *skb;

	/* Allocate an SKB to store the headers */
	skb = netdev_alloc_skb(efx->net_dev,
			       efx->rx_ip_align + efx->rx_prefix_size +
			       hdr_len);
	if (unlikely(skb == NULL)) {
		atomic_inc(&efx->n_rx_noskb_drops);
		return NULL;
	}

	EFX_WARN_ON_ONCE_PARANOID(rx_buf->len < hdr_len);

	/* Copy the RX prefix plus the headers, then hide the prefix
	 * again so skb->data points at the Ethernet header
	 */
	memcpy(skb->data + efx->rx_ip_align, eh - efx->rx_prefix_size,
	       efx->rx_prefix_size + hdr_len);
	skb_reserve(skb, efx->rx_ip_align + efx->rx_prefix_size);
	__skb_put(skb, hdr_len);

	/* Append the remaining page(s) onto the frag list */
	if (rx_buf->len > hdr_len) {
		rx_buf->page_offset += hdr_len;
		rx_buf->len -= hdr_len;

		for (;;) {
			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
					rx_buf->page, rx_buf->page_offset,
					rx_buf->len, efx->rx_buffer_truesize);
			/* Page ownership has passed to the skb */
			rx_buf->page = NULL;

			if (skb_shinfo(skb)->nr_frags == n_frags)
				break;

			rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
		}
	} else {
		/* All data was copied; release the now-unused page */
		__free_pages(rx_buf->page, efx->rx_buffer_order);
		rx_buf->page = NULL;
		n_frags = 0;
	}

	/* Move past the ethernet header */
	skb->protocol = eth_type_trans(skb, efx->net_dev);

	skb_mark_napi_id(skb, &channel->napi_str);

	return skb;
}
|
||||
|
||||
/* Handle a received packet.  First half: validates the completion,
 * syncs the DMA mappings and recycles the pages, then stashes the
 * fragment count/index on the channel for the payload-touching second
 * half (__efx_siena_rx_packet) to pick up — receives are pipelined so
 * the prefetch below has time to take effect.
 */
void efx_siena_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
			 unsigned int n_frags, unsigned int len, u16 flags)
{
	struct efx_nic *efx = rx_queue->efx;
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_rx_buffer *rx_buf;

	rx_queue->rx_packets++;

	rx_buf = efx_rx_buffer(rx_queue, index);
	rx_buf->flags |= flags;

	/* Validate the number of fragments and completed length */
	if (n_frags == 1) {
		if (!(flags & EFX_RX_PKT_PREFIX_LEN))
			efx_rx_packet__check_len(rx_queue, rx_buf, len);
	} else if (unlikely(n_frags > EFX_RX_MAX_FRAGS) ||
		   unlikely(len <= (n_frags - 1) * efx->rx_dma_len) ||
		   unlikely(len > n_frags * efx->rx_dma_len) ||
		   unlikely(!efx->rx_scatter)) {
		/* If this isn't an explicit discard request, either
		 * the hardware or the driver is broken.
		 */
		WARN_ON(!(len == 0 && rx_buf->flags & EFX_RX_PKT_DISCARD));
		rx_buf->flags |= EFX_RX_PKT_DISCARD;
	}

	netif_vdbg(efx, rx_status, efx->net_dev,
		   "RX queue %d received ids %x-%x len %d %s%s\n",
		   efx_rx_queue_index(rx_queue), index,
		   (index + n_frags - 1) & rx_queue->ptr_mask, len,
		   (rx_buf->flags & EFX_RX_PKT_CSUMMED) ? " [SUMMED]" : "",
		   (rx_buf->flags & EFX_RX_PKT_DISCARD) ? " [DISCARD]" : "");

	/* Discard packet, if instructed to do so.  Process the
	 * previous receive first.
	 */
	if (unlikely(rx_buf->flags & EFX_RX_PKT_DISCARD)) {
		efx_rx_flush_packet(channel);
		efx_siena_discard_rx_packet(channel, rx_buf, n_frags);
		return;
	}

	if (n_frags == 1 && !(flags & EFX_RX_PKT_PREFIX_LEN))
		rx_buf->len = len;

	/* Release and/or sync the DMA mapping - assumes all RX buffers
	 * consumed in-order per RX queue.
	 */
	efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);

	/* Prefetch nice and early so data will (hopefully) be in cache by
	 * the time we look at it.
	 */
	prefetch(efx_rx_buf_va(rx_buf));

	/* Skip over the hardware RX prefix; only the packet proper is
	 * passed further up.
	 */
	rx_buf->page_offset += efx->rx_prefix_size;
	rx_buf->len -= efx->rx_prefix_size;

	if (n_frags > 1) {
		/* Release/sync DMA mapping for additional fragments.
		 * Fix length for last fragment.
		 */
		unsigned int tail_frags = n_frags - 1;

		for (;;) {
			rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
			if (--tail_frags == 0)
				break;
			efx_sync_rx_buffer(efx, rx_buf, efx->rx_dma_len);
		}
		rx_buf->len = len - (n_frags - 1) * efx->rx_dma_len;
		efx_sync_rx_buffer(efx, rx_buf, rx_buf->len);
	}

	/* All fragments have been DMA-synced, so recycle pages. */
	rx_buf = efx_rx_buffer(rx_queue, index);
	efx_siena_recycle_rx_pages(channel, rx_buf, n_frags);

	/* Pipeline receives so that we give time for packet headers to be
	 * prefetched into cache.
	 */
	efx_rx_flush_packet(channel);
	channel->rx_pkt_n_frags = n_frags;
	channel->rx_pkt_index = index;
}
|
||||
|
||||
/* Build an skb for a completed receive and hand it to the stack (or to
 * the channel's private receive_skb handler, or onto channel->rx_list
 * for deferred delivery).  On skb allocation failure the RX buffers are
 * freed and the packet is dropped.
 */
static void efx_rx_deliver(struct efx_channel *channel, u8 *eh,
			   struct efx_rx_buffer *rx_buf,
			   unsigned int n_frags)
{
	struct sk_buff *skb;
	u16 hdr_len = min_t(u16, rx_buf->len, EFX_SKB_HEADERS);

	skb = efx_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len);
	if (unlikely(skb == NULL)) {
		struct efx_rx_queue *rx_queue;

		rx_queue = efx_channel_get_rx_queue(channel);
		efx_siena_free_rx_buffers(rx_queue, rx_buf, n_frags);
		return;
	}
	skb_record_rx_queue(skb, channel->rx_queue.core_index);

	/* Set the SKB flags */
	skb_checksum_none_assert(skb);
	if (likely(rx_buf->flags & EFX_RX_PKT_CSUMMED)) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb->csum_level = !!(rx_buf->flags & EFX_RX_PKT_CSUM_LEVEL);
	}

	efx_rx_skb_attach_timestamp(channel, skb);

	/* A channel type may consume the skb itself (returns true);
	 * in that case we must not touch it again.
	 */
	if (channel->type->receive_skb)
		if (channel->type->receive_skb(channel, skb))
			return;

	/* Pass the packet up */
	if (channel->rx_list != NULL)
		/* Add to list, will pass up later */
		list_add_tail(&skb->list, channel->rx_list);
	else
		/* No list, so pass it up now */
		netif_receive_skb(skb);
}
|
||||
|
||||
/* efx_do_xdp - perform XDP processing on a received packet
 *
 * Runs the attached XDP program (if any) on a single-fragment packet.
 * The hardware RX prefix immediately precedes *ehp and may be clobbered
 * by the program's headroom, so it is saved and restored around the run.
 * On XDP_PASS, *ehp and the rx_buf are adjusted for any head movement
 * the program made.  For all other verdicts the buffer is consumed here
 * (forwarded to TX/redirect or freed).
 *
 * Returns true if packet should still be delivered.
 */
static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel,
		       struct efx_rx_buffer *rx_buf, u8 **ehp)
{
	u8 rx_prefix[EFX_MAX_RX_PREFIX_SIZE];
	struct efx_rx_queue *rx_queue;
	struct bpf_prog *xdp_prog;
	struct xdp_frame *xdpf;
	struct xdp_buff xdp;
	u32 xdp_act;
	s16 offset;
	int err;

	xdp_prog = rcu_dereference_bh(efx->xdp_prog);
	if (!xdp_prog)
		return true;

	rx_queue = efx_channel_get_rx_queue(channel);

	if (unlikely(channel->rx_pkt_n_frags > 1)) {
		/* We can't do XDP on fragmented packets - drop. */
		efx_siena_free_rx_buffers(rx_queue, rx_buf,
					  channel->rx_pkt_n_frags);
		if (net_ratelimit())
			netif_err(efx, rx_err, efx->net_dev,
				  "XDP is not possible with multiple receive fragments (%d)\n",
				  channel->rx_pkt_n_frags);
		channel->n_rx_xdp_bad_drops++;
		return false;
	}

	dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr,
				rx_buf->len, DMA_FROM_DEVICE);

	/* Save the rx prefix. */
	EFX_WARN_ON_PARANOID(efx->rx_prefix_size > EFX_MAX_RX_PREFIX_SIZE);
	memcpy(rx_prefix, *ehp - efx->rx_prefix_size,
	       efx->rx_prefix_size);

	xdp_init_buff(&xdp, efx->rx_page_buf_step, &rx_queue->xdp_rxq_info);
	/* No support yet for XDP metadata */
	xdp_prepare_buff(&xdp, *ehp - EFX_XDP_HEADROOM, EFX_XDP_HEADROOM,
			 rx_buf->len, false);

	xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp);

	/* How far the program moved the packet head (may be negative). */
	offset = (u8 *)xdp.data - *ehp;

	switch (xdp_act) {
	case XDP_PASS:
		/* Fix up rx prefix. */
		if (offset) {
			*ehp += offset;
			rx_buf->page_offset += offset;
			rx_buf->len -= offset;
			memcpy(*ehp - efx->rx_prefix_size, rx_prefix,
			       efx->rx_prefix_size);
		}
		break;

	case XDP_TX:
		/* Buffer ownership passes to tx on success. */
		xdpf = xdp_convert_buff_to_frame(&xdp);
		err = efx_siena_xdp_tx_buffers(efx, 1, &xdpf, true);
		if (unlikely(err != 1)) {
			efx_siena_free_rx_buffers(rx_queue, rx_buf, 1);
			if (net_ratelimit())
				netif_err(efx, rx_err, efx->net_dev,
					  "XDP TX failed (%d)\n", err);
			channel->n_rx_xdp_bad_drops++;
			trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act);
		} else {
			channel->n_rx_xdp_tx++;
		}
		break;

	case XDP_REDIRECT:
		err = xdp_do_redirect(efx->net_dev, &xdp, xdp_prog);
		if (unlikely(err)) {
			efx_siena_free_rx_buffers(rx_queue, rx_buf, 1);
			if (net_ratelimit())
				netif_err(efx, rx_err, efx->net_dev,
					  "XDP redirect failed (%d)\n", err);
			channel->n_rx_xdp_bad_drops++;
			trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act);
		} else {
			channel->n_rx_xdp_redirect++;
		}
		break;

	default:
		/* Unknown verdict: warn, drop and trace. */
		bpf_warn_invalid_xdp_action(efx->net_dev, xdp_prog, xdp_act);
		efx_siena_free_rx_buffers(rx_queue, rx_buf, 1);
		channel->n_rx_xdp_bad_drops++;
		trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act);
		break;

	case XDP_ABORTED:
		trace_xdp_exception(efx->net_dev, xdp_prog, xdp_act);
		fallthrough;
	case XDP_DROP:
		efx_siena_free_rx_buffers(rx_queue, rx_buf, 1);
		channel->n_rx_xdp_drops++;
		break;
	}

	return xdp_act == XDP_PASS;
}
|
||||
|
||||
/* Handle a received packet.  Second half: Touches packet payload.
 *
 * Consumes the pipelined receive stashed by efx_siena_rx_packet()
 * (channel->rx_pkt_index / rx_pkt_n_frags).  Routes the packet to the
 * loopback selftest, XDP, GRO or plain skb delivery as appropriate,
 * then clears rx_pkt_n_frags to mark the pipeline slot free.
 */
void __efx_siena_rx_packet(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	struct efx_rx_buffer *rx_buf =
		efx_rx_buffer(&channel->rx_queue, channel->rx_pkt_index);
	u8 *eh = efx_rx_buf_va(rx_buf);

	/* Read length from the prefix if necessary.  This already
	 * excludes the length of the prefix itself.
	 */
	if (rx_buf->flags & EFX_RX_PKT_PREFIX_LEN)
		rx_buf->len = le16_to_cpup((__le16 *)
					   (eh + efx->rx_packet_len_offset));

	/* If we're in loopback test, then pass the packet directly to the
	 * loopback layer, and free the rx_buf here
	 */
	if (unlikely(efx->loopback_selftest)) {
		struct efx_rx_queue *rx_queue;

		efx_siena_loopback_rx_packet(efx, eh, rx_buf->len);
		rx_queue = efx_channel_get_rx_queue(channel);
		efx_siena_free_rx_buffers(rx_queue, rx_buf,
					  channel->rx_pkt_n_frags);
		goto out;
	}

	/* XDP may consume the packet; if so, nothing further to do. */
	if (!efx_do_xdp(efx, channel, rx_buf, &eh))
		goto out;

	if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
		rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;

	if ((rx_buf->flags & EFX_RX_PKT_TCP) && !channel->type->receive_skb)
		efx_siena_rx_packet_gro(channel, rx_buf,
					channel->rx_pkt_n_frags, eh, 0);
	else
		efx_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags);
out:
	channel->rx_pkt_n_frags = 0;
}
|
1094
drivers/net/ethernet/sfc/siena/rx_common.c
Normal file
1094
drivers/net/ethernet/sfc/siena/rx_common.c
Normal file
File diff suppressed because it is too large
Load Diff
110
drivers/net/ethernet/sfc/siena/rx_common.h
Normal file
110
drivers/net/ethernet/sfc/siena/rx_common.h
Normal file
@ -0,0 +1,110 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2018 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#ifndef EFX_RX_COMMON_H
#define EFX_RX_COMMON_H

/* Preferred number of descriptors to fill at once */
#define EFX_RX_PREFERRED_BATCH 8U

/* Each packet can consume up to ceil(max_frame_len / buffer_size) buffers */
#define EFX_RX_MAX_FRAGS DIV_ROUND_UP(EFX_MAX_FRAME_LEN(EFX_MAX_MTU), \
				      EFX_RX_USR_BUF_SIZE)

/* Number of RX buffers to recycle pages for.  When creating the RX page recycle
 * ring, this number is divided by the number of buffers per page to calculate
 * the number of pages to store in the RX page recycle ring.
 */
#define EFX_RECYCLE_RING_SIZE_10G	256

/* Virtual address of the start of the packet within a buffer's page. */
static inline u8 *efx_rx_buf_va(struct efx_rx_buffer *buf)
{
	return page_address(buf->page) + buf->page_offset;
}

/* Extract the RSS hash from the hardware RX prefix at @eh.  The hash is
 * stored little-endian; on platforms without efficient unaligned access
 * it is assembled byte by byte instead.
 */
static inline u32 efx_rx_buf_hash(struct efx_nic *efx, const u8 *eh)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
	return __le32_to_cpup((const __le32 *)(eh + efx->rx_packet_hash_offset));
#else
	const u8 *data = eh + efx->rx_packet_hash_offset;

	return (u32)data[0]       |
	       (u32)data[1] << 8  |
	       (u32)data[2] << 16 |
	       (u32)data[3] << 24;
#endif
}

void efx_siena_rx_slow_fill(struct timer_list *t);

void efx_siena_recycle_rx_pages(struct efx_channel *channel,
				struct efx_rx_buffer *rx_buf,
				unsigned int n_frags);
void efx_siena_discard_rx_packet(struct efx_channel *channel,
				 struct efx_rx_buffer *rx_buf,
				 unsigned int n_frags);

int efx_siena_probe_rx_queue(struct efx_rx_queue *rx_queue);
void efx_siena_init_rx_queue(struct efx_rx_queue *rx_queue);
void efx_siena_fini_rx_queue(struct efx_rx_queue *rx_queue);
void efx_siena_remove_rx_queue(struct efx_rx_queue *rx_queue);

/* Make the first @len bytes of a DMA-mapped RX buffer visible to the CPU. */
static inline void efx_sync_rx_buffer(struct efx_nic *efx,
				      struct efx_rx_buffer *rx_buf,
				      unsigned int len)
{
	dma_sync_single_for_cpu(&efx->pci_dev->dev, rx_buf->dma_addr, len,
				DMA_FROM_DEVICE);
}

void efx_siena_free_rx_buffers(struct efx_rx_queue *rx_queue,
			       struct efx_rx_buffer *rx_buf,
			       unsigned int num_bufs);

void efx_siena_rx_config_page_split(struct efx_nic *efx);
void efx_siena_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
					bool atomic);

void
efx_siena_rx_packet_gro(struct efx_channel *channel,
			struct efx_rx_buffer *rx_buf,
			unsigned int n_frags, u8 *eh, __wsum csum);

struct efx_rss_context *efx_siena_alloc_rss_context_entry(struct efx_nic *efx);
struct efx_rss_context *efx_siena_find_rss_context_entry(struct efx_nic *efx,
							 u32 id);
void efx_siena_free_rss_context_entry(struct efx_rss_context *ctx);
void efx_siena_set_default_rx_indir_table(struct efx_nic *efx,
					  struct efx_rss_context *ctx);

bool efx_siena_filter_is_mc_recipient(const struct efx_filter_spec *spec);
bool efx_siena_filter_spec_equal(const struct efx_filter_spec *left,
				 const struct efx_filter_spec *right);
u32 efx_siena_filter_spec_hash(const struct efx_filter_spec *spec);

#ifdef CONFIG_RFS_ACCEL
bool efx_siena_rps_check_rule(struct efx_arfs_rule *rule,
			      unsigned int filter_idx, bool *force);
struct efx_arfs_rule *efx_siena_rps_hash_find(struct efx_nic *efx,
					      const struct efx_filter_spec *spec);
void efx_siena_rps_hash_del(struct efx_nic *efx,
			    const struct efx_filter_spec *spec);

int efx_siena_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
			 u16 rxq_index, u32 flow_id);
bool __efx_siena_filter_rfs_expire(struct efx_channel *channel,
				   unsigned int quota);
#endif

int efx_siena_probe_filters(struct efx_nic *efx);
void efx_siena_remove_filters(struct efx_nic *efx);

#endif
|
807
drivers/net/ethernet/sfc/siena/selftest.c
Normal file
807
drivers/net/ethernet/sfc/siena/selftest.c
Normal file
@ -0,0 +1,807 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/****************************************************************************
|
||||
* Driver for Solarflare network controllers and boards
|
||||
* Copyright 2005-2006 Fen Systems Ltd.
|
||||
* Copyright 2006-2012 Solarflare Communications Inc.
|
||||
*/
|
||||
|
||||
#include <linux/netdevice.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/kernel_stat.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/ethtool.h>
|
||||
#include <linux/ip.h>
|
||||
#include <linux/in.h>
|
||||
#include <linux/udp.h>
|
||||
#include <linux/rtnetlink.h>
|
||||
#include <linux/slab.h>
|
||||
#include "net_driver.h"
|
||||
#include "efx.h"
|
||||
#include "efx_common.h"
|
||||
#include "efx_channels.h"
|
||||
#include "nic.h"
|
||||
#include "mcdi_port_common.h"
|
||||
#include "selftest.h"
|
||||
#include "workarounds.h"
|
||||
|
||||
/* IRQ latency can be enormous because:
 * - All IRQs may be disabled on a CPU for a *long* time by e.g. a
 *   slow serial console or an old IDE driver doing error recovery
 * - The PREEMPT_RT patches mostly deal with this, but also allow a
 *   tasklet or normal task to be given higher priority than our IRQ
 *   threads
 * Try to avoid blaming the hardware for this.
 */
#define IRQ_TIMEOUT HZ

/*
 * Loopback test packet structure
 *
 * The self-test should stress every RSS vector, and unfortunately
 * Falcon only performs RSS on TCP/UDP packets.
 */
struct efx_loopback_payload {
	struct ethhdr header;
	struct iphdr ip;
	struct udphdr udp;
	__be16 iteration;	/* test iteration number, network byte order */
	char msg[64];		/* fixed ASCII payload, verified on receive */
} __packed;
|
||||
|
||||
/* Loopback test source MAC address */
static const u8 payload_source[ETH_ALEN] __aligned(2) = {
	0x00, 0x0f, 0x53, 0x1b, 0x1b, 0x1b,
};

/* Message copied into every loopback test packet's msg field. */
static const char payload_msg[] =
	"Hello world! This is an Efx loopback test in progress!";

/* Interrupt mode names */
static const unsigned int efx_siena_interrupt_mode_max = EFX_INT_MODE_MAX;
static const char *const efx_siena_interrupt_mode_names[] = {
	[EFX_INT_MODE_MSIX]   = "MSI-X",
	[EFX_INT_MODE_MSI]    = "MSI",
	[EFX_INT_MODE_LEGACY] = "legacy",
};
/* Human-readable name of the NIC's current interrupt mode, for logging. */
#define INT_MODE(efx) \
	STRING_TABLE_LOOKUP(efx->interrupt_mode, efx_siena_interrupt_mode)
|
||||
|
||||
/**
 * struct efx_loopback_state - persistent state during a loopback selftest
 * @flush: Drop all packets in efx_siena_loopback_rx_packet
 * @packet_count: Number of packets being used in this test
 * @skbs: An array of skbs transmitted
 * @offload_csum: Checksums are being offloaded
 * @rx_good: RX good packet count
 * @rx_bad: RX bad packet count
 * @payload: Payload used in tests
 */
struct efx_loopback_state {
	bool flush;
	int packet_count;
	struct sk_buff **skbs;
	bool offload_csum;
	atomic_t rx_good;
	atomic_t rx_bad;
	struct efx_loopback_payload payload;
};

/* How long to wait for all the packets to arrive (in ms) */
#define LOOPBACK_TIMEOUT_MS 1000
|
||||
|
||||
/**************************************************************************
|
||||
*
|
||||
* MII, NVRAM and register tests
|
||||
*
|
||||
**************************************************************************/
|
||||
|
||||
static int efx_test_phy_alive(struct efx_nic *efx, struct efx_self_tests *tests)
|
||||
{
|
||||
int rc = 0;
|
||||
|
||||
rc = efx_siena_mcdi_phy_test_alive(efx);
|
||||
tests->phy_alive = rc ? -1 : 1;
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
static int efx_test_nvram(struct efx_nic *efx, struct efx_self_tests *tests)
|
||||
{
|
||||
int rc = 0;
|
||||
|
||||
if (efx->type->test_nvram) {
|
||||
rc = efx->type->test_nvram(efx);
|
||||
if (rc == -EPERM)
|
||||
rc = 0;
|
||||
else
|
||||
tests->nvram = rc ? -1 : 1;
|
||||
}
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
/**************************************************************************
|
||||
*
|
||||
* Interrupt and event queue testing
|
||||
*
|
||||
**************************************************************************/
|
||||
|
||||
/* Test generation and receipt of interrupts.
 *
 * Fires a test interrupt and polls (with exponential back-off, up to
 * IRQ_TIMEOUT) for it to be observed on some CPU.  tests->interrupt is
 * set to 1 on success, 0 if the hardware cannot do direct interrupt
 * testing, and -1 (preset) on timeout.
 */
static int efx_test_interrupts(struct efx_nic *efx,
			       struct efx_self_tests *tests)
{
	unsigned long timeout, wait;
	int cpu;
	int rc;

	netif_dbg(efx, drv, efx->net_dev, "testing interrupts\n");
	tests->interrupt = -1;

	rc = efx_siena_irq_test_start(efx);
	if (rc == -ENOTSUPP) {
		netif_dbg(efx, drv, efx->net_dev,
			  "direct interrupt testing not supported\n");
		tests->interrupt = 0;
		return 0;
	}

	timeout = jiffies + IRQ_TIMEOUT;
	wait = 1;

	/* Wait for arrival of test interrupt. */
	netif_dbg(efx, drv, efx->net_dev, "waiting for test interrupt\n");
	do {
		schedule_timeout_uninterruptible(wait);
		cpu = efx_nic_irq_test_irq_cpu(efx);
		if (cpu >= 0)
			goto success;
		wait *= 2;	/* exponential back-off up to the timeout */
	} while (time_before(jiffies, timeout));

	netif_err(efx, drv, efx->net_dev, "timed out waiting for interrupt\n");
	return -ETIMEDOUT;

success:
	netif_dbg(efx, drv, efx->net_dev, "%s test interrupt seen on CPU%d\n",
		  INT_MODE(efx), cpu);
	tests->interrupt = 1;
	return 0;
}
|
||||
|
||||
/* Test generation and receipt of interrupting events.
 *
 * Starts an event test on every channel, then polls with exponential
 * back-off until each channel has shown both event DMA and an interrupt
 * (tracked per channel via the dma_pend/int_pend bitmasks), or until
 * IRQ_TIMEOUT expires.  A channel whose NAPI handler ran (read pointer
 * moved) implicitly proves both.  Per-channel verdicts are written to
 * tests->eventq_dma[] and tests->eventq_int[].
 */
static int efx_test_eventq_irq(struct efx_nic *efx,
			       struct efx_self_tests *tests)
{
	struct efx_channel *channel;
	unsigned int read_ptr[EFX_MAX_CHANNELS];
	unsigned long napi_ran = 0, dma_pend = 0, int_pend = 0;
	unsigned long timeout, wait;

	/* One bit per channel must fit in each unsigned long bitmask. */
	BUILD_BUG_ON(EFX_MAX_CHANNELS > BITS_PER_LONG);

	efx_for_each_channel(channel, efx) {
		read_ptr[channel->channel] = channel->eventq_read_ptr;
		set_bit(channel->channel, &dma_pend);
		set_bit(channel->channel, &int_pend);
		efx_siena_event_test_start(channel);
	}

	timeout = jiffies + IRQ_TIMEOUT;
	wait = 1;

	/* Wait for arrival of interrupts.  NAPI processing may or may
	 * not complete in time, but we can cope in any case.
	 */
	do {
		schedule_timeout_uninterruptible(wait);

		efx_for_each_channel(channel, efx) {
			efx_siena_stop_eventq(channel);
			if (channel->eventq_read_ptr !=
			    read_ptr[channel->channel]) {
				/* NAPI ran: both DMA and IRQ worked */
				set_bit(channel->channel, &napi_ran);
				clear_bit(channel->channel, &dma_pend);
				clear_bit(channel->channel, &int_pend);
			} else {
				if (efx_siena_event_present(channel))
					clear_bit(channel->channel, &dma_pend);
				if (efx_nic_event_test_irq_cpu(channel) >= 0)
					clear_bit(channel->channel, &int_pend);
			}
			efx_siena_start_eventq(channel);
		}

		wait *= 2;
	} while ((dma_pend || int_pend) && time_before(jiffies, timeout));

	efx_for_each_channel(channel, efx) {
		bool dma_seen = !test_bit(channel->channel, &dma_pend);
		bool int_seen = !test_bit(channel->channel, &int_pend);

		tests->eventq_dma[channel->channel] = dma_seen ? 1 : -1;
		tests->eventq_int[channel->channel] = int_seen ? 1 : -1;

		if (dma_seen && int_seen) {
			netif_dbg(efx, drv, efx->net_dev,
				  "channel %d event queue passed (with%s NAPI)\n",
				  channel->channel,
				  test_bit(channel->channel, &napi_ran) ?
				  "" : "out");
		} else {
			/* Report failure and whether either interrupt or DMA
			 * worked
			 */
			netif_err(efx, drv, efx->net_dev,
				  "channel %d timed out waiting for event queue\n",
				  channel->channel);
			if (int_seen)
				netif_err(efx, drv, efx->net_dev,
					  "channel %d saw interrupt "
					  "during event queue test\n",
					  channel->channel);
			if (dma_seen)
				netif_err(efx, drv, efx->net_dev,
					  "channel %d event was generated, but "
					  "failed to trigger an interrupt\n",
					  channel->channel);
		}
	}

	return (dma_pend || int_pend) ? -ETIMEDOUT : 0;
}
|
||||
|
||||
/* Run the PHY's extended self-tests via MCDI, under the MAC lock.
 * Results are written into tests->phy_ext.  -EPERM ("not permitted")
 * is converted to success without logging a verdict.
 */
static int efx_test_phy(struct efx_nic *efx, struct efx_self_tests *tests,
			unsigned flags)
{
	int rc;

	mutex_lock(&efx->mac_lock);
	rc = efx_siena_mcdi_phy_run_tests(efx, tests->phy_ext, flags);
	mutex_unlock(&efx->mac_lock);
	if (rc == -EPERM)
		rc = 0;
	else
		netif_info(efx, drv, efx->net_dev,
			   "%s phy selftest\n", rc ? "Failed" : "Passed");

	return rc;
}
|
||||
|
||||
/**************************************************************************
|
||||
*
|
||||
* Loopback testing
|
||||
* NB Only one loopback test can be executing concurrently.
|
||||
*
|
||||
**************************************************************************/
|
||||
|
||||
/* Loopback test RX callback
 * This is called for each received packet during loopback testing.
 *
 * Validates the received buffer against the expected payload and bumps
 * state->rx_good or state->rx_bad accordingly.  Note the received
 * buffer is written to: saddr (which the transmit side increments per
 * packet) and, when checksums are offloaded, ip.check are normalised to
 * the reference values before comparison.
 */
void efx_siena_loopback_rx_packet(struct efx_nic *efx,
				  const char *buf_ptr, int pkt_len)
{
	struct efx_loopback_state *state = efx->loopback_selftest;
	struct efx_loopback_payload *received;
	struct efx_loopback_payload *payload;

	BUG_ON(!buf_ptr);

	/* If we are just flushing, then drop the packet */
	if ((state == NULL) || state->flush)
		return;

	payload = &state->payload;

	received = (struct efx_loopback_payload *) buf_ptr;
	received->ip.saddr = payload->ip.saddr;
	if (state->offload_csum)
		received->ip.check = payload->ip.check;

	/* Check that header exists */
	if (pkt_len < sizeof(received->header)) {
		netif_err(efx, drv, efx->net_dev,
			  "saw runt RX packet (length %d) in %s loopback "
			  "test\n", pkt_len, LOOPBACK_MODE(efx));
		goto err;
	}

	/* Check that the ethernet header exists */
	if (memcmp(&received->header, &payload->header, ETH_HLEN) != 0) {
		netif_err(efx, drv, efx->net_dev,
			  "saw non-loopback RX packet in %s loopback test\n",
			  LOOPBACK_MODE(efx));
		goto err;
	}

	/* Check packet length */
	if (pkt_len != sizeof(*payload)) {
		netif_err(efx, drv, efx->net_dev,
			  "saw incorrect RX packet length %d (wanted %d) in "
			  "%s loopback test\n", pkt_len, (int)sizeof(*payload),
			  LOOPBACK_MODE(efx));
		goto err;
	}

	/* Check that IP header matches */
	if (memcmp(&received->ip, &payload->ip, sizeof(payload->ip)) != 0) {
		netif_err(efx, drv, efx->net_dev,
			  "saw corrupted IP header in %s loopback test\n",
			  LOOPBACK_MODE(efx));
		goto err;
	}

	/* Check that msg and padding matches */
	if (memcmp(&received->msg, &payload->msg, sizeof(received->msg)) != 0) {
		netif_err(efx, drv, efx->net_dev,
			  "saw corrupted RX packet in %s loopback test\n",
			  LOOPBACK_MODE(efx));
		goto err;
	}

	/* Check that iteration matches */
	if (received->iteration != payload->iteration) {
		netif_err(efx, drv, efx->net_dev,
			  "saw RX packet from iteration %d (wanted %d) in "
			  "%s loopback test\n", ntohs(received->iteration),
			  ntohs(payload->iteration), LOOPBACK_MODE(efx));
		goto err;
	}

	/* Increase correct RX count */
	netif_vdbg(efx, drv, efx->net_dev,
		   "got loopback RX in %s loopback test\n", LOOPBACK_MODE(efx));

	atomic_inc(&state->rx_good);
	return;

err:
#ifdef DEBUG
	/* Dump the first bad packet of the run for diagnosis. */
	if (atomic_read(&state->rx_bad) == 0) {
		netif_err(efx, drv, efx->net_dev, "received packet:\n");
		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
			       buf_ptr, pkt_len, 0);
		netif_err(efx, drv, efx->net_dev, "expected packet:\n");
		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 0x10, 1,
			       &state->payload, sizeof(state->payload), 0);
	}
#endif
	atomic_inc(&state->rx_bad);
}
|
||||
|
||||
/* Initialise an efx_siena_selftest_state for a new iteration.
 *
 * Builds the reference Ethernet/IP/UDP loopback payload, increments the
 * iteration number (preserved across calls so each burst is
 * distinguishable) and resets the RX good/bad counters.
 */
static void efx_iterate_state(struct efx_nic *efx)
{
	struct efx_loopback_state *state = efx->loopback_selftest;
	struct net_device *net_dev = efx->net_dev;
	struct efx_loopback_payload *payload = &state->payload;

	/* Initialise the layerII header */
	ether_addr_copy((u8 *)&payload->header.h_dest, net_dev->dev_addr);
	ether_addr_copy((u8 *)&payload->header.h_source, payload_source);
	payload->header.h_proto = htons(ETH_P_IP);

	/* saddr set later and used as incrementing count */
	payload->ip.daddr = htonl(INADDR_LOOPBACK);
	payload->ip.ihl = 5;
	/* deliberately bogus checksum unless offload fixes it up */
	payload->ip.check = (__force __sum16) htons(0xdead);
	payload->ip.tot_len = htons(sizeof(*payload) - sizeof(struct ethhdr));
	payload->ip.version = IPVERSION;
	payload->ip.protocol = IPPROTO_UDP;

	/* Initialise udp header */
	payload->udp.source = 0;
	payload->udp.len = htons(sizeof(*payload) - sizeof(struct ethhdr) -
				 sizeof(struct iphdr));
	payload->udp.check = 0;	/* checksum ignored */

	/* Fill out payload */
	payload->iteration = htons(ntohs(payload->iteration) + 1);
	memcpy(&payload->msg, payload_msg, sizeof(payload_msg));

	/* Fill out remaining state members */
	atomic_set(&state->rx_good, 0);
	atomic_set(&state->rx_bad, 0);
	smp_wmb();
}
|
||||
|
||||
/* Transmit state->packet_count copies of the loopback payload on
 * @tx_queue.  Each skb keeps an extra reference (skb_get) so that
 * efx_end_loopback() can detect TX completion by checking whether the
 * skb is still shared.  Returns 0 on success, -ENOMEM on allocation
 * failure, or -EPIPE if the queue refused a packet; on error, already
 * queued skbs are left for the caller / queue flush to clean up.
 */
static int efx_begin_loopback(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	struct efx_loopback_state *state = efx->loopback_selftest;
	struct efx_loopback_payload *payload;
	struct sk_buff *skb;
	int i;
	netdev_tx_t rc;

	/* Transmit N copies of buffer */
	for (i = 0; i < state->packet_count; i++) {
		/* Allocate an skb, holding an extra reference for
		 * transmit completion counting */
		skb = alloc_skb(sizeof(state->payload), GFP_KERNEL);
		if (!skb)
			return -ENOMEM;
		state->skbs[i] = skb;
		skb_get(skb);

		/* Copy the payload in, incrementing the source address to
		 * exercise the rss vectors */
		payload = skb_put(skb, sizeof(state->payload));
		memcpy(payload, &state->payload, sizeof(state->payload));
		payload->ip.saddr = htonl(INADDR_LOOPBACK | (i << 2));

		/* Ensure everything we've written is visible to the
		 * interrupt handler. */
		smp_wmb();

		netif_tx_lock_bh(efx->net_dev);
		rc = efx_enqueue_skb(tx_queue, skb);
		netif_tx_unlock_bh(efx->net_dev);

		if (rc != NETDEV_TX_OK) {
			netif_err(efx, drv, efx->net_dev,
				  "TX queue %d could not transmit packet %d of "
				  "%d in %s loopback test\n", tx_queue->label,
				  i + 1, state->packet_count,
				  LOOPBACK_MODE(efx));

			/* Defer cleaning up the other skbs for the caller */
			kfree_skb(skb);
			return -EPIPE;
		}
	}

	return 0;
}
|
||||
|
||||
static int efx_poll_loopback(struct efx_nic *efx)
|
||||
{
|
||||
struct efx_loopback_state *state = efx->loopback_selftest;
|
||||
|
||||
return atomic_read(&state->rx_good) == state->packet_count;
|
||||
}
|
||||
|
||||
/* Wind up one loopback burst: count TX completions (an skb that is no
 * longer shared has been completed, since efx_begin_loopback() held an
 * extra reference), drop our references, and accumulate TX/RX counters
 * into @lb_tests.  Returns 0 on success or -ETIMEDOUT if TX completions
 * or received packets fell short of the expected count.
 */
static int efx_end_loopback(struct efx_tx_queue *tx_queue,
			    struct efx_loopback_self_tests *lb_tests)
{
	struct efx_nic *efx = tx_queue->efx;
	struct efx_loopback_state *state = efx->loopback_selftest;
	struct sk_buff *skb;
	int tx_done = 0, rx_good, rx_bad;
	int i, rc = 0;

	netif_tx_lock_bh(efx->net_dev);

	/* Count the number of tx completions, and decrement the refcnt. Any
	 * skbs not already completed will be free'd when the queue is flushed */
	for (i = 0; i < state->packet_count; i++) {
		skb = state->skbs[i];
		if (skb && !skb_shared(skb))
			++tx_done;
		dev_kfree_skb(skb);
	}

	netif_tx_unlock_bh(efx->net_dev);

	/* Check TX completion and received packet counts */
	rx_good = atomic_read(&state->rx_good);
	rx_bad = atomic_read(&state->rx_bad);
	if (tx_done != state->packet_count) {
		/* Don't free the skbs; they will be picked up on TX
		 * overflow or channel teardown.
		 */
		netif_err(efx, drv, efx->net_dev,
			  "TX queue %d saw only %d out of an expected %d "
			  "TX completion events in %s loopback test\n",
			  tx_queue->label, tx_done, state->packet_count,
			  LOOPBACK_MODE(efx));
		rc = -ETIMEDOUT;
		/* Allow to fall through so we see the RX errors as well */
	}

	/* We may always be up to a flush away from our desired packet total */
	if (rx_good != state->packet_count) {
		netif_dbg(efx, drv, efx->net_dev,
			  "TX queue %d saw only %d out of an expected %d "
			  "received packets in %s loopback test\n",
			  tx_queue->label, rx_good, state->packet_count,
			  LOOPBACK_MODE(efx));
		rc = -ETIMEDOUT;
		/* Fall through */
	}

	/* Update loopback test structure */
	lb_tests->tx_sent[tx_queue->label] += state->packet_count;
	lb_tests->tx_done[tx_queue->label] += tx_done;
	lb_tests->rx_good += rx_good;
	lb_tests->rx_bad += rx_bad;

	return rc;
}
|
||||
|
||||
static int
|
||||
efx_test_loopback(struct efx_tx_queue *tx_queue,
|
||||
struct efx_loopback_self_tests *lb_tests)
|
||||
{
|
||||
struct efx_nic *efx = tx_queue->efx;
|
||||
struct efx_loopback_state *state = efx->loopback_selftest;
|
||||
int i, begin_rc, end_rc;
|
||||
|
||||
for (i = 0; i < 3; i++) {
|
||||
/* Determine how many packets to send */
|
||||
state->packet_count = efx->txq_entries / 3;
|
||||
state->packet_count = min(1 << (i << 2), state->packet_count);
|
||||
state->skbs = kcalloc(state->packet_count,
|
||||
sizeof(state->skbs[0]), GFP_KERNEL);
|
||||
if (!state->skbs)
|
||||
return -ENOMEM;
|
||||
state->flush = false;
|
||||
|
||||
netif_dbg(efx, drv, efx->net_dev,
|
||||
"TX queue %d (hw %d) testing %s loopback with %d packets\n",
|
||||
tx_queue->label, tx_queue->queue, LOOPBACK_MODE(efx),
|
||||
state->packet_count);
|
||||
|
||||
efx_iterate_state(efx);
|
||||
begin_rc = efx_begin_loopback(tx_queue);
|
||||
|
||||
/* This will normally complete very quickly, but be
|
||||
* prepared to wait much longer. */
|
||||
msleep(1);
|
||||
if (!efx_poll_loopback(efx)) {
|
||||
msleep(LOOPBACK_TIMEOUT_MS);
|
||||
efx_poll_loopback(efx);
|
||||
}
|
||||
|
||||
end_rc = efx_end_loopback(tx_queue, lb_tests);
|
||||
kfree(state->skbs);
|
||||
|
||||
if (begin_rc || end_rc) {
|
||||
/* Wait a while to ensure there are no packets
|
||||
* floating around after a failure. */
|
||||
schedule_timeout_uninterruptible(HZ / 10);
|
||||
return begin_rc ? begin_rc : end_rc;
|
||||
}
|
||||
}
|
||||
|
||||
netif_dbg(efx, drv, efx->net_dev,
|
||||
"TX queue %d passed %s loopback test with a burst length "
|
||||
"of %d packets\n", tx_queue->label, LOOPBACK_MODE(efx),
|
||||
state->packet_count);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Wait for link up. On Falcon, we would prefer to rely on efx_monitor, but
|
||||
* any contention on the mac lock (via e.g. efx_mac_mcast_work) causes it
|
||||
* to delay and retry. Therefore, it's safer to just poll directly. Wait
|
||||
* for link up and any faults to dissipate. */
|
||||
static int efx_wait_for_link(struct efx_nic *efx)
|
||||
{
|
||||
struct efx_link_state *link_state = &efx->link_state;
|
||||
int count, link_up_count = 0;
|
||||
bool link_up;
|
||||
|
||||
for (count = 0; count < 40; count++) {
|
||||
schedule_timeout_uninterruptible(HZ / 10);
|
||||
|
||||
if (efx->type->monitor != NULL) {
|
||||
mutex_lock(&efx->mac_lock);
|
||||
efx->type->monitor(efx);
|
||||
mutex_unlock(&efx->mac_lock);
|
||||
}
|
||||
|
||||
mutex_lock(&efx->mac_lock);
|
||||
link_up = link_state->up;
|
||||
if (link_up)
|
||||
link_up = !efx->type->check_mac_fault(efx);
|
||||
mutex_unlock(&efx->mac_lock);
|
||||
|
||||
if (link_up) {
|
||||
if (++link_up_count == 2)
|
||||
return 0;
|
||||
} else {
|
||||
link_up_count = 0;
|
||||
}
|
||||
}
|
||||
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
|
||||
static int efx_test_loopbacks(struct efx_nic *efx, struct efx_self_tests *tests,
|
||||
unsigned int loopback_modes)
|
||||
{
|
||||
enum efx_loopback_mode mode;
|
||||
struct efx_loopback_state *state;
|
||||
struct efx_channel *channel =
|
||||
efx_get_channel(efx, efx->tx_channel_offset);
|
||||
struct efx_tx_queue *tx_queue;
|
||||
int rc = 0;
|
||||
|
||||
/* Set the port loopback_selftest member. From this point on
|
||||
* all received packets will be dropped. Mark the state as
|
||||
* "flushing" so all inflight packets are dropped */
|
||||
state = kzalloc(sizeof(*state), GFP_KERNEL);
|
||||
if (state == NULL)
|
||||
return -ENOMEM;
|
||||
BUG_ON(efx->loopback_selftest);
|
||||
state->flush = true;
|
||||
efx->loopback_selftest = state;
|
||||
|
||||
/* Test all supported loopback modes */
|
||||
for (mode = LOOPBACK_NONE; mode <= LOOPBACK_TEST_MAX; mode++) {
|
||||
if (!(loopback_modes & (1 << mode)))
|
||||
continue;
|
||||
|
||||
/* Move the port into the specified loopback mode. */
|
||||
state->flush = true;
|
||||
mutex_lock(&efx->mac_lock);
|
||||
efx->loopback_mode = mode;
|
||||
rc = __efx_siena_reconfigure_port(efx);
|
||||
mutex_unlock(&efx->mac_lock);
|
||||
if (rc) {
|
||||
netif_err(efx, drv, efx->net_dev,
|
||||
"unable to move into %s loopback\n",
|
||||
LOOPBACK_MODE(efx));
|
||||
goto out;
|
||||
}
|
||||
|
||||
rc = efx_wait_for_link(efx);
|
||||
if (rc) {
|
||||
netif_err(efx, drv, efx->net_dev,
|
||||
"loopback %s never came up\n",
|
||||
LOOPBACK_MODE(efx));
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* Test all enabled types of TX queue */
|
||||
efx_for_each_channel_tx_queue(tx_queue, channel) {
|
||||
state->offload_csum = (tx_queue->type &
|
||||
EFX_TXQ_TYPE_OUTER_CSUM);
|
||||
rc = efx_test_loopback(tx_queue,
|
||||
&tests->loopback[mode]);
|
||||
if (rc)
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
|
||||
out:
|
||||
/* Remove the flush. The caller will remove the loopback setting */
|
||||
state->flush = true;
|
||||
efx->loopback_selftest = NULL;
|
||||
wmb();
|
||||
kfree(state);
|
||||
|
||||
if (rc == -EPERM)
|
||||
rc = 0;
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
/**************************************************************************
|
||||
*
|
||||
* Entry point
|
||||
*
|
||||
*************************************************************************/
|
||||
|
||||
int efx_siena_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
|
||||
unsigned int flags)
|
||||
{
|
||||
enum efx_loopback_mode loopback_mode = efx->loopback_mode;
|
||||
int phy_mode = efx->phy_mode;
|
||||
int rc_test = 0, rc_reset, rc;
|
||||
|
||||
efx_siena_selftest_async_cancel(efx);
|
||||
|
||||
/* Online (i.e. non-disruptive) testing
|
||||
* This checks interrupt generation, event delivery and PHY presence. */
|
||||
|
||||
rc = efx_test_phy_alive(efx, tests);
|
||||
if (rc && !rc_test)
|
||||
rc_test = rc;
|
||||
|
||||
rc = efx_test_nvram(efx, tests);
|
||||
if (rc && !rc_test)
|
||||
rc_test = rc;
|
||||
|
||||
rc = efx_test_interrupts(efx, tests);
|
||||
if (rc && !rc_test)
|
||||
rc_test = rc;
|
||||
|
||||
rc = efx_test_eventq_irq(efx, tests);
|
||||
if (rc && !rc_test)
|
||||
rc_test = rc;
|
||||
|
||||
if (rc_test)
|
||||
return rc_test;
|
||||
|
||||
if (!(flags & ETH_TEST_FL_OFFLINE))
|
||||
return efx_test_phy(efx, tests, flags);
|
||||
|
||||
/* Offline (i.e. disruptive) testing
|
||||
* This checks MAC and PHY loopback on the specified port. */
|
||||
|
||||
/* Detach the device so the kernel doesn't transmit during the
|
||||
* loopback test and the watchdog timeout doesn't fire.
|
||||
*/
|
||||
efx_device_detach_sync(efx);
|
||||
|
||||
if (efx->type->test_chip) {
|
||||
rc_reset = efx->type->test_chip(efx, tests);
|
||||
if (rc_reset) {
|
||||
netif_err(efx, hw, efx->net_dev,
|
||||
"Unable to recover from chip test\n");
|
||||
efx_siena_schedule_reset(efx, RESET_TYPE_DISABLE);
|
||||
return rc_reset;
|
||||
}
|
||||
|
||||
if ((tests->memory < 0 || tests->registers < 0) && !rc_test)
|
||||
rc_test = -EIO;
|
||||
}
|
||||
|
||||
/* Ensure that the phy is powered and out of loopback
|
||||
* for the bist and loopback tests */
|
||||
mutex_lock(&efx->mac_lock);
|
||||
efx->phy_mode &= ~PHY_MODE_LOW_POWER;
|
||||
efx->loopback_mode = LOOPBACK_NONE;
|
||||
__efx_siena_reconfigure_port(efx);
|
||||
mutex_unlock(&efx->mac_lock);
|
||||
|
||||
rc = efx_test_phy(efx, tests, flags);
|
||||
if (rc && !rc_test)
|
||||
rc_test = rc;
|
||||
|
||||
rc = efx_test_loopbacks(efx, tests, efx->loopback_modes);
|
||||
if (rc && !rc_test)
|
||||
rc_test = rc;
|
||||
|
||||
/* restore the PHY to the previous state */
|
||||
mutex_lock(&efx->mac_lock);
|
||||
efx->phy_mode = phy_mode;
|
||||
efx->loopback_mode = loopback_mode;
|
||||
__efx_siena_reconfigure_port(efx);
|
||||
mutex_unlock(&efx->mac_lock);
|
||||
|
||||
efx_device_attach_if_not_resetting(efx);
|
||||
|
||||
return rc_test;
|
||||
}
|
||||
|
||||
void efx_siena_selftest_async_start(struct efx_nic *efx)
|
||||
{
|
||||
struct efx_channel *channel;
|
||||
|
||||
efx_for_each_channel(channel, efx)
|
||||
efx_siena_event_test_start(channel);
|
||||
schedule_delayed_work(&efx->selftest_work, IRQ_TIMEOUT);
|
||||
}
|
||||
|
||||
void efx_siena_selftest_async_cancel(struct efx_nic *efx)
|
||||
{
|
||||
cancel_delayed_work_sync(&efx->selftest_work);
|
||||
}
|
||||
|
||||
static void efx_siena_selftest_async_work(struct work_struct *data)
|
||||
{
|
||||
struct efx_nic *efx = container_of(data, struct efx_nic,
|
||||
selftest_work.work);
|
||||
struct efx_channel *channel;
|
||||
int cpu;
|
||||
|
||||
efx_for_each_channel(channel, efx) {
|
||||
cpu = efx_nic_event_test_irq_cpu(channel);
|
||||
if (cpu < 0)
|
||||
netif_err(efx, ifup, efx->net_dev,
|
||||
"channel %d failed to trigger an interrupt\n",
|
||||
channel->channel);
|
||||
else
|
||||
netif_dbg(efx, ifup, efx->net_dev,
|
||||
"channel %d triggered interrupt on CPU %d\n",
|
||||
channel->channel, cpu);
|
||||
}
|
||||
}
|
||||
|
||||
void efx_siena_selftest_async_init(struct efx_nic *efx)
|
||||
{
|
||||
INIT_DELAYED_WORK(&efx->selftest_work, efx_siena_selftest_async_work);
|
||||
}
|
52
drivers/net/ethernet/sfc/siena/selftest.h
Normal file
52
drivers/net/ethernet/sfc/siena/selftest.h
Normal file
@ -0,0 +1,52 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/****************************************************************************
|
||||
* Driver for Solarflare network controllers and boards
|
||||
* Copyright 2005-2006 Fen Systems Ltd.
|
||||
* Copyright 2006-2012 Solarflare Communications Inc.
|
||||
*/
|
||||
|
||||
#ifndef EFX_SELFTEST_H
|
||||
#define EFX_SELFTEST_H
|
||||
|
||||
#include "net_driver.h"
|
||||
|
||||
/*
|
||||
* Self tests
|
||||
*/
|
||||
|
||||
struct efx_loopback_self_tests {
|
||||
int tx_sent[EFX_MAX_TXQ_PER_CHANNEL];
|
||||
int tx_done[EFX_MAX_TXQ_PER_CHANNEL];
|
||||
int rx_good;
|
||||
int rx_bad;
|
||||
};
|
||||
|
||||
#define EFX_MAX_PHY_TESTS 20
|
||||
|
||||
/* Efx self test results
|
||||
* For fields which are not counters, 1 indicates success and -1
|
||||
* indicates failure; 0 indicates test could not be run.
|
||||
*/
|
||||
struct efx_self_tests {
|
||||
/* online tests */
|
||||
int phy_alive;
|
||||
int nvram;
|
||||
int interrupt;
|
||||
int eventq_dma[EFX_MAX_CHANNELS];
|
||||
int eventq_int[EFX_MAX_CHANNELS];
|
||||
/* offline tests */
|
||||
int memory;
|
||||
int registers;
|
||||
int phy_ext[EFX_MAX_PHY_TESTS];
|
||||
struct efx_loopback_self_tests loopback[LOOPBACK_TEST_MAX + 1];
|
||||
};
|
||||
|
||||
void efx_siena_loopback_rx_packet(struct efx_nic *efx, const char *buf_ptr,
|
||||
int pkt_len);
|
||||
int efx_siena_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
|
||||
unsigned int flags);
|
||||
void efx_siena_selftest_async_init(struct efx_nic *efx);
|
||||
void efx_siena_selftest_async_start(struct efx_nic *efx);
|
||||
void efx_siena_selftest_async_cancel(struct efx_nic *efx);
|
||||
|
||||
#endif /* EFX_SELFTEST_H */
|
@ -40,7 +40,7 @@ static void siena_push_irq_moderation(struct efx_channel *channel)
|
||||
if (channel->irq_moderation_us) {
|
||||
unsigned int ticks;
|
||||
|
||||
ticks = efx_usecs_to_ticks(efx, channel->irq_moderation_us);
|
||||
ticks = efx_siena_usecs_to_ticks(efx, channel->irq_moderation_us);
|
||||
EFX_POPULATE_DWORD_2(timer_cmd,
|
||||
FRF_CZ_TC_TIMER_MODE,
|
||||
FFE_CZ_TIMER_MODE_INT_HLDOFF,
|
||||
@ -56,16 +56,16 @@ static void siena_push_irq_moderation(struct efx_channel *channel)
|
||||
channel->channel);
|
||||
}
|
||||
|
||||
void siena_prepare_flush(struct efx_nic *efx)
|
||||
void efx_siena_prepare_flush(struct efx_nic *efx)
|
||||
{
|
||||
if (efx->fc_disable++ == 0)
|
||||
efx_mcdi_set_mac(efx);
|
||||
efx_siena_mcdi_set_mac(efx);
|
||||
}
|
||||
|
||||
void siena_finish_flush(struct efx_nic *efx)
|
||||
{
|
||||
if (--efx->fc_disable == 0)
|
||||
efx_mcdi_set_mac(efx);
|
||||
efx_siena_mcdi_set_mac(efx);
|
||||
}
|
||||
|
||||
static const struct efx_farch_register_test siena_register_tests[] = {
|
||||
@ -102,12 +102,12 @@ static int siena_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
|
||||
enum reset_type reset_method = RESET_TYPE_ALL;
|
||||
int rc, rc2;
|
||||
|
||||
efx_reset_down(efx, reset_method);
|
||||
efx_siena_reset_down(efx, reset_method);
|
||||
|
||||
/* Reset the chip immediately so that it is completely
|
||||
* quiescent regardless of what any VF driver does.
|
||||
*/
|
||||
rc = efx_mcdi_reset(efx, reset_method);
|
||||
rc = efx_siena_mcdi_reset(efx, reset_method);
|
||||
if (rc)
|
||||
goto out;
|
||||
|
||||
@ -116,9 +116,9 @@ static int siena_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
|
||||
ARRAY_SIZE(siena_register_tests))
|
||||
? -1 : 1;
|
||||
|
||||
rc = efx_mcdi_reset(efx, reset_method);
|
||||
rc = efx_siena_mcdi_reset(efx, reset_method);
|
||||
out:
|
||||
rc2 = efx_reset_up(efx, reset_method, rc == 0);
|
||||
rc2 = efx_siena_reset_up(efx, reset_method, rc == 0);
|
||||
return rc ? rc : rc2;
|
||||
}
|
||||
|
||||
@ -143,27 +143,28 @@ static int siena_ptp_set_ts_config(struct efx_nic *efx,
|
||||
switch (init->rx_filter) {
|
||||
case HWTSTAMP_FILTER_NONE:
|
||||
/* if TX timestamping is still requested then leave PTP on */
|
||||
return efx_ptp_change_mode(efx,
|
||||
init->tx_type != HWTSTAMP_TX_OFF,
|
||||
efx_ptp_get_mode(efx));
|
||||
return efx_siena_ptp_change_mode(efx,
|
||||
init->tx_type != HWTSTAMP_TX_OFF,
|
||||
efx_siena_ptp_get_mode(efx));
|
||||
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
|
||||
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
|
||||
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
|
||||
init->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
|
||||
return efx_ptp_change_mode(efx, true, MC_CMD_PTP_MODE_V1);
|
||||
return efx_siena_ptp_change_mode(efx, true, MC_CMD_PTP_MODE_V1);
|
||||
case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
|
||||
case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
|
||||
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
|
||||
init->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
|
||||
rc = efx_ptp_change_mode(efx, true,
|
||||
MC_CMD_PTP_MODE_V2_ENHANCED);
|
||||
rc = efx_siena_ptp_change_mode(efx, true,
|
||||
MC_CMD_PTP_MODE_V2_ENHANCED);
|
||||
/* bug 33070 - old versions of the firmware do not support the
|
||||
* improved UUID filtering option. Similarly old versions of the
|
||||
* application do not expect it to be enabled. If the firmware
|
||||
* does not accept the enhanced mode, fall back to the standard
|
||||
* PTP v2 UUID filtering. */
|
||||
if (rc != 0)
|
||||
rc = efx_ptp_change_mode(efx, true, MC_CMD_PTP_MODE_V2);
|
||||
rc = efx_siena_ptp_change_mode(efx, true,
|
||||
MC_CMD_PTP_MODE_V2);
|
||||
return rc;
|
||||
default:
|
||||
return -ERANGE;
|
||||
@ -222,7 +223,8 @@ static int siena_probe_nvconfig(struct efx_nic *efx)
|
||||
u32 caps = 0;
|
||||
int rc;
|
||||
|
||||
rc = efx_mcdi_get_board_cfg(efx, efx->net_dev->perm_addr, NULL, &caps);
|
||||
rc = efx_siena_mcdi_get_board_cfg(efx, efx->net_dev->perm_addr, NULL,
|
||||
&caps);
|
||||
|
||||
efx->timer_quantum_ns =
|
||||
(caps & (1 << MC_CMD_CAPABILITIES_TURBO_ACTIVE_LBN)) ?
|
||||
@ -285,12 +287,12 @@ static int siena_probe_nic(struct efx_nic *efx)
|
||||
efx_reado(efx, ®, FR_AZ_CS_DEBUG);
|
||||
efx->port_num = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1;
|
||||
|
||||
rc = efx_mcdi_init(efx);
|
||||
rc = efx_siena_mcdi_init(efx);
|
||||
if (rc)
|
||||
goto fail1;
|
||||
|
||||
/* Now we can reset the NIC */
|
||||
rc = efx_mcdi_reset(efx, RESET_TYPE_ALL);
|
||||
rc = efx_siena_mcdi_reset(efx, RESET_TYPE_ALL);
|
||||
if (rc) {
|
||||
netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n");
|
||||
goto fail3;
|
||||
@ -299,8 +301,8 @@ static int siena_probe_nic(struct efx_nic *efx)
|
||||
siena_init_wol(efx);
|
||||
|
||||
/* Allocate memory for INT_KER */
|
||||
rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t),
|
||||
GFP_KERNEL);
|
||||
rc = efx_siena_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t),
|
||||
GFP_KERNEL);
|
||||
if (rc)
|
||||
goto fail4;
|
||||
BUG_ON(efx->irq_status.dma_addr & 0x0f);
|
||||
@ -322,23 +324,23 @@ static int siena_probe_nic(struct efx_nic *efx)
|
||||
goto fail5;
|
||||
}
|
||||
|
||||
rc = efx_mcdi_mon_probe(efx);
|
||||
rc = efx_siena_mcdi_mon_probe(efx);
|
||||
if (rc)
|
||||
goto fail5;
|
||||
|
||||
#ifdef CONFIG_SFC_SRIOV
|
||||
efx_siena_sriov_probe(efx);
|
||||
#endif
|
||||
efx_ptp_defer_probe_with_channel(efx);
|
||||
efx_siena_ptp_defer_probe_with_channel(efx);
|
||||
|
||||
return 0;
|
||||
|
||||
fail5:
|
||||
efx_nic_free_buffer(efx, &efx->irq_status);
|
||||
efx_siena_free_buffer(efx, &efx->irq_status);
|
||||
fail4:
|
||||
fail3:
|
||||
efx_mcdi_detach(efx);
|
||||
efx_mcdi_fini(efx);
|
||||
efx_siena_mcdi_detach(efx);
|
||||
efx_siena_mcdi_fini(efx);
|
||||
fail1:
|
||||
kfree(efx->nic_data);
|
||||
return rc;
|
||||
@ -405,7 +407,7 @@ static int siena_init_nic(struct efx_nic *efx)
|
||||
int rc;
|
||||
|
||||
/* Recover from a failed assertion post-reset */
|
||||
rc = efx_mcdi_handle_assertion(efx);
|
||||
rc = efx_siena_mcdi_handle_assertion(efx);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
@ -439,7 +441,7 @@ static int siena_init_nic(struct efx_nic *efx)
|
||||
efx->rss_context.context_id = 0; /* indicates RSS is active */
|
||||
|
||||
/* Enable event logging */
|
||||
rc = efx_mcdi_log_ctrl(efx, true, false, 0);
|
||||
rc = efx_siena_mcdi_log_ctrl(efx, true, false, 0);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
@ -456,14 +458,14 @@ static int siena_init_nic(struct efx_nic *efx)
|
||||
|
||||
static void siena_remove_nic(struct efx_nic *efx)
|
||||
{
|
||||
efx_mcdi_mon_remove(efx);
|
||||
efx_siena_mcdi_mon_remove(efx);
|
||||
|
||||
efx_nic_free_buffer(efx, &efx->irq_status);
|
||||
efx_siena_free_buffer(efx, &efx->irq_status);
|
||||
|
||||
efx_mcdi_reset(efx, RESET_TYPE_ALL);
|
||||
efx_siena_mcdi_reset(efx, RESET_TYPE_ALL);
|
||||
|
||||
efx_mcdi_detach(efx);
|
||||
efx_mcdi_fini(efx);
|
||||
efx_siena_mcdi_detach(efx);
|
||||
efx_siena_mcdi_fini(efx);
|
||||
|
||||
/* Tear down the private nic state */
|
||||
kfree(efx->nic_data);
|
||||
@ -545,8 +547,8 @@ static const unsigned long siena_stat_mask[] = {
|
||||
|
||||
static size_t siena_describe_nic_stats(struct efx_nic *efx, u8 *names)
|
||||
{
|
||||
return efx_nic_describe_stats(siena_stat_desc, SIENA_STAT_COUNT,
|
||||
siena_stat_mask, names);
|
||||
return efx_siena_describe_stats(siena_stat_desc, SIENA_STAT_COUNT,
|
||||
siena_stat_mask, names);
|
||||
}
|
||||
|
||||
static int siena_try_update_nic_stats(struct efx_nic *efx)
|
||||
@ -562,16 +564,16 @@ static int siena_try_update_nic_stats(struct efx_nic *efx)
|
||||
if (generation_end == EFX_MC_STATS_GENERATION_INVALID)
|
||||
return 0;
|
||||
rmb();
|
||||
efx_nic_update_stats(siena_stat_desc, SIENA_STAT_COUNT, siena_stat_mask,
|
||||
stats, efx->stats_buffer.addr, false);
|
||||
efx_siena_update_stats(siena_stat_desc, SIENA_STAT_COUNT, siena_stat_mask,
|
||||
stats, efx->stats_buffer.addr, false);
|
||||
rmb();
|
||||
generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
|
||||
if (generation_end != generation_start)
|
||||
return -EAGAIN;
|
||||
|
||||
/* Update derived statistics */
|
||||
efx_nic_fix_nodesc_drop_stat(efx,
|
||||
&stats[SIENA_STAT_rx_nodesc_drop_cnt]);
|
||||
efx_siena_fix_nodesc_drop_stat(efx,
|
||||
&stats[SIENA_STAT_rx_nodesc_drop_cnt]);
|
||||
efx_update_diff_stat(&stats[SIENA_STAT_tx_good_bytes],
|
||||
stats[SIENA_STAT_tx_bytes] -
|
||||
stats[SIENA_STAT_tx_bad_bytes]);
|
||||
@ -583,7 +585,7 @@ static int siena_try_update_nic_stats(struct efx_nic *efx)
|
||||
efx_update_diff_stat(&stats[SIENA_STAT_rx_good_bytes],
|
||||
stats[SIENA_STAT_rx_bytes] -
|
||||
stats[SIENA_STAT_rx_bad_bytes]);
|
||||
efx_update_sw_stats(efx, stats);
|
||||
efx_siena_update_sw_stats(efx, stats);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -648,14 +650,14 @@ static int siena_mac_reconfigure(struct efx_nic *efx, bool mtu_only __always_unu
|
||||
|
||||
WARN_ON(!mutex_is_locked(&efx->mac_lock));
|
||||
|
||||
rc = efx_mcdi_set_mac(efx);
|
||||
rc = efx_siena_mcdi_set_mac(efx);
|
||||
if (rc != 0)
|
||||
return rc;
|
||||
|
||||
memcpy(MCDI_PTR(inbuf, SET_MCAST_HASH_IN_HASH0),
|
||||
efx->multicast_hash.byte, sizeof(efx->multicast_hash));
|
||||
return efx_mcdi_rpc(efx, MC_CMD_SET_MCAST_HASH,
|
||||
inbuf, sizeof(inbuf), NULL, 0, NULL);
|
||||
return efx_siena_mcdi_rpc(efx, MC_CMD_SET_MCAST_HASH,
|
||||
inbuf, sizeof(inbuf), NULL, 0, NULL);
|
||||
}
|
||||
|
||||
/**************************************************************************
|
||||
@ -688,16 +690,17 @@ static int siena_set_wol(struct efx_nic *efx, u32 type)
|
||||
|
||||
if (type & WAKE_MAGIC) {
|
||||
if (nic_data->wol_filter_id != -1)
|
||||
efx_mcdi_wol_filter_remove(efx,
|
||||
nic_data->wol_filter_id);
|
||||
rc = efx_mcdi_wol_filter_set_magic(efx, efx->net_dev->dev_addr,
|
||||
&nic_data->wol_filter_id);
|
||||
efx_siena_mcdi_wol_filter_remove(efx,
|
||||
nic_data->wol_filter_id);
|
||||
rc = efx_siena_mcdi_wol_filter_set_magic(efx,
|
||||
efx->net_dev->dev_addr,
|
||||
&nic_data->wol_filter_id);
|
||||
if (rc)
|
||||
goto fail;
|
||||
|
||||
pci_wake_from_d3(efx->pci_dev, true);
|
||||
} else {
|
||||
rc = efx_mcdi_wol_filter_reset(efx);
|
||||
rc = efx_siena_mcdi_wol_filter_reset(efx);
|
||||
nic_data->wol_filter_id = -1;
|
||||
pci_wake_from_d3(efx->pci_dev, false);
|
||||
if (rc)
|
||||
@ -717,12 +720,12 @@ static void siena_init_wol(struct efx_nic *efx)
|
||||
struct siena_nic_data *nic_data = efx->nic_data;
|
||||
int rc;
|
||||
|
||||
rc = efx_mcdi_wol_filter_get_magic(efx, &nic_data->wol_filter_id);
|
||||
rc = efx_siena_mcdi_wol_filter_get_magic(efx, &nic_data->wol_filter_id);
|
||||
|
||||
if (rc != 0) {
|
||||
/* If it failed, attempt to get into a synchronised
|
||||
* state with MC by resetting any set WoL filters */
|
||||
efx_mcdi_wol_filter_reset(efx);
|
||||
efx_siena_mcdi_wol_filter_reset(efx);
|
||||
nic_data->wol_filter_id = -1;
|
||||
} else if (nic_data->wol_filter_id != -1) {
|
||||
pci_wake_from_d3(efx->pci_dev, true);
|
||||
@ -868,7 +871,8 @@ static int siena_mtd_probe_partition(struct efx_nic *efx,
|
||||
if (info->port != efx_port_num(efx))
|
||||
return -ENODEV;
|
||||
|
||||
rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected);
|
||||
rc = efx_siena_mcdi_nvram_info(efx, type, &size, &erase_size,
|
||||
&protected);
|
||||
if (rc)
|
||||
return rc;
|
||||
if (protected)
|
||||
@ -895,7 +899,7 @@ static int siena_mtd_get_fw_subtypes(struct efx_nic *efx,
|
||||
size_t i;
|
||||
int rc;
|
||||
|
||||
rc = efx_mcdi_get_board_cfg(efx, NULL, fw_subtype_list, NULL);
|
||||
rc = efx_siena_mcdi_get_board_cfg(efx, NULL, fw_subtype_list, NULL);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
@ -915,7 +919,7 @@ static int siena_mtd_probe(struct efx_nic *efx)
|
||||
|
||||
ASSERT_RTNL();
|
||||
|
||||
rc = efx_mcdi_nvram_types(efx, &nvram_types);
|
||||
rc = efx_siena_mcdi_nvram_types(efx, &nvram_types);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
@ -943,7 +947,7 @@ static int siena_mtd_probe(struct efx_nic *efx)
|
||||
if (rc)
|
||||
goto fail;
|
||||
|
||||
rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
|
||||
rc = efx_siena_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
|
||||
fail:
|
||||
if (rc)
|
||||
kfree(parts);
|
||||
@ -980,36 +984,36 @@ const struct efx_nic_type siena_a0_nic_type = {
|
||||
.remove = siena_remove_nic,
|
||||
.init = siena_init_nic,
|
||||
.dimension_resources = siena_dimension_resources,
|
||||
.fini = efx_port_dummy_op_void,
|
||||
.fini = efx_siena_port_dummy_op_void,
|
||||
#ifdef CONFIG_EEH
|
||||
.monitor = siena_monitor,
|
||||
#else
|
||||
.monitor = NULL,
|
||||
#endif
|
||||
.map_reset_reason = efx_mcdi_map_reset_reason,
|
||||
.map_reset_reason = efx_siena_mcdi_map_reset_reason,
|
||||
.map_reset_flags = siena_map_reset_flags,
|
||||
.reset = efx_mcdi_reset,
|
||||
.probe_port = efx_mcdi_port_probe,
|
||||
.remove_port = efx_mcdi_port_remove,
|
||||
.reset = efx_siena_mcdi_reset,
|
||||
.probe_port = efx_siena_mcdi_port_probe,
|
||||
.remove_port = efx_siena_mcdi_port_remove,
|
||||
.fini_dmaq = efx_farch_fini_dmaq,
|
||||
.prepare_flush = siena_prepare_flush,
|
||||
.prepare_flush = efx_siena_prepare_flush,
|
||||
.finish_flush = siena_finish_flush,
|
||||
.prepare_flr = efx_port_dummy_op_void,
|
||||
.prepare_flr = efx_siena_port_dummy_op_void,
|
||||
.finish_flr = efx_farch_finish_flr,
|
||||
.describe_stats = siena_describe_nic_stats,
|
||||
.update_stats = siena_update_nic_stats,
|
||||
.start_stats = efx_mcdi_mac_start_stats,
|
||||
.pull_stats = efx_mcdi_mac_pull_stats,
|
||||
.stop_stats = efx_mcdi_mac_stop_stats,
|
||||
.start_stats = efx_siena_mcdi_mac_start_stats,
|
||||
.pull_stats = efx_siena_mcdi_mac_pull_stats,
|
||||
.stop_stats = efx_siena_mcdi_mac_stop_stats,
|
||||
.push_irq_moderation = siena_push_irq_moderation,
|
||||
.reconfigure_mac = siena_mac_reconfigure,
|
||||
.check_mac_fault = efx_mcdi_mac_check_fault,
|
||||
.reconfigure_port = efx_mcdi_port_reconfigure,
|
||||
.check_mac_fault = efx_siena_mcdi_mac_check_fault,
|
||||
.reconfigure_port = efx_siena_mcdi_port_reconfigure,
|
||||
.get_wol = siena_get_wol,
|
||||
.set_wol = siena_set_wol,
|
||||
.resume_wol = siena_init_wol,
|
||||
.test_chip = siena_test_chip,
|
||||
.test_nvram = efx_mcdi_nvram_test_all,
|
||||
.test_nvram = efx_siena_mcdi_nvram_test_all,
|
||||
.mcdi_request = siena_mcdi_request,
|
||||
.mcdi_poll_response = siena_mcdi_poll_response,
|
||||
.mcdi_read_response = siena_mcdi_read_response,
|
||||
@ -1024,7 +1028,7 @@ const struct efx_nic_type siena_a0_nic_type = {
|
||||
.tx_remove = efx_farch_tx_remove,
|
||||
.tx_write = efx_farch_tx_write,
|
||||
.tx_limit_len = efx_farch_tx_limit_len,
|
||||
.tx_enqueue = __efx_enqueue_skb,
|
||||
.tx_enqueue = __efx_siena_enqueue_skb,
|
||||
.rx_push_rss_config = siena_rx_push_rss_config,
|
||||
.rx_pull_rss_config = siena_rx_pull_rss_config,
|
||||
.rx_probe = efx_farch_rx_probe,
|
||||
@ -1032,7 +1036,7 @@ const struct efx_nic_type siena_a0_nic_type = {
|
||||
.rx_remove = efx_farch_rx_remove,
|
||||
.rx_write = efx_farch_rx_write,
|
||||
.rx_defer_refill = efx_farch_rx_defer_refill,
|
||||
.rx_packet = __efx_rx_packet,
|
||||
.rx_packet = __efx_siena_rx_packet,
|
||||
.ev_probe = efx_farch_ev_probe,
|
||||
.ev_init = efx_farch_ev_init,
|
||||
.ev_fini = efx_farch_ev_fini,
|
||||
@ -1056,11 +1060,11 @@ const struct efx_nic_type siena_a0_nic_type = {
|
||||
#endif
|
||||
#ifdef CONFIG_SFC_MTD
|
||||
.mtd_probe = siena_mtd_probe,
|
||||
.mtd_rename = efx_mcdi_mtd_rename,
|
||||
.mtd_read = efx_mcdi_mtd_read,
|
||||
.mtd_erase = efx_mcdi_mtd_erase,
|
||||
.mtd_write = efx_mcdi_mtd_write,
|
||||
.mtd_sync = efx_mcdi_mtd_sync,
|
||||
.mtd_rename = efx_siena_mcdi_mtd_rename,
|
||||
.mtd_read = efx_siena_mcdi_mtd_read,
|
||||
.mtd_erase = efx_siena_mcdi_mtd_erase,
|
||||
.mtd_write = efx_siena_mcdi_mtd_write,
|
||||
.mtd_sync = efx_siena_mcdi_mtd_sync,
|
||||
#endif
|
||||
.ptp_write_host_time = siena_ptp_write_host_time,
|
||||
.ptp_set_ts_config = siena_ptp_set_ts_config,
|
||||
@ -1075,9 +1079,9 @@ const struct efx_nic_type siena_a0_nic_type = {
|
||||
.sriov_set_vf_vlan = efx_siena_sriov_set_vf_vlan,
|
||||
.sriov_set_vf_spoofchk = efx_siena_sriov_set_vf_spoofchk,
|
||||
.sriov_get_vf_config = efx_siena_sriov_get_vf_config,
|
||||
.vswitching_probe = efx_port_dummy_op_int,
|
||||
.vswitching_restore = efx_port_dummy_op_int,
|
||||
.vswitching_remove = efx_port_dummy_op_void,
|
||||
.vswitching_probe = efx_siena_port_dummy_op_int,
|
||||
.vswitching_restore = efx_siena_port_dummy_op_int,
|
||||
.vswitching_remove = efx_siena_port_dummy_op_void,
|
||||
.set_mac_address = efx_siena_sriov_mac_address_changed,
|
||||
#endif
|
||||
|
||||
@ -1104,6 +1108,6 @@ const struct efx_nic_type siena_a0_nic_type = {
|
||||
1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT),
|
||||
.rx_hash_key_size = 16,
|
||||
.check_caps = siena_check_caps,
|
||||
.sensor_event = efx_mcdi_sensor_event,
|
||||
.sensor_event = efx_siena_mcdi_sensor_event,
|
||||
.rx_recycle_ring_size = efx_siena_recycle_ring_size,
|
||||
};
|
@ -206,8 +206,9 @@ static int efx_siena_sriov_cmd(struct efx_nic *efx, bool enable,
|
||||
MCDI_SET_DWORD(inbuf, SRIOV_IN_VI_BASE, EFX_VI_BASE);
|
||||
MCDI_SET_DWORD(inbuf, SRIOV_IN_VF_COUNT, efx->vf_count);
|
||||
|
||||
rc = efx_mcdi_rpc_quiet(efx, MC_CMD_SRIOV, inbuf, MC_CMD_SRIOV_IN_LEN,
|
||||
outbuf, MC_CMD_SRIOV_OUT_LEN, &outlen);
|
||||
rc = efx_siena_mcdi_rpc_quiet(efx, MC_CMD_SRIOV, inbuf,
|
||||
MC_CMD_SRIOV_IN_LEN, outbuf,
|
||||
MC_CMD_SRIOV_OUT_LEN, &outlen);
|
||||
if (rc)
|
||||
return rc;
|
||||
if (outlen < MC_CMD_SRIOV_OUT_LEN)
|
||||
@ -288,7 +289,7 @@ static int efx_siena_sriov_memcpy(struct efx_nic *efx,
|
||||
++req;
|
||||
}
|
||||
|
||||
rc = efx_mcdi_rpc(efx, MC_CMD_MEMCPY, inbuf, used, NULL, 0, NULL);
|
||||
rc = efx_siena_mcdi_rpc(efx, MC_CMD_MEMCPY, inbuf, used, NULL, 0, NULL);
|
||||
out:
|
||||
mb(); /* Don't write source/read dest before DMA is complete */
|
||||
|
||||
@ -689,7 +690,7 @@ static int efx_vfdi_fini_all_queues(struct siena_vf *vf)
|
||||
MC_CMD_FLUSH_RX_QUEUES_IN_QID_OFST_MAXNUM);
|
||||
|
||||
rtnl_lock();
|
||||
siena_prepare_flush(efx);
|
||||
efx_siena_prepare_flush(efx);
|
||||
rtnl_unlock();
|
||||
|
||||
/* Flush all the initialized queues */
|
||||
@ -712,7 +713,7 @@ static int efx_vfdi_fini_all_queues(struct siena_vf *vf)
|
||||
|
||||
atomic_set(&vf->rxq_retry_count, 0);
|
||||
while (timeout && (vf->rxq_count || vf->txq_count)) {
|
||||
rc = efx_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, inbuf,
|
||||
rc = efx_siena_mcdi_rpc(efx, MC_CMD_FLUSH_RX_QUEUES, inbuf,
|
||||
MC_CMD_FLUSH_RX_QUEUES_IN_LEN(rxqs_count),
|
||||
NULL, 0, NULL);
|
||||
WARN_ON(rc < 0);
|
||||
@ -1011,9 +1012,9 @@ static void efx_siena_sriov_reset_vf_work(struct work_struct *work)
|
||||
struct efx_nic *efx = vf->efx;
|
||||
struct efx_buffer buf;
|
||||
|
||||
if (!efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE, GFP_NOIO)) {
|
||||
if (!efx_siena_alloc_buffer(efx, &buf, EFX_PAGE_SIZE, GFP_NOIO)) {
|
||||
efx_siena_sriov_reset_vf(vf, &buf);
|
||||
efx_nic_free_buffer(efx, &buf);
|
||||
efx_siena_free_buffer(efx, &buf);
|
||||
}
|
||||
}
|
||||
|
||||
@ -1043,7 +1044,7 @@ efx_siena_sriov_get_channel_name(struct efx_channel *channel,
|
||||
static const struct efx_channel_type efx_siena_sriov_channel_type = {
|
||||
.handle_no_channel = efx_siena_sriov_handle_no_channel,
|
||||
.pre_probe = efx_siena_sriov_probe_channel,
|
||||
.post_remove = efx_channel_dummy_op_void,
|
||||
.post_remove = efx_siena_channel_dummy_op_void,
|
||||
.get_name = efx_siena_sriov_get_channel_name,
|
||||
/* no copy operation; channel must not be reallocated */
|
||||
.keep_eventq = true,
|
||||
@ -1228,7 +1229,7 @@ static void efx_siena_sriov_vfs_fini(struct efx_nic *efx)
|
||||
for (pos = 0; pos < efx->vf_count; ++pos) {
|
||||
vf = nic_data->vf + pos;
|
||||
|
||||
efx_nic_free_buffer(efx, &vf->buf);
|
||||
efx_siena_free_buffer(efx, &vf->buf);
|
||||
kfree(vf->peer_page_addrs);
|
||||
vf->peer_page_addrs = NULL;
|
||||
vf->peer_page_count = 0;
|
||||
@ -1268,8 +1269,8 @@ static int efx_siena_sriov_vfs_init(struct efx_nic *efx)
|
||||
pci_domain_nr(pci_dev->bus), pci_dev->bus->number,
|
||||
PCI_SLOT(devfn), PCI_FUNC(devfn));
|
||||
|
||||
rc = efx_nic_alloc_buffer(efx, &vf->buf, EFX_PAGE_SIZE,
|
||||
GFP_KERNEL);
|
||||
rc = efx_siena_alloc_buffer(efx, &vf->buf, EFX_PAGE_SIZE,
|
||||
GFP_KERNEL);
|
||||
if (rc)
|
||||
goto fail;
|
||||
|
||||
@ -1302,8 +1303,8 @@ int efx_siena_sriov_init(struct efx_nic *efx)
|
||||
if (rc)
|
||||
goto fail_cmd;
|
||||
|
||||
rc = efx_nic_alloc_buffer(efx, &nic_data->vfdi_status,
|
||||
sizeof(*vfdi_status), GFP_KERNEL);
|
||||
rc = efx_siena_alloc_buffer(efx, &nic_data->vfdi_status,
|
||||
sizeof(*vfdi_status), GFP_KERNEL);
|
||||
if (rc)
|
||||
goto fail_status;
|
||||
vfdi_status = nic_data->vfdi_status.addr;
|
||||
@ -1358,7 +1359,7 @@ fail_vfs:
|
||||
efx_siena_sriov_free_local(efx);
|
||||
kfree(nic_data->vf);
|
||||
fail_alloc:
|
||||
efx_nic_free_buffer(efx, &nic_data->vfdi_status);
|
||||
efx_siena_free_buffer(efx, &nic_data->vfdi_status);
|
||||
fail_status:
|
||||
efx_siena_sriov_cmd(efx, false, NULL, NULL);
|
||||
fail_cmd:
|
||||
@ -1395,7 +1396,7 @@ void efx_siena_sriov_fini(struct efx_nic *efx)
|
||||
efx_siena_sriov_vfs_fini(efx);
|
||||
efx_siena_sriov_free_local(efx);
|
||||
kfree(nic_data->vf);
|
||||
efx_nic_free_buffer(efx, &nic_data->vfdi_status);
|
||||
efx_siena_free_buffer(efx, &nic_data->vfdi_status);
|
||||
efx_siena_sriov_cmd(efx, false, NULL, NULL);
|
||||
}
|
||||
|
||||
@ -1563,7 +1564,7 @@ void efx_siena_sriov_reset(struct efx_nic *efx)
|
||||
efx_siena_sriov_usrev(efx, true);
|
||||
(void)efx_siena_sriov_cmd(efx, true, NULL, NULL);
|
||||
|
||||
if (efx_nic_alloc_buffer(efx, &buf, EFX_PAGE_SIZE, GFP_NOIO))
|
||||
if (efx_siena_alloc_buffer(efx, &buf, EFX_PAGE_SIZE, GFP_NOIO))
|
||||
return;
|
||||
|
||||
for (vf_i = 0; vf_i < efx->vf_init_count; ++vf_i) {
|
||||
@ -1571,7 +1572,7 @@ void efx_siena_sriov_reset(struct efx_nic *efx)
|
||||
efx_siena_sriov_reset_vf(vf, &buf);
|
||||
}
|
||||
|
||||
efx_nic_free_buffer(efx, &buf);
|
||||
efx_siena_free_buffer(efx, &buf);
|
||||
}
|
||||
|
||||
int efx_init_sriov(void)
|
83
drivers/net/ethernet/sfc/siena/sriov.h
Normal file
83
drivers/net/ethernet/sfc/siena/sriov.h
Normal file
@ -0,0 +1,83 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/****************************************************************************
|
||||
* Driver for Solarflare network controllers and boards
|
||||
* Copyright 2014-2015 Solarflare Communications Inc.
|
||||
*/
|
||||
|
||||
#ifndef EFX_SRIOV_H
|
||||
#define EFX_SRIOV_H
|
||||
|
||||
#include "net_driver.h"
|
||||
|
||||
#ifdef CONFIG_SFC_SRIOV
|
||||
|
||||
static inline
|
||||
int efx_sriov_set_vf_mac(struct net_device *net_dev, int vf_i, u8 *mac)
|
||||
{
|
||||
struct efx_nic *efx = netdev_priv(net_dev);
|
||||
|
||||
if (efx->type->sriov_set_vf_mac)
|
||||
return efx->type->sriov_set_vf_mac(efx, vf_i, mac);
|
||||
else
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
static inline
|
||||
int efx_sriov_set_vf_vlan(struct net_device *net_dev, int vf_i, u16 vlan,
|
||||
u8 qos, __be16 vlan_proto)
|
||||
{
|
||||
struct efx_nic *efx = netdev_priv(net_dev);
|
||||
|
||||
if (efx->type->sriov_set_vf_vlan) {
|
||||
if ((vlan & ~VLAN_VID_MASK) ||
|
||||
(qos & ~(VLAN_PRIO_MASK >> VLAN_PRIO_SHIFT)))
|
||||
return -EINVAL;
|
||||
|
||||
if (vlan_proto != htons(ETH_P_8021Q))
|
||||
return -EPROTONOSUPPORT;
|
||||
|
||||
return efx->type->sriov_set_vf_vlan(efx, vf_i, vlan, qos);
|
||||
} else {
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
}
|
||||
|
||||
static inline
|
||||
int efx_sriov_set_vf_spoofchk(struct net_device *net_dev, int vf_i,
|
||||
bool spoofchk)
|
||||
{
|
||||
struct efx_nic *efx = netdev_priv(net_dev);
|
||||
|
||||
if (efx->type->sriov_set_vf_spoofchk)
|
||||
return efx->type->sriov_set_vf_spoofchk(efx, vf_i, spoofchk);
|
||||
else
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
static inline
|
||||
int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i,
|
||||
struct ifla_vf_info *ivi)
|
||||
{
|
||||
struct efx_nic *efx = netdev_priv(net_dev);
|
||||
|
||||
if (efx->type->sriov_get_vf_config)
|
||||
return efx->type->sriov_get_vf_config(efx, vf_i, ivi);
|
||||
else
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
static inline
|
||||
int efx_sriov_set_vf_link_state(struct net_device *net_dev, int vf_i,
|
||||
int link_state)
|
||||
{
|
||||
struct efx_nic *efx = netdev_priv(net_dev);
|
||||
|
||||
if (efx->type->sriov_set_vf_link_state)
|
||||
return efx->type->sriov_set_vf_link_state(efx, vf_i,
|
||||
link_state);
|
||||
else
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
#endif /* CONFIG_SFC_SRIOV */
|
||||
|
||||
#endif /* EFX_SRIOV_H */
|
399
drivers/net/ethernet/sfc/siena/tx.c
Normal file
399
drivers/net/ethernet/sfc/siena/tx.c
Normal file
@ -0,0 +1,399 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/****************************************************************************
|
||||
* Driver for Solarflare network controllers and boards
|
||||
* Copyright 2005-2006 Fen Systems Ltd.
|
||||
* Copyright 2005-2013 Solarflare Communications Inc.
|
||||
*/
|
||||
|
||||
#include <linux/pci.h>
|
||||
#include <linux/tcp.h>
|
||||
#include <linux/ip.h>
|
||||
#include <linux/in.h>
|
||||
#include <linux/ipv6.h>
|
||||
#include <linux/slab.h>
|
||||
#include <net/ipv6.h>
|
||||
#include <linux/if_ether.h>
|
||||
#include <linux/highmem.h>
|
||||
#include <linux/cache.h>
|
||||
#include "net_driver.h"
|
||||
#include "efx.h"
|
||||
#include "io.h"
|
||||
#include "nic.h"
|
||||
#include "tx.h"
|
||||
#include "tx_common.h"
|
||||
#include "workarounds.h"
|
||||
|
||||
static inline u8 *efx_tx_get_copy_buffer(struct efx_tx_queue *tx_queue,
|
||||
struct efx_tx_buffer *buffer)
|
||||
{
|
||||
unsigned int index = efx_tx_queue_get_insert_index(tx_queue);
|
||||
struct efx_buffer *page_buf =
|
||||
&tx_queue->cb_page[index >> (PAGE_SHIFT - EFX_TX_CB_ORDER)];
|
||||
unsigned int offset =
|
||||
((index << EFX_TX_CB_ORDER) + NET_IP_ALIGN) & (PAGE_SIZE - 1);
|
||||
|
||||
if (unlikely(!page_buf->addr) &&
|
||||
efx_siena_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,
|
||||
GFP_ATOMIC))
|
||||
return NULL;
|
||||
buffer->dma_addr = page_buf->dma_addr + offset;
|
||||
buffer->unmap_len = 0;
|
||||
return (u8 *)page_buf->addr + offset;
|
||||
}
|
||||
|
||||
static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1)
|
||||
{
|
||||
/* We need to consider all queues that the net core sees as one */
|
||||
struct efx_nic *efx = txq1->efx;
|
||||
struct efx_tx_queue *txq2;
|
||||
unsigned int fill_level;
|
||||
|
||||
fill_level = efx_channel_tx_old_fill_level(txq1->channel);
|
||||
if (likely(fill_level < efx->txq_stop_thresh))
|
||||
return;
|
||||
|
||||
/* We used the stale old_read_count above, which gives us a
|
||||
* pessimistic estimate of the fill level (which may even
|
||||
* validly be >= efx->txq_entries). Now try again using
|
||||
* read_count (more likely to be a cache miss).
|
||||
*
|
||||
* If we read read_count and then conditionally stop the
|
||||
* queue, it is possible for the completion path to race with
|
||||
* us and complete all outstanding descriptors in the middle,
|
||||
* after which there will be no more completions to wake it.
|
||||
* Therefore we stop the queue first, then read read_count
|
||||
* (with a memory barrier to ensure the ordering), then
|
||||
* restart the queue if the fill level turns out to be low
|
||||
* enough.
|
||||
*/
|
||||
netif_tx_stop_queue(txq1->core_txq);
|
||||
smp_mb();
|
||||
efx_for_each_channel_tx_queue(txq2, txq1->channel)
|
||||
txq2->old_read_count = READ_ONCE(txq2->read_count);
|
||||
|
||||
fill_level = efx_channel_tx_old_fill_level(txq1->channel);
|
||||
EFX_WARN_ON_ONCE_PARANOID(fill_level >= efx->txq_entries);
|
||||
if (likely(fill_level < efx->txq_stop_thresh)) {
|
||||
smp_mb();
|
||||
if (likely(!efx->loopback_selftest))
|
||||
netif_tx_start_queue(txq1->core_txq);
|
||||
}
|
||||
}
|
||||
|
||||
static int efx_enqueue_skb_copy(struct efx_tx_queue *tx_queue,
|
||||
struct sk_buff *skb)
|
||||
{
|
||||
unsigned int copy_len = skb->len;
|
||||
struct efx_tx_buffer *buffer;
|
||||
u8 *copy_buffer;
|
||||
int rc;
|
||||
|
||||
EFX_WARN_ON_ONCE_PARANOID(copy_len > EFX_TX_CB_SIZE);
|
||||
|
||||
buffer = efx_tx_queue_get_insert_buffer(tx_queue);
|
||||
|
||||
copy_buffer = efx_tx_get_copy_buffer(tx_queue, buffer);
|
||||
if (unlikely(!copy_buffer))
|
||||
return -ENOMEM;
|
||||
|
||||
rc = skb_copy_bits(skb, 0, copy_buffer, copy_len);
|
||||
EFX_WARN_ON_PARANOID(rc);
|
||||
buffer->len = copy_len;
|
||||
|
||||
buffer->skb = skb;
|
||||
buffer->flags = EFX_TX_BUF_SKB;
|
||||
|
||||
++tx_queue->insert_count;
|
||||
return rc;
|
||||
}
|
||||
|
||||
/* Send any pending traffic for a channel. xmit_more is shared across all
|
||||
* queues for a channel, so we must check all of them.
|
||||
*/
|
||||
static void efx_tx_send_pending(struct efx_channel *channel)
|
||||
{
|
||||
struct efx_tx_queue *q;
|
||||
|
||||
efx_for_each_channel_tx_queue(q, channel) {
|
||||
if (q->xmit_pending)
|
||||
efx_nic_push_buffers(q);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Add a socket buffer to a TX queue
|
||||
*
|
||||
* This maps all fragments of a socket buffer for DMA and adds them to
|
||||
* the TX queue. The queue's insert pointer will be incremented by
|
||||
* the number of fragments in the socket buffer.
|
||||
*
|
||||
* If any DMA mapping fails, any mapped fragments will be unmapped,
|
||||
* the queue's insert pointer will be restored to its original value.
|
||||
*
|
||||
* This function is split out from efx_siena_hard_start_xmit to allow the
|
||||
* loopback test to direct packets via specific TX queues.
|
||||
*
|
||||
* Returns NETDEV_TX_OK.
|
||||
* You must hold netif_tx_lock() to call this function.
|
||||
*/
|
||||
netdev_tx_t __efx_siena_enqueue_skb(struct efx_tx_queue *tx_queue,
|
||||
struct sk_buff *skb)
|
||||
{
|
||||
unsigned int old_insert_count = tx_queue->insert_count;
|
||||
bool xmit_more = netdev_xmit_more();
|
||||
bool data_mapped = false;
|
||||
unsigned int segments;
|
||||
unsigned int skb_len;
|
||||
int rc;
|
||||
|
||||
skb_len = skb->len;
|
||||
segments = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 0;
|
||||
if (segments == 1)
|
||||
segments = 0; /* Don't use TSO for a single segment. */
|
||||
|
||||
/* Handle TSO first - it's *possible* (although unlikely) that we might
|
||||
* be passed a packet to segment that's smaller than the copybreak/PIO
|
||||
* size limit.
|
||||
*/
|
||||
if (segments) {
|
||||
rc = efx_siena_tx_tso_fallback(tx_queue, skb);
|
||||
tx_queue->tso_fallbacks++;
|
||||
if (rc == 0)
|
||||
return 0;
|
||||
goto err;
|
||||
} else if (skb->data_len && skb_len <= EFX_TX_CB_SIZE) {
|
||||
/* Pad short packets or coalesce short fragmented packets. */
|
||||
if (efx_enqueue_skb_copy(tx_queue, skb))
|
||||
goto err;
|
||||
tx_queue->cb_packets++;
|
||||
data_mapped = true;
|
||||
}
|
||||
|
||||
/* Map for DMA and create descriptors if we haven't done so already. */
|
||||
if (!data_mapped && (efx_siena_tx_map_data(tx_queue, skb, segments)))
|
||||
goto err;
|
||||
|
||||
efx_tx_maybe_stop_queue(tx_queue);
|
||||
|
||||
tx_queue->xmit_pending = true;
|
||||
|
||||
/* Pass off to hardware */
|
||||
if (__netdev_tx_sent_queue(tx_queue->core_txq, skb_len, xmit_more))
|
||||
efx_tx_send_pending(tx_queue->channel);
|
||||
|
||||
if (segments) {
|
||||
tx_queue->tso_bursts++;
|
||||
tx_queue->tso_packets += segments;
|
||||
tx_queue->tx_packets += segments;
|
||||
} else {
|
||||
tx_queue->tx_packets++;
|
||||
}
|
||||
|
||||
return NETDEV_TX_OK;
|
||||
|
||||
|
||||
err:
|
||||
efx_siena_enqueue_unwind(tx_queue, old_insert_count);
|
||||
dev_kfree_skb_any(skb);
|
||||
|
||||
/* If we're not expecting another transmit and we had something to push
|
||||
* on this queue or a partner queue then we need to push here to get the
|
||||
* previous packets out.
|
||||
*/
|
||||
if (!xmit_more)
|
||||
efx_tx_send_pending(tx_queue->channel);
|
||||
|
||||
return NETDEV_TX_OK;
|
||||
}
|
||||
|
||||
/* Transmit a packet from an XDP buffer
|
||||
*
|
||||
* Returns number of packets sent on success, error code otherwise.
|
||||
* Runs in NAPI context, either in our poll (for XDP TX) or a different NIC
|
||||
* (for XDP redirect).
|
||||
*/
|
||||
int efx_siena_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs,
|
||||
bool flush)
|
||||
{
|
||||
struct efx_tx_buffer *tx_buffer;
|
||||
struct efx_tx_queue *tx_queue;
|
||||
struct xdp_frame *xdpf;
|
||||
dma_addr_t dma_addr;
|
||||
unsigned int len;
|
||||
int space;
|
||||
int cpu;
|
||||
int i = 0;
|
||||
|
||||
if (unlikely(n && !xdpfs))
|
||||
return -EINVAL;
|
||||
if (unlikely(!n))
|
||||
return 0;
|
||||
|
||||
cpu = raw_smp_processor_id();
|
||||
if (unlikely(cpu >= efx->xdp_tx_queue_count))
|
||||
return -EINVAL;
|
||||
|
||||
tx_queue = efx->xdp_tx_queues[cpu];
|
||||
if (unlikely(!tx_queue))
|
||||
return -EINVAL;
|
||||
|
||||
if (!tx_queue->initialised)
|
||||
return -EINVAL;
|
||||
|
||||
if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED)
|
||||
HARD_TX_LOCK(efx->net_dev, tx_queue->core_txq, cpu);
|
||||
|
||||
/* If we're borrowing net stack queues we have to handle stop-restart
|
||||
* or we might block the queue and it will be considered as frozen
|
||||
*/
|
||||
if (efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_BORROWED) {
|
||||
if (netif_tx_queue_stopped(tx_queue->core_txq))
|
||||
goto unlock;
|
||||
efx_tx_maybe_stop_queue(tx_queue);
|
||||
}
|
||||
|
||||
/* Check for available space. We should never need multiple
|
||||
* descriptors per frame.
|
||||
*/
|
||||
space = efx->txq_entries +
|
||||
tx_queue->read_count - tx_queue->insert_count;
|
||||
|
||||
for (i = 0; i < n; i++) {
|
||||
xdpf = xdpfs[i];
|
||||
|
||||
if (i >= space)
|
||||
break;
|
||||
|
||||
/* We'll want a descriptor for this tx. */
|
||||
prefetchw(__efx_tx_queue_get_insert_buffer(tx_queue));
|
||||
|
||||
len = xdpf->len;
|
||||
|
||||
/* Map for DMA. */
|
||||
dma_addr = dma_map_single(&efx->pci_dev->dev,
|
||||
xdpf->data, len,
|
||||
DMA_TO_DEVICE);
|
||||
if (dma_mapping_error(&efx->pci_dev->dev, dma_addr))
|
||||
break;
|
||||
|
||||
/* Create descriptor and set up for unmapping DMA. */
|
||||
tx_buffer = efx_siena_tx_map_chunk(tx_queue, dma_addr, len);
|
||||
tx_buffer->xdpf = xdpf;
|
||||
tx_buffer->flags = EFX_TX_BUF_XDP |
|
||||
EFX_TX_BUF_MAP_SINGLE;
|
||||
tx_buffer->dma_offset = 0;
|
||||
tx_buffer->unmap_len = len;
|
||||
tx_queue->tx_packets++;
|
||||
}
|
||||
|
||||
/* Pass mapped frames to hardware. */
|
||||
if (flush && i > 0)
|
||||
efx_nic_push_buffers(tx_queue);
|
||||
|
||||
unlock:
|
||||
if (efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED)
|
||||
HARD_TX_UNLOCK(efx->net_dev, tx_queue->core_txq);
|
||||
|
||||
return i == 0 ? -EIO : i;
|
||||
}
|
||||
|
||||
/* Initiate a packet transmission. We use one channel per CPU
|
||||
* (sharing when we have more CPUs than channels).
|
||||
*
|
||||
* Context: non-blocking.
|
||||
* Should always return NETDEV_TX_OK and consume the skb.
|
||||
*/
|
||||
netdev_tx_t efx_siena_hard_start_xmit(struct sk_buff *skb,
|
||||
struct net_device *net_dev)
|
||||
{
|
||||
struct efx_nic *efx = netdev_priv(net_dev);
|
||||
struct efx_tx_queue *tx_queue;
|
||||
unsigned index, type;
|
||||
|
||||
EFX_WARN_ON_PARANOID(!netif_device_present(net_dev));
|
||||
|
||||
index = skb_get_queue_mapping(skb);
|
||||
type = efx_tx_csum_type_skb(skb);
|
||||
if (index >= efx->n_tx_channels) {
|
||||
index -= efx->n_tx_channels;
|
||||
type |= EFX_TXQ_TYPE_HIGHPRI;
|
||||
}
|
||||
|
||||
/* PTP "event" packet */
|
||||
if (unlikely(efx_xmit_with_hwtstamp(skb)) &&
|
||||
((efx_siena_ptp_use_mac_tx_timestamps(efx) && efx->ptp_data) ||
|
||||
unlikely(efx_siena_ptp_is_ptp_tx(efx, skb)))) {
|
||||
/* There may be existing transmits on the channel that are
|
||||
* waiting for this packet to trigger the doorbell write.
|
||||
* We need to send the packets at this point.
|
||||
*/
|
||||
efx_tx_send_pending(efx_get_tx_channel(efx, index));
|
||||
return efx_siena_ptp_tx(efx, skb);
|
||||
}
|
||||
|
||||
tx_queue = efx_get_tx_queue(efx, index, type);
|
||||
if (WARN_ON_ONCE(!tx_queue)) {
|
||||
/* We don't have a TXQ of the right type.
|
||||
* This should never happen, as we don't advertise offload
|
||||
* features unless we can support them.
|
||||
*/
|
||||
dev_kfree_skb_any(skb);
|
||||
/* If we're not expecting another transmit and we had something to push
|
||||
* on this queue or a partner queue then we need to push here to get the
|
||||
* previous packets out.
|
||||
*/
|
||||
if (!netdev_xmit_more())
|
||||
efx_tx_send_pending(tx_queue->channel);
|
||||
return NETDEV_TX_OK;
|
||||
}
|
||||
|
||||
return __efx_siena_enqueue_skb(tx_queue, skb);
|
||||
}
|
||||
|
||||
void efx_siena_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue)
|
||||
{
|
||||
struct efx_nic *efx = tx_queue->efx;
|
||||
|
||||
/* Must be inverse of queue lookup in efx_siena_hard_start_xmit() */
|
||||
tx_queue->core_txq =
|
||||
netdev_get_tx_queue(efx->net_dev,
|
||||
tx_queue->channel->channel +
|
||||
((tx_queue->type & EFX_TXQ_TYPE_HIGHPRI) ?
|
||||
efx->n_tx_channels : 0));
|
||||
}
|
||||
|
||||
int efx_siena_setup_tc(struct net_device *net_dev, enum tc_setup_type type,
|
||||
void *type_data)
|
||||
{
|
||||
struct efx_nic *efx = netdev_priv(net_dev);
|
||||
struct tc_mqprio_qopt *mqprio = type_data;
|
||||
unsigned tc, num_tc;
|
||||
|
||||
if (type != TC_SETUP_QDISC_MQPRIO)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
/* Only Siena supported highpri queues */
|
||||
if (efx_nic_rev(efx) > EFX_REV_SIENA_A0)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
num_tc = mqprio->num_tc;
|
||||
|
||||
if (num_tc > EFX_MAX_TX_TC)
|
||||
return -EINVAL;
|
||||
|
||||
mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
|
||||
|
||||
if (num_tc == net_dev->num_tc)
|
||||
return 0;
|
||||
|
||||
for (tc = 0; tc < num_tc; tc++) {
|
||||
net_dev->tc_to_txq[tc].offset = tc * efx->n_tx_channels;
|
||||
net_dev->tc_to_txq[tc].count = efx->n_tx_channels;
|
||||
}
|
||||
|
||||
net_dev->num_tc = num_tc;
|
||||
|
||||
return netif_set_real_num_tx_queues(net_dev,
|
||||
max_t(int, num_tc, 1) *
|
||||
efx->n_tx_channels);
|
||||
}
|
40
drivers/net/ethernet/sfc/siena/tx.h
Normal file
40
drivers/net/ethernet/sfc/siena/tx.h
Normal file
@ -0,0 +1,40 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/****************************************************************************
|
||||
* Driver for Solarflare network controllers and boards
|
||||
* Copyright 2005-2006 Fen Systems Ltd.
|
||||
* Copyright 2006-2015 Solarflare Communications Inc.
|
||||
*/
|
||||
|
||||
#ifndef EFX_TX_H
|
||||
#define EFX_TX_H
|
||||
|
||||
#include <linux/types.h>
|
||||
|
||||
/* Driver internal tx-path related declarations. */
|
||||
/* What TXQ type will satisfy the checksum offloads required for this skb? */
|
||||
static inline unsigned int efx_tx_csum_type_skb(struct sk_buff *skb)
|
||||
{
|
||||
if (skb->ip_summed != CHECKSUM_PARTIAL)
|
||||
return 0; /* no checksum offload */
|
||||
|
||||
if (skb->encapsulation &&
|
||||
skb_checksum_start_offset(skb) == skb_inner_transport_offset(skb)) {
|
||||
/* we only advertise features for IPv4 and IPv6 checksums on
|
||||
* encapsulated packets, so if the checksum is for the inner
|
||||
* packet, it must be one of them; no further checking required.
|
||||
*/
|
||||
|
||||
/* Do we also need to offload the outer header checksum? */
|
||||
if (skb_shinfo(skb)->gso_segs > 1 &&
|
||||
!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) &&
|
||||
(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM))
|
||||
return EFX_TXQ_TYPE_OUTER_CSUM | EFX_TXQ_TYPE_INNER_CSUM;
|
||||
return EFX_TXQ_TYPE_INNER_CSUM;
|
||||
}
|
||||
|
||||
/* similarly, we only advertise features for IPv4 and IPv6 checksums,
|
||||
* so it must be one of them. No need for further checks.
|
||||
*/
|
||||
return EFX_TXQ_TYPE_OUTER_CSUM;
|
||||
}
|
||||
#endif /* EFX_TX_H */
|
448
drivers/net/ethernet/sfc/siena/tx_common.c
Normal file
448
drivers/net/ethernet/sfc/siena/tx_common.c
Normal file
@ -0,0 +1,448 @@
|
||||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/****************************************************************************
|
||||
* Driver for Solarflare network controllers and boards
|
||||
* Copyright 2018 Solarflare Communications Inc.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 as published
|
||||
* by the Free Software Foundation, incorporated herein by reference.
|
||||
*/
|
||||
|
||||
#include "net_driver.h"
|
||||
#include "efx.h"
|
||||
#include "nic_common.h"
|
||||
#include "tx_common.h"
|
||||
|
||||
static unsigned int efx_tx_cb_page_count(struct efx_tx_queue *tx_queue)
|
||||
{
|
||||
return DIV_ROUND_UP(tx_queue->ptr_mask + 1,
|
||||
PAGE_SIZE >> EFX_TX_CB_ORDER);
|
||||
}
|
||||
|
||||
int efx_siena_probe_tx_queue(struct efx_tx_queue *tx_queue)
|
||||
{
|
||||
struct efx_nic *efx = tx_queue->efx;
|
||||
unsigned int entries;
|
||||
int rc;
|
||||
|
||||
/* Create the smallest power-of-two aligned ring */
|
||||
entries = max(roundup_pow_of_two(efx->txq_entries), EFX_MIN_DMAQ_SIZE);
|
||||
EFX_WARN_ON_PARANOID(entries > EFX_MAX_DMAQ_SIZE);
|
||||
tx_queue->ptr_mask = entries - 1;
|
||||
|
||||
netif_dbg(efx, probe, efx->net_dev,
|
||||
"creating TX queue %d size %#x mask %#x\n",
|
||||
tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);
|
||||
|
||||
/* Allocate software ring */
|
||||
tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
|
||||
GFP_KERNEL);
|
||||
if (!tx_queue->buffer)
|
||||
return -ENOMEM;
|
||||
|
||||
tx_queue->cb_page = kcalloc(efx_tx_cb_page_count(tx_queue),
|
||||
sizeof(tx_queue->cb_page[0]), GFP_KERNEL);
|
||||
if (!tx_queue->cb_page) {
|
||||
rc = -ENOMEM;
|
||||
goto fail1;
|
||||
}
|
||||
|
||||
/* Allocate hardware ring, determine TXQ type */
|
||||
rc = efx_nic_probe_tx(tx_queue);
|
||||
if (rc)
|
||||
goto fail2;
|
||||
|
||||
tx_queue->channel->tx_queue_by_type[tx_queue->type] = tx_queue;
|
||||
return 0;
|
||||
|
||||
fail2:
|
||||
kfree(tx_queue->cb_page);
|
||||
tx_queue->cb_page = NULL;
|
||||
fail1:
|
||||
kfree(tx_queue->buffer);
|
||||
tx_queue->buffer = NULL;
|
||||
return rc;
|
||||
}
|
||||
|
||||
void efx_siena_init_tx_queue(struct efx_tx_queue *tx_queue)
|
||||
{
|
||||
struct efx_nic *efx = tx_queue->efx;
|
||||
|
||||
netif_dbg(efx, drv, efx->net_dev,
|
||||
"initialising TX queue %d\n", tx_queue->queue);
|
||||
|
||||
tx_queue->insert_count = 0;
|
||||
tx_queue->notify_count = 0;
|
||||
tx_queue->write_count = 0;
|
||||
tx_queue->packet_write_count = 0;
|
||||
tx_queue->old_write_count = 0;
|
||||
tx_queue->read_count = 0;
|
||||
tx_queue->old_read_count = 0;
|
||||
tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
|
||||
tx_queue->xmit_pending = false;
|
||||
tx_queue->timestamping = (efx_siena_ptp_use_mac_tx_timestamps(efx) &&
|
||||
tx_queue->channel == efx_siena_ptp_channel(efx));
|
||||
tx_queue->completed_timestamp_major = 0;
|
||||
tx_queue->completed_timestamp_minor = 0;
|
||||
|
||||
tx_queue->xdp_tx = efx_channel_is_xdp_tx(tx_queue->channel);
|
||||
tx_queue->tso_version = 0;
|
||||
|
||||
/* Set up TX descriptor ring */
|
||||
efx_nic_init_tx(tx_queue);
|
||||
|
||||
tx_queue->initialised = true;
|
||||
}
|
||||
|
||||
void efx_siena_remove_tx_queue(struct efx_tx_queue *tx_queue)
|
||||
{
|
||||
int i;
|
||||
|
||||
if (!tx_queue->buffer)
|
||||
return;
|
||||
|
||||
netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
|
||||
"destroying TX queue %d\n", tx_queue->queue);
|
||||
efx_nic_remove_tx(tx_queue);
|
||||
|
||||
if (tx_queue->cb_page) {
|
||||
for (i = 0; i < efx_tx_cb_page_count(tx_queue); i++)
|
||||
efx_siena_free_buffer(tx_queue->efx,
|
||||
&tx_queue->cb_page[i]);
|
||||
kfree(tx_queue->cb_page);
|
||||
tx_queue->cb_page = NULL;
|
||||
}
|
||||
|
||||
kfree(tx_queue->buffer);
|
||||
tx_queue->buffer = NULL;
|
||||
tx_queue->channel->tx_queue_by_type[tx_queue->type] = NULL;
|
||||
}
|
||||
|
||||
static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
|
||||
struct efx_tx_buffer *buffer,
|
||||
unsigned int *pkts_compl,
|
||||
unsigned int *bytes_compl)
|
||||
{
|
||||
if (buffer->unmap_len) {
|
||||
struct device *dma_dev = &tx_queue->efx->pci_dev->dev;
|
||||
dma_addr_t unmap_addr = buffer->dma_addr - buffer->dma_offset;
|
||||
|
||||
if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
|
||||
dma_unmap_single(dma_dev, unmap_addr, buffer->unmap_len,
|
||||
DMA_TO_DEVICE);
|
||||
else
|
||||
dma_unmap_page(dma_dev, unmap_addr, buffer->unmap_len,
|
||||
DMA_TO_DEVICE);
|
||||
buffer->unmap_len = 0;
|
||||
}
|
||||
|
||||
if (buffer->flags & EFX_TX_BUF_SKB) {
|
||||
struct sk_buff *skb = (struct sk_buff *)buffer->skb;
|
||||
|
||||
EFX_WARN_ON_PARANOID(!pkts_compl || !bytes_compl);
|
||||
(*pkts_compl)++;
|
||||
(*bytes_compl) += skb->len;
|
||||
if (tx_queue->timestamping &&
|
||||
(tx_queue->completed_timestamp_major ||
|
||||
tx_queue->completed_timestamp_minor)) {
|
||||
struct skb_shared_hwtstamps hwtstamp;
|
||||
|
||||
hwtstamp.hwtstamp =
|
||||
efx_siena_ptp_nic_to_kernel_time(tx_queue);
|
||||
skb_tstamp_tx(skb, &hwtstamp);
|
||||
|
||||
tx_queue->completed_timestamp_major = 0;
|
||||
tx_queue->completed_timestamp_minor = 0;
|
||||
}
|
||||
dev_consume_skb_any((struct sk_buff *)buffer->skb);
|
||||
netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
|
||||
"TX queue %d transmission id %x complete\n",
|
||||
tx_queue->queue, tx_queue->read_count);
|
||||
} else if (buffer->flags & EFX_TX_BUF_XDP) {
|
||||
xdp_return_frame_rx_napi(buffer->xdpf);
|
||||
}
|
||||
|
||||
buffer->len = 0;
|
||||
buffer->flags = 0;
|
||||
}
|
||||
|
||||
void efx_siena_fini_tx_queue(struct efx_tx_queue *tx_queue)
|
||||
{
|
||||
struct efx_tx_buffer *buffer;
|
||||
|
||||
netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev,
|
||||
"shutting down TX queue %d\n", tx_queue->queue);
|
||||
|
||||
if (!tx_queue->buffer)
|
||||
return;
|
||||
|
||||
/* Free any buffers left in the ring */
|
||||
while (tx_queue->read_count != tx_queue->write_count) {
|
||||
unsigned int pkts_compl = 0, bytes_compl = 0;
|
||||
|
||||
buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
|
||||
efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
|
||||
|
||||
++tx_queue->read_count;
|
||||
}
|
||||
tx_queue->xmit_pending = false;
|
||||
netdev_tx_reset_queue(tx_queue->core_txq);
|
||||
}
|
||||
|
||||
/* Remove packets from the TX queue
|
||||
*
|
||||
* This removes packets from the TX queue, up to and including the
|
||||
* specified index.
|
||||
*/
|
||||
static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
|
||||
unsigned int index,
|
||||
unsigned int *pkts_compl,
|
||||
unsigned int *bytes_compl)
|
||||
{
|
||||
struct efx_nic *efx = tx_queue->efx;
|
||||
unsigned int stop_index, read_ptr;
|
||||
|
||||
stop_index = (index + 1) & tx_queue->ptr_mask;
|
||||
read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
|
||||
|
||||
while (read_ptr != stop_index) {
|
||||
struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
|
||||
|
||||
if (!efx_tx_buffer_in_use(buffer)) {
|
||||
netif_err(efx, tx_err, efx->net_dev,
|
||||
"TX queue %d spurious TX completion id %d\n",
|
||||
tx_queue->queue, read_ptr);
|
||||
efx_siena_schedule_reset(efx, RESET_TYPE_TX_SKIP);
|
||||
return;
|
||||
}
|
||||
|
||||
efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);
|
||||
|
||||
++tx_queue->read_count;
|
||||
read_ptr = tx_queue->read_count & tx_queue->ptr_mask;
|
||||
}
|
||||
}
|
||||
|
||||
void efx_siena_xmit_done_check_empty(struct efx_tx_queue *tx_queue)
|
||||
{
|
||||
if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
|
||||
tx_queue->old_write_count = READ_ONCE(tx_queue->write_count);
|
||||
if (tx_queue->read_count == tx_queue->old_write_count) {
|
||||
/* Ensure that read_count is flushed. */
|
||||
smp_mb();
|
||||
tx_queue->empty_read_count =
|
||||
tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void efx_siena_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
|
||||
{
|
||||
unsigned int fill_level, pkts_compl = 0, bytes_compl = 0;
|
||||
struct efx_nic *efx = tx_queue->efx;
|
||||
|
||||
EFX_WARN_ON_ONCE_PARANOID(index > tx_queue->ptr_mask);
|
||||
|
||||
efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
|
||||
tx_queue->pkts_compl += pkts_compl;
|
||||
tx_queue->bytes_compl += bytes_compl;
|
||||
|
||||
if (pkts_compl > 1)
|
||||
++tx_queue->merge_events;
|
||||
|
||||
/* See if we need to restart the netif queue. This memory
|
||||
* barrier ensures that we write read_count (inside
|
||||
* efx_dequeue_buffers()) before reading the queue status.
|
||||
*/
|
||||
smp_mb();
|
||||
if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
|
||||
likely(efx->port_enabled) &&
|
||||
likely(netif_device_present(efx->net_dev))) {
|
||||
fill_level = efx_channel_tx_fill_level(tx_queue->channel);
|
||||
if (fill_level <= efx->txq_wake_thresh)
|
||||
netif_tx_wake_queue(tx_queue->core_txq);
|
||||
}
|
||||
|
||||
efx_siena_xmit_done_check_empty(tx_queue);
|
||||
}
|
||||
|
||||
/* Remove buffers put into a tx_queue for the current packet.
|
||||
* None of the buffers must have an skb attached.
|
||||
*/
|
||||
void efx_siena_enqueue_unwind(struct efx_tx_queue *tx_queue,
|
||||
unsigned int insert_count)
|
||||
{
|
||||
struct efx_tx_buffer *buffer;
|
||||
unsigned int bytes_compl = 0;
|
||||
unsigned int pkts_compl = 0;
|
||||
|
||||
/* Work backwards until we hit the original insert pointer value */
|
||||
while (tx_queue->insert_count != insert_count) {
|
||||
--tx_queue->insert_count;
|
||||
buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
|
||||
efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
|
||||
}
|
||||
}
|
||||
|
||||
struct efx_tx_buffer *efx_siena_tx_map_chunk(struct efx_tx_queue *tx_queue,
|
||||
dma_addr_t dma_addr, size_t len)
|
||||
{
|
||||
const struct efx_nic_type *nic_type = tx_queue->efx->type;
|
||||
struct efx_tx_buffer *buffer;
|
||||
unsigned int dma_len;
|
||||
|
||||
/* Map the fragment taking account of NIC-dependent DMA limits. */
|
||||
do {
|
||||
buffer = efx_tx_queue_get_insert_buffer(tx_queue);
|
||||
|
||||
if (nic_type->tx_limit_len)
|
||||
dma_len = nic_type->tx_limit_len(tx_queue, dma_addr, len);
|
||||
else
|
||||
dma_len = len;
|
||||
|
||||
buffer->len = dma_len;
|
||||
buffer->dma_addr = dma_addr;
|
||||
buffer->flags = EFX_TX_BUF_CONT;
|
||||
len -= dma_len;
|
||||
dma_addr += dma_len;
|
||||
++tx_queue->insert_count;
|
||||
} while (len);
|
||||
|
||||
return buffer;
|
||||
}
|
||||
|
||||
static int efx_tx_tso_header_length(struct sk_buff *skb)
|
||||
{
|
||||
size_t header_len;
|
||||
|
||||
if (skb->encapsulation)
|
||||
header_len = skb_inner_transport_header(skb) -
|
||||
skb->data +
|
||||
(inner_tcp_hdr(skb)->doff << 2u);
|
||||
else
|
||||
header_len = skb_transport_header(skb) - skb->data +
|
||||
(tcp_hdr(skb)->doff << 2u);
|
||||
return header_len;
|
||||
}
|
||||
|
||||
/* Map all data from an SKB for DMA and create descriptors on the queue. */
|
||||
/* Map all data from an SKB for DMA and create descriptors on the queue.
 *
 * Returns 0 on success or -EIO if a DMA mapping fails.  On failure,
 * descriptors already added to the queue are left in place;
 * NOTE(review): the caller presumably unwinds them (see
 * efx_siena_enqueue_unwind()) - confirm at the call sites.
 */
int efx_siena_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
			  unsigned int segment_count)
{
	struct efx_nic *efx = tx_queue->efx;
	struct device *dma_dev = &efx->pci_dev->dev;
	unsigned int frag_index, nr_frags;
	dma_addr_t dma_addr, unmap_addr;
	unsigned short dma_flags;
	size_t len, unmap_len;

	nr_frags = skb_shinfo(skb)->nr_frags;
	frag_index = 0;

	/* Map header data.  The linear area is mapped with
	 * dma_map_single(), so mark it EFX_TX_BUF_MAP_SINGLE so that
	 * completion unmaps it the same way (fragments use
	 * skb_frag_dma_map()/dma_unmap_page() and get dma_flags == 0).
	 */
	len = skb_headlen(skb);
	dma_addr = dma_map_single(dma_dev, skb->data, len, DMA_TO_DEVICE);
	dma_flags = EFX_TX_BUF_MAP_SINGLE;
	unmap_len = len;
	unmap_addr = dma_addr;

	if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
		return -EIO;

	if (segment_count) {
		/* For TSO we need to put the header in to a separate
		 * descriptor. Map this separately if necessary.
		 */
		size_t header_len = efx_tx_tso_header_length(skb);

		if (header_len != len) {
			tx_queue->tso_long_headers++;
			/* Emit the header as its own descriptor chain and
			 * advance past it; the remainder of the linear area
			 * is handled by the loop below.
			 */
			efx_siena_tx_map_chunk(tx_queue, dma_addr, header_len);
			len -= header_len;
			dma_addr += header_len;
		}
	}

	/* Add descriptors for each fragment. */
	do {
		struct efx_tx_buffer *buffer;
		skb_frag_t *fragment;

		buffer = efx_siena_tx_map_chunk(tx_queue, dma_addr, len);

		/* The final descriptor for a fragment is responsible for
		 * unmapping the whole fragment.
		 */
		buffer->flags = EFX_TX_BUF_CONT | dma_flags;
		buffer->unmap_len = unmap_len;
		buffer->dma_offset = buffer->dma_addr - unmap_addr;

		if (frag_index >= nr_frags) {
			/* Store SKB details with the final buffer for
			 * the completion.
			 */
			buffer->skb = skb;
			buffer->flags = EFX_TX_BUF_SKB | dma_flags;
			return 0;
		}

		/* Move on to the next fragment. */
		fragment = &skb_shinfo(skb)->frags[frag_index++];
		len = skb_frag_size(fragment);
		dma_addr = skb_frag_dma_map(dma_dev, fragment, 0, len,
					    DMA_TO_DEVICE);
		dma_flags = 0;
		unmap_len = len;
		unmap_addr = dma_addr;

		if (unlikely(dma_mapping_error(dma_dev, dma_addr)))
			return -EIO;
	} while (1);
}
|
||||
|
||||
unsigned int efx_siena_tx_max_skb_descs(struct efx_nic *efx)
|
||||
{
|
||||
/* Header and payload descriptor for each output segment, plus
|
||||
* one for every input fragment boundary within a segment
|
||||
*/
|
||||
unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS;
|
||||
|
||||
/* Possibly one more per segment for option descriptors */
|
||||
if (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
|
||||
max_descs += EFX_TSO_MAX_SEGS;
|
||||
|
||||
/* Possibly more for PCIe page boundaries within input fragments */
|
||||
if (PAGE_SIZE > EFX_PAGE_SIZE)
|
||||
max_descs += max_t(unsigned int, MAX_SKB_FRAGS,
|
||||
DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE));
|
||||
|
||||
return max_descs;
|
||||
}
|
||||
|
||||
/*
 * Fallback to software TSO.
 *
 * This is used if we are unable to send a GSO packet through hardware TSO.
 * This should only ever happen due to per-queue restrictions - unsupported
 * packets should first be filtered by the feature flags.
 *
 * Returns 0 on success, error code otherwise.
 */
int efx_siena_tx_tso_fallback(struct efx_tx_queue *tx_queue,
			      struct sk_buff *skb)
{
	struct sk_buff *segments, *next;

	/* Segment in software; on error the original skb is untouched
	 * and the error is propagated to the caller.
	 */
	segments = skb_gso_segment(skb, 0);
	if (IS_ERR(segments))
		return PTR_ERR(segments);

	/* The original skb is now fully represented by the segment list */
	dev_consume_skb_any(skb);

	/* Enqueue each segment as an ordinary skb.
	 * NOTE(review): the return value of efx_enqueue_skb() is ignored
	 * here - presumably per-segment failures are handled (and the skb
	 * freed) inside efx_enqueue_skb(); confirm.
	 */
	skb_list_walk_safe(segments, skb, next) {
		skb_mark_not_on_list(skb);
		efx_enqueue_skb(tx_queue, skb);
	}

	return 0;
}
|
39
drivers/net/ethernet/sfc/siena/tx_common.h
Normal file
39
drivers/net/ethernet/sfc/siena/tx_common.h
Normal file
@ -0,0 +1,39 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/****************************************************************************
|
||||
* Driver for Solarflare network controllers and boards
|
||||
* Copyright 2018 Solarflare Communications Inc.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 as published
|
||||
* by the Free Software Foundation, incorporated herein by reference.
|
||||
*/
|
||||
|
||||
#ifndef EFX_TX_COMMON_H
|
||||
#define EFX_TX_COMMON_H
|
||||
|
||||
int efx_siena_probe_tx_queue(struct efx_tx_queue *tx_queue);
|
||||
void efx_siena_init_tx_queue(struct efx_tx_queue *tx_queue);
|
||||
void efx_siena_fini_tx_queue(struct efx_tx_queue *tx_queue);
|
||||
void efx_siena_remove_tx_queue(struct efx_tx_queue *tx_queue);
|
||||
|
||||
static inline bool efx_tx_buffer_in_use(struct efx_tx_buffer *buffer)
|
||||
{
|
||||
return buffer->len || (buffer->flags & EFX_TX_BUF_OPTION);
|
||||
}
|
||||
|
||||
void efx_siena_xmit_done_check_empty(struct efx_tx_queue *tx_queue);
|
||||
void efx_siena_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
|
||||
|
||||
void efx_siena_enqueue_unwind(struct efx_tx_queue *tx_queue,
|
||||
unsigned int insert_count);
|
||||
|
||||
struct efx_tx_buffer *efx_siena_tx_map_chunk(struct efx_tx_queue *tx_queue,
|
||||
dma_addr_t dma_addr, size_t len);
|
||||
int efx_siena_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb,
|
||||
unsigned int segment_count);
|
||||
|
||||
unsigned int efx_siena_tx_max_skb_descs(struct efx_nic *efx);
|
||||
int efx_siena_tx_tso_fallback(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
|
||||
|
||||
extern bool efx_siena_separate_tx_channels;
|
||||
#endif
|
252
drivers/net/ethernet/sfc/siena/vfdi.h
Normal file
252
drivers/net/ethernet/sfc/siena/vfdi.h
Normal file
@ -0,0 +1,252 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/****************************************************************************
|
||||
* Driver for Solarflare network controllers and boards
|
||||
* Copyright 2010-2012 Solarflare Communications Inc.
|
||||
*/
|
||||
#ifndef _VFDI_H
|
||||
#define _VFDI_H
|
||||
|
||||
/**
|
||||
* DOC: Virtual Function Driver Interface
|
||||
*
|
||||
* This file contains software structures used to form a two way
|
||||
* communication channel between the VF driver and the PF driver,
|
||||
* named Virtual Function Driver Interface (VFDI).
|
||||
*
|
||||
* For the purposes of VFDI, a page is a memory region with size and
|
||||
* alignment of 4K. All addresses are DMA addresses to be used within
|
||||
* the domain of the relevant VF.
|
||||
*
|
||||
* The only hardware-defined channels for a VF driver to communicate
|
||||
* with the PF driver are the event mailboxes (%FR_CZ_USR_EV
|
||||
* registers). Writing to these registers generates an event with
|
||||
* EV_CODE = EV_CODE_USR_EV, USER_QID set to the index of the mailbox
|
||||
* and USER_EV_REG_VALUE set to the value written. The PF driver may
|
||||
* direct or disable delivery of these events by setting
|
||||
* %FR_CZ_USR_EV_CFG.
|
||||
*
|
||||
* The PF driver can send arbitrary events to arbitrary event queues.
|
||||
* However, for consistency, VFDI events from the PF are defined to
|
||||
* follow the same form and be sent to the first event queue assigned
|
||||
* to the VF while that queue is enabled by the VF driver.
|
||||
*
|
||||
* The general form of the variable bits of VFDI events is:
|
||||
*
|
||||
* 0 16 24 31
|
||||
* | DATA | TYPE | SEQ |
|
||||
*
|
||||
* SEQ is a sequence number which should be incremented by 1 (modulo
|
||||
* 256) for each event. The sequence numbers used in each direction
|
||||
* are independent.
|
||||
*
|
||||
* The VF submits requests of type &struct vfdi_req by sending the
|
||||
* address of the request (ADDR) in a series of 4 events:
|
||||
*
|
||||
* 0 16 24 31
|
||||
* | ADDR[0:15] | VFDI_EV_TYPE_REQ_WORD0 | SEQ |
|
||||
* | ADDR[16:31] | VFDI_EV_TYPE_REQ_WORD1 | SEQ+1 |
|
||||
* | ADDR[32:47] | VFDI_EV_TYPE_REQ_WORD2 | SEQ+2 |
|
||||
* | ADDR[48:63] | VFDI_EV_TYPE_REQ_WORD3 | SEQ+3 |
|
||||
*
|
||||
* The address must be page-aligned. After receiving such a valid
|
||||
* series of events, the PF driver will attempt to read the request
|
||||
* and write a response to the same address. In case of an invalid
|
||||
* sequence of events or a DMA error, there will be no response.
|
||||
*
|
||||
* The VF driver may request that the PF driver writes status
|
||||
* information into its domain asynchronously. After writing the
|
||||
* status, the PF driver will send an event of the form:
|
||||
*
|
||||
* 0 16 24 31
|
||||
* | reserved | VFDI_EV_TYPE_STATUS | SEQ |
|
||||
*
|
||||
* In case the VF must be reset for any reason, the PF driver will
|
||||
* send an event of the form:
|
||||
*
|
||||
* 0 16 24 31
|
||||
* | reserved | VFDI_EV_TYPE_RESET | SEQ |
|
||||
*
|
||||
* It is then the responsibility of the VF driver to request
|
||||
* reinitialisation of its queues.
|
||||
*/
|
||||
#define VFDI_EV_SEQ_LBN 24
|
||||
#define VFDI_EV_SEQ_WIDTH 8
|
||||
#define VFDI_EV_TYPE_LBN 16
|
||||
#define VFDI_EV_TYPE_WIDTH 8
|
||||
#define VFDI_EV_TYPE_REQ_WORD0 0
|
||||
#define VFDI_EV_TYPE_REQ_WORD1 1
|
||||
#define VFDI_EV_TYPE_REQ_WORD2 2
|
||||
#define VFDI_EV_TYPE_REQ_WORD3 3
|
||||
#define VFDI_EV_TYPE_STATUS 4
|
||||
#define VFDI_EV_TYPE_RESET 5
|
||||
#define VFDI_EV_DATA_LBN 0
|
||||
#define VFDI_EV_DATA_WIDTH 16
|
||||
|
||||
/**
 * struct vfdi_endpoint - Identity of one endpoint, DMA'd between PF and VF
 * @mac_addr: MAC address of the endpoint
 * @tci: VLAN tag control information (big-endian).  Per the
 *	&struct vfdi_status documentation, this is currently unused in the
 *	peers table and must be ignored there.
 */
struct vfdi_endpoint {
	u8 mac_addr[ETH_ALEN];
	__be16 tci;
};
|
||||
|
||||
/**
 * enum vfdi_op - VFDI operation enumeration
 * @VFDI_OP_RESPONSE: Indicates a response to the request.
 * @VFDI_OP_INIT_EVQ: Initialize SRAM entries and initialize an EVQ.
 * @VFDI_OP_INIT_RXQ: Initialize SRAM entries and initialize an RXQ.
 * @VFDI_OP_INIT_TXQ: Initialize SRAM entries and initialize a TXQ.
 * @VFDI_OP_FINI_ALL_QUEUES: Flush all queues, finalize all queues, then
 *	finalize the SRAM entries.
 * @VFDI_OP_INSERT_FILTER: Insert a MAC filter targeting the given RXQ.
 * @VFDI_OP_REMOVE_ALL_FILTERS: Remove all filters.
 * @VFDI_OP_SET_STATUS_PAGE: Set the DMA page(s) used for status updates
 *	from PF and write the initial status.
 * @VFDI_OP_CLEAR_STATUS_PAGE: Clear the DMA page(s) used for status
 *	updates from PF.
 */
enum vfdi_op {
	VFDI_OP_RESPONSE = 0,
	VFDI_OP_INIT_EVQ = 1,
	VFDI_OP_INIT_RXQ = 2,
	VFDI_OP_INIT_TXQ = 3,
	VFDI_OP_FINI_ALL_QUEUES = 4,
	VFDI_OP_INSERT_FILTER = 5,
	VFDI_OP_REMOVE_ALL_FILTERS = 6,
	VFDI_OP_SET_STATUS_PAGE = 7,
	VFDI_OP_CLEAR_STATUS_PAGE = 8,
	VFDI_OP_LIMIT,	/* not a real op: one past the highest defined opcode */
};
|
||||
|
||||
/* Response codes for VFDI operations. Other values may be used in future. */
|
||||
#define VFDI_RC_SUCCESS 0
|
||||
#define VFDI_RC_ENOMEM (-12)
|
||||
#define VFDI_RC_EINVAL (-22)
|
||||
#define VFDI_RC_EOPNOTSUPP (-95)
|
||||
#define VFDI_RC_ETIMEDOUT (-110)
|
||||
|
||||
/**
 * struct vfdi_req - Request from VF driver to PF driver
 * @op: Operation code or response indicator, taken from &enum vfdi_op.
 * @rc: Response code. Set to 0 on success or a negative error code on failure.
 * @u.init_evq.index: Index of event queue to create.
 * @u.init_evq.buf_count: Number of 4k buffers backing event queue.
 * @u.init_evq.addr: Array of length %u.init_evq.buf_count containing DMA
 *	address of each page backing the event queue.
 * @u.init_rxq.index: Index of receive queue to create.
 * @u.init_rxq.buf_count: Number of 4k buffers backing receive queue.
 * @u.init_rxq.evq: Instance of event queue to target receive events at.
 * @u.init_rxq.label: Label used in receive events.
 * @u.init_rxq.flags: Unused.
 * @u.init_rxq.addr: Array of length %u.init_rxq.buf_count containing DMA
 *	address of each page backing the receive queue.
 * @u.init_txq.index: Index of transmit queue to create.
 * @u.init_txq.buf_count: Number of 4k buffers backing transmit queue.
 * @u.init_txq.evq: Instance of event queue to target transmit completion
 *	events at.
 * @u.init_txq.label: Label used in transmit completion events.
 * @u.init_txq.flags: Checksum offload flags.
 * @u.init_txq.addr: Array of length %u.init_txq.buf_count containing DMA
 *	address of each page backing the transmit queue.
 * @u.mac_filter.rxq: Insert MAC filter at VF local address/VLAN targeting
 *	all traffic at this receive queue.
 * @u.mac_filter.flags: MAC filter flags.
 * @u.set_status_page.dma_addr: Base address for the &struct vfdi_status.
 *	This address must be page-aligned and the PF may write up to a
 *	whole page (allowing for extension of the structure).
 * @u.set_status_page.peer_page_count: Number of additional pages the VF
 *	has provided into which peer addresses may be DMAd.
 * @u.set_status_page.peer_page_addr: Array of DMA addresses of pages.
 *	If the number of peers exceeds 256, then the VF must provide
 *	additional pages in this array. The PF will then DMA up to
 *	512 vfdi_endpoint structures into each page. These addresses
 *	must be page-aligned.
 *
 * NOTE(review): this structure is DMA'd between PF and VF domains, so
 * its layout is an ABI between the two drivers - do not reorder fields.
 */
struct vfdi_req {
	u32 op;
	u32 reserved1;	/* NOTE(review): looks like padding so rc is 64-bit
			 * aligned - confirm before reuse */
	s32 rc;
	u32 reserved2;	/* NOTE(review): presumably pads the header to 16
			 * bytes so u.*.addr[] is 64-bit aligned - confirm */
	union {
		struct {
			u32 index;
			u32 buf_count;
			u64 addr[];
		} init_evq;
		struct {
			u32 index;
			u32 buf_count;
			u32 evq;
			u32 label;
			u32 flags;
#define VFDI_RXQ_FLAG_SCATTER_EN 1
			u32 reserved;
			u64 addr[];
		} init_rxq;
		struct {
			u32 index;
			u32 buf_count;
			u32 evq;
			u32 label;
			u32 flags;
#define VFDI_TXQ_FLAG_IP_CSUM_DIS 1
#define VFDI_TXQ_FLAG_TCPUDP_CSUM_DIS 2
			u32 reserved;
			u64 addr[];
		} init_txq;
		struct {
			u32 rxq;
			u32 flags;
#define VFDI_MAC_FILTER_FLAG_RSS 1
#define VFDI_MAC_FILTER_FLAG_SCATTER 2
		} mac_filter;
		struct {
			u64 dma_addr;
			u64 peer_page_count;
			u64 peer_page_addr[];
		} set_status_page;
	} u;
};
|
||||
|
||||
/**
 * struct vfdi_status - Status provided by PF driver to VF driver
 * @generation_start: A generation count DMA'd to VF *before* the
 *	rest of the structure.
 * @generation_end: A generation count DMA'd to VF *after* the
 *	rest of the structure.
 * @version: Version of this structure; currently set to 1.  Later
 *	versions must either be layout-compatible or only be sent to VFs
 *	that specifically request them.
 * @length: Total length of this structure including embedded tables
 * @vi_scale: log2 the number of VIs available on this VF. This quantity
 *	is used by the hardware for register decoding.
 * @max_tx_channels: The maximum number of transmit queues the VF can use.
 * @rss_rxq_count: The number of receive queues present in the shared RSS
 *	indirection table.
 * @peer_count: Total number of peers in the complete peer list. If larger
 *	than ARRAY_SIZE(%peers), then the VF must provide sufficient
 *	additional pages each of which is filled with vfdi_endpoint structures.
 * @local: The MAC address and outer VLAN tag of *this* VF
 * @peers: Table of peer addresses. The @tci fields in these structures
 *	are currently unused and must be ignored. Additional peers are
 *	written into any additional pages provided by the VF.
 * @timer_quantum_ns: Timer quantum (nominal period between timer ticks)
 *	for interrupt moderation timers, in nanoseconds. This member is only
 *	present if @length is sufficiently large.
 *
 * NOTE(review): DMA'd from PF to VF, so the layout is an ABI; the
 * matching @generation_start/@generation_end pair lets the VF detect a
 * torn (partially written) snapshot and retry.
 */
struct vfdi_status {
	u32 generation_start;
	u32 generation_end;
	u32 version;
	u32 length;
	u8 vi_scale;
	u8 max_tx_channels;
	u8 rss_rxq_count;
	u8 reserved1;	/* NOTE(review): presumably pads to align
			 * peer_count - confirm before reuse */
	u16 peer_count;
	u16 reserved2;	/* NOTE(review): presumably alignment padding -
			 * confirm before reuse */
	struct vfdi_endpoint local;
	struct vfdi_endpoint peers[256];

	/* Members below here extend version 1 of this structure */
	u32 timer_quantum_ns;
};
|
||||
|
||||
#endif
|
28
drivers/net/ethernet/sfc/siena/workarounds.h
Normal file
28
drivers/net/ethernet/sfc/siena/workarounds.h
Normal file
@ -0,0 +1,28 @@
|
||||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/****************************************************************************
|
||||
* Driver for Solarflare network controllers and boards
|
||||
* Copyright 2006-2013 Solarflare Communications Inc.
|
||||
*/
|
||||
|
||||
#ifndef EFX_WORKAROUNDS_H
|
||||
#define EFX_WORKAROUNDS_H
|
||||
|
||||
/*
|
||||
* Hardware workarounds.
|
||||
* Bug numbers are from Solarflare's Bugzilla.
|
||||
*/
|
||||
|
||||
#define EFX_WORKAROUND_SIENA(efx) (efx_nic_rev(efx) == EFX_REV_SIENA_A0)
|
||||
#define EFX_WORKAROUND_EF10(efx) (efx_nic_rev(efx) >= EFX_REV_HUNT_A0)
|
||||
#define EFX_WORKAROUND_10G(efx) 1
|
||||
|
||||
/* Bit-bashed I2C reads cause performance drop */
|
||||
#define EFX_WORKAROUND_7884 EFX_WORKAROUND_10G
|
||||
/* Legacy interrupt storm when interrupt fifo fills */
|
||||
#define EFX_WORKAROUND_17213 EFX_WORKAROUND_SIENA
|
||||
|
||||
/* Moderation timer access must go through MCDI */
|
||||
#define EFX_EF10_WORKAROUND_61265(efx) \
|
||||
(((struct efx_ef10_nic_data *)efx->nic_data)->workaround_61265)
|
||||
|
||||
#endif /* EFX_WORKAROUNDS_H */
|
Loading…
Reference in New Issue
Block a user