diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 2bc11a61c4d0..e92d63d3e878 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -966,10 +966,6 @@
debugpat [X86] Enable PAT debugging
- decnet.addr= [HW,NET]
- Format: <area>[,<node>]
- See also Documentation/networking/decnet.rst.
-
default_hugepagesz=
[HW] The size of the default HugeTLB page. This is
the size represented by the legacy /proc/ hugepages
diff --git a/Documentation/admin-guide/sysctl/net.rst b/Documentation/admin-guide/sysctl/net.rst
index 60d44165fba7..6394f5dc2303 100644
--- a/Documentation/admin-guide/sysctl/net.rst
+++ b/Documentation/admin-guide/sysctl/net.rst
@@ -31,17 +31,18 @@ see only some of them, depending on your kernel's configuration.
Table : Subdirectories in /proc/sys/net
- ========= =================== = ========== ==================
+ ========= =================== = ========== ===================
Directory Content Directory Content
- ========= =================== = ========== ==================
- core General parameter appletalk Appletalk protocol
- unix Unix domain sockets netrom NET/ROM
- 802 E802 protocol ax25 AX25
- ethernet Ethernet protocol rose X.25 PLP layer
+ ========= =================== = ========== ===================
+ 802 E802 protocol mptcp Multipath TCP
+ appletalk Appletalk protocol netfilter Network Filter
+ ax25 AX25 netrom NET/ROM
+ bridge Bridging rose X.25 PLP layer
+ core General parameter tipc TIPC
+ ethernet Ethernet protocol unix Unix domain sockets
ipv4 IP version 4 x25 X.25 protocol
- bridge Bridging decnet DEC net
- ipv6 IP version 6 tipc TIPC
- ========= =================== = ========== ==================
+ ipv6 IP version 6
+ ========= =================== = ========== ===================
1. /proc/sys/net/core - Network core options
============================================
@@ -101,6 +102,9 @@ Values:
- 1 - enable JIT hardening for unprivileged users only
- 2 - enable JIT hardening for all users
+where "privileged user" in this context means a process having
+CAP_BPF or CAP_SYS_ADMIN in the root user name space.
+
bpf_jit_kallsyms
----------------
diff --git a/Documentation/bpf/clang-notes.rst b/Documentation/bpf/clang-notes.rst
new file mode 100644
index 000000000000..528feddf2db9
--- /dev/null
+++ b/Documentation/bpf/clang-notes.rst
@@ -0,0 +1,30 @@
+.. contents::
+.. sectnum::
+
+==========================
+Clang implementation notes
+==========================
+
+This document provides more details specific to the Clang/LLVM implementation of the eBPF instruction set.
+
+Versions
+========
+
+Clang defined "CPU" versions, where a CPU version of 3 corresponds to the current eBPF ISA.
+
+Clang can select the eBPF ISA version using the ``-mcpu`` option, for example ``-mcpu=v3`` to select version 3.
+
+Arithmetic instructions
+=======================
+
+For CPU versions prior to 3, Clang v7.0 and later can enable ``BPF_ALU`` support with
+``-Xclang -target-feature -Xclang +alu32``. In CPU version 3, support is automatically included.
+
+Atomic operations
+=================
+
+Clang can generate atomic instructions by default when ``-mcpu=v3`` is
+enabled. If a lower version for ``-mcpu`` is set, the only atomic instruction
+Clang can generate is ``BPF_ADD`` *without* ``BPF_FETCH``. If you need to enable
+the atomics features, while keeping a lower ``-mcpu`` version, you can use
+``-Xclang -target-feature -Xclang +alu32``.
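As an editorial illustration of the paragraph above (not part of the patch), a minimal BPF C fragment whose atomics depend on those flags; the function and variable names are made up for the example::

  /* Compile with: clang -O2 -target bpf -mcpu=v3 -c atomics.c -o atomics.o
   * With a lower -mcpu and without +alu32, only the plain (non-fetch)
   * BPF_ADD form is available to the compiler.
   */
  long counter;

  int bump(void *ctx)
  {
          /* Lowered to a BPF_ATOMIC add; using the return value needs
           * the BPF_FETCH-capable ISA described above.
           */
          long old = __sync_fetch_and_add(&counter, 1);

          return old > 0;
  }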
diff --git a/Documentation/bpf/index.rst b/Documentation/bpf/index.rst
index 1bc2c5c58bdb..1b50de1983ee 100644
--- a/Documentation/bpf/index.rst
+++ b/Documentation/bpf/index.rst
@@ -26,6 +26,8 @@ that goes into great technical depth about the BPF Architecture.
classic_vs_extended.rst
bpf_licensing
test_debug
+ clang-notes
+ linux-notes
other
.. only:: subproject and html
diff --git a/Documentation/bpf/instruction-set.rst b/Documentation/bpf/instruction-set.rst
index 0ac7ae40be37..5d798437dad4 100644
--- a/Documentation/bpf/instruction-set.rst
+++ b/Documentation/bpf/instruction-set.rst
@@ -1,7 +1,12 @@
+.. contents::
+.. sectnum::
+
+========================================
+eBPF Instruction Set Specification, v1.0
+========================================
+
+This document specifies version 1.0 of the eBPF instruction set.
-====================
-eBPF Instruction Set
-====================
Registers and calling convention
================================
@@ -11,10 +16,10 @@ all of which are 64-bits wide.
The eBPF calling convention is defined as:
- * R0: return value from function calls, and exit value for eBPF programs
- * R1 - R5: arguments for function calls
- * R6 - R9: callee saved registers that function calls will preserve
- * R10: read-only frame pointer to access stack
+* R0: return value from function calls, and exit value for eBPF programs
+* R1 - R5: arguments for function calls
+* R6 - R9: callee saved registers that function calls will preserve
+* R10: read-only frame pointer to access stack
R0 - R5 are scratch registers and eBPF programs needs to spill/fill them if
necessary across calls.
@@ -24,17 +29,17 @@ Instruction encoding
eBPF has two instruction encodings:
- * the basic instruction encoding, which uses 64 bits to encode an instruction
- * the wide instruction encoding, which appends a second 64-bit immediate value
- (imm64) after the basic instruction for a total of 128 bits.
+* the basic instruction encoding, which uses 64 bits to encode an instruction
+* the wide instruction encoding, which appends a second 64-bit immediate value
+ (imm64) after the basic instruction for a total of 128 bits.
The basic instruction encoding looks as follows:
- ============= ======= =============== ==================== ============
- 32 bits (MSB) 16 bits 4 bits 4 bits 8 bits (LSB)
- ============= ======= =============== ==================== ============
- immediate offset source register destination register opcode
- ============= ======= =============== ==================== ============
+============= ======= =============== ==================== ============
+32 bits (MSB) 16 bits 4 bits 4 bits 8 bits (LSB)
+============= ======= =============== ==================== ============
+immediate offset source register destination register opcode
+============= ======= =============== ==================== ============
Note that most instructions do not use all of the fields.
Unused fields shall be cleared to zero.
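For orientation (an editorial note, not part of the patch), this basic 64-bit encoding is what ``struct bpf_insn`` in the Linux UAPI header ``include/uapi/linux/bpf.h`` describes, laid out from the LSB end (opcode) to the MSB end (immediate)::

  struct bpf_insn {
          __u8    code;           /* opcode */
          __u8    dst_reg:4;      /* destination register */
          __u8    src_reg:4;      /* source register */
          __s16   off;            /* signed offset */
          __s32   imm;            /* signed immediate constant */
  };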
@@ -44,30 +49,30 @@ Instruction classes
The three LSB bits of the 'opcode' field store the instruction class:
- ========= ===== ===============================
- class value description
- ========= ===== ===============================
- BPF_LD 0x00 non-standard load operations
- BPF_LDX 0x01 load into register operations
- BPF_ST 0x02 store from immediate operations
- BPF_STX 0x03 store from register operations
- BPF_ALU 0x04 32-bit arithmetic operations
- BPF_JMP 0x05 64-bit jump operations
- BPF_JMP32 0x06 32-bit jump operations
- BPF_ALU64 0x07 64-bit arithmetic operations
- ========= ===== ===============================
+========= ===== =============================== ===================================
+class value description reference
+========= ===== =============================== ===================================
+BPF_LD 0x00 non-standard load operations `Load and store instructions`_
+BPF_LDX 0x01 load into register operations `Load and store instructions`_
+BPF_ST 0x02 store from immediate operations `Load and store instructions`_
+BPF_STX 0x03 store from register operations `Load and store instructions`_
+BPF_ALU 0x04 32-bit arithmetic operations `Arithmetic and jump instructions`_
+BPF_JMP 0x05 64-bit jump operations `Arithmetic and jump instructions`_
+BPF_JMP32 0x06 32-bit jump operations `Arithmetic and jump instructions`_
+BPF_ALU64 0x07 64-bit arithmetic operations `Arithmetic and jump instructions`_
+========= ===== =============================== ===================================
Arithmetic and jump instructions
================================
-For arithmetic and jump instructions (BPF_ALU, BPF_ALU64, BPF_JMP and
-BPF_JMP32), the 8-bit 'opcode' field is divided into three parts:
+For arithmetic and jump instructions (``BPF_ALU``, ``BPF_ALU64``, ``BPF_JMP`` and
+``BPF_JMP32``), the 8-bit 'opcode' field is divided into three parts:
- ============== ====== =================
- 4 bits (MSB) 1 bit 3 bits (LSB)
- ============== ====== =================
- operation code source instruction class
- ============== ====== =================
+============== ====== =================
+4 bits (MSB) 1 bit 3 bits (LSB)
+============== ====== =================
+operation code source instruction class
+============== ====== =================
The 4th bit encodes the source operand:
@@ -84,51 +89,51 @@ The four MSB bits store the operation code.
Arithmetic instructions
-----------------------
-BPF_ALU uses 32-bit wide operands while BPF_ALU64 uses 64-bit wide operands for
+``BPF_ALU`` uses 32-bit wide operands while ``BPF_ALU64`` uses 64-bit wide operands for
otherwise identical operations.
-The code field encodes the operation as below:
+The 'code' field encodes the operation as below:
- ======== ===== =================================================
- code value description
- ======== ===== =================================================
- BPF_ADD 0x00 dst += src
- BPF_SUB 0x10 dst -= src
- BPF_MUL 0x20 dst \*= src
- BPF_DIV 0x30 dst /= src
- BPF_OR 0x40 dst \|= src
- BPF_AND 0x50 dst &= src
- BPF_LSH 0x60 dst <<= src
- BPF_RSH 0x70 dst >>= src
- BPF_NEG 0x80 dst = ~src
- BPF_MOD 0x90 dst %= src
- BPF_XOR 0xa0 dst ^= src
- BPF_MOV 0xb0 dst = src
- BPF_ARSH 0xc0 sign extending shift right
- BPF_END 0xd0 byte swap operations (see separate section below)
- ======== ===== =================================================
+======== ===== ==========================================================
+code value description
+======== ===== ==========================================================
+BPF_ADD 0x00 dst += src
+BPF_SUB 0x10 dst -= src
+BPF_MUL 0x20 dst \*= src
+BPF_DIV 0x30 dst /= src
+BPF_OR 0x40 dst \|= src
+BPF_AND 0x50 dst &= src
+BPF_LSH 0x60 dst <<= src
+BPF_RSH 0x70 dst >>= src
+BPF_NEG 0x80 dst = ~src
+BPF_MOD 0x90 dst %= src
+BPF_XOR 0xa0 dst ^= src
+BPF_MOV 0xb0 dst = src
+BPF_ARSH 0xc0 sign extending shift right
+BPF_END 0xd0 byte swap operations (see `Byte swap instructions`_ below)
+======== ===== ==========================================================
-BPF_ADD | BPF_X | BPF_ALU means::
+``BPF_ADD | BPF_X | BPF_ALU`` means::
dst_reg = (u32) dst_reg + (u32) src_reg;
-BPF_ADD | BPF_X | BPF_ALU64 means::
+``BPF_ADD | BPF_X | BPF_ALU64`` means::
dst_reg = dst_reg + src_reg
-BPF_XOR | BPF_K | BPF_ALU means::
+``BPF_XOR | BPF_K | BPF_ALU`` means::
src_reg = (u32) src_reg ^ (u32) imm32
-BPF_XOR | BPF_K | BPF_ALU64 means::
+``BPF_XOR | BPF_K | BPF_ALU64`` means::
src_reg = src_reg ^ imm32
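As a concrete encoding of the ``BPF_ADD | BPF_X | BPF_ALU64`` case above (an editorial sketch, not part of the patch; the variable name is illustrative), using the UAPI definitions from ``linux/bpf.h``::

  #include <linux/bpf.h>

  /* "r2 += r3" as a single 64-bit ALU instruction; equivalent to the
   * kernel's BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_3) helper macro.
   */
  static const struct bpf_insn add_r2_r3 = {
          .code    = BPF_ALU64 | BPF_ADD | BPF_X,
          .dst_reg = BPF_REG_2,
          .src_reg = BPF_REG_3,
          .off     = 0,
          .imm     = 0,
  };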
Byte swap instructions
-----------------------
+~~~~~~~~~~~~~~~~~~~~~~
The byte swap instructions use an instruction class of ``BPF_ALU`` and a 4-bit
-code field of ``BPF_END``.
+'code' field of ``BPF_END``.
The byte swap instructions operate on the destination register
only and do not use a separate source register or immediate value.
@@ -136,14 +141,14 @@ only and do not use a separate source register or immediate value.
The 1-bit source operand field in the opcode is used to select what byte
order the operation convert from or to:
- ========= ===== =================================================
- source value description
- ========= ===== =================================================
- BPF_TO_LE 0x00 convert between host byte order and little endian
- BPF_TO_BE 0x08 convert between host byte order and big endian
- ========= ===== =================================================
+========= ===== =================================================
+source value description
+========= ===== =================================================
+BPF_TO_LE 0x00 convert between host byte order and little endian
+BPF_TO_BE 0x08 convert between host byte order and big endian
+========= ===== =================================================
-The imm field encodes the width of the swap operations. The following widths
+The 'imm' field encodes the width of the swap operations. The following widths
are supported: 16, 32 and 64.
Examples:
@@ -156,35 +161,31 @@ Examples:
dst_reg = htobe64(dst_reg)
-``BPF_FROM_LE`` and ``BPF_FROM_BE`` exist as aliases for ``BPF_TO_LE`` and
-``BPF_TO_BE`` respectively.
-
-
Jump instructions
-----------------
-BPF_JMP32 uses 32-bit wide operands while BPF_JMP uses 64-bit wide operands for
+``BPF_JMP32`` uses 32-bit wide operands while ``BPF_JMP`` uses 64-bit wide operands for
otherwise identical operations.
-The code field encodes the operation as below:
+The 'code' field encodes the operation as below:
- ======== ===== ========================= ============
- code value description notes
- ======== ===== ========================= ============
- BPF_JA 0x00 PC += off BPF_JMP only
- BPF_JEQ 0x10 PC += off if dst == src
- BPF_JGT 0x20 PC += off if dst > src unsigned
- BPF_JGE 0x30 PC += off if dst >= src unsigned
- BPF_JSET 0x40 PC += off if dst & src
- BPF_JNE 0x50 PC += off if dst != src
- BPF_JSGT 0x60 PC += off if dst > src signed
- BPF_JSGE 0x70 PC += off if dst >= src signed
- BPF_CALL 0x80 function call
- BPF_EXIT 0x90 function / program return BPF_JMP only
- BPF_JLT 0xa0 PC += off if dst < src unsigned
- BPF_JLE 0xb0 PC += off if dst <= src unsigned
- BPF_JSLT 0xc0 PC += off if dst < src signed
- BPF_JSLE 0xd0 PC += off if dst <= src signed
- ======== ===== ========================= ============
+======== ===== ========================= ============
+code value description notes
+======== ===== ========================= ============
+BPF_JA 0x00 PC += off BPF_JMP only
+BPF_JEQ 0x10 PC += off if dst == src
+BPF_JGT 0x20 PC += off if dst > src unsigned
+BPF_JGE 0x30 PC += off if dst >= src unsigned
+BPF_JSET 0x40 PC += off if dst & src
+BPF_JNE 0x50 PC += off if dst != src
+BPF_JSGT 0x60 PC += off if dst > src signed
+BPF_JSGE 0x70 PC += off if dst >= src signed
+BPF_CALL 0x80 function call
+BPF_EXIT 0x90 function / program return BPF_JMP only
+BPF_JLT 0xa0 PC += off if dst < src unsigned
+BPF_JLE 0xb0 PC += off if dst <= src unsigned
+BPF_JSLT 0xc0 PC += off if dst < src signed
+BPF_JSLE 0xd0 PC += off if dst <= src signed
+======== ===== ========================= ============
The eBPF program needs to store the return value into register R0 before doing a
BPF_EXIT.
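To illustrate the R0/``BPF_EXIT`` rule (an editorial sketch, not part of the patch; the array name is illustrative), the smallest well-formed program sets R0 and exits::

  #include <linux/bpf.h>

  static const struct bpf_insn return_zero[] = {
          /* BPF_MOV | BPF_K | BPF_ALU64: r0 = 0 */
          { .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = BPF_REG_0, .imm = 0 },
          /* BPF_EXIT | BPF_JMP: return r0 to the caller */
          { .code = BPF_JMP | BPF_EXIT },
  };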
@@ -193,14 +194,26 @@ BPF_EXIT.
Load and store instructions
===========================
-For load and store instructions (BPF_LD, BPF_LDX, BPF_ST and BPF_STX), the
+For load and store instructions (``BPF_LD``, ``BPF_LDX``, ``BPF_ST``, and ``BPF_STX``), the
8-bit 'opcode' field is divided as:
- ============ ====== =================
- 3 bits (MSB) 2 bits 3 bits (LSB)
- ============ ====== =================
- mode size instruction class
- ============ ====== =================
+============ ====== =================
+3 bits (MSB) 2 bits 3 bits (LSB)
+============ ====== =================
+mode size instruction class
+============ ====== =================
+
+The mode modifier is one of:
+
+ ============= ===== ==================================== =============
+ mode modifier value description reference
+ ============= ===== ==================================== =============
+ BPF_IMM 0x00 64-bit immediate instructions `64-bit immediate instructions`_
+ BPF_ABS 0x20 legacy BPF packet access (absolute) `Legacy BPF Packet access instructions`_
+ BPF_IND 0x40 legacy BPF packet access (indirect) `Legacy BPF Packet access instructions`_
+ BPF_MEM 0x60 regular load and store operations `Regular load and store operations`_
+ BPF_ATOMIC 0xc0 atomic operations `Atomic operations`_
+ ============= ===== ==================================== =============
The size modifier is one of:
@@ -213,19 +226,6 @@ The size modifier is one of:
BPF_DW 0x18 double word (8 bytes)
============= ===== =====================
-The mode modifier is one of:
-
- ============= ===== ====================================
- mode modifier value description
- ============= ===== ====================================
- BPF_IMM 0x00 64-bit immediate instructions
- BPF_ABS 0x20 legacy BPF packet access (absolute)
- BPF_IND 0x40 legacy BPF packet access (indirect)
- BPF_MEM 0x60 regular load and store operations
- BPF_ATOMIC 0xc0 atomic operations
- ============= ===== ====================================
-
-
Regular load and store operations
---------------------------------
@@ -256,44 +256,42 @@ by other eBPF programs or means outside of this specification.
All atomic operations supported by eBPF are encoded as store operations
that use the ``BPF_ATOMIC`` mode modifier as follows:
- * ``BPF_ATOMIC | BPF_W | BPF_STX`` for 32-bit operations
- * ``BPF_ATOMIC | BPF_DW | BPF_STX`` for 64-bit operations
- * 8-bit and 16-bit wide atomic operations are not supported.
+* ``BPF_ATOMIC | BPF_W | BPF_STX`` for 32-bit operations
+* ``BPF_ATOMIC | BPF_DW | BPF_STX`` for 64-bit operations
+* 8-bit and 16-bit wide atomic operations are not supported.
-The imm field is used to encode the actual atomic operation.
+The 'imm' field is used to encode the actual atomic operation.
Simple atomic operation use a subset of the values defined to encode
-arithmetic operations in the imm field to encode the atomic operation:
+arithmetic operations in the 'imm' field to encode the atomic operation:
- ======== ===== ===========
- imm value description
- ======== ===== ===========
- BPF_ADD 0x00 atomic add
- BPF_OR 0x40 atomic or
- BPF_AND 0x50 atomic and
- BPF_XOR 0xa0 atomic xor
- ======== ===== ===========
+======== ===== ===========
+imm value description
+======== ===== ===========
+BPF_ADD 0x00 atomic add
+BPF_OR 0x40 atomic or
+BPF_AND 0x50 atomic and
+BPF_XOR 0xa0 atomic xor
+======== ===== ===========
-``BPF_ATOMIC | BPF_W | BPF_STX`` with imm = BPF_ADD means::
+``BPF_ATOMIC | BPF_W | BPF_STX`` with 'imm' = BPF_ADD means::
*(u32 *)(dst_reg + off16) += src_reg
-``BPF_ATOMIC | BPF_DW | BPF_STX`` with imm = BPF ADD means::
+``BPF_ATOMIC | BPF_DW | BPF_STX`` with 'imm' = BPF_ADD means::
*(u64 *)(dst_reg + off16) += src_reg
-``BPF_XADD`` is a deprecated name for ``BPF_ATOMIC | BPF_ADD``.
-
In addition to the simple atomic operations, there also is a modifier and
two complex atomic operations:
- =========== ================ ===========================
- imm value description
- =========== ================ ===========================
- BPF_FETCH 0x01 modifier: return old value
- BPF_XCHG 0xe0 | BPF_FETCH atomic exchange
- BPF_CMPXCHG 0xf0 | BPF_FETCH atomic compare and exchange
- =========== ================ ===========================
+=========== ================ ===========================
+imm value description
+=========== ================ ===========================
+BPF_FETCH 0x01 modifier: return old value
+BPF_XCHG 0xe0 | BPF_FETCH atomic exchange
+BPF_CMPXCHG 0xf0 | BPF_FETCH atomic compare and exchange
+=========== ================ ===========================
The ``BPF_FETCH`` modifier is optional for simple atomic operations, and
always set for the complex atomic operations. If the ``BPF_FETCH`` flag
@@ -309,16 +307,10 @@ The ``BPF_CMPXCHG`` operation atomically compares the value addressed by
value that was at ``dst_reg + off`` before the operation is zero-extended
and loaded back to ``R0``.
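As an editorial sketch of how such an operation is encoded (not part of the patch; the register choices are illustrative), a 64-bit compare-and-exchange on ``*(u64 *)(R2 + 0)``::

  #include <linux/bpf.h>

  /* Equivalent to the kernel's BPF_ATOMIC_OP(BPF_DW, BPF_CMPXCHG,
   * BPF_REG_2, BPF_REG_1, 0) helper macro; R0 implicitly holds the
   * value to compare against and receives the old value.
   */
  static const struct bpf_insn cmpxchg64 = {
          .code    = BPF_STX | BPF_DW | BPF_ATOMIC,
          .dst_reg = BPF_REG_2,   /* memory operand: R2 + off */
          .src_reg = BPF_REG_1,   /* value to store on a match */
          .off     = 0,
          .imm     = BPF_CMPXCHG, /* 0xf0 | BPF_FETCH */
  };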
-Clang can generate atomic instructions by default when ``-mcpu=v3`` is
-enabled. If a lower version for ``-mcpu`` is set, the only atomic instruction
-Clang can generate is ``BPF_ADD`` *without* ``BPF_FETCH``. If you need to enable
-the atomics features, while keeping a lower ``-mcpu`` version, you can use
-``-Xclang -target-feature -Xclang +alu32``.
-
64-bit immediate instructions
-----------------------------
-Instructions with the ``BPF_IMM`` mode modifier use the wide instruction
+Instructions with the ``BPF_IMM`` 'mode' modifier use the wide instruction
encoding for an extra imm64 value.
There is currently only one such instruction.
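A sketch of how the wide encoding is laid out in practice (editorial, not part of the patch; the constant and array name are illustrative): the single instruction spans two ``struct bpf_insn`` slots, with the upper 32 bits of the constant carried in the second slot's 'imm' field::

  #include <linux/bpf.h>

  /* r1 = 0x1122334455667788, mirroring the BPF_LD_IMM64() helper macro. */
  static const struct bpf_insn load_imm64[2] = {
          {
                  .code    = BPF_LD | BPF_DW | BPF_IMM,
                  .dst_reg = BPF_REG_1,
                  .imm     = 0x55667788,  /* low 32 bits */
          },
          {
                  .code    = 0,           /* second half of the wide insn */
                  .imm     = 0x11223344,  /* high 32 bits */
          },
  };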
@@ -331,36 +323,6 @@ There is currently only one such instruction.
Legacy BPF Packet access instructions
-------------------------------------
-eBPF has special instructions for access to packet data that have been
-carried over from classic BPF to retain the performance of legacy socket
-filters running in the eBPF interpreter.
-
-The instructions come in two forms: ``BPF_ABS | <size> | BPF_LD`` and
-``BPF_IND | <size> | BPF_LD``.
-
-These instructions are used to access packet data and can only be used when
-the program context is a pointer to networking packet. ``BPF_ABS``
-accesses packet data at an absolute offset specified by the immediate data
-and ``BPF_IND`` access packet data at an offset that includes the value of
-a register in addition to the immediate data.
-
-These instructions have seven implicit operands:
-
- * Register R6 is an implicit input that must contain pointer to a
- struct sk_buff.
- * Register R0 is an implicit output which contains the data fetched from
- the packet.
- * Registers R1-R5 are scratch registers that are clobbered after a call to
- ``BPF_ABS | BPF_LD`` or ``BPF_IND | BPF_LD`` instructions.
-
-These instructions have an implicit program exit condition as well. When an
-eBPF program is trying to access the data beyond the packet boundary, the
-program execution will be aborted.
-
-``BPF_ABS | BPF_W | BPF_LD`` means::
-
- R0 = ntohl(*(u32 *) (((struct sk_buff *) R6)->data + imm32))
-
-``BPF_IND | BPF_W | BPF_LD`` means::
-
- R0 = ntohl(*(u32 *) (((struct sk_buff *) R6)->data + src_reg + imm32))
+eBPF previously introduced special instructions for access to packet data that were
+carried over from classic BPF. However, these instructions are
+deprecated and should no longer be used.
diff --git a/Documentation/bpf/kfuncs.rst b/Documentation/bpf/kfuncs.rst
index c0b7dae6dbf5..0f858156371d 100644
--- a/Documentation/bpf/kfuncs.rst
+++ b/Documentation/bpf/kfuncs.rst
@@ -137,14 +137,37 @@ KF_ACQUIRE and KF_RET_NULL flags.
--------------------------
The KF_TRUSTED_ARGS flag is used for kfuncs taking pointer arguments. It
-indicates that the all pointer arguments will always be refcounted, and have
-their offset set to 0. It can be used to enforce that a pointer to a refcounted
-object acquired from a kfunc or BPF helper is passed as an argument to this
-kfunc without any modifications (e.g. pointer arithmetic) such that it is
-trusted and points to the original object. This flag is often used for kfuncs
-that operate (change some property, perform some operation) on an object that
-was obtained using an acquire kfunc. Such kfuncs need an unchanged pointer to
-ensure the integrity of the operation being performed on the expected object.
+indicates that all pointer arguments will always have a guaranteed lifetime,
+and pointers to kernel objects are always passed to helpers in their unmodified
+form (as obtained from acquire kfuncs).
+
+It can be used to enforce that a pointer to a refcounted object acquired from a
+kfunc or BPF helper is passed as an argument to this kfunc without any
+modifications (e.g. pointer arithmetic) such that it is trusted and points to
+the original object.
+
+Meanwhile, it is also allowed to pass pointers to normal memory to such kfuncs,
+but those can have a non-zero offset.
+
+This flag is often used for kfuncs that operate (change some property, perform
+some operation) on an object that was obtained using an acquire kfunc. Such
+kfuncs need an unchanged pointer to ensure the integrity of the operation being
+performed on the expected object.
+
+2.4.6 KF_SLEEPABLE flag
+-----------------------
+
+The KF_SLEEPABLE flag is used for kfuncs that may sleep. Such kfuncs can only
+be called by sleepable BPF programs (BPF_F_SLEEPABLE).
+
+2.4.7 KF_DESTRUCTIVE flag
+--------------------------
+
+The KF_DESTRUCTIVE flag is used to indicate functions whose invocation is
+destructive to the system. For example, such a call can result in the system
+rebooting or panicking. Because of this, additional restrictions apply to these
+calls. At the moment they only require the CAP_SYS_BOOT capability, but more
+may be added later.
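To show how the flags in this section are attached in practice (an editorial sketch, not part of the patch; ``bpf_example_teardown()`` and ``bpf_example_wait()`` are hypothetical kfuncs), the BTF id set carries the flags, with registration covered in section 2.5 below::

  #include <linux/btf.h>
  #include <linux/btf_ids.h>

  BTF_SET8_START(example_kfunc_ids)
  BTF_ID_FLAGS(func, bpf_example_teardown, KF_DESTRUCTIVE)
  BTF_ID_FLAGS(func, bpf_example_wait, KF_SLEEPABLE | KF_TRUSTED_ARGS)
  BTF_SET8_END(example_kfunc_ids)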
2.5 Registering the kfuncs
--------------------------
diff --git a/Documentation/bpf/linux-notes.rst b/Documentation/bpf/linux-notes.rst
new file mode 100644
index 000000000000..956b0c86699d
--- /dev/null
+++ b/Documentation/bpf/linux-notes.rst
@@ -0,0 +1,53 @@
+.. contents::
+.. sectnum::
+
+==========================
+Linux implementation notes
+==========================
+
+This document provides more details specific to the Linux kernel implementation of the eBPF instruction set.
+
+Byte swap instructions
+======================
+
+``BPF_FROM_LE`` and ``BPF_FROM_BE`` exist as aliases for ``BPF_TO_LE`` and ``BPF_TO_BE`` respectively.
+
+Legacy BPF Packet access instructions
+=====================================
+
+As mentioned in the `ISA standard documentation <instruction-set.rst>`_,
+Linux has special eBPF instructions for access to packet data that have been
+carried over from classic BPF to retain the performance of legacy socket
+filters running in the eBPF interpreter.
+
+The instructions come in two forms: ``BPF_ABS | <size> | BPF_LD`` and
+``BPF_IND | <size> | BPF_LD``.
+
+These instructions are used to access packet data and can only be used when
+the program context is a pointer to a networking packet. ``BPF_ABS``
+accesses packet data at an absolute offset specified by the immediate data
+and ``BPF_IND`` access packet data at an offset that includes the value of
+a register in addition to the immediate data.
+
+These instructions have seven implicit operands:
+
+* Register R6 is an implicit input that must contain a pointer to a
+ struct sk_buff.
+* Register R0 is an implicit output which contains the data fetched from
+ the packet.
+* Registers R1-R5 are scratch registers that are clobbered by the
+ instruction.
+
+These instructions have an implicit program exit condition as well. If an
+eBPF program attempts to access data beyond the packet boundary, the
+program execution will be aborted.
+
+``BPF_ABS | BPF_W | BPF_LD`` (0x20) means::
+
+ R0 = ntohl(*(u32 *) (((struct sk_buff *) R6)->data + imm))
+
+where ``ntohl()`` converts a 32-bit value from network byte order to host byte order.
+
+``BPF_IND | BPF_W | BPF_LD`` (0x40) means::
+
+ R0 = ntohl(*(u32 *) (((struct sk_buff *) R6)->data + src + imm))
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt7622-wed.yaml b/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt7622-wed.yaml
index 787d6673f952..84fb0a146b6e 100644
--- a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt7622-wed.yaml
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt7622-wed.yaml
@@ -20,6 +20,7 @@ properties:
items:
- enum:
- mediatek,mt7622-wed
+ - mediatek,mt7986-wed
- const: syscon
reg:
diff --git a/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt7986-wed-pcie.yaml b/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt7986-wed-pcie.yaml
new file mode 100644
index 000000000000..96221f51c1c3
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/mediatek/mediatek,mt7986-wed-pcie.yaml
@@ -0,0 +1,43 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/arm/mediatek/mediatek,mt7986-wed-pcie.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: MediaTek PCIE WED Controller for MT7986
+
+maintainers:
+ - Lorenzo Bianconi
+ - Felix Fietkau
+
+description:
+ The MediaTek WED PCIE provides a configuration interface for the PCIE
+ controller on the MT7986 SoC.
+
+properties:
+ compatible:
+ items:
+ - enum:
+ - mediatek,mt7986-wed-pcie
+ - const: syscon
+
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ wed_pcie: wed-pcie@10003000 {
+ compatible = "mediatek,mt7986-wed-pcie",
+ "syscon";
+ reg = <0 0x10003000 0 0x10>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/memory-controllers/mediatek,mt7621-memc.yaml b/Documentation/devicetree/bindings/memory-controllers/mediatek,mt7621-memc.yaml
index 85e02854f083..6ccdaf99c778 100644
--- a/Documentation/devicetree/bindings/memory-controllers/mediatek,mt7621-memc.yaml
+++ b/Documentation/devicetree/bindings/memory-controllers/mediatek,mt7621-memc.yaml
@@ -11,7 +11,9 @@ maintainers:
properties:
compatible:
- const: mediatek,mt7621-memc
+ items:
+ - const: mediatek,mt7621-memc
+ - const: syscon
reg:
maxItems: 1
@@ -25,6 +27,6 @@ additionalProperties: false
examples:
- |
memory-controller@5000 {
- compatible = "mediatek,mt7621-memc";
+ compatible = "mediatek,mt7621-memc", "syscon";
reg = <0x5000 0x1000>;
};
diff --git a/Documentation/devicetree/bindings/mfd/mscc,ocelot.yaml b/Documentation/devicetree/bindings/mfd/mscc,ocelot.yaml
new file mode 100644
index 000000000000..8bf45a5673a4
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/mscc,ocelot.yaml
@@ -0,0 +1,160 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/mfd/mscc,ocelot.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Ocelot Externally-Controlled Ethernet Switch
+
+maintainers:
+ - Colin Foster
+
+description: |
+ The Ocelot ethernet switch family contains chips that have an internal CPU
+ (VSC7513, VSC7514) and chips that don't (VSC7511, VSC7512). All switches have
+ the option to be controlled externally, which is the purpose of this driver.
+
+ The switch family is a multi-port networking switch that supports many
+ interfaces. Additionally, the device provides pin control, MDIO buses, and
+ external GPIO expanders.
+
+properties:
+ compatible:
+ enum:
+ - mscc,vsc7512
+
+ reg:
+ maxItems: 1
+
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 1
+
+ spi-max-frequency:
+ maxItems: 1
+
+patternProperties:
+ "^pinctrl@[0-9a-f]+$":
+ type: object
+ $ref: /schemas/pinctrl/mscc,ocelot-pinctrl.yaml
+
+ "^gpio@[0-9a-f]+$":
+ type: object
+ $ref: /schemas/pinctrl/microchip,sparx5-sgpio.yaml
+ properties:
+ compatible:
+ enum:
+ - mscc,ocelot-sgpio
+
+ "^mdio@[0-9a-f]+$":
+ type: object
+ $ref: /schemas/net/mscc,miim.yaml
+ properties:
+ compatible:
+ enum:
+ - mscc,ocelot-miim
+
+required:
+ - compatible
+ - reg
+ - '#address-cells'
+ - '#size-cells'
+ - spi-max-frequency
+
+additionalProperties: false
+
+examples:
+ - |
+ ocelot_clock: ocelot-clock {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <125000000>;
+ };
+
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ soc@0 {
+ compatible = "mscc,vsc7512";
+ spi-max-frequency = <2500000>;
+ reg = <0>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ mdio@7107009c {
+ compatible = "mscc,ocelot-miim";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x7107009c 0x24>;
+
+ sw_phy0: ethernet-phy@0 {
+ reg = <0x0>;
+ };
+ };
+
+ mdio@710700c0 {
+ compatible = "mscc,ocelot-miim";
+ pinctrl-names = "default";
+ pinctrl-0 = <&miim1_pins>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x710700c0 0x24>;
+
+ sw_phy4: ethernet-phy@4 {
+ reg = <0x4>;
+ };
+ };
+
+ gpio: pinctrl@71070034 {
+ compatible = "mscc,ocelot-pinctrl";
+ gpio-controller;
+ #gpio-cells = <2>;
+ gpio-ranges = <&gpio 0 0 22>;
+ reg = <0x71070034 0x6c>;
+
+ sgpio_pins: sgpio-pins {
+ pins = "GPIO_0", "GPIO_1", "GPIO_2", "GPIO_3";
+ function = "sg0";
+ };
+
+ miim1_pins: miim1-pins {
+ pins = "GPIO_14", "GPIO_15";
+ function = "miim";
+ };
+ };
+
+ gpio@710700f8 {
+ compatible = "mscc,ocelot-sgpio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ bus-frequency = <12500000>;
+ clocks = <&ocelot_clock>;
+ microchip,sgpio-port-ranges = <0 15>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&sgpio_pins>;
+ reg = <0x710700f8 0x100>;
+
+ sgpio_in0: gpio@0 {
+ compatible = "microchip,sparx5-sgpio-bank";
+ reg = <0>;
+ gpio-controller;
+ #gpio-cells = <3>;
+ ngpios = <64>;
+ };
+
+ sgpio_out1: gpio@1 {
+ compatible = "microchip,sparx5-sgpio-bank";
+ reg = <1>;
+ gpio-controller;
+ #gpio-cells = <3>;
+ ngpios = <64>;
+ };
+ };
+ };
+ };
+
+...
+
diff --git a/Documentation/devicetree/bindings/net/adi,adin1110.yaml b/Documentation/devicetree/bindings/net/adi,adin1110.yaml
new file mode 100644
index 000000000000..b6bd8ee38a18
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/adi,adin1110.yaml
@@ -0,0 +1,77 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/adi,adin1110.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ADI ADIN1110 MAC-PHY
+
+maintainers:
+ - Alexandru Tachici
+
+description: |
+ The ADIN1110 is a low power single port 10BASE-T1L MAC-
+ PHY designed for industrial Ethernet applications. It integrates
+ an Ethernet PHY core with a MAC and all the associated analog
+ circuitry, input and output clock buffering.
+
+ The ADIN2111 is a low power, low complexity, two-Ethernet ports
+ switch with integrated 10BASE-T1L PHYs and one serial peripheral
+ interface (SPI) port. The device is designed for industrial Ethernet
+ applications using low power constrained nodes and is compliant
+ with the IEEE 802.3cg-2019 Ethernet standard for long reach
+ 10 Mbps single pair Ethernet (SPE).
+
+ The device has a 4-wire SPI interface for communication
+ between the MAC and host processor.
+
+allOf:
+ - $ref: ethernet-controller.yaml#
+ - $ref: /schemas/spi/spi-peripheral-props.yaml#
+
+properties:
+ compatible:
+ enum:
+ - adi,adin1110
+ - adi,adin2111
+
+ reg:
+ maxItems: 1
+
+ adi,spi-crc:
+ description: |
+ Enable CRC8 checks on SPI read/writes.
+ type: boolean
+
+ interrupts:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ spi {
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethernet@0 {
+ compatible = "adi,adin2111";
+ reg = <0>;
+ spi-max-frequency = <24500000>;
+
+ adi,spi-crc;
+
+ interrupt-parent = <&gpio>;
+ interrupts = <25 IRQ_TYPE_LEVEL_LOW>;
+
+ local-mac-address = [ 00 11 22 33 44 55 ];
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/altera_tse.txt b/Documentation/devicetree/bindings/net/altera_tse.txt
deleted file mode 100644
index 1d9148ff5130..000000000000
--- a/Documentation/devicetree/bindings/net/altera_tse.txt
+++ /dev/null
@@ -1,113 +0,0 @@
-* Altera Triple-Speed Ethernet MAC driver (TSE)
-
-Required properties:
-- compatible: Should be "altr,tse-1.0" for legacy SGDMA based TSE, and should
- be "altr,tse-msgdma-1.0" for the preferred MSGDMA based TSE.
- ALTR is supported for legacy device trees, but is deprecated.
- altr should be used for all new designs.
-- reg: Address and length of the register set for the device. It contains
- the information of registers in the same order as described by reg-names
-- reg-names: Should contain the reg names
- "control_port": MAC configuration space region
- "tx_csr": xDMA Tx dispatcher control and status space region
- "tx_desc": MSGDMA Tx dispatcher descriptor space region
- "rx_csr" : xDMA Rx dispatcher control and status space region
- "rx_desc": MSGDMA Rx dispatcher descriptor space region
- "rx_resp": MSGDMA Rx dispatcher response space region
- "s1": SGDMA descriptor memory
-- interrupts: Should contain the TSE interrupts and its mode.
-- interrupt-names: Should contain the interrupt names
- "rx_irq": xDMA Rx dispatcher interrupt
- "tx_irq": xDMA Tx dispatcher interrupt
-- rx-fifo-depth: MAC receive FIFO buffer depth in bytes
-- tx-fifo-depth: MAC transmit FIFO buffer depth in bytes
-- phy-mode: See ethernet.txt in the same directory.
-- phy-handle: See ethernet.txt in the same directory.
-- phy-addr: See ethernet.txt in the same directory. A configuration should
- include phy-handle or phy-addr.
-- altr,has-supplementary-unicast:
- If present, TSE supports additional unicast addresses.
- Otherwise additional unicast addresses are not supported.
-- altr,has-hash-multicast-filter:
- If present, TSE supports a hash based multicast filter.
- Otherwise, hash-based multicast filtering is not supported.
-
-- mdio device tree subnode: When the TSE has a phy connected to its local
- mdio, there must be device tree subnode with the following
- required properties:
-
- - compatible: Must be "altr,tse-mdio".
- - #address-cells: Must be <1>.
- - #size-cells: Must be <0>.
-
- For each phy on the mdio bus, there must be a node with the following
- fields:
-
- - reg: phy id used to communicate to phy.
- - device_type: Must be "ethernet-phy".
-
-The MAC address will be determined using the optional properties defined in
-ethernet.txt.
-
-Example:
-
- tse_sub_0_eth_tse_0: ethernet@1,00000000 {
- compatible = "altr,tse-msgdma-1.0";
- reg = <0x00000001 0x00000000 0x00000400>,
- <0x00000001 0x00000460 0x00000020>,
- <0x00000001 0x00000480 0x00000020>,
- <0x00000001 0x000004A0 0x00000008>,
- <0x00000001 0x00000400 0x00000020>,
- <0x00000001 0x00000420 0x00000020>;
- reg-names = "control_port", "rx_csr", "rx_desc", "rx_resp", "tx_csr", "tx_desc";
- interrupt-parent = <&hps_0_arm_gic_0>;
- interrupts = <0 41 4>, <0 40 4>;
- interrupt-names = "rx_irq", "tx_irq";
- rx-fifo-depth = <2048>;
- tx-fifo-depth = <2048>;
- address-bits = <48>;
- max-frame-size = <1500>;
- local-mac-address = [ 00 00 00 00 00 00 ];
- phy-mode = "gmii";
- altr,has-supplementary-unicast;
- altr,has-hash-multicast-filter;
- phy-handle = <&phy0>;
- mdio {
- compatible = "altr,tse-mdio";
- #address-cells = <1>;
- #size-cells = <0>;
- phy0: ethernet-phy@0 {
- reg = <0x0>;
- device_type = "ethernet-phy";
- };
-
- phy1: ethernet-phy@1 {
- reg = <0x1>;
- device_type = "ethernet-phy";
- };
-
- };
- };
-
- tse_sub_1_eth_tse_0: ethernet@1,00001000 {
- compatible = "altr,tse-msgdma-1.0";
- reg = <0x00000001 0x00001000 0x00000400>,
- <0x00000001 0x00001460 0x00000020>,
- <0x00000001 0x00001480 0x00000020>,
- <0x00000001 0x000014A0 0x00000008>,
- <0x00000001 0x00001400 0x00000020>,
- <0x00000001 0x00001420 0x00000020>;
- reg-names = "control_port", "rx_csr", "rx_desc", "rx_resp", "tx_csr", "tx_desc";
- interrupt-parent = <&hps_0_arm_gic_0>;
- interrupts = <0 43 4>, <0 42 4>;
- interrupt-names = "rx_irq", "tx_irq";
- rx-fifo-depth = <2048>;
- tx-fifo-depth = <2048>;
- address-bits = <48>;
- max-frame-size = <1500>;
- local-mac-address = [ 00 00 00 00 00 00 ];
- phy-mode = "gmii";
- altr,has-supplementary-unicast;
- altr,has-hash-multicast-filter;
- phy-handle = <&phy1>;
- };
diff --git a/Documentation/devicetree/bindings/net/altr,tse.yaml b/Documentation/devicetree/bindings/net/altr,tse.yaml
new file mode 100644
index 000000000000..8d1d94494349
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/altr,tse.yaml
@@ -0,0 +1,168 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/altr,tse.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Altera Triple Speed Ethernet MAC driver (TSE)
+
+maintainers:
+ - Maxime Chevallier
+
+properties:
+ compatible:
+ oneOf:
+ - const: altr,tse-1.0
+ - const: ALTR,tse-1.0
+ deprecated: true
+ - const: altr,tse-msgdma-1.0
+
+ interrupts:
+ minItems: 2
+
+ interrupt-names:
+ items:
+ - const: rx_irq
+ - const: tx_irq
+
+ rx-fifo-depth:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ Depth in bytes of the RX FIFO
+
+ tx-fifo-depth:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ Depth in bytes of the TX FIFO
+
+ altr,has-supplementary-unicast:
+ type: boolean
+ description:
+ If present, TSE supports additional unicast addresses.
+
+ altr,has-hash-multicast-filter:
+ type: boolean
+ description:
+ If present, TSE supports hash based multicast filter.
+
+ mdio:
+ $ref: mdio.yaml#
+ unevaluatedProperties: false
+ description:
+ Creates and registers an MDIO bus.
+
+ properties:
+ compatible:
+ const: altr,tse-mdio
+
+ required:
+ - compatible
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - rx-fifo-depth
+ - tx-fifo-depth
+
+allOf:
+ - $ref: "ethernet-controller.yaml#"
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - altr,tse-1.0
+ - ALTR,tse-1.0
+ then:
+ properties:
+ reg:
+ minItems: 4
+ reg-names:
+ items:
+ - const: control_port
+ - const: rx_csr
+ - const: tx_csr
+ - const: s1
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - altr,tse-msgdma-1.0
+ then:
+ properties:
+ reg:
+ minItems: 6
+ maxItems: 7
+ reg-names:
+ minItems: 6
+ items:
+ - const: control_port
+ - const: rx_csr
+ - const: rx_desc
+ - const: rx_resp
+ - const: tx_csr
+ - const: tx_desc
+ - const: pcs
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ tse_sub_0: ethernet@c0100000 {
+ compatible = "altr,tse-msgdma-1.0";
+ reg = <0xc0100000 0x00000400>,
+ <0xc0101000 0x00000020>,
+ <0xc0102000 0x00000020>,
+ <0xc0103000 0x00000008>,
+ <0xc0104000 0x00000020>,
+ <0xc0105000 0x00000020>,
+ <0xc0106000 0x00000100>;
+ reg-names = "control_port", "rx_csr", "rx_desc", "rx_resp", "tx_csr", "tx_desc", "pcs";
+ interrupt-parent = <&intc>;
+ interrupts = <0 44 4>,<0 45 4>;
+ interrupt-names = "rx_irq","tx_irq";
+ rx-fifo-depth = <2048>;
+ tx-fifo-depth = <2048>;
+ max-frame-size = <1500>;
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ altr,has-supplementary-unicast;
+ altr,has-hash-multicast-filter;
+ sfp = <&sfp0>;
+ phy-mode = "sgmii";
+ managed = "in-band-status";
+ };
+ - |
+ tse_sub_1_eth_tse_0: ethernet@1,00001000 {
+ compatible = "altr,tse-msgdma-1.0";
+ reg = <0x00001000 0x00000400>,
+ <0x00001460 0x00000020>,
+ <0x00001480 0x00000020>,
+ <0x000014A0 0x00000008>,
+ <0x00001400 0x00000020>,
+ <0x00001420 0x00000020>;
+ reg-names = "control_port", "rx_csr", "rx_desc", "rx_resp", "tx_csr", "tx_desc";
+ interrupt-parent = <&hps_0_arm_gic_0>;
+ interrupts = <0 43 4>, <0 42 4>;
+ interrupt-names = "rx_irq", "tx_irq";
+ rx-fifo-depth = <2048>;
+ tx-fifo-depth = <2048>;
+ max-frame-size = <1500>;
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ phy-mode = "gmii";
+ altr,has-supplementary-unicast;
+ altr,has-hash-multicast-filter;
+ phy-handle = <&phy1>;
+ mdio {
+ compatible = "altr,tse-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ phy1: ethernet-phy@1 {
+ reg = <0x1>;
+ };
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/net/can/nxp,sja1000.yaml b/Documentation/devicetree/bindings/net/can/nxp,sja1000.yaml
index b1327c5b86cf..144a3785132c 100644
--- a/Documentation/devicetree/bindings/net/can/nxp,sja1000.yaml
+++ b/Documentation/devicetree/bindings/net/can/nxp,sja1000.yaml
@@ -30,8 +30,10 @@ properties:
clocks:
maxItems: 1
+ power-domains:
+ maxItems: 1
+
reg-io-width:
- $ref: /schemas/types.yaml#/definitions/uint32
description: I/O register width (in bytes) implemented by this device
default: 1
enum: [ 1, 2, 4 ]
@@ -105,6 +107,7 @@ allOf:
then:
required:
- clocks
+ - power-domains
unevaluatedProperties: false
@@ -129,4 +132,5 @@ examples:
reg-io-width = <4>;
interrupts = ;
clocks = <&sysctrl R9A06G032_HCLK_CAN0>;
+ power-domains = <&sysctrl>;
};
diff --git a/Documentation/devicetree/bindings/net/cortina,gemini-ethernet.yaml b/Documentation/devicetree/bindings/net/cortina,gemini-ethernet.yaml
index cc01b9b5752a..253b5d1407ee 100644
--- a/Documentation/devicetree/bindings/net/cortina,gemini-ethernet.yaml
+++ b/Documentation/devicetree/bindings/net/cortina,gemini-ethernet.yaml
@@ -37,6 +37,7 @@ properties:
patternProperties:
"^ethernet-port@[0-9]+$":
type: object
+ unevaluatedProperties: false
description: contains the resources for ethernet port
allOf:
- $ref: ethernet-controller.yaml#
diff --git a/Documentation/devicetree/bindings/net/dsa/ar9331.txt b/Documentation/devicetree/bindings/net/dsa/ar9331.txt
index 320607cbbb17..f824fdae0da2 100644
--- a/Documentation/devicetree/bindings/net/dsa/ar9331.txt
+++ b/Documentation/devicetree/bindings/net/dsa/ar9331.txt
@@ -76,7 +76,6 @@ eth1: ethernet@1a000000 {
switch_port0: port@0 {
reg = <0x0>;
- label = "cpu";
ethernet = <ð1>;
phy-mode = "gmii";
diff --git a/Documentation/devicetree/bindings/net/dsa/arrow,xrs700x.yaml b/Documentation/devicetree/bindings/net/dsa/arrow,xrs700x.yaml
index 3f01b65f3b22..259a0c6547f3 100644
--- a/Documentation/devicetree/bindings/net/dsa/arrow,xrs700x.yaml
+++ b/Documentation/devicetree/bindings/net/dsa/arrow,xrs700x.yaml
@@ -61,8 +61,9 @@ examples:
};
ethernet-port@3 {
reg = <3>;
- label = "cpu";
ethernet = <&fec1>;
+ phy-mode = "rgmii-id";
+
fixed-link {
speed = <1000>;
full-duplex;
diff --git a/Documentation/devicetree/bindings/net/dsa/brcm,b53.yaml b/Documentation/devicetree/bindings/net/dsa/brcm,b53.yaml
index 23114d691d2a..1219b830b1a4 100644
--- a/Documentation/devicetree/bindings/net/dsa/brcm,b53.yaml
+++ b/Documentation/devicetree/bindings/net/dsa/brcm,b53.yaml
@@ -169,7 +169,6 @@ examples:
port@8 {
reg = <8>;
- label = "cpu";
phy-mode = "rgmii-txid";
ethernet = <ð0>;
fixed-link {
@@ -252,8 +251,9 @@ examples:
port@8 {
ethernet = <&amac2>;
- label = "cpu";
reg = <8>;
+ phy-mode = "internal";
+
fixed-link {
speed = <1000>;
full-duplex;
diff --git a/Documentation/devicetree/bindings/net/dsa/dsa-port.yaml b/Documentation/devicetree/bindings/net/dsa/dsa-port.yaml
index 09317e16cb5d..10ad7e71097b 100644
--- a/Documentation/devicetree/bindings/net/dsa/dsa-port.yaml
+++ b/Documentation/devicetree/bindings/net/dsa/dsa-port.yaml
@@ -76,6 +76,23 @@ properties:
required:
- reg
+# CPU and DSA ports must have phylink-compatible link descriptions
+if:
+ oneOf:
+ - required: [ ethernet ]
+ - required: [ link ]
+then:
+ allOf:
+ - required:
+ - phy-mode
+ - oneOf:
+ - required:
+ - fixed-link
+ - required:
+ - phy-handle
+ - required:
+ - managed
+
additionalProperties: true
...
diff --git a/Documentation/devicetree/bindings/net/dsa/hirschmann,hellcreek.yaml b/Documentation/devicetree/bindings/net/dsa/hirschmann,hellcreek.yaml
index 228683773151..73b774eadd0b 100644
--- a/Documentation/devicetree/bindings/net/dsa/hirschmann,hellcreek.yaml
+++ b/Documentation/devicetree/bindings/net/dsa/hirschmann,hellcreek.yaml
@@ -91,8 +91,13 @@ examples:
port@0 {
reg = <0>;
- label = "cpu";
ethernet = <&gmac0>;
+ phy-mode = "mii";
+
+ fixed-link {
+ speed = <100>;
+ full-duplex;
+ };
};
port@2 {
diff --git a/Documentation/devicetree/bindings/net/dsa/lan9303.txt b/Documentation/devicetree/bindings/net/dsa/lan9303.txt
index 464d6bf87605..46a732087f5c 100644
--- a/Documentation/devicetree/bindings/net/dsa/lan9303.txt
+++ b/Documentation/devicetree/bindings/net/dsa/lan9303.txt
@@ -46,7 +46,6 @@ I2C managed mode:
port@0 { /* RMII fixed link to master */
reg = <0>;
- label = "cpu";
ethernet = <&master>;
};
@@ -83,7 +82,6 @@ MDIO managed mode:
port@0 {
reg = <0>;
- label = "cpu";
ethernet = <&master>;
};
diff --git a/Documentation/devicetree/bindings/net/dsa/lantiq-gswip.txt b/Documentation/devicetree/bindings/net/dsa/lantiq-gswip.txt
index e3829d3e480e..8bb1eff21cb1 100644
--- a/Documentation/devicetree/bindings/net/dsa/lantiq-gswip.txt
+++ b/Documentation/devicetree/bindings/net/dsa/lantiq-gswip.txt
@@ -96,7 +96,6 @@ switch@e108000 {
port@6 {
reg = <0x6>;
- label = "cpu";
ethernet = <ð0>;
};
};
diff --git a/Documentation/devicetree/bindings/net/dsa/mediatek,mt7530.yaml b/Documentation/devicetree/bindings/net/dsa/mediatek,mt7530.yaml
index 17ab6c69ecc7..f2e9ff3f580b 100644
--- a/Documentation/devicetree/bindings/net/dsa/mediatek,mt7530.yaml
+++ b/Documentation/devicetree/bindings/net/dsa/mediatek,mt7530.yaml
@@ -4,67 +4,92 @@
$id: http://devicetree.org/schemas/net/dsa/mediatek,mt7530.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
-title: Mediatek MT7530 Ethernet switch
+title: Mediatek MT7530 and MT7531 Ethernet Switches
maintainers:
- - Sean Wang
+ - Arınç ÜNAL
- Landen Chao
- DENG Qingfang
+ - Sean Wang
description: |
- Port 5 of mt7530 and mt7621 switch is muxed between:
- 1. GMAC5: GMAC5 can interface with another external MAC or PHY.
- 2. PHY of port 0 or port 4: PHY interfaces with an external MAC like 2nd GMAC
- of the SOC. Used in many setups where port 0/4 becomes the WAN port.
- Note: On a MT7621 SOC with integrated switch: 2nd GMAC can only connected to
- GMAC5 when the gpios for RGMII2 (GPIO 22-33) are not used and not
- connected to external component!
+ There are two versions of MT7530, standalone and in a multi-chip module.
- Port 5 modes/configurations:
- 1. Port 5 is disabled and isolated: An external phy can interface to the 2nd
- GMAC of the SOC.
- In the case of a build-in MT7530 switch, port 5 shares the RGMII bus with 2nd
- GMAC and an optional external phy. Mind the GPIO/pinctl settings of the SOC!
- 2. Port 5 is muxed to PHY of port 0/4: Port 0/4 interfaces with 2nd GMAC.
- It is a simple MAC to PHY interface, port 5 needs to be setup for xMII mode
- and RGMII delay.
- 3. Port 5 is muxed to GMAC5 and can interface to an external phy.
- Port 5 becomes an extra switch port.
- Only works on platform where external phy TX<->RX lines are swapped.
- Like in the Ubiquiti ER-X-SFP.
- 4. Port 5 is muxed to GMAC5 and interfaces with the 2nd GAMC as 2nd CPU port.
- Currently a 2nd CPU port is not supported by DSA code.
+ MT7530 is a part of the multi-chip module in MT7620AN, MT7620DA, MT7620DAN,
+ MT7620NN, MT7621AT, MT7621DAT, MT7621ST and MT7623AI SoCs.
- Depending on how the external PHY is wired:
- 1. normal: The PHY can only connect to 2nd GMAC but not to the switch
- 2. swapped: RGMII TX, RX are swapped; external phy interface with the switch as
- a ethernet port. But can't interface to the 2nd GMAC.
+ MT7530 in MT7620AN, MT7620DA, MT7620DAN and MT7620NN SoCs has 10/100 PHYs
+ and the switch registers are directly mapped into the SoC's memory map rather
+ than accessed over MDIO. The DSA driver currently doesn't support this.
- Based on the DT the port 5 mode is configured.
+ There is only the standalone version of MT7531.
- Driver tries to lookup the phy-handle of the 2nd GMAC of the master device.
- When phy-handle matches PHY of port 0 or 4 then port 5 set-up as mode 2.
- phy-mode must be set, see also example 2 below!
- * mt7621: phy-mode = "rgmii-txid";
- * mt7623: phy-mode = "rgmii";
+ Port 5 on MT7530 can be configured in several ways.
- CPU-Ports need a phy-mode property:
- Allowed values on mt7530 and mt7621:
- - "rgmii"
- - "trgmii"
- On mt7531:
- - "1000base-x"
- - "2500base-x"
- - "rgmii"
- - "sgmii"
+ For standalone MT7530:
+ - Port 5 can be used as a CPU port.
+
+ - PHY 0 or 4 of the switch can be muxed to connect to the gmac of the SoC
+ which port 5 is wired to. Usually used for connecting the wan port
+ directly to the CPU to achieve 2 Gbps routing in total.
+
+ The driver looks up the reg on the ethernet-phy node which the phy-handle
+ property refers to on the gmac node to mux the specified phy.
+
+ The driver requires the gmac of the SoC to have "mediatek,eth-mac" as the
+ compatible string and the reg must be 1. So, for now, only gmac1 of a
+ MediaTek SoC can benefit from this. The Banana Pi BPI-R2 is one such board.
+ Check out example 5 for a similar configuration.
+
+ - Port 5 can be wired to an external phy. Port 5 becomes a DSA slave.
+ Check out example 7 for a similar configuration.
+
+ For multi-chip module MT7530:
+
+ - Port 5 can be used as a CPU port.
+
+ - PHY 0 or 4 of the switch can be muxed to connect to gmac1 of the SoC.
+ Usually used for connecting the wan port directly to the CPU to achieve 2
+ Gbps routing in total.
+
+ The driver looks up the reg on the ethernet-phy node which the phy-handle
+ property refers to on the gmac node to mux the specified phy.
+
+ For the MT7621 SoCs, rgmii2 group must be claimed with rgmii2 function.
+ Check out example 5.
+
+ - In case of an external phy wired to gmac1 of the SoC, port 5 must not be
+ enabled.
+
+ In case of muxing PHY 0 or 4, the external phy must not be enabled.
+
+ For the MT7621 SoCs, rgmii2 group must be claimed with rgmii2 function.
+ Check out example 6.
+
+ - Port 5 can be muxed to an external phy. Port 5 becomes a DSA slave.
+ The external phy must be wired TX to TX to gmac1 of the SoC for this to
+ work. Ubiquiti EdgeRouter X SFP is wired this way.
+
+ Muxing PHY 0 or 4 won't work when the external phy is connected TX to TX.
+
+ For the MT7621 SoCs, rgmii2 group must be claimed with gpio function.
+ Check out example 7.
properties:
compatible:
- enum:
- - mediatek,mt7530
- - mediatek,mt7531
- - mediatek,mt7621
+ oneOf:
+ - description:
+ Standalone MT7530 and multi-chip module MT7530 in MT7623AI SoC
+ const: mediatek,mt7530
+
+ - description:
+ Standalone MT7531
+ const: mediatek,mt7531
+
+ - description:
+ Multi-chip module MT7530 in MT7621AT, MT7621DAT and MT7621ST SoCs
+ const: mediatek,mt7621
reg:
maxItems: 1
@@ -79,7 +104,14 @@ properties:
gpio-controller:
type: boolean
description:
- if defined, MT7530's LED controller will run on GPIO mode.
+ If defined, the LED controller of the MT7530 switch will run in GPIO mode.
+
+ There are 15 controllable pins.
+ port 0 LED 0..2 as GPIO 0..2
+ port 1 LED 0..2 as GPIO 3..5
+ port 2 LED 0..2 as GPIO 6..8
+ port 3 LED 0..2 as GPIO 9..11
+ port 4 LED 0..2 as GPIO 12..14
"#interrupt-cells":
const: 1
@@ -92,17 +124,21 @@ properties:
io-supply:
description:
Phandle to the regulator node necessary for the I/O power.
- See Documentation/devicetree/bindings/regulator/mt6323-regulator.txt
- for details for the regulator setup on these boards.
+ See Documentation/devicetree/bindings/regulator/mt6323-regulator.txt for
+ details for the regulator setup on these boards.
mediatek,mcm:
type: boolean
description:
- if defined, indicates that either MT7530 is the part on multi-chip
- module belong to MT7623A has or the remotely standalone chip as the
- function MT7623N reference board provided for.
+ Used for the MT7621AT, MT7621DAT, MT7621ST and MT7623AI SoCs, in which the
+ MT7530 switch is part of the multi-chip module.
reset-gpios:
+ description:
+ GPIO to reset the switch. Use this if mediatek,mcm is not used.
+ This property is optional because some boards share the reset line with
+ other components which makes it impossible to probe the switch if the
+ reset line is used.
maxItems: 1
reset-names:
@@ -110,8 +146,8 @@ properties:
resets:
description:
- Phandle pointing to the system reset controller with line index for
- the ethsys.
+ Phandle pointing to the system reset controller with line index for the
+ ethsys.
maxItems: 1
patternProperties:
@@ -128,31 +164,88 @@ patternProperties:
properties:
reg:
description:
- Port address described must be 5 or 6 for CPU port and from 0
- to 5 for user ports.
+ The port address must be 5 or 6 for the CPU port and from 0 to 5
+ for user ports.
allOf:
- $ref: dsa-port.yaml#
- if:
- properties:
- label:
- items:
- - const: cpu
+ required: [ ethernet ]
then:
- required:
- - reg
- - phy-mode
+ properties:
+ reg:
+ enum:
+ - 5
+ - 6
required:
- compatible
- reg
+$defs:
+ mt7530-dsa-port:
+ patternProperties:
+ "^(ethernet-)?ports$":
+ patternProperties:
+ "^(ethernet-)?port@[0-9]+$":
+ if:
+ required: [ ethernet ]
+ then:
+ if:
+ properties:
+ reg:
+ const: 5
+ then:
+ properties:
+ phy-mode:
+ enum:
+ - gmii
+ - mii
+ - rgmii
+ else:
+ properties:
+ phy-mode:
+ enum:
+ - rgmii
+ - trgmii
+
+ mt7531-dsa-port:
+ patternProperties:
+ "^(ethernet-)?ports$":
+ patternProperties:
+ "^(ethernet-)?port@[0-9]+$":
+ if:
+ required: [ ethernet ]
+ then:
+ if:
+ properties:
+ reg:
+ const: 5
+ then:
+ properties:
+ phy-mode:
+ enum:
+ - 1000base-x
+ - 2500base-x
+ - rgmii
+ - sgmii
+ else:
+ properties:
+ phy-mode:
+ enum:
+ - 1000base-x
+ - 2500base-x
+ - sgmii
+
allOf:
- - $ref: "dsa.yaml#"
+ - $ref: dsa.yaml#
- if:
required:
- mediatek,mcm
then:
+ properties:
+ reset-gpios: false
+
required:
- resets
- reset-names
@@ -163,50 +256,74 @@ allOf:
- if:
properties:
compatible:
- items:
- - const: mediatek,mt7530
+ const: mediatek,mt7530
then:
+ $ref: "#/$defs/mt7530-dsa-port"
required:
- core-supply
- io-supply
+ - if:
+ properties:
+ compatible:
+ const: mediatek,mt7531
+ then:
+ $ref: "#/$defs/mt7531-dsa-port"
+ properties:
+ gpio-controller: false
+ mediatek,mcm: false
+
+ - if:
+ properties:
+ compatible:
+ const: mediatek,mt7621
+ then:
+ $ref: "#/$defs/mt7530-dsa-port"
+ required:
+ - mediatek,mcm
+
unevaluatedProperties: false
examples:
+ # Example 1: Standalone MT7530
- |
#include
+
mdio {
#address-cells = <1>;
#size-cells = <0>;
- switch@0 {
+
+ switch@1f {
compatible = "mediatek,mt7530";
- reg = <0>;
+ reg = <0x1f>;
+
+ reset-gpios = <&pio 33 0>;
core-supply = <&mt6323_vpa_reg>;
io-supply = <&mt6323_vemc3v3_reg>;
- reset-gpios = <&pio 33 GPIO_ACTIVE_HIGH>;
ethernet-ports {
#address-cells = <1>;
#size-cells = <0>;
+
port@0 {
reg = <0>;
- label = "lan0";
+ label = "lan1";
};
port@1 {
reg = <1>;
- label = "lan1";
+ label = "lan2";
};
port@2 {
reg = <2>;
- label = "lan2";
+ label = "lan3";
};
port@3 {
reg = <3>;
- label = "lan3";
+ label = "lan4";
};
port@4 {
@@ -216,184 +333,464 @@ examples:
port@6 {
reg = <6>;
- label = "cpu";
ethernet = <&gmac0>;
- phy-mode = "trgmii";
+ phy-mode = "rgmii";
+
fixed-link {
speed = <1000>;
full-duplex;
+ pause;
};
};
};
};
};
+ # Example 2: MT7530 in MT7623AI SoC
- |
- //Example 2: MT7621: Port 4 is WAN port: 2nd GMAC -> Port 5 -> PHY port 4.
+ #include
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ switch@1f {
+ compatible = "mediatek,mt7530";
+ reg = <0x1f>;
+
+ mediatek,mcm;
+ resets = <ðsys MT2701_ETHSYS_MCM_RST>;
+ reset-names = "mcm";
+
+ core-supply = <&mt6323_vpa_reg>;
+ io-supply = <&mt6323_vemc3v3_reg>;
+
+ ethernet-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ label = "lan1";
+ };
+
+ port@1 {
+ reg = <1>;
+ label = "lan2";
+ };
+
+ port@2 {
+ reg = <2>;
+ label = "lan3";
+ };
+
+ port@3 {
+ reg = <3>;
+ label = "lan4";
+ };
+
+ port@4 {
+ reg = <4>;
+ label = "wan";
+ };
+
+ port@6 {
+ reg = <6>;
+ ethernet = <&gmac0>;
+ phy-mode = "trgmii";
+
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ pause;
+ };
+ };
+ };
+ };
+ };
+
+ # Example 3: Standalone MT7531
+ - |
+ #include
+ #include
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ switch@0 {
+ compatible = "mediatek,mt7531";
+ reg = <0>;
+
+ reset-gpios = <&pio 54 0>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&pio>;
+ interrupts = <53 IRQ_TYPE_LEVEL_HIGH>;
+
+ ethernet-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ label = "lan1";
+ };
+
+ port@1 {
+ reg = <1>;
+ label = "lan2";
+ };
+
+ port@2 {
+ reg = <2>;
+ label = "lan3";
+ };
+
+ port@3 {
+ reg = <3>;
+ label = "lan4";
+ };
+
+ port@4 {
+ reg = <4>;
+ label = "wan";
+ };
+
+ port@6 {
+ reg = <6>;
+ ethernet = <&gmac0>;
+ phy-mode = "2500base-x";
+
+ fixed-link {
+ speed = <2500>;
+ full-duplex;
+ pause;
+ };
+ };
+ };
+ };
+ };
+
+ # Example 4: MT7530 in MT7621AT, MT7621DAT and MT7621ST SoCs
+ - |
+ #include
+ #include
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ switch@1f {
+ compatible = "mediatek,mt7621";
+ reg = <0x1f>;
+
+ mediatek,mcm;
+ resets = <&sysc MT7621_RST_MCM>;
+ reset-names = "mcm";
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&gic>;
+ interrupts = ;
+
+ ethernet-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ label = "lan1";
+ };
+
+ port@1 {
+ reg = <1>;
+ label = "lan2";
+ };
+
+ port@2 {
+ reg = <2>;
+ label = "lan3";
+ };
+
+ port@3 {
+ reg = <3>;
+ label = "lan4";
+ };
+
+ port@4 {
+ reg = <4>;
+ label = "wan";
+ };
+
+ port@6 {
+ reg = <6>;
+ ethernet = <&gmac0>;
+ phy-mode = "trgmii";
+
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ pause;
+ };
+ };
+ };
+ };
+ };
+
+ # Example 5: MT7621: mux MT7530's phy4 to SoC's gmac1
+ - |
+ #include
+ #include
ethernet {
#address-cells = <1>;
#size-cells = <0>;
- gmac0: mac@0 {
- compatible = "mediatek,eth-mac";
- reg = <0>;
- phy-mode = "rgmii";
- fixed-link {
- speed = <1000>;
- full-duplex;
- pause;
- };
- };
+ pinctrl-names = "default";
+ pinctrl-0 = <&rgmii2_pins>;
- gmac1: mac@1 {
+ mac@1 {
compatible = "mediatek,eth-mac";
reg = <1>;
- phy-mode = "rgmii-txid";
- phy-handle = <&phy4>;
- };
- mdio: mdio-bus {
- #address-cells = <1>;
- #size-cells = <0>;
-
- /* Internal phy */
- phy4: ethernet-phy@4 {
- reg = <4>;
- };
-
- mt7530: switch@1f {
- compatible = "mediatek,mt7621";
- reg = <0x1f>;
- mediatek,mcm;
-
- resets = <&rstctrl 2>;
- reset-names = "mcm";
-
- ethernet-ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
- port@0 {
- reg = <0>;
- label = "lan0";
- };
-
- port@1 {
- reg = <1>;
- label = "lan1";
- };
-
- port@2 {
- reg = <2>;
- label = "lan2";
- };
-
- port@3 {
- reg = <3>;
- label = "lan3";
- };
-
- /* Commented out. Port 4 is handled by 2nd GMAC.
- port@4 {
- reg = <4>;
- label = "lan4";
- };
- */
-
- port@6 {
- reg = <6>;
- label = "cpu";
- ethernet = <&gmac0>;
- phy-mode = "rgmii";
-
- fixed-link {
- speed = <1000>;
- full-duplex;
- pause;
- };
- };
- };
- };
- };
- };
-
- - |
- //Example 3: MT7621: Port 5 is connected to external PHY: Port 5 -> external PHY.
-
- ethernet {
- #address-cells = <1>;
- #size-cells = <0>;
- gmac_0: mac@0 {
- compatible = "mediatek,eth-mac";
- reg = <0>;
phy-mode = "rgmii";
-
- fixed-link {
- speed = <1000>;
- full-duplex;
- pause;
- };
+ phy-handle = <&example5_ethphy4>;
};
- mdio0: mdio-bus {
+ mdio {
#address-cells = <1>;
#size-cells = <0>;
- /* External phy */
- ephy5: ethernet-phy@7 {
- reg = <7>;
+ /* MT7530's phy4 */
+ example5_ethphy4: ethernet-phy@4 {
+ reg = <4>;
};
switch@1f {
compatible = "mediatek,mt7621";
reg = <0x1f>;
- mediatek,mcm;
- resets = <&rstctrl 2>;
+ mediatek,mcm;
+ resets = <&sysc MT7621_RST_MCM>;
reset-names = "mcm";
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&gic>;
+ interrupts = ;
+
ethernet-ports {
#address-cells = <1>;
#size-cells = <0>;
port@0 {
reg = <0>;
- label = "lan0";
+ label = "lan1";
};
port@1 {
reg = <1>;
- label = "lan1";
+ label = "lan2";
};
port@2 {
reg = <2>;
- label = "lan2";
+ label = "lan3";
};
port@3 {
reg = <3>;
- label = "lan3";
- };
-
- port@4 {
- reg = <4>;
label = "lan4";
};
- port@5 {
- reg = <5>;
- label = "lan5";
- phy-mode = "rgmii";
- phy-handle = <&ephy5>;
+ /* Commented out, phy4 is muxed to gmac1.
+ port@4 {
+ reg = <4>;
+ label = "wan";
};
+ */
- cpu_port0: port@6 {
+ port@6 {
reg = <6>;
- label = "cpu";
- ethernet = <&gmac_0>;
- phy-mode = "rgmii";
+ ethernet = <&gmac0>;
+ phy-mode = "trgmii";
+
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ pause;
+ };
+ };
+ };
+ };
+ };
+ };
+
+ # Example 6: MT7621: mux external phy to SoC's gmac1
+ - |
+ #include
+ #include
+
+ ethernet {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&rgmii2_pins>;
+
+ mac@1 {
+ compatible = "mediatek,eth-mac";
+ reg = <1>;
+
+ phy-mode = "rgmii";
+ phy-handle = <&example6_ethphy7>;
+ };
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* External PHY */
+ example6_ethphy7: ethernet-phy@7 {
+ reg = <7>;
+ phy-mode = "rgmii";
+ };
+
+ switch@1f {
+ compatible = "mediatek,mt7621";
+ reg = <0x1f>;
+
+ mediatek,mcm;
+ resets = <&sysc MT7621_RST_MCM>;
+ reset-names = "mcm";
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&gic>;
+ interrupts = ;
+
+ ethernet-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ label = "lan1";
+ };
+
+ port@1 {
+ reg = <1>;
+ label = "lan2";
+ };
+
+ port@2 {
+ reg = <2>;
+ label = "lan3";
+ };
+
+ port@3 {
+ reg = <3>;
+ label = "lan4";
+ };
+
+ port@4 {
+ reg = <4>;
+ label = "wan";
+ };
+
+ port@6 {
+ reg = <6>;
+ ethernet = <&gmac0>;
+ phy-mode = "trgmii";
+
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ pause;
+ };
+ };
+ };
+ };
+ };
+ };
+
+ # Example 7: MT7621: mux external phy to MT7530's port 5
+ - |
+ #include
+ #include
+
+ ethernet {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&rgmii2_pins>;
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ /* External PHY */
+ example7_ethphy7: ethernet-phy@7 {
+ reg = <7>;
+ phy-mode = "rgmii";
+ };
+
+ switch@1f {
+ compatible = "mediatek,mt7621";
+ reg = <0x1f>;
+
+ mediatek,mcm;
+ resets = <&sysc MT7621_RST_MCM>;
+ reset-names = "mcm";
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ interrupt-parent = <&gic>;
+ interrupts = ;
+
+ ethernet-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ label = "lan1";
+ };
+
+ port@1 {
+ reg = <1>;
+ label = "lan2";
+ };
+
+ port@2 {
+ reg = <2>;
+ label = "lan3";
+ };
+
+ port@3 {
+ reg = <3>;
+ label = "lan4";
+ };
+
+ port@4 {
+ reg = <4>;
+ label = "wan";
+ };
+
+ port@5 {
+ reg = <5>;
+ label = "extphy";
+ phy-mode = "rgmii-txid";
+ phy-handle = <&example7_ethphy7>;
+ };
+
+ port@6 {
+ reg = <6>;
+ ethernet = <&gmac0>;
+ phy-mode = "trgmii";
fixed-link {
speed = <1000>;
diff --git a/Documentation/devicetree/bindings/net/dsa/microchip,ksz.yaml b/Documentation/devicetree/bindings/net/dsa/microchip,ksz.yaml
index 6bbd8145b6c1..4da75b1f9533 100644
--- a/Documentation/devicetree/bindings/net/dsa/microchip,ksz.yaml
+++ b/Documentation/devicetree/bindings/net/dsa/microchip,ksz.yaml
@@ -107,8 +107,9 @@ examples:
};
port@5 {
reg = <5>;
- label = "cpu";
ethernet = <ð0>;
+ phy-mode = "rgmii";
+
fixed-link {
speed = <1000>;
full-duplex;
@@ -144,8 +145,9 @@ examples:
};
port@6 {
reg = <6>;
- label = "cpu";
ethernet = <ð0>;
+ phy-mode = "rgmii";
+
fixed-link {
speed = <1000>;
full-duplex;
diff --git a/Documentation/devicetree/bindings/net/dsa/mscc,ocelot.yaml b/Documentation/devicetree/bindings/net/dsa/mscc,ocelot.yaml
new file mode 100644
index 000000000000..8d93ed9c172c
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/dsa/mscc,ocelot.yaml
@@ -0,0 +1,260 @@
+# SPDX-License-Identifier: (GPL-2.0 OR MIT)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/dsa/mscc,ocelot.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Microchip Ocelot Switch Family Device Tree Bindings
+
+maintainers:
+ - Vladimir Oltean
+ - Claudiu Manoil
+ - Alexandre Belloni
+ - UNGLinuxDriver@microchip.com
+
+description: |
+ There are multiple switches which are either part of the Ocelot-1 family, or
+ derivatives of this architecture. These switches can be found embedded in
+ various SoCs and accessed using MMIO, or as discrete chips and accessed over
+ SPI or PCIe. The present DSA binding shall be used when the host controlling
+ them performs packet I/O primarily through an Ethernet port of the switch
+ (which is attached to an Ethernet port of the host), rather than through
+ Frame DMA or register-based I/O.
+
+ VSC9953 (Seville):
+
+ This is found in the NXP T1040, where it is a memory-mapped platform
+ device.
+
+ The following PHY interface types are supported:
+
+ - phy-mode = "internal": on ports 8 and 9
+ - phy-mode = "sgmii": on ports 0, 1, 2, 3, 4, 5, 6, 7
+ - phy-mode = "qsgmii": on ports 0, 1, 2, 3, 4, 5, 6, 7
+ - phy-mode = "1000base-x": on ports 0, 1, 2, 3, 4, 5, 6, 7
+
+ VSC9959 (Felix):
+
+ This is found in the NXP LS1028A. It is a PCI device, part of the larger
+ enetc root complex. As a result, the ethernet-switch node is a sub-node of
+ the PCIe root complex node and its "reg" property conforms to the parent
+ node bindings, describing it as PF 5 of device 0, bus 0.
+
+ If any external switch port is enabled, the enetc PF2 (enetc_port2) should
+ be enabled as well. This is because the internal MDIO bus (exposed through
+ EA BAR 0) used to access the MAC PCS registers truly belongs to the enetc
+ port 2 and not to Felix.
+
+ The following PHY interface types are supported:
+
+ - phy-mode = "internal": on ports 4 and 5
+ - phy-mode = "sgmii": on ports 0, 1, 2, 3
+ - phy-mode = "qsgmii": on ports 0, 1, 2, 3
+ - phy-mode = "usxgmii": on ports 0, 1, 2, 3
+ - phy-mode = "1000base-x": on ports 0, 1, 2, 3
+ - phy-mode = "2500base-x": on ports 0, 1, 2, 3
+
+properties:
+ compatible:
+ enum:
+ - mscc,vsc9953-switch
+ - pci1957,eef0
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ description:
+ Used to signal availability of PTP TX timestamps, and state changes of
+ the MAC merge layer of ports that support Frame Preemption.
+
+ little-endian: true
+ big-endian: true
+
+required:
+ - compatible
+ - reg
+
+allOf:
+ - $ref: dsa.yaml#
+ - if:
+ properties:
+ compatible:
+ const: pci1957,eef0
+ then:
+ required:
+ - interrupts
+
+unevaluatedProperties: false
+
+examples:
+ # Felix VSC9959 (NXP LS1028A)
+ - |
+ #include
+
+ pcie { /* Integrated Endpoint Root Complex */
+ #address-cells = <3>;
+ #size-cells = <2>;
+
+ ethernet-switch@0,5 {
+ compatible = "pci1957,eef0";
+ reg = <0x000500 0 0 0 0>;
+ interrupts = ;
+
+ ethernet-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ phy-mode = "qsgmii";
+ phy-handle = <&phy0>;
+ managed = "in-band-status";
+ };
+
+ port@1 {
+ reg = <1>;
+ phy-mode = "qsgmii";
+ phy-handle = <&phy1>;
+ managed = "in-band-status";
+ };
+
+ port@2 {
+ reg = <2>;
+ phy-mode = "qsgmii";
+ phy-handle = <&phy2>;
+ managed = "in-band-status";
+ };
+
+ port@3 {
+ reg = <3>;
+ phy-mode = "qsgmii";
+ phy-handle = <&phy3>;
+ managed = "in-band-status";
+ };
+
+ port@4 {
+ reg = <4>;
+ ethernet = <&enetc_port2>;
+ phy-mode = "internal";
+
+ fixed-link {
+ speed = <2500>;
+ full-duplex;
+ pause;
+ };
+ };
+
+ port@5 {
+ reg = <5>;
+ ethernet = <&enetc_port3>;
+ phy-mode = "internal";
+
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ pause;
+ };
+ };
+ };
+ };
+ };
+ # Seville VSC9953 (NXP T1040)
+ - |
+ soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ ethernet-switch@800000 {
+ compatible = "mscc,vsc9953-switch";
+ reg = <0x800000 0x290000>;
+ little-endian;
+
+ ethernet-ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ phy-mode = "qsgmii";
+ phy-handle = <&phy0>;
+ managed = "in-band-status";
+ };
+
+ port@1 {
+ reg = <1>;
+ phy-mode = "qsgmii";
+ phy-handle = <&phy1>;
+ managed = "in-band-status";
+ };
+
+ port@2 {
+ reg = <2>;
+ phy-mode = "qsgmii";
+ phy-handle = <&phy2>;
+ managed = "in-band-status";
+ };
+
+ port@3 {
+ reg = <3>;
+ phy-mode = "qsgmii";
+ phy-handle = <&phy3>;
+ managed = "in-band-status";
+ };
+
+ port@4 {
+ reg = <4>;
+ phy-mode = "qsgmii";
+ phy-handle = <&phy4>;
+ managed = "in-band-status";
+ };
+
+ port@5 {
+ reg = <5>;
+ phy-mode = "qsgmii";
+ phy-handle = <&phy5>;
+ managed = "in-band-status";
+ };
+
+ port@6 {
+ reg = <6>;
+ phy-mode = "qsgmii";
+ phy-handle = <&phy6>;
+ managed = "in-band-status";
+ };
+
+ port@7 {
+ reg = <7>;
+ phy-mode = "qsgmii";
+ phy-handle = <&phy7>;
+ managed = "in-band-status";
+ };
+
+ port@8 {
+ reg = <8>;
+ phy-mode = "internal";
+ ethernet = <&enet0>;
+
+ fixed-link {
+ speed = <2500>;
+ full-duplex;
+ pause;
+ };
+ };
+
+ port@9 {
+ reg = <9>;
+ phy-mode = "internal";
+ ethernet = <&enet1>;
+
+ fixed-link {
+ speed = <2500>;
+ full-duplex;
+ pause;
+ };
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/dsa/ocelot.txt b/Documentation/devicetree/bindings/net/dsa/ocelot.txt
deleted file mode 100644
index 7a271d070b72..000000000000
--- a/Documentation/devicetree/bindings/net/dsa/ocelot.txt
+++ /dev/null
@@ -1,213 +0,0 @@
-Microchip Ocelot switch driver family
-=====================================
-
-Felix
------
-
-Currently the switches supported by the felix driver are:
-
-- VSC9959 (Felix)
-- VSC9953 (Seville)
-
-The VSC9959 switch is found in the NXP LS1028A. It is a PCI device, part of the
-larger ENETC root complex. As a result, the ethernet-switch node is a sub-node
-of the PCIe root complex node and its "reg" property conforms to the parent
-node bindings:
-
-* reg: Specifies PCIe Device Number and Function Number of the endpoint device,
- in this case for the Ethernet L2Switch it is PF5 (of device 0, bus 0).
-
-It does not require a "compatible" string.
-
-The interrupt line is used to signal availability of PTP TX timestamps and for
-TSN frame preemption.
-
-For the external switch ports, depending on board configuration, "phy-mode" and
-"phy-handle" are populated by board specific device tree instances. Ports 4 and
-5 are fixed as internal ports in the NXP LS1028A instantiation.
-
-The CPU port property ("ethernet") configures the feature called "NPI port" in
-the Ocelot hardware core. The CPU port in Ocelot is a set of queues, which are
-connected, in the Node Processor Interface (NPI) mode, to an Ethernet port.
-By default, in fsl-ls1028a.dtsi, the NPI port is assigned to the internal
-2.5Gbps port@4, but can be moved to the 1Gbps port@5, depending on the specific
-use case. Moving the NPI port to an external switch port is hardware possible,
-but there is no platform support for the Linux system on the LS1028A chip to
-operate as an entire slave DSA chip. NPI functionality (and therefore DSA
-tagging) is supported on a single port at a time.
-
-Any port can be disabled (and in fsl-ls1028a.dtsi, they are indeed all disabled
-by default, and should be enabled on a per-board basis). But if any external
-switch port is enabled at all, the ENETC PF2 (enetc_port2) should be enabled as
-well, regardless of whether it is configured as the DSA master or not. This is
-because the Felix PHYLINK implementation accesses the MAC PCS registers, which
-in hardware truly belong to the ENETC port #2 and not to Felix.
-
-Supported PHY interface types (appropriate SerDes protocol setting changes are
-needed in the RCW binary):
-
-* phy_mode = "internal": on ports 4 and 5
-* phy_mode = "sgmii": on ports 0, 1, 2, 3
-* phy_mode = "qsgmii": on ports 0, 1, 2, 3
-* phy_mode = "usxgmii": on ports 0, 1, 2, 3
-* phy_mode = "2500base-x": on ports 0, 1, 2, 3
-
-For the rest of the device tree binding definitions, which are standard DSA and
-PCI, refer to the following documents:
-
-Documentation/devicetree/bindings/net/dsa/dsa.txt
-Documentation/devicetree/bindings/pci/pci.txt
-
-Example:
-
-&soc {
- pcie@1f0000000 { /* Integrated Endpoint Root Complex */
- ethernet-switch@0,5 {
- reg = <0x000500 0 0 0 0>;
- /* IEP INT_B */
- interrupts = ;
-
- ports {
- #address-cells = <1>;
- #size-cells = <0>;
-
- /* External ports */
- port@0 {
- reg = <0>;
- label = "swp0";
- };
-
- port@1 {
- reg = <1>;
- label = "swp1";
- };
-
- port@2 {
- reg = <2>;
- label = "swp2";
- };
-
- port@3 {
- reg = <3>;
- label = "swp3";
- };
-
- /* Tagging CPU port */
- port@4 {
- reg = <4>;
- ethernet = <&enetc_port2>;
- phy-mode = "internal";
-
- fixed-link {
- speed = <2500>;
- full-duplex;
- };
- };
-
- /* Non-tagging CPU port */
- port@5 {
- reg = <5>;
- phy-mode = "internal";
- status = "disabled";
-
- fixed-link {
- speed = <1000>;
- full-duplex;
- };
- };
- };
- };
- };
-};
-
-The VSC9953 switch is found inside NXP T1040. It is a platform device with the
-following required properties:
-
-- compatible:
- Must be "mscc,vsc9953-switch".
-
-Supported PHY interface types (appropriate SerDes protocol setting changes are
-needed in the RCW binary):
-
-* phy_mode = "internal": on ports 8 and 9
-* phy_mode = "sgmii": on ports 0, 1, 2, 3, 4, 5, 6, 7
-* phy_mode = "qsgmii": on ports 0, 1, 2, 3, 4, 5, 6, 7
-
-Example:
-
-&soc {
- ethernet-switch@800000 {
- #address-cells = <0x1>;
- #size-cells = <0x0>;
- compatible = "mscc,vsc9953-switch";
- little-endian;
- reg = <0x800000 0x290000>;
-
- ports {
- #address-cells = <0x1>;
- #size-cells = <0x0>;
-
- port@0 {
- reg = <0x0>;
- label = "swp0";
- };
-
- port@1 {
- reg = <0x1>;
- label = "swp1";
- };
-
- port@2 {
- reg = <0x2>;
- label = "swp2";
- };
-
- port@3 {
- reg = <0x3>;
- label = "swp3";
- };
-
- port@4 {
- reg = <0x4>;
- label = "swp4";
- };
-
- port@5 {
- reg = <0x5>;
- label = "swp5";
- };
-
- port@6 {
- reg = <0x6>;
- label = "swp6";
- };
-
- port@7 {
- reg = <0x7>;
- label = "swp7";
- };
-
- port@8 {
- reg = <0x8>;
- phy-mode = "internal";
- ethernet = <&enet0>;
-
- fixed-link {
- speed = <2500>;
- full-duplex;
- };
- };
-
- port@9 {
- reg = <0x9>;
- phy-mode = "internal";
- status = "disabled";
-
- fixed-link {
- speed = <2500>;
- full-duplex;
- };
- };
- };
- };
-};
diff --git a/Documentation/devicetree/bindings/net/dsa/qca8k.yaml b/Documentation/devicetree/bindings/net/dsa/qca8k.yaml
index f3c88371d76c..978162df51f7 100644
--- a/Documentation/devicetree/bindings/net/dsa/qca8k.yaml
+++ b/Documentation/devicetree/bindings/net/dsa/qca8k.yaml
@@ -159,7 +159,6 @@ examples:
port@0 {
reg = <0>;
- label = "cpu";
ethernet = <&gmac1>;
phy-mode = "rgmii";
@@ -221,7 +220,6 @@ examples:
port@0 {
reg = <0>;
- label = "cpu";
ethernet = <&gmac1>;
phy-mode = "rgmii";
@@ -268,7 +266,6 @@ examples:
port@6 {
reg = <0>;
- label = "cpu";
ethernet = <&gmac1>;
phy-mode = "sgmii";
diff --git a/Documentation/devicetree/bindings/net/dsa/realtek.yaml b/Documentation/devicetree/bindings/net/dsa/realtek.yaml
index 4f99aff029dc..1a7d45a8ad66 100644
--- a/Documentation/devicetree/bindings/net/dsa/realtek.yaml
+++ b/Documentation/devicetree/bindings/net/dsa/realtek.yaml
@@ -189,7 +189,6 @@ examples:
};
port@5 {
reg = <5>;
- label = "cpu";
ethernet = <&gmac0>;
phy-mode = "rgmii";
fixed-link {
@@ -277,7 +276,6 @@ examples:
};
port@6 {
reg = <6>;
- label = "cpu";
ethernet = <&fec1>;
phy-mode = "rgmii";
tx-internal-delay-ps = <2000>;
diff --git a/Documentation/devicetree/bindings/net/dsa/renesas,rzn1-a5psw.yaml b/Documentation/devicetree/bindings/net/dsa/renesas,rzn1-a5psw.yaml
index 4d428f5ad044..7ca9c19a157c 100644
--- a/Documentation/devicetree/bindings/net/dsa/renesas,rzn1-a5psw.yaml
+++ b/Documentation/devicetree/bindings/net/dsa/renesas,rzn1-a5psw.yaml
@@ -130,7 +130,8 @@ examples:
port@4 {
reg = <4>;
ethernet = <&gmac2>;
- label = "cpu";
+ phy-mode = "internal";
+
fixed-link {
speed = <1000>;
full-duplex;
diff --git a/Documentation/devicetree/bindings/net/dsa/vitesse,vsc73xx.txt b/Documentation/devicetree/bindings/net/dsa/vitesse,vsc73xx.txt
index bbf4a13f6d75..258bef483673 100644
--- a/Documentation/devicetree/bindings/net/dsa/vitesse,vsc73xx.txt
+++ b/Documentation/devicetree/bindings/net/dsa/vitesse,vsc73xx.txt
@@ -75,7 +75,6 @@ switch@0 {
};
vsc: port@6 {
reg = <6>;
- label = "cpu";
ethernet = <&gmac1>;
phy-mode = "rgmii";
fixed-link {
@@ -117,7 +116,6 @@ switch@2,0 {
};
vsc: port@6 {
reg = <6>;
- label = "cpu";
ethernet = <&enet0>;
phy-mode = "rgmii";
fixed-link {
diff --git a/Documentation/devicetree/bindings/net/engleder,tsnep.yaml b/Documentation/devicetree/bindings/net/engleder,tsnep.yaml
index d0e1476e15b5..5bd964a46a9d 100644
--- a/Documentation/devicetree/bindings/net/engleder,tsnep.yaml
+++ b/Documentation/devicetree/bindings/net/engleder,tsnep.yaml
@@ -20,7 +20,26 @@ properties:
maxItems: 1
interrupts:
- maxItems: 1
+ minItems: 1
+ maxItems: 8
+
+ interrupt-names:
+ minItems: 1
+ items:
+ - const: mac
+ - const: txrx-1
+ - const: txrx-2
+ - const: txrx-3
+ - const: txrx-4
+ - const: txrx-5
+ - const: txrx-6
+ - const: txrx-7
+ description:
+ The main interrupt for basic MAC features and the first TX/RX queue pair
+ is named "mac". "txrx-[1-7]" are the interrupts for additional TX/RX
+ queue pairs.
+
+ dma-coherent: true
local-mac-address: true
@@ -58,7 +77,7 @@ examples:
axi {
#address-cells = <2>;
#size-cells = <2>;
- tnsep0: ethernet@a0000000 {
+ tsnep0: ethernet@a0000000 {
compatible = "engleder,tsnep";
reg = <0x0 0xa0000000 0x0 0x10000>;
interrupts = <0 89 1>;
@@ -76,4 +95,24 @@ examples:
};
};
};
+
+ tsnep1: ethernet@a0010000 {
+ compatible = "engleder,tsnep";
+ reg = <0x0 0xa0010000 0x0 0x10000>;
+ interrupts = <0 93 1>, <0 94 1>, <0 95 1>, <0 96 1>;
+ interrupt-names = "mac", "txrx-1", "txrx-2", "txrx-3";
+ interrupt-parent = <&gic>;
+ local-mac-address = [00 00 00 00 00 00];
+ phy-mode = "rgmii";
+ phy-handle = <&phy1>;
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ suppress-preamble;
+ phy1: ethernet-phy@1 {
+ reg = <1>;
+ rxc-skew-ps = <1080>;
+ };
+ };
+ };
};
diff --git a/Documentation/devicetree/bindings/net/ethernet-controller.yaml b/Documentation/devicetree/bindings/net/ethernet-controller.yaml
index c138a1022879..4b3c590fcebf 100644
--- a/Documentation/devicetree/bindings/net/ethernet-controller.yaml
+++ b/Documentation/devicetree/bindings/net/ethernet-controller.yaml
@@ -67,6 +67,7 @@ properties:
- gmii
- sgmii
- qsgmii
+ - qusgmii
- tbi
- rev-mii
- rmii
diff --git a/Documentation/devicetree/bindings/net/ethernet-phy.yaml b/Documentation/devicetree/bindings/net/ethernet-phy.yaml
index ed1415a4381f..ad808e9ce5b9 100644
--- a/Documentation/devicetree/bindings/net/ethernet-phy.yaml
+++ b/Documentation/devicetree/bindings/net/ethernet-phy.yaml
@@ -144,6 +144,12 @@ properties:
Mark the corresponding energy efficient ethernet mode as
broken and request the ethernet to stop advertising it.
+ pses:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ maxItems: 1
+ description:
+ Specifies a reference to a node representing a Power Sourcing Equipment.
+
phy-is-integrated:
$ref: /schemas/types.yaml#/definitions/flag
description:
diff --git a/Documentation/devicetree/bindings/net/fsl,fec.yaml b/Documentation/devicetree/bindings/net/fsl,fec.yaml
index 5cfb661be124..e0f376f7e274 100644
--- a/Documentation/devicetree/bindings/net/fsl,fec.yaml
+++ b/Documentation/devicetree/bindings/net/fsl,fec.yaml
@@ -21,6 +21,7 @@ properties:
- fsl,imx28-fec
- fsl,imx6q-fec
- fsl,mvf600-fec
+ - fsl,s32v234-fec
- items:
- enum:
- fsl,imx53-fec
diff --git a/Documentation/devicetree/bindings/net/fsl,fman-dtsec.yaml b/Documentation/devicetree/bindings/net/fsl,fman-dtsec.yaml
new file mode 100644
index 000000000000..3a35ac1c260d
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/fsl,fman-dtsec.yaml
@@ -0,0 +1,145 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/fsl,fman-dtsec.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NXP FMan MAC
+
+maintainers:
+ - Madalin Bucur
+
+description: |
+ Each FMan has several MACs, each implementing an Ethernet interface. Earlier
+ versions of FMan used the Datapath Three Speed Ethernet Controller (dTSEC) for
+ 10/100/1000 MBit/s speeds, and the 10-Gigabit Ethernet Media Access Controller
+ (10GEC) for 10 Gbit/s speeds. Later versions of FMan use the Multirate
+ Ethernet Media Access Controller (mEMAC) to handle all speeds.
+
+properties:
+ compatible:
+ enum:
+ - fsl,fman-dtsec
+ - fsl,fman-xgec
+ - fsl,fman-memac
+
+ cell-index:
+ maximum: 64
+ description: |
+ FManV2:
+ register[bit]             MAC          cell-index
+ ============================================================
+ FM_EPI[16]                XGEC         8
+ FM_EPI[16+n]              dTSECn       n-1
+ FM_NPI[11+n]              dTSECn       n-1
+         n = 1,..,5
+
+ FManV3:
+ register[bit]             MAC          cell-index
+ ============================================================
+ FM_EPI[16+n]              mEMACn       n-1
+ FM_EPI[25]                mEMAC10      9
+
+ FM_NPI[11+n]              mEMACn       n-1
+ FM_NPI[10]                mEMAC10      9
+ FM_NPI[11]                mEMAC9       8
+         n = 1,..8
+
+ FM_EPI and FM_NPI are located in the FMan memory map.
+
+ 2. SoC registers:
+
+ - P2041, P3041, P4080 P5020, P5040:
+ register[bit]              FMan    MAC      cell
+                            Unit             index
+ ============================================================
+ DCFG_DEVDISR2[7]           1       XGEC     8
+ DCFG_DEVDISR2[7+n]         1       dTSECn   n-1
+ DCFG_DEVDISR2[15]          2       XGEC     8
+ DCFG_DEVDISR2[15+n]        2       dTSECn   n-1
+         n = 1,..5
+
+ - T1040, T2080, T4240, B4860:
+ register[bit]              FMan    MAC      cell
+                            Unit             index
+ ============================================================
+ DCFG_CCSR_DEVDISR2[n-1]    1       mEMACn   n-1
+ DCFG_CCSR_DEVDISR2[11+n]   2       mEMACn   n-1
+ n = 1,..6,9,10
+
+ EVDISR, DCFG_DEVDISR2 and DCFG_CCSR_DEVDISR2 are located in
+ the specific SoC "Device Configuration/Pin Control" Memory
+ Map.
+
+ reg:
+ maxItems: 1
+
+ fsl,fman-ports:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ maxItems: 2
+ description: |
+ An array of two references: the first is the FMan RX port and the second
+ is the TX port used by this MAC.
+
+ ptp-timer:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description: A reference to the IEEE1588 timer
+
+ pcsphy-handle:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description: A reference to the PCS (typically found on the SerDes)
+
+ tbi-handle:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description: A reference to the (TBI-based) PCS
+
+required:
+ - compatible
+ - cell-index
+ - reg
+ - fsl,fman-ports
+ - ptp-timer
+
+allOf:
+ - $ref: ethernet-controller.yaml#
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: fsl,fman-dtsec
+ then:
+ required:
+ - tbi-handle
+ - if:
+ properties:
+ compatible:
+ contains:
+ const: fsl,fman-memac
+ then:
+ required:
+ - pcsphy-handle
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ ethernet@e0000 {
+ compatible = "fsl,fman-dtsec";
+ cell-index = <0>;
+ reg = <0xe0000 0x1000>;
+ fsl,fman-ports = <&fman1_rx8 &fman1_tx28>;
+ ptp-timer = <&ptp_timer>;
+ tbi-handle = <&tbi0>;
+ };
+ - |
+ ethernet@e8000 {
+ cell-index = <4>;
+ compatible = "fsl,fman-memac";
+ reg = <0xe8000 0x1000>;
+ fsl,fman-ports = <&fman0_rx_0x0c &fman0_tx_0x2c>;
+ ptp-timer = <&ptp_timer0>;
+ pcsphy-handle = <&pcsphy4>;
+ phy-handle = <&sgmii_phy1>;
+ phy-connection-type = "sgmii";
+ };
+...
diff --git a/Documentation/devicetree/bindings/net/fsl-fman.txt b/Documentation/devicetree/bindings/net/fsl-fman.txt
index 801efc7d6818..b9055335db3b 100644
--- a/Documentation/devicetree/bindings/net/fsl-fman.txt
+++ b/Documentation/devicetree/bindings/net/fsl-fman.txt
@@ -232,133 +232,7 @@ port@81000 {
=============================================================================
FMan dTSEC/XGEC/mEMAC Node
-DESCRIPTION
-
-mEMAC/dTSEC/XGEC are the Ethernet network interfaces
-
-PROPERTIES
-
-- compatible
- Usage: required
- Value type:
- Definition: A standard property.
- Must include one of the following:
- - "fsl,fman-dtsec" for dTSEC MAC
- - "fsl,fman-xgec" for XGEC MAC
- - "fsl,fman-memac" for mEMAC MAC
-
-- cell-index
- Usage: required
- Value type:
- Definition: Specifies the MAC id.
-
- The cell-index value may be used by the FMan or the SoC, to
- identify the MAC unit in the FMan (or SoC) memory map.
- In the tables below there's a description of the cell-index
- use, there are two tables, one describes the use of cell-index
- by the FMan, the second describes the use by the SoC:
-
- 1. FMan Registers
-
- FManV2:
- register[bit] MAC cell-index
- ============================================================
- FM_EPI[16] XGEC 8
- FM_EPI[16+n] dTSECn n-1
- FM_NPI[11+n] dTSECn n-1
- n = 1,..,5
-
- FManV3:
- register[bit] MAC cell-index
- ============================================================
- FM_EPI[16+n] mEMACn n-1
- FM_EPI[25] mEMAC10 9
-
- FM_NPI[11+n] mEMACn n-1
- FM_NPI[10] mEMAC10 9
- FM_NPI[11] mEMAC9 8
- n = 1,..8
-
- FM_EPI and FM_NPI are located in the FMan memory map.
-
- 2. SoC registers:
-
- - P2041, P3041, P4080 P5020, P5040:
- register[bit] FMan MAC cell
- Unit index
- ============================================================
- DCFG_DEVDISR2[7] 1 XGEC 8
- DCFG_DEVDISR2[7+n] 1 dTSECn n-1
- DCFG_DEVDISR2[15] 2 XGEC 8
- DCFG_DEVDISR2[15+n] 2 dTSECn n-1
- n = 1,..5
-
- - T1040, T2080, T4240, B4860:
- register[bit] FMan MAC cell
- Unit index
- ============================================================
- DCFG_CCSR_DEVDISR2[n-1] 1 mEMACn n-1
- DCFG_CCSR_DEVDISR2[11+n] 2 mEMACn n-1
- n = 1,..6,9,10
-
- EVDISR, DCFG_DEVDISR2 and DCFG_CCSR_DEVDISR2 are located in
- the specific SoC "Device Configuration/Pin Control" Memory
- Map.
-
-- reg
- Usage: required
- Value type:
- Definition: A standard property.
-
-- fsl,fman-ports
- Usage: required
- Value type:
- Definition: An array of two phandles - the first references is
- the FMan RX port and the second is the TX port used by this
- MAC.
-
-- ptp-timer
- Usage required
- Value type:
- Definition: A phandle for 1EEE1588 timer.
-
-- pcsphy-handle
- Usage required for "fsl,fman-memac" MACs
- Value type:
- Definition: A phandle for pcsphy.
-
-- tbi-handle
- Usage required for "fsl,fman-dtsec" MACs
- Value type:
- Definition: A phandle for tbiphy.
-
-EXAMPLE
-
-fman1_tx28: port@a8000 {
- cell-index = <0x28>;
- compatible = "fsl,fman-v2-port-tx";
- reg = <0xa8000 0x1000>;
-};
-
-fman1_rx8: port@88000 {
- cell-index = <0x8>;
- compatible = "fsl,fman-v2-port-rx";
- reg = <0x88000 0x1000>;
-};
-
-ptp-timer: ptp_timer@fe000 {
- compatible = "fsl,fman-ptp-timer";
- reg = <0xfe000 0x1000>;
-};
-
-ethernet@e0000 {
- compatible = "fsl,fman-dtsec";
- cell-index = <0>;
- reg = <0xe0000 0x1000>;
- fsl,fman-ports = <&fman1_rx8 &fman1_tx28>;
- ptp-timer = <&ptp-timer>;
- tbi-handle = <&tbi0>;
-};
+Refer to Documentation/devicetree/bindings/net/fsl,fman-dtsec.yaml
============================================================================
FMan IEEE 1588 Node
diff --git a/Documentation/devicetree/bindings/net/mediatek,mt7620-gsw.txt b/Documentation/devicetree/bindings/net/mediatek,mt7620-gsw.txt
deleted file mode 100644
index 358fed2fab43..000000000000
--- a/Documentation/devicetree/bindings/net/mediatek,mt7620-gsw.txt
+++ /dev/null
@@ -1,24 +0,0 @@
-Mediatek Gigabit Switch
-=======================
-
-The mediatek gigabit switch can be found on Mediatek SoCs (mt7620, mt7621).
-
-Required properties:
-- compatible: Should be "mediatek,mt7620-gsw" or "mediatek,mt7621-gsw"
-- reg: Address and length of the register set for the device
-- interrupts: Should contain the gigabit switches interrupt
-- resets: Should contain the gigabit switches resets
-- reset-names: Should contain the reset names "gsw"
-
-Example:
-
-gsw@10110000 {
- compatible = "ralink,mt7620-gsw";
- reg = <0x10110000 8000>;
-
- resets = <&rstctrl 23>;
- reset-names = "gsw";
-
- interrupt-parent = <&intc>;
- interrupts = <17>;
-};
diff --git a/Documentation/devicetree/bindings/net/mediatek,net.yaml b/Documentation/devicetree/bindings/net/mediatek,net.yaml
index f5564ecddb62..7ef696204c5a 100644
--- a/Documentation/devicetree/bindings/net/mediatek,net.yaml
+++ b/Documentation/devicetree/bindings/net/mediatek,net.yaml
@@ -69,6 +69,15 @@ properties:
A list of phandle to the syscon node that handles the SGMII setup which is required for
those SoCs equipped with SGMII.
+ mediatek,wed:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ minItems: 2
+ maxItems: 2
+ items:
+ maxItems: 1
+ description:
+ List of phandles to wireless ethernet dispatch nodes.
+
dma-coherent: true
mdio-bus:
@@ -112,6 +121,8 @@ allOf:
Phandle to the syscon node that handles the ports slew rate and
driver current.
+ mediatek,wed: false
+
- if:
properties:
compatible:
@@ -144,15 +155,6 @@ allOf:
minItems: 1
maxItems: 1
- mediatek,wed:
- $ref: /schemas/types.yaml#/definitions/phandle-array
- minItems: 2
- maxItems: 2
- items:
- maxItems: 1
- description:
- List of phandles to wireless ethernet dispatch nodes.
-
mediatek,pcie-mirror:
$ref: /schemas/types.yaml#/definitions/phandle
description:
@@ -202,6 +204,8 @@ allOf:
minItems: 2
maxItems: 2
+ mediatek,wed: false
+
- if:
properties:
compatible:
@@ -238,6 +242,11 @@ allOf:
minItems: 2
maxItems: 2
+ mediatek,wed-pcie:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description:
+ Phandle to the mediatek wed-pcie controller.
+
patternProperties:
"^mac@[0-1]$":
type: object
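
The "mediatek,wed" property now sits at the top level of the binding and
always carries exactly two phandles, one per Wireless Ethernet Dispatch
instance. A minimal sketch of how an MT7986 Ethernet node might reference
them (node labels and register values are illustrative, not taken from this
patch):

    ethernet@15100000 {
        compatible = "mediatek,mt7986-eth";
        reg = <0 0x15100000 0 0x80000>;
        /* one phandle per WED instance, always two entries */
        mediatek,wed = <&wed0>, <&wed1>;
        /* clocks, resets and the mac@0/mac@1 child nodes omitted */
    };
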
diff --git a/Documentation/devicetree/bindings/net/mediatek-dwmac.yaml b/Documentation/devicetree/bindings/net/mediatek-dwmac.yaml
index 61b2fb9e141b..0fa2132fa4f4 100644
--- a/Documentation/devicetree/bindings/net/mediatek-dwmac.yaml
+++ b/Documentation/devicetree/bindings/net/mediatek-dwmac.yaml
@@ -19,6 +19,7 @@ select:
contains:
enum:
- mediatek,mt2712-gmac
+ - mediatek,mt8188-gmac
- mediatek,mt8195-gmac
required:
- compatible
@@ -37,6 +38,11 @@ properties:
- enum:
- mediatek,mt8195-gmac
- const: snps,dwmac-5.10a
+ - items:
+ - enum:
+ - mediatek,mt8188-gmac
+ - const: mediatek,mt8195-gmac
+ - const: snps,dwmac-5.10a
clocks:
minItems: 5
@@ -74,7 +80,7 @@ properties:
or will round down. Range 0~31*170.
For MT2712 RMII/MII interface, Allowed value need to be a multiple of 550,
or will round down. Range 0~31*550.
- For MT8195 RGMII/RMII/MII interface, Allowed value need to be a multiple of 290,
+ For MT8188/MT8195 RGMII/RMII/MII interface, Allowed value need to be a multiple of 290,
or will round down. Range 0~31*290.
mediatek,rx-delay-ps:
@@ -84,7 +90,7 @@ properties:
or will round down. Range 0~31*170.
For MT2712 RMII/MII interface, Allowed value need to be a multiple of 550,
or will round down. Range 0~31*550.
- For MT8195 RGMII/RMII/MII interface, Allowed value need to be a multiple
+ For MT8188/MT8195 RGMII/RMII/MII interface, Allowed value need to be a multiple
of 290, or will round down. Range 0~31*290.
mediatek,rmii-rxc:
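
Both delay properties are programmed in hardware steps, so only multiples of
the step size take effect; on MT8188/MT8195 the step is 290ps and the largest
programmable value is 31 * 290 = 8990ps. A short sketch of a board fragment
using a seven-step (2030ps) TX delay (the node label and values are purely
illustrative):

    &eth {
        phy-mode = "rgmii";
        mediatek,tx-delay-ps = <2030>;  /* 7 x 290ps */
        mediatek,rx-delay-ps = <290>;   /* 1 x 290ps */
    };
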
diff --git a/Documentation/devicetree/bindings/net/microchip,sparx5-switch.yaml b/Documentation/devicetree/bindings/net/microchip,sparx5-switch.yaml
index 6c86d3d85e99..57ffeb8fc876 100644
--- a/Documentation/devicetree/bindings/net/microchip,sparx5-switch.yaml
+++ b/Documentation/devicetree/bindings/net/microchip,sparx5-switch.yaml
@@ -74,16 +74,20 @@ properties:
ethernet-ports:
type: object
+ additionalProperties: false
+
+ properties:
+ '#address-cells':
+ const: 1
+ '#size-cells':
+ const: 0
+
patternProperties:
"^port@[0-9a-f]+$":
- type: object
+ $ref: /schemas/net/ethernet-controller.yaml#
+ unevaluatedProperties: false
properties:
- '#address-cells':
- const: 1
- '#size-cells':
- const: 0
-
reg:
description: Switch port number
@@ -93,29 +97,11 @@ properties:
phandle of a Ethernet SerDes PHY. This defines which SerDes
instance will handle the Ethernet traffic.
- phy-mode:
- description:
- This specifies the interface used by the Ethernet SerDes towards
- the PHY or SFP.
-
microchip,bandwidth:
description: Specifies bandwidth in Mbit/s allocated to the port.
$ref: "/schemas/types.yaml#/definitions/uint32"
maximum: 25000
- phy-handle:
- description:
- phandle of a Ethernet PHY. This is optional and if provided it
- points to the cuPHY used by the Ethernet SerDes.
-
- sfp:
- description:
- phandle of an SFP. This is optional and used when not specifying
- a cuPHY. It points to the SFP node that describes the SFP used by
- the Ethernet SerDes.
-
- managed: true
-
microchip,sd-sgpio:
description:
Index of the ports Signal Detect SGPIO in the set of 384 SGPIOs
@@ -144,8 +130,6 @@ required:
- reg-names
- interrupts
- interrupt-names
- - resets
- - reset-names
- ethernet-ports
additionalProperties: false
diff --git a/Documentation/devicetree/bindings/net/nfc/marvell,nci.yaml b/Documentation/devicetree/bindings/net/nfc/marvell,nci.yaml
index a191a04e681c..308485a8ee6c 100644
--- a/Documentation/devicetree/bindings/net/nfc/marvell,nci.yaml
+++ b/Documentation/devicetree/bindings/net/nfc/marvell,nci.yaml
@@ -128,7 +128,7 @@ examples:
i2c-int-rising;
- reset-n-io = <&gpio3 19 GPIO_ACTIVE_HIGH>;
+ reset-n-io = <&gpio3 19 GPIO_ACTIVE_LOW>;
};
};
@@ -151,7 +151,7 @@ examples:
interrupt-parent = <&gpio1>;
interrupts = <17 IRQ_TYPE_EDGE_RISING>;
- reset-n-io = <&gpio3 19 GPIO_ACTIVE_HIGH>;
+ reset-n-io = <&gpio3 19 GPIO_ACTIVE_LOW>;
};
};
@@ -162,7 +162,7 @@ examples:
nfc {
compatible = "marvell,nfc-uart";
- reset-n-io = <&gpio3 16 GPIO_ACTIVE_HIGH>;
+ reset-n-io = <&gpio3 16 GPIO_ACTIVE_LOW>;
hci-muxed;
flow-control;
diff --git a/Documentation/devicetree/bindings/net/nxp,tja11xx.yaml b/Documentation/devicetree/bindings/net/nxp,tja11xx.yaml
index d51da24f3505..ab8867e6939b 100644
--- a/Documentation/devicetree/bindings/net/nxp,tja11xx.yaml
+++ b/Documentation/devicetree/bindings/net/nxp,tja11xx.yaml
@@ -31,6 +31,22 @@ patternProperties:
description:
The ID number for the child PHY. Should be +1 of parent PHY.
+ nxp,rmii-refclk-in:
+ type: boolean
+ description: |
+ The REF_CLK is provided for both transmitted and received data
+ in RMII mode. This clock signal is provided by the PHY and is
+ typically derived from an external 25MHz crystal. Alternatively,
+ a 50MHz clock signal generated by an external oscillator can be
+ connected to pin REF_CLK. A third option is to connect a 25MHz
+ clock to pin CLK_IN_OUT. So, the REF_CLK should be configured
+ as input or output according to the actual circuit connection.
+ If present, indicates that the REF_CLK will be configured as
+ interface reference clock input when RMII mode enabled.
+ If not present, the REF_CLK will be configured as interface
+ reference clock output when RMII mode enabled.
+ Only supported on TJA1100 and TJA1101.
+
required:
- reg
@@ -44,6 +60,7 @@ examples:
tja1101_phy0: ethernet-phy@4 {
reg = <0x4>;
+ nxp,rmii-refclk-in;
};
};
- |
diff --git a/Documentation/devicetree/bindings/net/pse-pd/podl-pse-regulator.yaml b/Documentation/devicetree/bindings/net/pse-pd/podl-pse-regulator.yaml
new file mode 100644
index 000000000000..c6b1c188abf7
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/pse-pd/podl-pse-regulator.yaml
@@ -0,0 +1,40 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/pse-pd/podl-pse-regulator.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Regulator based Power Sourcing Equipment
+
+maintainers:
+ - Oleksij Rempel
+
+description: Regulator based PoDL PSE controller. The device must be referenced
+ by the PHY node to control power injection to the Ethernet cable.
+
+allOf:
+ - $ref: "pse-controller.yaml#"
+
+properties:
+ compatible:
+ const: podl-pse-regulator
+
+ '#pse-cells':
+ const: 0
+
+ pse-supply:
+ description: Power supply for the PSE controller
+
+additionalProperties: false
+
+required:
+ - compatible
+ - pse-supply
+
+examples:
+ - |
+ ethernet-pse {
+ compatible = "podl-pse-regulator";
+ pse-supply = <®_t1l1>;
+ #pse-cells = <0>;
+ };
diff --git a/Documentation/devicetree/bindings/net/pse-pd/pse-controller.yaml b/Documentation/devicetree/bindings/net/pse-pd/pse-controller.yaml
new file mode 100644
index 000000000000..b110abb42597
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/pse-pd/pse-controller.yaml
@@ -0,0 +1,33 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/pse-pd/pse-controller.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Power Sourcing Equipment (PSE).
+
+description: Binding for the Power Sourcing Equipment (PSE) as defined in the
+ IEEE 802.3 specification. It is designed for hardware which delivers
+ power over the twisted-pair/Ethernet cable. The ethernet-pse nodes should
+ be used to describe the PSE controllers and are referenced by the
+ ethernet-phy nodes.
+
+maintainers:
+ - Oleksij Rempel
+
+properties:
+ $nodename:
+ pattern: "^ethernet-pse(@.*)?$"
+
+ "#pse-cells":
+ description:
+ Used to uniquely identify a PSE instance within an IC. Will be
+ 0 on PSE nodes with only a single output and at least 1 on nodes
+ controlling several outputs.
+ enum: [0, 1]
+
+required:
+ - "#pse-cells"
+
+additionalProperties: true
+
+...
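
Together with the new "pses" property on ethernet-phy nodes, a PSE controller
is described once and then referenced from the PHY that drives the powered
port. A minimal sketch, assuming a regulator-based PoDL PSE with the
illustrative label "t1l1_pse":

    t1l1_pse: ethernet-pse {
        compatible = "podl-pse-regulator";
        pse-supply = <&reg_t1l1>;
        #pse-cells = <0>;
    };

    ethernet-phy@4 {
        reg = <4>;
        /* no cell argument needed because #pse-cells is 0 */
        pses = <&t1l1_pse>;
    };

A controller with several outputs would use #pse-cells = <1> instead and be
referenced as pses = <&pse_ctrl 2>, with the extra cell selecting the output.
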
diff --git a/Documentation/devicetree/bindings/net/qca,ar803x.yaml b/Documentation/devicetree/bindings/net/qca,ar803x.yaml
index b3d4013b7ca6..161d28919316 100644
--- a/Documentation/devicetree/bindings/net/qca,ar803x.yaml
+++ b/Documentation/devicetree/bindings/net/qca,ar803x.yaml
@@ -40,6 +40,14 @@ properties:
Only supported on the AR8031.
type: boolean
+ qca,disable-hibernation-mode:
+ description: |
+ Disable the Atheros AR803X PHY's hibernation mode. If present, the
+ PHY will not enter a power-saving state when the cable is
+ disconnected, and RX_CLK keeps outputting a valid clock.
+ type: boolean
+
qca,smarteee-tw-us-100m:
description: EEE Tw parameter for 100M links.
$ref: /schemas/types.yaml#/definitions/uint32
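
Boards whose MAC needs RX_CLK to stay valid even while the cable is unplugged
can simply add the new flag to the PHY node; a minimal sketch (the MDIO
address is illustrative):

    ethernet-phy@0 {
        reg = <0>;
        /* keep the PHY, and thus RX_CLK, running with no link partner */
        qca,disable-hibernation-mode;
    };
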
diff --git a/Documentation/devicetree/bindings/net/ralink,rt2880-net.txt b/Documentation/devicetree/bindings/net/ralink,rt2880-net.txt
deleted file mode 100644
index 9fe1a0a22e44..000000000000
--- a/Documentation/devicetree/bindings/net/ralink,rt2880-net.txt
+++ /dev/null
@@ -1,59 +0,0 @@
-Ralink Frame Engine Ethernet controller
-=======================================
-
-The Ralink frame engine ethernet controller can be found on Ralink and
-Mediatek SoCs (RT288x, RT3x5x, RT366x, RT388x, rt5350, mt7620, mt7621, mt76x8).
-
-Depending on the SoC, there is a number of ports connected to the CPU port
-directly and/or via a (gigabit-)switch.
-
-* Ethernet controller node
-
-Required properties:
-- compatible: Should be one of "ralink,rt2880-eth", "ralink,rt3050-eth",
- "ralink,rt3050-eth", "ralink,rt3883-eth", "ralink,rt5350-eth",
- "mediatek,mt7620-eth", "mediatek,mt7621-eth"
-- reg: Address and length of the register set for the device
-- interrupts: Should contain the frame engines interrupt
-- resets: Should contain the frame engines resets
-- reset-names: Should contain the reset names "fe". If a switch is present
- "esw" is also required.
-
-
-* Ethernet port node
-
-Required properties:
-- compatible: Should be "ralink,eth-port"
-- reg: The number of the physical port
-- phy-handle: reference to the node describing the phy
-
-Example:
-
-mdio-bus {
- ...
- phy0: ethernet-phy@0 {
- phy-mode = "mii";
- reg = <0>;
- };
-};
-
-ethernet@400000 {
- compatible = "ralink,rt2880-eth";
- reg = <0x00400000 10000>;
-
- #address-cells = <1>;
- #size-cells = <0>;
-
- resets = <&rstctrl 18>;
- reset-names = "fe";
-
- interrupt-parent = <&cpuintc>;
- interrupts = <5>;
-
- port@0 {
- compatible = "ralink,eth-port";
- reg = <0>;
- phy-handle = <&phy0>;
- };
-
-};
diff --git a/Documentation/devicetree/bindings/net/ralink,rt3050-esw.txt b/Documentation/devicetree/bindings/net/ralink,rt3050-esw.txt
deleted file mode 100644
index 87e315856efa..000000000000
--- a/Documentation/devicetree/bindings/net/ralink,rt3050-esw.txt
+++ /dev/null
@@ -1,30 +0,0 @@
-Ralink Fast Ethernet Embedded Switch
-====================================
-
-The ralink fast ethernet embedded switch can be found on Ralink and Mediatek
-SoCs (RT3x5x, RT5350, MT76x8).
-
-Required properties:
-- compatible: Should be "ralink,rt3050-esw"
-- reg: Address and length of the register set for the device
-- interrupts: Should contain the embedded switches interrupt
-- resets: Should contain the embedded switches resets
-- reset-names: Should contain the reset names "esw"
-
-Optional properties:
-- ralink,portmap: can be used to choose if the default switch setup is
- llllw or wllll
-- ralink,led_polarity: override the active high/low settings of the leds
-
-Example:
-
-esw@10110000 {
- compatible = "ralink,rt3050-esw";
- reg = <0x10110000 8000>;
-
- resets = <&rstctrl 23>;
- reset-names = "esw";
-
- interrupt-parent = <&intc>;
- interrupts = <17>;
-};
diff --git a/Documentation/devicetree/bindings/net/renesas,etheravb.yaml b/Documentation/devicetree/bindings/net/renesas,etheravb.yaml
index acf347f3cdbe..3f41294f5997 100644
--- a/Documentation/devicetree/bindings/net/renesas,etheravb.yaml
+++ b/Documentation/devicetree/bindings/net/renesas,etheravb.yaml
@@ -40,9 +40,14 @@ properties:
- renesas,etheravb-r8a77980 # R-Car V3H
- renesas,etheravb-r8a77990 # R-Car E3
- renesas,etheravb-r8a77995 # R-Car D3
- - renesas,etheravb-r8a779a0 # R-Car V3U
- const: renesas,etheravb-rcar-gen3 # R-Car Gen3 and RZ/G2
+ - items:
+ - enum:
+ - renesas,etheravb-r8a779a0 # R-Car V3U
+ - renesas,etheravb-r8a779g0 # R-Car V4H
+ - const: renesas,etheravb-rcar-gen4 # R-Car Gen4
+
- items:
- enum:
- renesas,etheravb-r9a09g011 # RZ/V2M
@@ -207,7 +212,7 @@ allOf:
- renesas,etheravb-r8a77965
- renesas,etheravb-r8a77970
- renesas,etheravb-r8a77980
- - renesas,etheravb-r8a779a0
+ - renesas,etheravb-rcar-gen4
then:
required:
- tx-internal-delay-ps
diff --git a/Documentation/devicetree/bindings/net/rockchip-dwmac.yaml b/Documentation/devicetree/bindings/net/rockchip-dwmac.yaml
index 083623c8d718..42fb72b6909d 100644
--- a/Documentation/devicetree/bindings/net/rockchip-dwmac.yaml
+++ b/Documentation/devicetree/bindings/net/rockchip-dwmac.yaml
@@ -25,7 +25,9 @@ select:
- rockchip,rk3368-gmac
- rockchip,rk3399-gmac
- rockchip,rk3568-gmac
+ - rockchip,rk3588-gmac
- rockchip,rv1108-gmac
+ - rockchip,rv1126-gmac
required:
- compatible
@@ -47,9 +49,11 @@ properties:
- rockchip,rk3368-gmac
- rockchip,rk3399-gmac
- rockchip,rv1108-gmac
+ - rockchip,rv1126-gmac
- items:
- enum:
- rockchip,rk3568-gmac
+ - rockchip,rk3588-gmac
- const: snps,dwmac-4.20a
clocks:
@@ -81,6 +85,11 @@ properties:
description: The phandle of the syscon node for the general register file.
$ref: /schemas/types.yaml#/definitions/phandle
+ rockchip,php-grf:
+ description:
+ The phandle of the syscon node for the peripheral general register file.
+ $ref: /schemas/types.yaml#/definitions/phandle
+
tx_delay:
description: Delay value for TXD timing. Range value is 0~0x7F, 0x30 as default.
$ref: /schemas/types.yaml#/definitions/uint32
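
RK3588 device trees reference both system controllers, the general GRF and
the new peripheral GRF; a minimal sketch of such a node (addresses and labels
follow common RK3588 naming and are illustrative here):

    gmac1: ethernet@fe1c0000 {
        compatible = "rockchip,rk3588-gmac", "snps,dwmac-4.20a";
        reg = <0x0 0xfe1c0000 0x0 0x10000>;
        rockchip,grf = <&sys_grf>;
        rockchip,php-grf = <&php_grf>;
        /* clocks, interrupts and the mdio subnode omitted */
    };
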
diff --git a/Documentation/devicetree/bindings/net/snps,dwmac.yaml b/Documentation/devicetree/bindings/net/snps,dwmac.yaml
index 491597c02edf..f94a0a4320f1 100644
--- a/Documentation/devicetree/bindings/net/snps,dwmac.yaml
+++ b/Documentation/devicetree/bindings/net/snps,dwmac.yaml
@@ -74,6 +74,7 @@ properties:
- rockchip,rk3328-gmac
- rockchip,rk3366-gmac
- rockchip,rk3368-gmac
+ - rockchip,rk3588-gmac
- rockchip,rk3399-gmac
- rockchip,rv1108-gmac
- snps,dwmac
@@ -288,6 +289,11 @@ properties:
is supported. For example, this is used in case of SGMII and
MAC2MAC connection.
+ snps,clk-csr:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ Frequency division factor for MDC clock.
+
mdio:
$ref: mdio.yaml#
unevaluatedProperties: false
@@ -301,6 +307,60 @@ properties:
required:
- compatible
+ stmmac-axi-config:
+ type: object
+ unevaluatedProperties: false
+ description:
+ AXI BUS Mode parameters.
+
+ properties:
+ snps,lpi_en:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ enable Low Power Interface
+
+ snps,xit_frm:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ unlock on WoL
+
+ snps,wr_osr_lmt:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ max write outstanding req. limit
+
+ snps,rd_osr_lmt:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ max read outstanding req. limit
+
+ snps,kbbe:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ do not cross 1KiB boundary.
+
+ snps,blen:
+ $ref: /schemas/types.yaml#/definitions/uint32-array
+ description:
+ this is a vector of supported burst length.
+ minItems: 7
+ maxItems: 7
+
+ snps,fb:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ fixed-burst
+
+ snps,mb:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ mixed-burst
+
+ snps,rb:
+ $ref: /schemas/types.yaml#/definitions/flag
+ description:
+ rebuild INCRx Burst
+
required:
- compatible
- reg
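
The new stmmac-axi-config subnode documents what board files already pass to
the dwmac driver; a short sketch of typical contents (values are illustrative
and the snps,blen vector must always contain seven entries):

    /* inside a snps,dwmac compatible node */
    snps,clk-csr = <4>;                       /* MDC clock divider selection */

    stmmac-axi-config {
        snps,wr_osr_lmt = <0xf>;              /* max outstanding write requests */
        snps,rd_osr_lmt = <0xf>;              /* max outstanding read requests */
        snps,blen = <256 128 64 32 0 0 0>;    /* supported AXI burst lengths */
        snps,fb;                              /* fixed-burst mode */
    };
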
diff --git a/Documentation/devicetree/bindings/net/sunplus,sp7021-emac.yaml b/Documentation/devicetree/bindings/net/sunplus,sp7021-emac.yaml
index 62dffee27c3d..8e51dcdb4796 100644
--- a/Documentation/devicetree/bindings/net/sunplus,sp7021-emac.yaml
+++ b/Documentation/devicetree/bindings/net/sunplus,sp7021-emac.yaml
@@ -32,6 +32,7 @@ properties:
ethernet-ports:
type: object
+ additionalProperties: false
description: Ethernet ports to PHY
properties:
@@ -44,6 +45,7 @@ properties:
patternProperties:
"^port@[0-1]$":
type: object
+ additionalProperties: false
description: Port to PHY
properties:
diff --git a/Documentation/devicetree/bindings/net/ti,cpsw-switch.yaml b/Documentation/devicetree/bindings/net/ti,cpsw-switch.yaml
index 31bf825c6598..46e330f45768 100644
--- a/Documentation/devicetree/bindings/net/ti,cpsw-switch.yaml
+++ b/Documentation/devicetree/bindings/net/ti,cpsw-switch.yaml
@@ -77,6 +77,8 @@ properties:
ethernet-ports:
type: object
+ additionalProperties: false
+
properties:
'#address-cells':
const: 1
@@ -89,6 +91,7 @@ properties:
description: CPSW external ports
$ref: ethernet-controller.yaml#
+ unevaluatedProperties: false
properties:
reg:
@@ -117,6 +120,7 @@ properties:
cpts:
type: object
+ unevaluatedProperties: false
description:
The Common Platform Time Sync (CPTS) module
diff --git a/Documentation/devicetree/bindings/net/ti,k3-am654-cpsw-nuss.yaml b/Documentation/devicetree/bindings/net/ti,k3-am654-cpsw-nuss.yaml
index b8281d8be940..7d90beaccc60 100644
--- a/Documentation/devicetree/bindings/net/ti,k3-am654-cpsw-nuss.yaml
+++ b/Documentation/devicetree/bindings/net/ti,k3-am654-cpsw-nuss.yaml
@@ -55,6 +55,7 @@ properties:
compatible:
enum:
- ti,am654-cpsw-nuss
+ - ti,j7200-cpswxg-nuss
- ti,j721e-cpsw-nuss
- ti,am642-cpsw-nuss
@@ -110,16 +111,17 @@ properties:
const: 0
patternProperties:
- port@[1-2]:
+ "^port@[1-4]$":
type: object
description: CPSWxG NUSS external ports
$ref: ethernet-controller.yaml#
+ unevaluatedProperties: false
properties:
reg:
minimum: 1
- maximum: 2
+ maximum: 4
description: CPSW port number
phys:
@@ -178,6 +180,19 @@ required:
- '#address-cells'
- '#size-cells'
+allOf:
+ - if:
+ not:
+ properties:
+ compatible:
+ contains:
+ const: ti,j7200-cpswxg-nuss
+ then:
+ properties:
+ ethernet-ports:
+ patternProperties:
+ "^port@[3-4]$": false
+
additionalProperties: false
examples:
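
In practice the schema change means a J7200 CPSW5G instance may describe up
to four external ports, while the other compatibles are still rejected if
they declare port@3 or port@4. A sketch of the J7200 case (PHY handles, phys
arguments and the phy-mode are illustrative):

    ethernet-ports {
        #address-cells = <1>;
        #size-cells = <0>;

        port@1 {
            reg = <1>;
            phys = <&phy_gmii_sel 1>;
            phy-handle = <&phy0>;
            phy-mode = "rgmii-rxid";
        };

        /* only valid with compatible = "ti,j7200-cpswxg-nuss" */
        port@3 {
            reg = <3>;
            phys = <&phy_gmii_sel 3>;
            phy-handle = <&phy2>;
            phy-mode = "rgmii-rxid";
        };
    };
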
diff --git a/Documentation/devicetree/bindings/net/ti,k3-am654-cpts.yaml b/Documentation/devicetree/bindings/net/ti,k3-am654-cpts.yaml
index b783ad0d1f53..e9f78cef6b7f 100644
--- a/Documentation/devicetree/bindings/net/ti,k3-am654-cpts.yaml
+++ b/Documentation/devicetree/bindings/net/ti,k3-am654-cpts.yaml
@@ -95,6 +95,7 @@ properties:
refclk-mux:
type: object
+ additionalProperties: false
description: CPTS reference clock multiplexer clock
properties:
'#clock-cells':
diff --git a/Documentation/devicetree/bindings/net/vertexcom-mse102x.yaml b/Documentation/devicetree/bindings/net/vertexcom-mse102x.yaml
index 8156a9aeb589..304757bf9281 100644
--- a/Documentation/devicetree/bindings/net/vertexcom-mse102x.yaml
+++ b/Documentation/devicetree/bindings/net/vertexcom-mse102x.yaml
@@ -7,7 +7,7 @@ $schema: "http://devicetree.org/meta-schemas/core.yaml#"
title: The Vertexcom MSE102x (SPI) Device Tree Bindings
maintainers:
- - Stefan Wahren
+ - Stefan Wahren
description:
Vertexcom's MSE102x are a family of HomePlug GreenPHY chips.
diff --git a/Documentation/devicetree/bindings/net/wireless/brcm,bcm4329-fmac.yaml b/Documentation/devicetree/bindings/net/wireless/brcm,bcm4329-fmac.yaml
index 53b4153d9bfc..fec1cc9b9a08 100644
--- a/Documentation/devicetree/bindings/net/wireless/brcm,bcm4329-fmac.yaml
+++ b/Documentation/devicetree/bindings/net/wireless/brcm,bcm4329-fmac.yaml
@@ -4,7 +4,7 @@
$id: http://devicetree.org/schemas/net/wireless/brcm,bcm4329-fmac.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
-title: Broadcom BCM4329 family fullmac wireless SDIO devices
+title: Broadcom BCM4329 family fullmac wireless SDIO/PCIE devices
maintainers:
- Arend van Spriel
@@ -41,11 +41,17 @@ properties:
- cypress,cyw4373-fmac
- cypress,cyw43012-fmac
- const: brcm,bcm4329-fmac
- - const: brcm,bcm4329-fmac
+ - enum:
+ - brcm,bcm4329-fmac
+ - pci14e4,43dc # BCM4355
+ - pci14e4,4464 # BCM4364
+ - pci14e4,4488 # BCM4377
+ - pci14e4,4425 # BCM4378
+ - pci14e4,4433 # BCM4387
reg:
- description: SDIO function number for the device, for most cases
- this will be 1.
+ description: SDIO function number for the device (for most cases
+ this will be 1) or PCI device identifier.
interrupts:
maxItems: 1
@@ -85,6 +91,31 @@ properties:
takes precedence.
type: boolean
+ brcm,cal-blob:
+ $ref: /schemas/types.yaml#/definitions/uint8-array
+ description: A per-device calibration blob for the Wi-Fi radio. This
+ should be filled in by the bootloader from platform configuration
+ data, if necessary, and will be uploaded to the device if present.
+
+ brcm,board-type:
+ $ref: /schemas/types.yaml#/definitions/string
+ description: Overrides the board type, which is normally the compatible of
+ the root node. This can be used to decouple the overall system board or
+ device name from the board type for WiFi purposes, which is used to
+ construct firmware and NVRAM configuration filenames. This allows
+ multiple devices that share the same module or WiFi subsystem
+ characteristics to share the same firmware/NVRAM files. On Apple platforms,
+ this should be the Apple module-instance codename prefixed by "apple,",
+ e.g. "apple,honshu".
+
+ apple,antenna-sku:
+ $ref: /schemas/types.yaml#/definitions/string
+ description: Antenna SKU used to identify a specific antenna configuration
+ on Apple platforms. This is used to build firmware filenames, to allow
+ platforms with different antenna configs to have different firmware and/or
+ NVRAM. This would normally be filled in by the bootloader from platform
+ configuration data.
+
required:
- compatible
- reg
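
With the PCI compatibles added, a discrete PCIe module is described directly
under its root port node; a minimal sketch for a BCM4378-based Apple machine
(the board type, antenna SKU and MAC address would normally be filled in by
the bootloader and are illustrative here):

    /* child node of the PCIe root port the module sits behind */
    wifi0: network@0,0 {
        compatible = "pci14e4,4425";
        reg = <0x10000 0x0 0x0 0x0 0x0>;
        brcm,board-type = "apple,honshu";
        apple,antenna-sku = "XX";
        local-mac-address = [00 00 00 00 00 00];
    };
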
diff --git a/Documentation/devicetree/bindings/net/wireless/microchip,wilc1000.yaml b/Documentation/devicetree/bindings/net/wireless/microchip,wilc1000.yaml
index 60de78f1bc7b..b3405f284580 100644
--- a/Documentation/devicetree/bindings/net/wireless/microchip,wilc1000.yaml
+++ b/Documentation/devicetree/bindings/net/wireless/microchip,wilc1000.yaml
@@ -20,8 +20,6 @@ properties:
reg: true
- spi-max-frequency: true
-
interrupts:
maxItems: 1
@@ -51,7 +49,10 @@ required:
- compatible
- interrupts
-additionalProperties: false
+allOf:
+ - $ref: /schemas/spi/spi-peripheral-props.yaml#
+
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/devicetree/bindings/net/wireless/qcom,ath11k.yaml b/Documentation/devicetree/bindings/net/wireless/qcom,ath11k.yaml
index a677b056f112..f7cf135aa37f 100644
--- a/Documentation/devicetree/bindings/net/wireless/qcom,ath11k.yaml
+++ b/Documentation/devicetree/bindings/net/wireless/qcom,ath11k.yaml
@@ -66,6 +66,18 @@ properties:
required:
- iommus
+ qcom,smem-states:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ description: State bits used by the AP to signal the WLAN Q6.
+ items:
+ - description: Signal bits used to enable/disable low power mode
+ on WCN6750 in the case of WoW (Wake on Wireless).
+
+ qcom,smem-state-names:
+ description: The names of the state bits used for SMP2P output.
+ items:
+ - const: wlan-smp2p-out
+
required:
- compatible
- reg
@@ -448,6 +460,8 @@ examples:
;
qcom,rproc = <&remoteproc_wpss>;
memory-region = <&wlan_fw_mem>, <&wlan_ce_mem>;
+ qcom,smem-states = <&wlan_smp2p_out 0>;
+ qcom,smem-state-names = "wlan-smp2p-out";
wifi-firmware {
iommus = <&apps_smmu 0x1c02 0x1>;
};
diff --git a/Documentation/devicetree/bindings/net/wireless/silabs,wfx.yaml b/Documentation/devicetree/bindings/net/wireless/silabs,wfx.yaml
index 76199a67d628..b35d2f3ad1ad 100644
--- a/Documentation/devicetree/bindings/net/wireless/silabs,wfx.yaml
+++ b/Documentation/devicetree/bindings/net/wireless/silabs,wfx.yaml
@@ -29,12 +29,6 @@ description: >
Documentation/devicetree/bindings/mmc/mmc-pwrseq-simple.yaml for more
information.
- For SPI:
-
- In add of the properties below, please consult
- Documentation/devicetree/bindings/spi/spi-controller.yaml for optional SPI
- related properties.
-
properties:
compatible:
items:
@@ -52,8 +46,6 @@ properties:
bindings.
maxItems: 1
- spi-max-frequency: true
-
interrupts:
description: The interrupt line. Should be IRQ_TYPE_EDGE_RISING. When SPI is
used, this property is required. When SDIO is used, the "in-band"
@@ -84,12 +76,15 @@ properties:
mac-address: true
-additionalProperties: false
-
required:
- compatible
- reg
+allOf:
+ - $ref: /schemas/spi/spi-peripheral-props.yaml#
+
+unevaluatedProperties: false
+
examples:
- |
#include
diff --git a/Documentation/devicetree/bindings/net/wireless/ti,wlcore.yaml b/Documentation/devicetree/bindings/net/wireless/ti,wlcore.yaml
index d68bb2ec1f7e..e31456730e9f 100644
--- a/Documentation/devicetree/bindings/net/wireless/ti,wlcore.yaml
+++ b/Documentation/devicetree/bindings/net/wireless/ti,wlcore.yaml
@@ -36,8 +36,6 @@ properties:
This is required when connected via SPI, and optional when connected via
SDIO.
- spi-max-frequency: true
-
interrupts:
minItems: 1
maxItems: 2
@@ -69,20 +67,22 @@ required:
- compatible
- interrupts
-if:
- properties:
- compatible:
- contains:
- enum:
- - ti,wl1271
- - ti,wl1273
- - ti,wl1281
- - ti,wl1283
-then:
- required:
- - ref-clock-frequency
+allOf:
+ - $ref: /schemas/spi/spi-peripheral-props.yaml#
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - ti,wl1271
+ - ti,wl1273
+ - ti,wl1281
+ - ti,wl1283
+ then:
+ required:
+ - ref-clock-frequency
-additionalProperties: false
+unevaluatedProperties: false
examples:
- |
diff --git a/Documentation/networking/bonding.rst b/Documentation/networking/bonding.rst
index 7823a069a903..96cd7a26f3d9 100644
--- a/Documentation/networking/bonding.rst
+++ b/Documentation/networking/bonding.rst
@@ -846,7 +846,7 @@ primary_reselect
tlb_dynamic_lb
Specifies if dynamic shuffling of flows is enabled in tlb
- mode. The value has no effect on any other modes.
+ or alb mode. The value has no effect on any other modes.
The default behavior of tlb mode is to shuffle active flows across
slaves based on the load in that interval. This gives nice lb
diff --git a/Documentation/networking/decnet.rst b/Documentation/networking/decnet.rst
deleted file mode 100644
index b8bc11ff8370..000000000000
--- a/Documentation/networking/decnet.rst
+++ /dev/null
@@ -1,243 +0,0 @@
-.. SPDX-License-Identifier: GPL-2.0
-
-=========================================
-Linux DECnet Networking Layer Information
-=========================================
-
-1. Other documentation....
-==========================
-
- - Project Home Pages
- - http://www.chygwyn.com/ - Kernel info
- - http://linux-decnet.sourceforge.net/ - Userland tools
- - http://www.sourceforge.net/projects/linux-decnet/ - Status page
-
-2. Configuring the kernel
-=========================
-
-Be sure to turn on the following options:
-
- - CONFIG_DECNET (obviously)
- - CONFIG_PROC_FS (to see what's going on)
- - CONFIG_SYSCTL (for easy configuration)
-
-if you want to try out router support (not properly debugged yet)
-you'll need the following options as well...
-
- - CONFIG_DECNET_ROUTER (to be able to add/delete routes)
- - CONFIG_NETFILTER (will be required for the DECnet routing daemon)
-
-Don't turn on SIOCGIFCONF support for DECnet unless you are really sure
-that you need it, in general you won't and it can cause ifconfig to
-malfunction.
-
-Run time configuration has changed slightly from the 2.4 system. If you
-want to configure an endnode, then the simplified procedure is as follows:
-
- - Set the MAC address on your ethernet card before starting _any_ other
- network protocols.
-
-As soon as your network card is brought into the UP state, DECnet should
-start working. If you need something more complicated or are unsure how
-to set the MAC address, see the next section. Also all configurations which
-worked with 2.4 will work under 2.5 with no change.
-
-3. Command line options
-=======================
-
-You can set a DECnet address on the kernel command line for compatibility
-with the 2.4 configuration procedure, but in general it's not needed any more.
-If you do st a DECnet address on the command line, it has only one purpose
-which is that its added to the addresses on the loopback device.
-
-With 2.4 kernels, DECnet would only recognise addresses as local if they
-were added to the loopback device. In 2.5, any local interface address
-can be used to loop back to the local machine. Of course this does not
-prevent you adding further addresses to the loopback device if you
-want to.
-
-N.B. Since the address list of an interface determines the addresses for
-which "hello" messages are sent, if you don't set an address on the loopback
-interface then you won't see any entries in /proc/net/neigh for the local
-host until such time as you start a connection. This doesn't affect the
-operation of the local communications in any other way though.
-
-The kernel command line takes options looking like the following::
-
- decnet.addr=1,2
-
-the two numbers are the node address 1,2 = 1.2 For 2.2.xx kernels
-and early 2.3.xx kernels, you must use a comma when specifying the
-DECnet address like this. For more recent 2.3.xx kernels, you may
-use almost any character except space, although a `.` would be the most
-obvious choice :-)
-
-There used to be a third number specifying the node type. This option
-has gone away in favour of a per interface node type. This is now set
-using /proc/sys/net/decnet/conf//forwarding. This file can be
-set with a single digit, 0=EndNode, 1=L1 Router and 2=L2 Router.
-
-There are also equivalent options for modules. The node address can
-also be set through the /proc/sys/net/decnet/ files, as can other system
-parameters.
-
-Currently the only supported devices are ethernet and ip_gre. The
-ethernet address of your ethernet card has to be set according to the DECnet
-address of the node in order for it to be autoconfigured (and then appear in
-/proc/net/decnet_dev). There is a utility available at the above
-FTP sites called dn2ethaddr which can compute the correct ethernet
-address to use. The address can be set by ifconfig either before or
-at the time the device is brought up. If you are using RedHat you can
-add the line::
-
- MACADDR=AA:00:04:00:03:04
-
-or something similar, to /etc/sysconfig/network-scripts/ifcfg-eth0 or
-wherever your network card's configuration lives. Setting the MAC address
-of your ethernet card to an address starting with "hi-ord" will cause a
-DECnet address which matches to be added to the interface (which you can
-verify with iproute2).
-
-The default device for routing can be set through the /proc filesystem
-by setting /proc/sys/net/decnet/default_device to the
-device you want DECnet to route packets out of when no specific route
-is available. Usually this will be eth0, for example::
-
- echo -n "eth0" >/proc/sys/net/decnet/default_device
-
-If you don't set the default device, then it will default to the first
-ethernet card which has been autoconfigured as described above. You can
-confirm that by looking in the default_device file of course.
-
-There is a list of what the other files under /proc/sys/net/decnet/ do
-on the kernel patch web site (shown above).
-
-4. Run time kernel configuration
-================================
-
-
-This is either done through the sysctl/proc interface (see the kernel web
-pages for details on what the various options do) or through the iproute2
-package in the same way as IPv4/6 configuration is performed.
-
-Documentation for iproute2 is included with the package, although there is
-as yet no specific section on DECnet, most of the features apply to both
-IP and DECnet, albeit with DECnet addresses instead of IP addresses and
-a reduced functionality.
-
-If you want to configure a DECnet router you'll need the iproute2 package
-since its the _only_ way to add and delete routes currently. Eventually
-there will be a routing daemon to send and receive routing messages for
-each interface and update the kernel routing tables accordingly. The
-routing daemon will use netfilter to listen to routing packets, and
-rtnetlink to update the kernels routing tables.
-
-The DECnet raw socket layer has been removed since it was there purely
-for use by the routing daemon which will now use netfilter (a much cleaner
-and more generic solution) instead.
-
-5. How can I tell if its working?
-=================================
-
-Here is a quick guide of what to look for in order to know if your DECnet
-kernel subsystem is working.
-
- - Is the node address set (see /proc/sys/net/decnet/node_address)
- - Is the node of the correct type
- (see /proc/sys/net/decnet/conf//forwarding)
- - Is the Ethernet MAC address of each Ethernet card set to match
- the DECnet address. If in doubt use the dn2ethaddr utility available
- at the ftp archive.
- - If the previous two steps are satisfied, and the Ethernet card is up,
- you should find that it is listed in /proc/net/decnet_dev and also
- that it appears as a directory in /proc/sys/net/decnet/conf/. The
- loopback device (lo) should also appear and is required to communicate
- within a node.
- - If you have any DECnet routers on your network, they should appear
- in /proc/net/decnet_neigh, otherwise this file will only contain the
- entry for the node itself (if it doesn't check to see if lo is up).
- - If you want to send to any node which is not listed in the
- /proc/net/decnet_neigh file, you'll need to set the default device
- to point to an Ethernet card with connection to a router. This is
- again done with the /proc/sys/net/decnet/default_device file.
- - Try starting a simple server and client, like the dnping/dnmirror
- over the loopback interface. With luck they should communicate.
- For this step and those after, you'll need the DECnet library
- which can be obtained from the above ftp sites as well as the
- actual utilities themselves.
- - If this seems to work, then try talking to a node on your local
- network, and see if you can obtain the same results.
- - At this point you are on your own... :-)
-
-6. How to send a bug report
-===========================
-
-If you've found a bug and want to report it, then there are several things
-you can do to help me work out exactly what it is that is wrong. Useful
-information (_most_ of which _is_ _essential_) includes:
-
- - What kernel version are you running ?
- - What version of the patch are you running ?
- - How far though the above set of tests can you get ?
- - What is in the /proc/decnet* files and /proc/sys/net/decnet/* files ?
- - Which services are you running ?
- - Which client caused the problem ?
- - How much data was being transferred ?
- - Was the network congested ?
- - How can the problem be reproduced ?
- - Can you use tcpdump to get a trace ? (N.B. Most (all?) versions of
- tcpdump don't understand how to dump DECnet properly, so including
- the hex listing of the packet contents is _essential_, usually the -x flag.
- You may also need to increase the length grabbed with the -s flag. The
- -e flag also provides very useful information (ethernet MAC addresses))
-
-7. MAC FAQ
-==========
-
-A quick FAQ on ethernet MAC addresses to explain how Linux and DECnet
-interact and how to get the best performance from your hardware.
-
-Ethernet cards are designed to normally only pass received network frames
-to a host computer when they are addressed to it, or to the broadcast address.
-
-Linux has an interface which allows the setting of extra addresses for
-an ethernet card to listen to. If the ethernet card supports it, the
-filtering operation will be done in hardware, if not the extra unwanted packets
-received will be discarded by the host computer. In the latter case,
-significant processor time and bus bandwidth can be used up on a busy
-network (see the NAPI documentation for a longer explanation of these
-effects).
-
-DECnet makes use of this interface to allow running DECnet on an ethernet
-card which has already been configured using TCP/IP (presumably using the
-built in MAC address of the card, as usual) and/or to allow multiple DECnet
-addresses on each physical interface. If you do this, be aware that if your
-ethernet card doesn't support perfect hashing in its MAC address filter
-then your computer will be doing more work than required. Some cards
-will simply set themselves into promiscuous mode in order to receive
-packets from the DECnet specified addresses. So if you have one of these
-cards its better to set the MAC address of the card as described above
-to gain the best efficiency. Better still is to use a card which supports
-NAPI as well.
-
-
-8. Mailing list
-===============
-
-If you are keen to get involved in development, or want to ask questions
-about configuration, or even just report bugs, then there is a mailing
-list that you can join, details are at:
-
-http://sourceforge.net/mail/?group_id=4993
-
-9. Legal Info
-=============
-
-The Linux DECnet project team have placed their code under the GPL. The
-software is provided "as is" and without warranty express or implied.
-DECnet is a trademark of Compaq. This software is not a product of
-Compaq. We acknowledge the help of people at Compaq in providing extra
-documentation above and beyond what was previously publicly available.
-
-Steve Whitehouse
-
diff --git a/Documentation/networking/device_drivers/can/freescale/flexcan.rst b/Documentation/networking/device_drivers/can/freescale/flexcan.rst
index 4e3eec6cecd2..106cd2890135 100644
--- a/Documentation/networking/device_drivers/can/freescale/flexcan.rst
+++ b/Documentation/networking/device_drivers/can/freescale/flexcan.rst
@@ -5,7 +5,7 @@ Flexcan CAN Controller driver
=============================
Authors: Marc Kleine-Budde ,
-Dario Binacchi
+Dario Binacchi
On/off RTR frames reception
===========================
diff --git a/Documentation/networking/device_drivers/ethernet/index.rst b/Documentation/networking/device_drivers/ethernet/index.rst
index 7f1777173abb..5196905582c5 100644
--- a/Documentation/networking/device_drivers/ethernet/index.rst
+++ b/Documentation/networking/device_drivers/ethernet/index.rst
@@ -52,6 +52,7 @@ Contents:
ti/tlan
toshiba/spider_net
wangxun/txgbe
+ wangxun/ngbe
.. only:: subproject and html
diff --git a/Documentation/networking/device_drivers/ethernet/wangxun/ngbe.rst b/Documentation/networking/device_drivers/ethernet/wangxun/ngbe.rst
new file mode 100644
index 000000000000..43a02f9943e1
--- /dev/null
+++ b/Documentation/networking/device_drivers/ethernet/wangxun/ngbe.rst
@@ -0,0 +1,14 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=============================================================
+Linux Base Driver for WangXun(R) Gigabit PCI Express Adapters
+=============================================================
+
+WangXun Gigabit Linux driver.
+Copyright (c) 2019 - 2022 Beijing WangXun Technology Co., Ltd.
+
+Support
+=======
+ If you have problems with the software or hardware, please contact our
+ customer support team via email at nic-support@net-swift.com or check our website
+ at https://www.net-swift.com
diff --git a/Documentation/networking/devlink/ice.rst b/Documentation/networking/devlink/ice.rst
index 8c082b139bbd..0c89ceb8986d 100644
--- a/Documentation/networking/devlink/ice.rst
+++ b/Documentation/networking/devlink/ice.rst
@@ -139,6 +139,42 @@ EMP firmware image.
The driver does not currently support reloading the driver via
``DEVLINK_RELOAD_ACTION_DRIVER_REINIT``.
+Port split
+==========
+
+The ``ice`` driver supports port splitting only for port 0, as the FW has
+a predefined set of available port split options for the whole device.
+
+A system reboot is required for port split to be applied.
+
+The following command will select the port split option with 4 ports:
+
+.. code:: shell
+
+ $ devlink port split pci/0000:16:00.0/0 count 4
+
+The list of all available port options will be printed to dynamic debug after
+each ``split`` and ``unsplit`` command. The first option is the default.
+
+.. code:: shell
+
+ ice 0000:16:00.0: Available port split options and max port speeds (Gbps):
+ ice 0000:16:00.0: Status Split Quad 0 Quad 1
+ ice 0000:16:00.0: count L0 L1 L2 L3 L4 L5 L6 L7
+ ice 0000:16:00.0: Active 2 100 - - - 100 - - -
+ ice 0000:16:00.0: 2 50 - 50 - - - - -
+ ice 0000:16:00.0: Pending 4 25 25 25 25 - - - -
+ ice 0000:16:00.0: 4 25 25 - - 25 25 - -
+ ice 0000:16:00.0: 8 10 10 10 10 10 10 10 10
+ ice 0000:16:00.0: 1 100 - - - - - - -
+
+There could be multiple FW port options with the same port split count. When
+the same port split count request is issued again, the next FW port option with
+the same port split count will be selected.
+
+``devlink port unsplit`` will select the option with a split count of 1. If
+there is no FW option available with split count 1, you will receive an error.
+
Regions
=======
diff --git a/Documentation/networking/devlink/index.rst b/Documentation/networking/devlink/index.rst
index e3a5f985673e..4b653d040627 100644
--- a/Documentation/networking/devlink/index.rst
+++ b/Documentation/networking/devlink/index.rst
@@ -13,10 +13,8 @@ new APIs prefixed by ``devl_*``. The older APIs handle all the locking
in devlink core, but don't allow registration of most sub-objects once
the main devlink object is itself registered. The newer ``devl_*`` APIs assume
the devlink instance lock is already held. Drivers can take the instance
-lock by calling ``devl_lock()``. It is also held in most of the callbacks.
-Eventually all callbacks will be invoked under the devlink instance lock,
-refer to the use of the ``DEVLINK_NL_FLAG_NO_LOCK`` flag in devlink core
-to find out which callbacks are not converted, yet.
+lock by calling ``devl_lock()``. It is also held in all callbacks of devlink
+netlink commands.
Drivers are encouraged to use the devlink instance lock for their own needs.
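+
+As a minimal, hedged sketch (assuming a driver that already holds a
+``struct devlink *`` instance and embeds a ``struct devlink_port`` in its
+private data), taking the instance lock explicitly around a ``devl_*`` call
+could look like this; the port index used here is only a placeholder:
+
+.. code-block:: c
+
+	devl_lock(devlink);
+	/* devl_* APIs expect the instance lock to already be held. */
+	err = devl_port_register(devlink, &priv->dl_port, 0);
+	devl_unlock(devlink);
+	if (err)
+		return err;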
diff --git a/Documentation/networking/dsa/configuration.rst b/Documentation/networking/dsa/configuration.rst
index 2b08f1a772d3..827701f8cbfe 100644
--- a/Documentation/networking/dsa/configuration.rst
+++ b/Documentation/networking/dsa/configuration.rst
@@ -49,6 +49,9 @@ In this documentation the following Ethernet interfaces are used:
*eth0*
the master interface
+*eth1*
+ another master interface
+
*lan1*
a slave interface
@@ -360,3 +363,96 @@ the ``self`` flag) has been removed. This results in the following changes:
Script writers are therefore encouraged to use the ``master static`` set of
flags when working with bridge FDB entries on DSA switch interfaces.
+
+Affinity of user ports to CPU ports
+-----------------------------------
+
+Typically, DSA switches are attached to the host via a single Ethernet
+interface, but in cases where the switch chip is discrete, the hardware design
+may permit the use of 2 or more ports connected to the host, for an increase in
+termination throughput.
+
+DSA can make use of multiple CPU ports in two ways. First, it is possible to
+statically assign the termination traffic associated with a certain user port
+to be processed by a certain CPU port. This way, user space can implement
+custom policies of static load balancing between user ports, by spreading the
+affinities according to the available CPU ports.
+
+Secondly, it is possible to perform load balancing between CPU ports on a per
+packet basis, rather than statically assigning user ports to CPU ports.
+This can be achieved by placing the DSA masters under a LAG interface (bonding
+or team). DSA monitors this operation and creates a mirror of this software LAG
+on the CPU ports facing the physical DSA masters that constitute the LAG slave
+devices.
+
+To make use of multiple CPU ports, the firmware (device tree) description of
+the switch must mark all the links between CPU ports and their DSA masters
+using the ``ethernet`` reference/phandle. At startup, only a single CPU port
+and DSA master will be used - the numerically first port from the firmware
+description which has an ``ethernet`` property. It is up to the user to
+configure the system for the switch to use other masters.
+
+DSA uses the ``rtnl_link_ops`` mechanism (with a "dsa" ``kind``) to allow
+changing the DSA master of a user port. The ``IFLA_DSA_MASTER`` u32 netlink
+attribute contains the ifindex of the master device that handles each slave
+device. The DSA master must be a valid candidate based on firmware node
+information, or a LAG interface which contains only slaves which are valid
+candidates.
+
+Using iproute2, the following manipulations are possible:
+
+ .. code-block:: sh
+
+ # See the DSA master in current use
+ ip -d link show dev swp0
+ (...)
+ dsa master eth0
+
+ # Static CPU port distribution
+ ip link set swp0 type dsa master eth1
+ ip link set swp1 type dsa master eth0
+ ip link set swp2 type dsa master eth1
+ ip link set swp3 type dsa master eth0
+
+ # CPU ports in LAG, using explicit assignment of the DSA master
+ ip link add bond0 type bond mode balance-xor && ip link set bond0 up
+ ip link set eth1 down && ip link set eth1 master bond0
+ ip link set swp0 type dsa master bond0
+ ip link set swp1 type dsa master bond0
+ ip link set swp2 type dsa master bond0
+ ip link set swp3 type dsa master bond0
+ ip link set eth0 down && ip link set eth0 master bond0
+ ip -d link show dev swp0
+ (...)
+ dsa master bond0
+
+ # CPU ports in LAG, relying on implicit migration of the DSA master
+ ip link add bond0 type bond mode balance-xor && ip link set bond0 up
+ ip link set eth0 down && ip link set eth0 master bond0
+ ip link set eth1 down && ip link set eth1 master bond0
+ ip -d link show dev swp0
+ (...)
+ dsa master bond0
+
+Notice that in the case of CPU ports under a LAG, the use of the
+``IFLA_DSA_MASTER`` netlink attribute is not strictly needed, but rather, DSA
+reacts to the ``IFLA_MASTER`` attribute change of its present master (``eth0``)
+and migrates all user ports to the new upper of ``eth0``, ``bond0``. Similarly,
+when ``bond0`` is destroyed using ``RTM_DELLINK``, DSA migrates the user ports
+that were assigned to this interface to the first physical DSA master which is
+eligible, based on the firmware description (it effectively reverts to the
+startup configuration).
+
+In a setup with more than 2 physical CPU ports, it is therefore possible to mix
+static user to CPU port assignment with LAG between DSA masters. It is not
+possible to statically assign a user port towards a DSA master that has any
+upper interfaces (this includes LAG devices - the master must always be the LAG
+in this case).
+
+Live changing of the DSA master (and thus CPU port) affinity of a user port is
+permitted, in order to allow dynamic redistribution in response to traffic.
+
+Physical DSA masters are allowed to join and leave a LAG interface used as a
+DSA master at any time; however, DSA will reject a LAG interface as a valid
+candidate for being a DSA master unless it has at least one physical DSA master
+as a slave device.
diff --git a/Documentation/networking/dsa/dsa.rst b/Documentation/networking/dsa/dsa.rst
index d742ba6bd211..a94ddf83348a 100644
--- a/Documentation/networking/dsa/dsa.rst
+++ b/Documentation/networking/dsa/dsa.rst
@@ -303,6 +303,20 @@ These frames are then queued for transmission using the master network device
Ethernet switch will be able to process these incoming frames from the
management interface and deliver them to the physical switch port.
+When using multiple CPU ports, it is possible to stack a LAG (bonding/team)
+device between the DSA slave devices and the physical DSA masters. The LAG
+device is thus also a DSA master, but the LAG slave devices continue to be DSA
+masters as well (just with no user port assigned to them; this is needed for
+recovery in case the LAG DSA master disappears). Thus, the data path of the LAG
+DSA master is used asymmetrically. On RX, the ``ETH_P_XDSA`` handler, which
+calls ``dsa_switch_rcv()``, is invoked early (on the physical DSA master;
+LAG slave). Therefore, the RX data path of the LAG DSA master is not used.
+On the other hand, TX takes place linearly: ``dsa_slave_xmit`` calls
+``dsa_enqueue_skb``, which calls ``dev_queue_xmit`` towards the LAG DSA master.
+The latter calls ``dev_queue_xmit`` towards one physical DSA master or the
+other, and in both cases, the packet exits the system through a hardware path
+towards the switch.
+
Graphical representation
------------------------
@@ -629,6 +643,24 @@ Switch configuration
PHY cannot be found. In this case, probing of the DSA switch continues
without that particular port.
+- ``port_change_master``: method through which the affinity (association used
+ for traffic termination purposes) between a user port and a CPU port can be
+ changed. By default all user ports from a tree are assigned to the first
+ available CPU port that makes sense for them (most of the times this means
+ the user ports of a tree are all assigned to the same CPU port, except for H
+ topologies as described in commit 2c0b03258b8b). The ``port`` argument
+ represents the index of the user port, and the ``master`` argument represents
+ the new DSA master ``net_device``. The CPU port associated with the new
+ master can be retrieved by looking at ``struct dsa_port *cpu_dp =
+ master->dsa_ptr``. Additionally, the master can also be a LAG device where
+ all the slave devices are physical DSA masters. LAG DSA masters also have a
+ valid ``master->dsa_ptr`` pointer, however this is not unique, but rather a
+ duplicate of the first physical DSA master's (LAG slave) ``dsa_ptr``. In case
+ of a LAG DSA master, a further call to ``port_lag_join`` will be emitted
+ separately for the physical CPU ports associated with the physical DSA
+ masters, requesting them to create a hardware LAG associated with the LAG
+ interface.
+
PHY devices and link management
-------------------------------
@@ -1095,9 +1127,3 @@ capable hardware, but does not enforce a strict switch device driver model. On
the other DSA enforces a fairly strict device driver model, and deals with most
of the switch specific. At some point we should envision a merger between these
two subsystems and get the best of both worlds.
-
-Other hanging fruits
---------------------
-
-- allowing more than one CPU/management interface:
- http://comments.gmane.org/gmane.linux.network/365657
diff --git a/Documentation/networking/ethtool-netlink.rst b/Documentation/networking/ethtool-netlink.rst
index dbca3e9ec782..d578b8bcd8a4 100644
--- a/Documentation/networking/ethtool-netlink.rst
+++ b/Documentation/networking/ethtool-netlink.rst
@@ -220,6 +220,8 @@ Userspace to kernel:
``ETHTOOL_MSG_PHC_VCLOCKS_GET`` get PHC virtual clocks info
``ETHTOOL_MSG_MODULE_SET`` set transceiver module parameters
``ETHTOOL_MSG_MODULE_GET`` get transceiver module parameters
+ ``ETHTOOL_MSG_PSE_SET`` set PSE parameters
+ ``ETHTOOL_MSG_PSE_GET`` get PSE parameters
===================================== =================================
Kernel to userspace:
@@ -260,6 +262,7 @@ Kernel to userspace:
``ETHTOOL_MSG_STATS_GET_REPLY`` standard statistics
``ETHTOOL_MSG_PHC_VCLOCKS_GET_REPLY`` PHC virtual clocks info
``ETHTOOL_MSG_MODULE_GET_REPLY`` transceiver module parameters
+ ``ETHTOOL_MSG_PSE_GET_REPLY`` PSE parameters
======================================== =================================
``GET`` requests are sent by userspace applications to retrieve device
@@ -426,6 +429,7 @@ Kernel response contents:
``ETHTOOL_A_LINKMODES_DUPLEX`` u8 duplex mode
``ETHTOOL_A_LINKMODES_MASTER_SLAVE_CFG`` u8 Master/slave port mode
``ETHTOOL_A_LINKMODES_MASTER_SLAVE_STATE`` u8 Master/slave port state
+ ``ETHTOOL_A_LINKMODES_RATE_MATCHING`` u8 PHY rate matching
========================================== ====== ==========================
For ``ETHTOOL_A_LINKMODES_OURS``, value represents advertised modes and mask
@@ -449,6 +453,7 @@ Request contents:
``ETHTOOL_A_LINKMODES_SPEED`` u32 link speed (Mb/s)
``ETHTOOL_A_LINKMODES_DUPLEX`` u8 duplex mode
``ETHTOOL_A_LINKMODES_MASTER_SLAVE_CFG`` u8 Master/slave port mode
+ ``ETHTOOL_A_LINKMODES_RATE_MATCHING`` u8 PHY rate matching
``ETHTOOL_A_LINKMODES_LANES`` u32 lanes
========================================== ====== ==========================
@@ -1625,6 +1630,62 @@ For SFF-8636 modules, low power mode is forced by the host according to table
For CMIS modules, low power mode is forced by the host according to table 6-12
in revision 5.0 of the specification.
+PSE_GET
+=======
+
+Gets PSE attributes.
+
+Request contents:
+
+ ===================================== ====== ==========================
+ ``ETHTOOL_A_PSE_HEADER`` nested request header
+ ===================================== ====== ==========================
+
+Kernel response contents:
+
+ ====================================== ====== =============================
+ ``ETHTOOL_A_PSE_HEADER`` nested reply header
+ ``ETHTOOL_A_PODL_PSE_ADMIN_STATE`` u32 Operational state of the PoDL
+ PSE functions
+ ``ETHTOOL_A_PODL_PSE_PW_D_STATUS`` u32 power detection status of the
+ PoDL PSE.
+ ====================================== ====== =============================
+
+When set, the optional ``ETHTOOL_A_PODL_PSE_ADMIN_STATE`` attribute identifies
+the operational state of the PoDL PSE functions. The operational state of the
+PSE function can be changed using the ``ETHTOOL_A_PODL_PSE_ADMIN_CONTROL``
+action. This option corresponds to ``IEEE 802.3-2018`` 30.15.1.1.2
+aPoDLPSEAdminState. Possible values are:
+
+.. kernel-doc:: include/uapi/linux/ethtool.h
+ :identifiers: ethtool_podl_pse_admin_state
+
+When set, the optional ``ETHTOOL_A_PODL_PSE_PW_D_STATUS`` attribute identifies
+the power detection status of the PoDL PSE. The status depends on the internal
+PSE state machine and automatic PD classification support. This option
+corresponds to ``IEEE 802.3-2018`` 30.15.1.1.3 aPoDLPSEPowerDetectionStatus.
+Possible values are:
+
+.. kernel-doc:: include/uapi/linux/ethtool.h
+ :identifiers: ethtool_podl_pse_pw_d_status
+
+PSE_SET
+=======
+
+Sets PSE parameters.
+
+Request contents:
+
+ ====================================== ====== =============================
+ ``ETHTOOL_A_PSE_HEADER`` nested request header
+ ``ETHTOOL_A_PODL_PSE_ADMIN_CONTROL`` u32 Control PoDL PSE Admin state
+ ====================================== ====== =============================
+
+When set, the optional ``ETHTOOL_A_PODL_PSE_ADMIN_CONTROL`` attribute is used
+to control PoDL PSE Admin functions. This option implements
+``IEEE 802.3-2018`` 30.15.1.2.1 acPoDLPSEAdminControl. See
+``ETHTOOL_A_PODL_PSE_ADMIN_STATE`` for supported values.
+
Request translation
===================
diff --git a/Documentation/networking/index.rst b/Documentation/networking/index.rst
index 03b215bddde8..16a153bcc5fe 100644
--- a/Documentation/networking/index.rst
+++ b/Documentation/networking/index.rst
@@ -47,7 +47,6 @@ Contents:
cdc_mbim
dccp
dctcp
- decnet
dns_resolver
driver
eql
@@ -93,6 +92,7 @@ Contents:
radiotap-headers
rds
regulatory
+ representors
rxrpc
sctp
secid
diff --git a/Documentation/networking/ip-sysctl.rst b/Documentation/networking/ip-sysctl.rst
index a759872a2883..e7b3fa7bb3f7 100644
--- a/Documentation/networking/ip-sysctl.rst
+++ b/Documentation/networking/ip-sysctl.rst
@@ -1040,6 +1040,35 @@ tcp_challenge_ack_limit - INTEGER
TCP stack implements per TCP socket limits anyway.
Default: INT_MAX (unlimited)
+tcp_ehash_entries - INTEGER
+ Show the number of hash buckets for TCP sockets in the current
+ networking namespace.
+
+ A negative value means the networking namespace does not own its
+ hash buckets and shares the initial networking namespace's one.
+
+tcp_child_ehash_entries - INTEGER
+ Control the number of hash buckets for TCP sockets in the child
+ networking namespace, which must be set before clone() or unshare().
+
+ If the value is not 0, the kernel uses a value rounded up to 2^n
+ as the actual hash bucket size. 0 is a special value, meaning
+ the child networking namespace will share the initial networking
+ namespace's hash buckets.
+
+ Note that the child will use the global one in case the kernel
+ fails to allocate enough memory. In addition, the global hash
+ buckets are spread over available NUMA nodes, but the allocation
+ of the child hash table depends on the current process's NUMA
+ policy, which could result in performance differences.
+
+ Note also that the default values of tcp_max_tw_buckets and
+ tcp_max_syn_backlog depend on the hash bucket size.
+
+ Possible values: 0, 2^n (n: 0 - 24 (16Mi))
+
+ Default: 0
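+
+ As an illustration only (a hedged sketch in C; the bucket count 16384 is an
+ arbitrary example and CAP_SYS_ADMIN is required), the sysctl can be written
+ in the parent namespace right before the child namespace is created::
+
+	#define _GNU_SOURCE
+	#include <fcntl.h>
+	#include <sched.h>
+	#include <stdlib.h>
+	#include <unistd.h>
+
+	int main(void)
+	{
+		/* Must be written before clone()/unshare() creates the netns. */
+		int fd = open("/proc/sys/net/ipv4/tcp_child_ehash_entries", O_WRONLY);
+
+		if (fd < 0 || write(fd, "16384", 5) != 5)
+			return 1;
+		close(fd);
+
+		if (unshare(CLONE_NEWNET))
+			return 1;
+
+		/* The child namespace now reports its own bucket count. */
+		return system("cat /proc/sys/net/ipv4/tcp_ehash_entries");
+	}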
+
UDP variables
=============
diff --git a/Documentation/networking/phy.rst b/Documentation/networking/phy.rst
index 704f31da5167..06f4fcdb58b6 100644
--- a/Documentation/networking/phy.rst
+++ b/Documentation/networking/phy.rst
@@ -308,6 +308,21 @@ Some of the interface modes are described below:
rate of 125Mpbs using a 4B/5B encoding scheme, resulting in an underlying
data rate of 100Mpbs.
+``PHY_INTERFACE_MODE_QUSGMII``
+ This defines the Cisco Quad USGMII mode, which is the Quad variant of
+ the USGMII (Universal SGMII) link. It's very similar to QSGMII, but uses
+ a Packet Control Header (PCH) instead of the 7-byte preamble to carry not
+ only the port id, but also so-called "extensions". The only documented
+ extension so far in the specification is the inclusion of timestamps, for
+ PTP-enabled PHYs. This mode isn't compatible with QSGMII, but offers the
+ same capabilities in terms of link speed and negotiation.
+
+``PHY_INTERFACE_MODE_1000BASEKX``
+ This is 1000BASE-X as defined by IEEE 802.3 Clause 36 with Clause 73
+ autonegotiation. Generally, it will be used with a Clause 70 PMD. To
+ contrast with the 1000BASE-X phy mode used for Clause 38 and 39 PMDs, this
+ interface mode has different autonegotiation and only supports full duplex.
+
Pause frames / flow control
===========================
diff --git a/Documentation/networking/representors.rst b/Documentation/networking/representors.rst
new file mode 100644
index 000000000000..ee1f5cd54496
--- /dev/null
+++ b/Documentation/networking/representors.rst
@@ -0,0 +1,259 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=============================
+Network Function Representors
+=============================
+
+This document describes the semantics and usage of representor netdevices, as
+used to control internal switching on SmartNICs. For the closely related port
+representors on physical (multi-port) switches, see
+:ref:`Documentation/networking/switchdev.rst <switchdev>`.
+
+Motivation
+----------
+
+Since the mid-2010s, network cards have started offering more complex
+virtualisation capabilities than the legacy SR-IOV approach (with its simple
+MAC/VLAN-based switching model) can support. This led to a desire to offload
+software-defined networks (such as OpenVSwitch) to these NICs to specify the
+network connectivity of each function. The resulting designs are variously
+called SmartNICs or DPUs.
+
+Network function representors bring the standard Linux networking stack to
+virtual switches and IOV devices. Just as each physical port of a Linux-
+controlled switch has a separate netdev, so does each virtual port of a virtual
+switch.
+When the system boots, and before any offload is configured, all packets from
+the virtual functions appear in the networking stack of the PF via the
+representors. The PF can thus always communicate freely with the virtual
+functions.
+The PF can configure standard Linux forwarding between representors, the uplink
+or any other netdev (routing, bridging, TC classifiers).
+
+Thus, a representor is both a control plane object (representing the function in
+administrative commands) and a data plane object (one end of a virtual pipe).
+As a virtual link endpoint, the representor can be configured like any other
+netdevice; in some cases (e.g. link state) the representee will follow the
+representor's configuration, while in others there are separate APIs to
+configure the representee.
+
+Definitions
+-----------
+
+This document uses the term "switchdev function" to refer to the PCIe function
+which has administrative control over the virtual switch on the device.
+Typically, this will be a PF, but conceivably a NIC could be configured to grant
+these administrative privileges instead to a VF or SF (subfunction).
+Depending on NIC design, a multi-port NIC might have a single switchdev function
+for the whole device or might have a separate virtual switch, and hence
+switchdev function, for each physical network port.
+If the NIC supports nested switching, there might be separate switchdev
+functions for each nested switch, in which case each switchdev function should
+only create representors for the ports on the (sub-)switch it directly
+administers.
+
+A "representee" is the object that a representor represents. So for example in
+the case of a VF representor, the representee is the corresponding VF.
+
+What does a representor do?
+---------------------------
+
+A representor has three main roles.
+
+1. It is used to configure the network connection the representee sees, e.g.
+ link up/down, MTU, etc. For instance, bringing the representor
+ administratively UP should cause the representee to see a link up / carrier
+ on event.
+2. It provides the slow path for traffic which does not hit any offloaded
+ fast-path rules in the virtual switch. Packets transmitted on the
+ representor netdevice should be delivered to the representee; packets
+ transmitted by the representee which fail to match any switching rule should
+ be received on the representor netdevice. (That is, there is a virtual pipe
+ connecting the representor to the representee, similar in concept to a veth
+ pair.)
+ This allows software switch implementations (such as OpenVSwitch or a Linux
+ bridge) to forward packets between representees and the rest of the network.
+3. It acts as a handle by which switching rules (such as TC filters) can refer
+ to the representee, allowing these rules to be offloaded.
+
+The combination of 2) and 3) means that the behaviour (apart from performance)
+should be the same whether a TC filter is offloaded or not. E.g. a TC rule
+on a VF representor applies in software to packets received on that representor
+netdevice, while in hardware offload it would apply to packets transmitted by
+the representee VF. Conversely, a mirred egress redirect to a VF representor
+corresponds in hardware to delivery directly to the representee VF.
+
+What functions should have a representor?
+-----------------------------------------
+
+Essentially, for each virtual port on the device's internal switch, there
+should be a representor.
+Some vendors have chosen to omit representors for the uplink and the physical
+network port, which can simplify usage (the uplink netdev becomes in effect the
+physical port's representor) but does not generalise to devices with multiple
+ports or uplinks.
+
+Thus, the following should all have representors:
+
+ - VFs belonging to the switchdev function.
+ - Other PFs on the local PCIe controller, and any VFs belonging to them.
+ - PFs and VFs on external PCIe controllers on the device (e.g. for any embedded
+ System-on-Chip within the SmartNIC).
+ - PFs and VFs with other personalities, including network block devices (such
+ as a vDPA virtio-blk PF backed by remote/distributed storage), if (and only
+ if) their network access is implemented through a virtual switch port. [#]_
+ Note that such functions can require a representor despite the representee
+ not having a netdev.
+ - Subfunctions (SFs) belonging to any of the above PFs or VFs, if they have
+ their own port on the switch (as opposed to using their parent PF's port).
+ - Any accelerators or plugins on the device whose interface to the network is
+ through a virtual switch port, even if they do not have a corresponding PCIe
+ PF or VF.
+
+This allows the entire switching behaviour of the NIC to be controlled through
+representor TC rules.
+
+It is a common misunderstanding to conflate virtual ports with PCIe virtual
+functions or their netdevs. While in simple cases there will be a 1:1
+correspondence between VF netdevices and VF representors, more advanced device
+configurations may not follow this.
+A PCIe function which does not have network access through the internal switch
+(not even indirectly through the hardware implementation of whatever services
+the function provides) should *not* have a representor (even if it has a
+netdev).
+Such a function has no switch virtual port for the representor to configure or
+to be the other end of the virtual pipe.
+The representor represents the virtual port, not the PCIe function nor the 'end
+user' netdevice.
+
+.. [#] The concept here is that a hardware IP stack in the device performs the
+ translation between block DMA requests and network packets, so that only
+ network packets pass through the virtual port onto the switch. The network
+ access that the IP stack "sees" would then be configurable through tc rules;
+ e.g. its traffic might all be wrapped in a specific VLAN or VxLAN. However,
+ any needed configuration of the block device *qua* block device, not being a
+ networking entity, would not be appropriate for the representor and would
+ thus use some other channel such as devlink.
+ Contrast this with the case of a virtio-blk implementation which forwards the
+ DMA requests unchanged to another PF whose driver then initiates and
+ terminates IP traffic in software; in that case the DMA traffic would *not*
+ run over the virtual switch and the virtio-blk PF should thus *not* have a
+ representor.
+
+How are representors created?
+-----------------------------
+
+The driver instance attached to the switchdev function should, for each virtual
+port on the switch, create a pure-software netdevice which has some form of
+in-kernel reference to the switchdev function's own netdevice or driver private
+data (``netdev_priv()``).
+This may be by enumerating ports at probe time, reacting dynamically to the
+creation and destruction of ports at run time, or a combination of the two.
+
+The operations of the representor netdevice will generally involve acting
+through the switchdev function. For example, ``ndo_start_xmit()`` might send
+the packet through a hardware TX queue attached to the switchdev function, with
+either packet metadata or queue configuration marking it for delivery to the
+representee.
+
+How are representors identified?
+--------------------------------
+
+The representor netdevice should *not* directly refer to a PCIe device (e.g.
+through ``net_dev->dev.parent`` / ``SET_NETDEV_DEV()``), either of the
+representee or of the switchdev function.
+Instead, it should implement the ``ndo_get_devlink_port()`` netdevice op, which
+the kernel uses to provide the ``phys_switch_id`` and ``phys_port_name`` sysfs
+nodes. (Some legacy drivers implement ``ndo_get_port_parent_id()`` and
+``ndo_get_phys_port_name()`` directly, but this is deprecated.) See
+:ref:`Documentation/networking/devlink/devlink-port.rst ` for the
+details of this API.
+
+It is expected that userland will use this information (e.g. through udev rules)
+to construct an appropriately informative name or alias for the netdevice. For
+instance if the switchdev function is ``eth4`` then a representor with a
+``phys_port_name`` of ``p0pf1vf2`` might be renamed ``eth4pf1vf2rep``.
+
+There are as yet no established conventions for naming representors which do not
+correspond to PCIe functions (e.g. accelerators and plugins).
+
+How do representors interact with TC rules?
+-------------------------------------------
+
+Any TC rule on a representor applies (in software TC) to packets received by
+that representor netdevice. Thus, if the delivery part of the rule corresponds
+to another port on the virtual switch, the driver may choose to offload it to
+hardware, applying it to packets transmitted by the representee.
+
+Similarly, since a TC mirred egress action targeting the representor would (in
+software) send the packet through the representor (and thus indirectly deliver
+it to the representee), hardware offload should interpret this as delivery to
+the representee.
+
+As a simple example, if ``PORT_DEV`` is the physical port representor and
+``REP_DEV`` is a VF representor, the following rules::
+
+ tc filter add dev $REP_DEV parent ffff: protocol ipv4 flower \
+ action mirred egress redirect dev $PORT_DEV
+ tc filter add dev $PORT_DEV parent ffff: protocol ipv4 flower skip_sw \
+ action mirred egress mirror dev $REP_DEV
+
+would mean that all IPv4 packets from the VF are sent out the physical port, and
+all IPv4 packets received on the physical port are delivered to the VF in
+addition to ``PORT_DEV``. (Note that without ``skip_sw`` on the second rule,
+the VF would get two copies, as the packet reception on ``PORT_DEV`` would
+trigger the TC rule again and mirror the packet to ``REP_DEV``.)
+
+On devices without separate port and uplink representors, ``PORT_DEV`` would
+instead be the switchdev function's own uplink netdevice.
+
+Of course the rules can (if supported by the NIC) include packet-modifying
+actions (e.g. VLAN push/pop), which should be performed by the virtual switch.
+
+Tunnel encapsulation and decapsulation are rather more complicated, as they
+involve a third netdevice (a tunnel netdev operating in metadata mode, such as
+a VxLAN device created with ``ip link add vxlan0 type vxlan external``) and
+require an IP address to be bound to the underlay device (e.g. switchdev
+function uplink netdev or port representor). TC rules such as::
+
+ tc filter add dev $REP_DEV parent ffff: flower \
+ action tunnel_key set id $VNI src_ip $LOCAL_IP dst_ip $REMOTE_IP \
+ dst_port 4789 \
+ action mirred egress redirect dev vxlan0
+ tc filter add dev vxlan0 parent ffff: flower enc_src_ip $REMOTE_IP \
+ enc_dst_ip $LOCAL_IP enc_key_id $VNI enc_dst_port 4789 \
+ action tunnel_key unset action mirred egress redirect dev $REP_DEV
+
+where ``LOCAL_IP`` is an IP address bound to ``PORT_DEV``, and ``REMOTE_IP`` is
+another IP address on the same subnet, mean that packets sent by the VF should
+be VxLAN encapsulated and sent out the physical port (the driver has to deduce
+this by a route lookup of ``LOCAL_IP`` leading to ``PORT_DEV``, and also
+perform an ARP/neighbour table lookup to find the MAC addresses to use in the
+outer Ethernet frame), while UDP packets received on the physical port with UDP
+port 4789 should be parsed as VxLAN and, if their VSID matches ``$VNI``,
+decapsulated and forwarded to the VF.
+
+If this all seems complicated, just remember the 'golden rule' of TC offload:
+the hardware should ensure the same final results as if the packets were
+processed through the slow path, traversed software TC (except ignoring any
+``skip_hw`` rules and applying any ``skip_sw`` rules) and were transmitted or
+received through the representor netdevices.
+
+Configuring the representee's MAC
+---------------------------------
+
+The representee's link state is controlled through the representor. Setting the
+representor administratively UP or DOWN should cause carrier ON or OFF at the
+representee.
+
+Setting an MTU on the representor should cause that same MTU to be reported to
+the representee.
+(On hardware that allows configuring separate and distinct MTU and MRU values,
+the representor MTU should correspond to the representee's MRU and vice-versa.)
+
+Currently there is no way to use the representor to set the station permanent
+MAC address of the representee; other methods available to do this include:
+
+ - legacy SR-IOV (``ip link set DEVICE vf NUM mac LLADDR``)
+ - devlink port function (see **devlink-port(8)** and
+ :ref:`Documentation/networking/devlink/devlink-port.rst `)
diff --git a/Documentation/networking/smc-sysctl.rst b/Documentation/networking/smc-sysctl.rst
index 742e90e6d822..6d8acdbe9be1 100644
--- a/Documentation/networking/smc-sysctl.rst
+++ b/Documentation/networking/smc-sysctl.rst
@@ -34,3 +34,28 @@ smcr_buf_type - INTEGER
- 1 - Use virtually contiguous buffers
- 2 - Mixed use of the two types. Try physically contiguous buffers first.
If not available, use virtually contiguous buffers then.
+
+smcr_testlink_time - INTEGER
+ How frequently the SMC-R link sends out TEST_LINK LLC messages to confirm
+ viability, after the last activity of connections on it. A value of 0
+ disables TEST_LINK.
+
+ Default: 30 seconds.
+
+wmem - INTEGER
+ Initial size of send buffer used by SMC sockets.
+ The default value inherits from net.ipv4.tcp_wmem[1].
+
+ The minimum value is 16KiB and there is no hard limit for the maximum
+ value, but only up to 512KiB is allowed for SMC-R and 1MiB for SMC-D.
+
+ Default: 16K
+
+rmem - INTEGER
+ Initial size of receive buffer (RMB) used by SMC sockets.
+ The default value inherits from net.ipv4.tcp_rmem[1].
+
+ The minimum value is 16KiB and there is no hard limit for the maximum
+ value, but only up to 512KiB is allowed for SMC-R and 1MiB for SMC-D.
+
+ Default: 128K
diff --git a/Documentation/networking/switchdev.rst b/Documentation/networking/switchdev.rst
index bbf272e9d607..758f1dae3fce 100644
--- a/Documentation/networking/switchdev.rst
+++ b/Documentation/networking/switchdev.rst
@@ -1,5 +1,6 @@
.. SPDX-License-Identifier: GPL-2.0
.. include::
+.. _switchdev:
===============================================
Ethernet switch device driver model (switchdev)
diff --git a/Documentation/userspace-api/index.rst b/Documentation/userspace-api/index.rst
index a61eac0c73f8..c78da9ce0ec4 100644
--- a/Documentation/userspace-api/index.rst
+++ b/Documentation/userspace-api/index.rst
@@ -26,6 +26,7 @@ place where this information is gathered.
ioctl/index
iommu
media/index
+ netlink/index
sysfs-platform_profile
vduse
futex2
diff --git a/Documentation/userspace-api/ioctl/ioctl-number.rst b/Documentation/userspace-api/ioctl/ioctl-number.rst
index 3b985b19f39d..5f81e2a24a5c 100644
--- a/Documentation/userspace-api/ioctl/ioctl-number.rst
+++ b/Documentation/userspace-api/ioctl/ioctl-number.rst
@@ -308,7 +308,6 @@ Code Seq# Include File Comments
0x89 00-06 arch/x86/include/asm/sockios.h
0x89 0B-DF linux/sockios.h
0x89 E0-EF linux/sockios.h SIOCPROTOPRIVATE range
-0x89 E0-EF linux/dn.h PROTOPRIVATE range
0x89 F0-FF linux/sockios.h SIOCDEVPRIVATE range
0x8B all linux/wireless.h
0x8C 00-3F WiNRADiO driver
diff --git a/Documentation/userspace-api/netlink/index.rst b/Documentation/userspace-api/netlink/index.rst
new file mode 100644
index 000000000000..b0c21538d97d
--- /dev/null
+++ b/Documentation/userspace-api/netlink/index.rst
@@ -0,0 +1,12 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+
+================
+Netlink Handbook
+================
+
+Netlink documentation for users.
+
+.. toctree::
+ :maxdepth: 2
+
+ intro
diff --git a/Documentation/userspace-api/netlink/intro.rst b/Documentation/userspace-api/netlink/intro.rst
new file mode 100644
index 000000000000..0955e9f203d3
--- /dev/null
+++ b/Documentation/userspace-api/netlink/intro.rst
@@ -0,0 +1,681 @@
+.. SPDX-License-Identifier: BSD-3-Clause
+
+=======================
+Introduction to Netlink
+=======================
+
+Netlink is often described as an ioctl() replacement.
+It aims to replace fixed-format C structures as supplied
+to ioctl() with a format which allows an easy way to add
+or extend the arguments.
+
+To achieve this Netlink uses a minimal fixed-format metadata header
+followed by multiple attributes in the TLV (type, length, value) format.
+
+Unfortunately the protocol has evolved over the years, in an organic
+and undocumented fashion, making it hard to coherently explain.
+To make the most practical sense this document starts by describing
+netlink as it is used today and dives into more "historical" uses
+in later sections.
+
+Opening a socket
+================
+
+Netlink communication happens over sockets; a socket needs to be
+opened first:
+
+.. code-block:: c
+
+ fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_GENERIC);
+
+The use of sockets allows for a natural way of exchanging information
+in both directions (to and from the kernel). The operations are still
+performed synchronously when applications send() the request but
+a separate recv() system call is needed to read the reply.
+
+A very simplified flow of a Netlink "call" will therefore look
+something like:
+
+.. code-block:: c
+
+ fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_GENERIC);
+
+ /* format the request */
+ send(fd, &request, sizeof(request));
+ n = recv(fd, &response, RSP_BUFFER_SIZE);
+ /* interpret the response */
+
+Netlink also provides natural support for "dumping", i.e. communicating
+to user space all objects of a certain type (e.g. dumping all network
+interfaces).
+
+.. code-block:: c
+
+ fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_GENERIC);
+
+ /* format the dump request */
+ send(fd, &request, sizeof(request));
+ while (1) {
+ n = recv(fd, &buffer, RSP_BUFFER_SIZE);
+ /* one recv() call can read multiple messages, hence the loop below */
+ for (nl_msg in buffer) {
+ if (nl_msg.nlmsg_type == NLMSG_DONE)
+ goto dump_finished;
+ /* process the object */
+ }
+ }
+ dump_finished:
+
+The first two arguments of the socket() call require little explanation -
+it is opening a Netlink socket, with all headers provided by the user
+(hence NETLINK, RAW). The last argument is the protocol within Netlink.
+This field used to identify the subsystem with which the socket will
+communicate.
+
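+The ``for (nl_msg in buffer)`` loop in the dump example above is pseudo-code.
+A hedged sketch of the real thing, using the message-walking macros from
+``linux/netlink.h`` (error handling omitted), could look like:
+
+.. code-block:: c
+
+  #include <linux/netlink.h>
+  #include <sys/types.h>
+
+  static int process_buffer(char *buffer, ssize_t n)
+  {
+          struct nlmsghdr *nlh;
+
+          for (nlh = (struct nlmsghdr *)buffer; NLMSG_OK(nlh, n);
+               nlh = NLMSG_NEXT(nlh, n)) {
+                  if (nlh->nlmsg_type == NLMSG_DONE)
+                          return 1;        /* dump finished */
+                  /* process the object carried in this message */
+          }
+          return 0;                        /* call recv() again */
+  }
+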
+Classic vs Generic Netlink
+--------------------------
+
+Initial implementation of Netlink depended on a static allocation
+of IDs to subsystems and provided little supporting infrastructure.
+Let us refer to those protocols collectively as **Classic Netlink**.
+The list of them is defined at the top of the ``include/uapi/linux/netlink.h``
+file; they include, among others, general networking (NETLINK_ROUTE),
+iSCSI (NETLINK_ISCSI), and audit (NETLINK_AUDIT).
+
+**Generic Netlink** (introduced in 2005) allows for dynamic registration of
+subsystems (and subsystem ID allocation), introspection and simplifies
+implementing the kernel side of the interface.
+
+The following section describes how to use Generic Netlink, as
+subsystems using Generic Netlink outnumber the older
+protocols by an order of magnitude.
+more Classic Netlink protocols to the kernel.
+Basic information on how communicating with core networking parts of
+the Linux kernel (or another of the 20 subsystems using Classic
+Netlink) differs from Generic Netlink is provided later in this document.
+
+Generic Netlink
+===============
+
+In addition to the Netlink fixed metadata header each Netlink protocol
+defines its own fixed metadata header. (Similarly to how network
+headers stack - Ethernet > IP > TCP we have Netlink > Generic N. > Family.)
+
+A Netlink message always starts with struct nlmsghdr, which is followed
+by a protocol-specific header. In case of Generic Netlink the protocol
+header is struct genlmsghdr.
+
+The practical meaning of the fields in case of Generic Netlink is as follows:
+
+.. code-block:: c
+
+ struct nlmsghdr {
+ __u32 nlmsg_len; /* Length of message including headers */
+ __u16 nlmsg_type; /* Generic Netlink Family (subsystem) ID */
+ __u16 nlmsg_flags; /* Flags - request or dump */
+ __u32 nlmsg_seq; /* Sequence number */
+ __u32 nlmsg_pid; /* Port ID, set to 0 */
+ };
+ struct genlmsghdr {
+ __u8 cmd; /* Command, as defined by the Family */
+ __u8 version; /* Irrelevant, set to 1 */
+ __u16 reserved; /* Reserved, set to 0 */
+ };
+ /* TLV attributes follow... */
+
+In Classic Netlink :c:member:`nlmsghdr.nlmsg_type` used to identify
+which operation within the subsystem the message was referring to
+(e.g. get information about a netdev). Generic Netlink needs to mux
+multiple subsystems in a single protocol so it uses this field to
+identify the subsystem, and :c:member:`genlmsghdr.cmd` identifies
+the operation instead. (See :ref:`res_fam` for
+information on how to find the Family ID of the subsystem of interest.)
+Note that the first 16 values (0 - 15) of this field are reserved for
+control messages both in Classic Netlink and Generic Netlink.
+See :ref:`nl_msg_type` for more details.
+
+There are 3 usual types of message exchanges on a Netlink socket:
+
+ - performing a single action (``do``);
+ - dumping information (``dump``);
+ - getting asynchronous notifications (``multicast``).
+
+Classic Netlink is very flexible and presumably allows other types
+of exchanges to happen, but in practice those are the three that get
+used.
+
+Asynchronous notifications are sent by the kernel and received by
+the user sockets which subscribed to them. ``do`` and ``dump`` requests
+are initiated by the user. :c:member:`nlmsghdr.nlmsg_flags` should
+be set as follows:
+
+ - for ``do``: ``NLM_F_REQUEST | NLM_F_ACK``
+ - for ``dump``: ``NLM_F_REQUEST | NLM_F_ACK | NLM_F_DUMP``
+
+:c:member:`nlmsghdr.nlmsg_seq` should be set to a monotonically
+increasing value. The value gets echoed back in responses and doesn't
+matter in practice, but setting it to an increasing value for each
+message sent is considered good hygiene. The purpose of the field is
+matching responses to requests. Asynchronous notifications will have
+:c:member:`nlmsghdr.nlmsg_seq` of ``0``.
+
+:c:member:`nlmsghdr.nlmsg_pid` is the Netlink equivalent of an address.
+This field can be set to ``0`` when talking to the kernel.
+See :ref:`nlmsg_pid` for the (uncommon) uses of the field.
+
+The expected use for :c:member:`genlmsghdr.version` was to allow
+versioning of the APIs provided by the subsystems. No subsystem to
+date has made significant use of this field, so setting it to ``1`` seems
+like a safe bet.
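+
+For illustration, a minimal sketch of assembling a ``do`` request from the
+two headers described above (``fd``, ``family_id``, ``command`` and
+``next_seq`` are placeholders assumed to be defined elsewhere):
+
+.. code-block:: c
+
+ struct {
+         struct nlmsghdr nlh;
+         struct genlmsghdr genl;
+         /* TLV attributes would follow here */
+ } req = {
+         .nlh.nlmsg_len   = sizeof(req),
+         .nlh.nlmsg_type  = family_id,   /* resolved as shown later */
+         .nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK,
+         .nlh.nlmsg_seq   = next_seq++,
+         .nlh.nlmsg_pid   = 0,
+         .genl.cmd        = command,     /* family-defined command */
+         .genl.version    = 1,
+ };
+
+ send(fd, &req, sizeof(req), 0);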
+
+.. _nl_msg_type:
+
+Netlink message types
+---------------------
+
+As previously mentioned, :c:member:`nlmsghdr.nlmsg_type` carries
+protocol specific values but the first 16 identifiers are reserved
+(the first subsystem specific message type should be equal to
+``NLMSG_MIN_TYPE``, which is ``0x10``).
+
+There are only 4 Netlink control messages defined:
+
+ - ``NLMSG_NOOP`` - ignore the message, not used in practice;
+ - ``NLMSG_ERROR`` - carries the return code of an operation;
+ - ``NLMSG_DONE`` - marks the end of a dump;
+ - ``NLMSG_OVERRUN`` - socket buffer has overflowed, not used to date.
+
+``NLMSG_ERROR`` and ``NLMSG_DONE`` are of practical importance.
+They carry return codes for operations. Note that unless
+the ``NLM_F_ACK`` flag is set on the request Netlink will not respond
+with ``NLMSG_ERROR`` if there is no error. To avoid having to special-case
+this quirk it is recommended to always set ``NLM_F_ACK``.
+
+The format of ``NLMSG_ERROR`` is described by struct nlmsgerr::
+
+ ----------------------------------------------
+ | struct nlmsghdr - response header |
+ ----------------------------------------------
+ | int error |
+ ----------------------------------------------
+ | struct nlmsghdr - original request header |
+ ----------------------------------------------
+ | ** optionally (1) payload of the request |
+ ----------------------------------------------
+ | ** optionally (2) extended ACK |
+ ----------------------------------------------
+
+There are two instances of struct nlmsghdr here, the first belonging
+to the response and the second to the request. ``NLMSG_ERROR`` carries
+the information about the request which led to the error. This can be
+useful when trying to match requests to responses or to re-parse the
+request to dump it into logs.
+
+The payload of the request is not echoed in messages reporting success
+(``error == 0``) or if the ``NETLINK_CAP_ACK`` setsockopt() was set.
+The latter is common, and perhaps recommended, as having to read a copy
+of every request back from the kernel is rather wasteful. The absence
+of the request payload is indicated by ``NLM_F_CAPPED`` in
+:c:member:`nlmsghdr.nlmsg_flags`.
+
+The second optional element of ``NLMSG_ERROR`` is the set of extended ACK
+attributes. See :ref:`ext_ack` for more details. The presence
+of extended ACK is indicated by ``NLM_F_ACK_TLVS`` in
+:c:member:`nlmsghdr.nlmsg_flags`.
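+
+As a rough sketch, user space might pick an ``NLMSG_ERROR`` message apart
+like this (``nl_msg`` is assumed to point at a received, validated message):
+
+.. code-block:: c
+
+ if (nl_msg->nlmsg_type == NLMSG_ERROR) {
+         struct nlmsgerr *err = (struct nlmsgerr *)NLMSG_DATA(nl_msg);
+
+         if (err->error)
+                 fprintf(stderr, "request failed: %s\n", strerror(-err->error));
+         if (!(nl_msg->nlmsg_flags & NLM_F_CAPPED))
+                 ; /* a full copy of the request follows err->msg */
+         if (nl_msg->nlmsg_flags & NLM_F_ACK_TLVS)
+                 ; /* extended ACK attributes are present, see below */
+ }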
+
+``NLMSG_DONE`` is simpler: the request is never echoed but the extended
+ACK attributes may be present::
+
+ ----------------------------------------------
+ | struct nlmsghdr - response header |
+ ----------------------------------------------
+ | int error |
+ ----------------------------------------------
+ | ** optionally extended ACK |
+ ----------------------------------------------
+
+.. _res_fam:
+
+Resolving the Family ID
+-----------------------
+
+This section explains how to find the Family ID of a subsystem.
+It also serves as an example of Generic Netlink communication.
+
+Generic Netlink is itself a subsystem exposed via the Generic Netlink API.
+To avoid a circular dependency Generic Netlink has a statically allocated
+Family ID (``GENL_ID_CTRL`` which is equal to ``NLMSG_MIN_TYPE``).
+The Generic Netlink family implements a command used to find out information
+about other families (``CTRL_CMD_GETFAMILY``).
+
+To get information about a Generic Netlink family named, for example,
+``"test1"``, we need to send a message on the previously opened Generic
+Netlink socket. The message should target the Generic Netlink Family (1)
+and be a ``do`` (2) call to ``CTRL_CMD_GETFAMILY`` (3). A ``dump`` version of this
+call would make the kernel respond with information about *all* the families
+it knows about. Last but not least the name of the family in question has
+to be specified (4) as an attribute with the appropriate type::
+
+ struct nlmsghdr:
+ __u32 nlmsg_len: 32
+ __u16 nlmsg_type: GENL_ID_CTRL // (1)
+ __u16 nlmsg_flags: NLM_F_REQUEST | NLM_F_ACK // (2)
+ __u32 nlmsg_seq: 1
+ __u32 nlmsg_pid: 0
+
+ struct genlmsghdr:
+ __u8 cmd: CTRL_CMD_GETFAMILY // (3)
+ __u8 version: 2 /* or 1, doesn't matter */
+ __u16 reserved: 0
+
+ struct nlattr: // (4)
+ __u16 nla_len: 10
+ __u16 nla_type: CTRL_ATTR_FAMILY_NAME
+ char data: test1\0
+
+ (padding:)
+ char data: \0\0
+
+The length fields in Netlink (:c:member:`nlmsghdr.nlmsg_len`
+and :c:member:`nlattr.nla_len`) always *include* the header.
+Attribute headers in netlink must be aligned to 4 bytes from the start
+of the message, hence the extra ``\0\0`` after ``CTRL_ATTR_FAMILY_NAME``.
+The attribute lengths *exclude* the padding.
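+
+For reference, a minimal sketch of building this exact request with the
+uAPI helper macros (error handling omitted; ``fd`` is the socket opened
+earlier):
+
+.. code-block:: c
+
+ struct nlmsghdr *nlh;
+ struct genlmsghdr *genl;
+ struct nlattr *attr;
+ char buf[64] = {};
+
+ nlh = (struct nlmsghdr *)buf;
+ nlh->nlmsg_type  = GENL_ID_CTRL;
+ nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
+ nlh->nlmsg_seq   = 1;
+
+ genl = (struct genlmsghdr *)NLMSG_DATA(nlh);
+ genl->cmd     = CTRL_CMD_GETFAMILY;
+ genl->version = 1;
+
+ attr = (struct nlattr *)((char *)genl + GENL_HDRLEN);
+ attr->nla_type = CTRL_ATTR_FAMILY_NAME;
+ attr->nla_len  = NLA_HDRLEN + sizeof("test1");  /* 4 + 6 = 10 */
+ memcpy((char *)attr + NLA_HDRLEN, "test1", sizeof("test1"));
+
+ nlh->nlmsg_len = NLMSG_HDRLEN + GENL_HDRLEN + NLA_ALIGN(attr->nla_len);
+ send(fd, buf, nlh->nlmsg_len, 0);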
+
+If the family is found, the kernel will reply with two messages, the first
+being the response with all the information about the family::
+
+ /* Message #1 - reply */
+ struct nlmsghdr:
+ __u32 nlmsg_len: 136
+ __u16 nlmsg_type: GENL_ID_CTRL
+ __u16 nlmsg_flags: 0
+ __u32 nlmsg_seq: 1 /* echoed from our request */
+ __u32 nlmsg_pid: 5831 /* The PID of our user space process */
+
+ struct genlmsghdr:
+ __u8 cmd: CTRL_CMD_GETFAMILY
+ __u8 version: 2
+ __u16 reserved: 0
+
+ struct nlattr:
+ __u16 nla_len: 10
+ __u16 nla_type: CTRL_ATTR_FAMILY_NAME
+ char data: test1\0
+
+ (padding:)
+ data: \0\0
+
+ struct nlattr:
+ __u16 nla_len: 6
+ __u16 nla_type: CTRL_ATTR_FAMILY_ID
+ __u16: 123 /* The Family ID we are after */
+
+ (padding:)
+ char data: \0\0
+
+ struct nlattr:
+ __u16 nla_len: 9
+ __u16 nla_type: CTRL_ATTR_FAMILY_VERSION
+ __u16: 1
+
+ /* ... etc, more attributes will follow. */
+
+And the error code (success) since ``NLM_F_ACK`` had been set on the request::
+
+ /* Message #2 - the ACK */
+ struct nlmsghdr:
+ __u32 nlmsg_len: 36
+ __u16 nlmsg_type: NLMSG_ERROR
+ __u16 nlmsg_flags: NLM_F_CAPPED /* There won't be a payload */
+ __u32 nlmsg_seq: 1 /* echoed from our request */
+ __u32 nlmsg_pid: 5831 /* The PID of our user space process */
+
+ int error: 0
+
+ struct nlmsghdr: /* Copy of the request header as we sent it */
+ __u32 nlmsg_len: 32
+ __u16 nlmsg_type: GENL_ID_CTRL
+ __u16 nlmsg_flags: NLM_F_REQUEST | NLM_F_ACK
+ __u32 nlmsg_seq: 1
+ __u32 nlmsg_pid: 0
+
+The order of attributes (struct nlattr) is not guaranteed so the user
+has to walk the attributes and parse them.
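+
+A sketch of such a walk over the attributes of the reply above (``nlh``
+points at the received message; ``family_id`` is an illustrative variable):
+
+.. code-block:: c
+
+ struct nlattr *attr;
+ int remaining = nlh->nlmsg_len - NLMSG_HDRLEN - GENL_HDRLEN;
+
+ attr = (struct nlattr *)((char *)NLMSG_DATA(nlh) + GENL_HDRLEN);
+ while (remaining >= NLA_HDRLEN &&
+        attr->nla_len >= NLA_HDRLEN && attr->nla_len <= remaining) {
+         switch (attr->nla_type & NLA_TYPE_MASK) {
+         case CTRL_ATTR_FAMILY_ID:
+                 family_id = *(__u16 *)((char *)attr + NLA_HDRLEN);
+                 break;
+         /* ... handle other attributes ... */
+         }
+         remaining -= NLA_ALIGN(attr->nla_len);
+         attr = (struct nlattr *)((char *)attr + NLA_ALIGN(attr->nla_len));
+ }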
+
+Note that Generic Netlink sockets are not associated or bound to a single
+family. A socket can be used to exchange messages with many different
+families, selecting the recipient family on a message-by-message basis using
+the :c:member:`nlmsghdr.nlmsg_type` field.
+
+.. _ext_ack:
+
+Extended ACK
+------------
+
+Extended ACK controls reporting of additional error/warning TLVs
+in ``NLMSG_ERROR`` and ``NLMSG_DONE`` messages. To maintain backward
+compatibility this feature has to be explicitly enabled by setting
+the ``NETLINK_EXT_ACK`` setsockopt() to ``1``.
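+
+A sketch of enabling the option (the related ``NETLINK_CAP_ACK`` option
+mentioned earlier is set the same way):
+
+.. code-block:: c
+
+ int one = 1;
+
+ setsockopt(fd, SOL_NETLINK, NETLINK_EXT_ACK, &one, sizeof(one));
+ setsockopt(fd, SOL_NETLINK, NETLINK_CAP_ACK, &one, sizeof(one));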
+
+Types of extended ACK attributes are defined in enum nlmsgerr_attrs.
+The most commonly used attributes are ``NLMSGERR_ATTR_MSG``,
+``NLMSGERR_ATTR_OFFS`` and ``NLMSGERR_ATTR_MISS_*``.
+
+``NLMSGERR_ATTR_MSG`` carries a message in English describing
+the encountered problem. These messages are far more detailed
+than what can be expressed through standard UNIX error codes.
+
+``NLMSGERR_ATTR_OFFS`` points to the attribute which caused the problem.
+
+``NLMSGERR_ATTR_MISS_TYPE`` and ``NLMSGERR_ATTR_MISS_NEST``
+inform about a missing attribute.
+
+Extended ACKs can be reported on errors as well as in case of success.
+The latter should be treated as a warning.
+
+Extended ACKs greatly improve the usability of Netlink and should
+always be enabled, appropriately parsed and reported to the user.
+
+Advanced topics
+===============
+
+Dump consistency
+----------------
+
+Some of the data structures the kernel uses for storing objects make
+it hard to provide an atomic snapshot of all the objects in a dump
+(without impacting the fast paths updating them).
+
+The kernel may set the ``NLM_F_DUMP_INTR`` flag on any message in a dump
+(including the ``NLMSG_DONE`` message) if the dump was interrupted and
+may be inconsistent (e.g. missing objects). User space should retry
+the dump if it sees the flag set.
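+
+In a sketch, the check could look like this (``nl_msg`` points at a message
+from the dump, ``retry`` being an illustrative flag):
+
+.. code-block:: c
+
+ if (nl_msg->nlmsg_flags & NLM_F_DUMP_INTR) {
+         /* the dump may be inconsistent, restart it from scratch */
+         retry = true;
+ }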
+
+Introspection
+-------------
+
+The basic introspection abilities are enabled by access to the Family
+object as reported in :ref:`res_fam`. The user can query information about
+the Generic Netlink family, including which operations are supported
+by the kernel and what attributes the kernel understands.
+Family information includes the highest ID of an attribute the kernel can
+parse; a separate command (``CTRL_CMD_GETPOLICY``) provides detailed
+information about supported attributes, including ranges of values the
+kernel accepts.
+
+Querying family information is useful in cases when user space needs
+to make sure that the kernel has support for a feature before issuing
+a request.
+
+.. _nlmsg_pid:
+
+nlmsg_pid
+---------
+
+:c:member:`nlmsghdr.nlmsg_pid` is the Netlink equivalent of an address.
+It is referred to as Port ID, or sometimes Process ID, because for
+historical reasons, if the application does not select (bind() to)
+an explicit Port ID, the kernel will automatically assign it an ID
+equal to its Process ID (as reported by the getpid() system call).
+
+Similarly to the bind() semantics of the TCP/IP network protocols the value
+of zero means "assign automatically", hence it is common for applications
+to leave the :c:member:`nlmsghdr.nlmsg_pid` field initialized to ``0``.
+
+The field is still used today in rare cases when the kernel needs to send
+a unicast notification. A user space application can use bind() to associate
+its socket with a specific PID; it then communicates its PID to the kernel.
+This way the kernel can reach the specific user space process.
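+
+A sketch of binding a socket to an explicit Port ID (here simply the
+process ID, but any unique non-zero value works):
+
+.. code-block:: c
+
+ struct sockaddr_nl addr = {
+         .nl_family = AF_NETLINK,
+         .nl_pid    = getpid(),
+ };
+
+ bind(fd, (struct sockaddr *)&addr, sizeof(addr));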
+
+This sort of communication is utilized in UMH (User Mode Helper)-like
+scenarios when the kernel needs to trigger user space processing or ask user
+space for a policy decision.
+
+Multicast notifications
+-----------------------
+
+One of the strengths of Netlink is the ability to send event notifications
+to user space. This is a unidirectional form of communication (kernel ->
+user) and does not involve any control messages like ``NLMSG_ERROR`` or
+``NLMSG_DONE``.
+
+For example the Generic Netlink family itself defines a set of multicast
+notifications about registered families. When a new family is added the
+sockets subscribed to the notifications will get the following message::
+
+ struct nlmsghdr:
+ __u32 nlmsg_len: 136
+ __u16 nlmsg_type: GENL_ID_CTRL
+ __u16 nlmsg_flags: 0
+ __u32 nlmsg_seq: 0
+ __u32 nlmsg_pid: 0
+
+ struct genlmsghdr:
+ __u8 cmd: CTRL_CMD_NEWFAMILY
+ __u8 version: 2
+ __u16 reserved: 0
+
+ struct nlattr:
+ __u16 nla_len: 10
+ __u16 nla_type: CTRL_ATTR_FAMILY_NAME
+ char data: test1\0
+
+ (padding:)
+ data: \0\0
+
+ struct nlattr:
+ __u16 nla_len: 6
+ __u16 nla_type: CTRL_ATTR_FAMILY_ID
+ __u16: 123 /* The Family ID we are after */
+
+ (padding:)
+ char data: \0\0
+
+ struct nlattr:
+ __u16 nla_len: 9
+ __u16 nla_type: CTRL_ATTR_FAMILY_VERSION
+ __u16: 1
+
+ /* ... etc, more attributes will follow. */
+
+The notification contains the same information as the response
+to the ``CTRL_CMD_GETFAMILY`` request.
+
+The Netlink headers of the notification are mostly 0 and irrelevant.
+The :c:member:`nlmsghdr.nlmsg_seq` may be either zero or a monotonically
+increasing notification sequence number maintained by the family.
+
+To receive notifications the user socket must subscribe to the relevant
+notification group. Much like the Family ID, the Group ID for a given
+multicast group is dynamic and can be found inside the Family information.
+The ``CTRL_ATTR_MCAST_GROUPS`` attribute contains nests with the names
+(``CTRL_ATTR_MCAST_GRP_NAME``) and IDs (``CTRL_ATTR_MCAST_GRP_ID``) of
+the family's groups.
+
+Once the Group ID is known a setsockopt() call adds the socket to the group:
+
+.. code-block:: c
+
+ unsigned int group_id;
+
+ /* .. find the group ID... */
+
+ setsockopt(fd, SOL_NETLINK, NETLINK_ADD_MEMBERSHIP,
+ &group_id, sizeof(group_id));
+
+The socket will now receive notifications.
+
+It is recommended to use separate sockets for receiving notifications
+and sending requests to the kernel. The asynchronous nature of notifications
+means that they may get mixed in with the responses, making message
+handling much harder.
+
+Buffer sizing
+-------------
+
+Netlink sockets are datagram sockets rather than stream sockets,
+meaning that each message must be received in its entirety by a single
+recv()/recvmsg() system call. If the buffer provided by the user is too
+short, the message will be truncated and the ``MSG_TRUNC`` flag set
+in struct msghdr (struct msghdr is the second argument
+of the recvmsg() system call, *not* a Netlink header).
+
+Upon truncation the remaining part of the message is discarded.
+
+Netlink expects that the user buffer will be at least 8kB or the page
+size of the CPU architecture, whichever is bigger. Particular Netlink
+families may, however, require a larger buffer. A 32kB buffer is recommended
+for the most efficient handling of dumps (a larger buffer fits more dumped
+objects and therefore fewer recvmsg() calls are needed).
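+
+A sketch of a receive path sized accordingly, with the truncation check
+described above:
+
+.. code-block:: c
+
+ char buf[32 * 1024];
+ struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
+ struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1 };
+ ssize_t n;
+
+ n = recvmsg(fd, &msg, 0);
+ if (n > 0 && (msg.msg_flags & MSG_TRUNC))
+         ; /* buffer was too small, the rest of the message was discarded */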
+
+Classic Netlink
+===============
+
+The main differences between Classic and Generic Netlink are the dynamic
+allocation of subsystem identifiers and availability of introspection.
+In theory the protocol does not differ significantly; however, in practice
+Classic Netlink experimented with concepts which were abandoned in Generic
+Netlink (really, they usually only found use in a small corner of a single
+subsystem). This section is meant as an explainer of a few such concepts,
+with the explicit goal of giving the Generic Netlink
+users the confidence to ignore them when reading the uAPI headers.
+
+Most of the concepts and examples here refer to the ``NETLINK_ROUTE`` family,
+which covers much of the configuration of the Linux networking stack.
+Real documentation of that family deserves a chapter (or a book) of its own.
+
+Families
+--------
+
+Netlink refers to subsystems as families. This is a remnant of using
+sockets and the concept of protocol families, which are part of message
+demultiplexing in ``NETLINK_ROUTE``.
+
+Sadly every layer of encapsulation likes to refer to whatever it's carrying
+as "families" making the term very confusing:
+
+ 1. AF_NETLINK is a bona fide socket protocol family
+ 2. AF_NETLINK's documentation refers to what comes after its own
+ header (struct nlmsghdr) in a message as a "Family Header"
+ 3. Generic Netlink is a family for AF_NETLINK (struct genlmsghdr follows
+ struct nlmsghdr), yet it also calls its users "Families".
+
+Note that the Generic Netlink Family IDs are in a different "ID space"
+and overlap with Classic Netlink protocol numbers (e.g. ``NETLINK_CRYPTO``
+has the Classic Netlink protocol ID of 21 which Generic Netlink will
+happily allocate to one of its families as well).
+
+Strict checking
+---------------
+
+The ``NETLINK_GET_STRICT_CHK`` socket option enables strict input checking
+in ``NETLINK_ROUTE``. It was needed because historically the kernel did not
+validate the fields of structures it didn't process. This made it impossible
+to start using those fields later without risking regressions in applications
+which initialized them incorrectly or not at all.
+
+``NETLINK_GET_STRICT_CHK`` declares that the application is initializing
+all fields correctly. It also opts into validating that the message does not
+contain trailing data and requests that the kernel reject attributes with
+a type higher than the largest attribute type known to the kernel.
+
+``NETLINK_GET_STRICT_CHK`` is not used outside of ``NETLINK_ROUTE``.
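+
+Enabling it is another boolean setsockopt(), in a sketch:
+
+.. code-block:: c
+
+ int one = 1;
+
+ setsockopt(fd, SOL_NETLINK, NETLINK_GET_STRICT_CHK, &one, sizeof(one));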
+
+Unknown attributes
+------------------
+
+Historically Netlink ignored all unknown attributes. The thinking was that
+it would free the application from having to probe what the kernel supports.
+The application could make a request to change the state and check which
+parts of the request "stuck".
+
+This is no longer the case for new Generic Netlink families and those opting
+in to strict checking. See enum netlink_validation for validation types
+performed.
+
+Fixed metadata and structures
+-----------------------------
+
+Classic Netlink made liberal use of fixed-format structures within
+the messages. Messages would commonly have a structure with
+a considerable number of fields after struct nlmsghdr. It was also
+common to put structures with multiple members inside attributes,
+without breaking each member into an attribute of its own.
+
+This has caused problems with validation and extensibility and
+therefore using binary structures is actively discouraged for new
+attributes.
+
+Request types
+-------------
+
+``NETLINK_ROUTE`` categorized requests into 4 types: ``NEW``, ``DEL``, ``GET``,
+and ``SET``. Each object can handle all or some of those requests
+(objects being netdevs, routes, addresses, qdiscs, etc.). The request type
+is defined by the 2 lowest bits of the message type, so commands for
+new objects would always be allocated with a stride of 4.
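+
+As a sketch using ``NETLINK_ROUTE``'s netdev commands (RTM_NEWLINK is 16),
+the request type can be read from the low bits:
+
+.. code-block:: c
+
+ switch (nlh->nlmsg_type & 0x3) {
+ case 0: /* NEW: e.g. RTM_NEWLINK == 16 */
+ case 1: /* DEL: e.g. RTM_DELLINK == 17 */
+ case 2: /* GET: e.g. RTM_GETLINK == 18 */
+ case 3: /* SET: e.g. RTM_SETLINK == 19 */
+         break;
+ }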
+
+Each object would also have its own fixed metadata shared by all request
+types (e.g. struct ifinfomsg for netdev requests, struct ifaddrmsg for address
+requests, struct tcmsg for qdisc requests).
+
+Even though other protocols and Generic Netlink commands often use
+the same verbs in their message names (``GET``, ``SET``) the concept
+of request types did not find wider adoption.
+
+Notification echo
+-----------------
+
+``NLM_F_ECHO`` requests that notifications resulting from the request
+be queued onto the requesting socket. This is useful to discover
+the impact of the request.
+
+Note that this feature is not universally implemented.
+
+Other request-type-specific flags
+---------------------------------
+
+Classic Netlink defined various flags for its ``GET``, ``NEW``
+and ``DEL`` requests in the upper byte of nlmsg_flags in struct nlmsghdr.
+Since request types have not been generalized the request type specific
+flags are rarely used (and considered deprecated for new families).
+
+For ``GET`` - ``NLM_F_ROOT`` and ``NLM_F_MATCH`` are combined into
+``NLM_F_DUMP``, and not used separately. ``NLM_F_ATOMIC`` is never used.
+
+For ``DEL`` - ``NLM_F_NONREC`` is only used by nftables and ``NLM_F_BULK``
+only by some FDB operations.
+
+The flags for ``NEW`` are used most commonly in Classic Netlink. Unfortunately,
+the meaning is not crystal clear. The following description is based on the
+best guess of the intention of the authors, and in practice all families
+stray from it in one way or another. ``NLM_F_REPLACE`` asks to replace
+an existing object; if no matching object exists the operation should fail.
+``NLM_F_EXCL`` has the opposite semantics and only succeeds if the object
+does not already exist.
+``NLM_F_CREATE`` asks for the object to be created if it does not
+exist; it can be combined with ``NLM_F_REPLACE`` and ``NLM_F_EXCL``.
+
+A comment in the main Netlink uAPI header states::
+
+ 4.4BSD ADD NLM_F_CREATE|NLM_F_EXCL
+ 4.4BSD CHANGE NLM_F_REPLACE
+
+ True CHANGE NLM_F_CREATE|NLM_F_REPLACE
+ Append NLM_F_CREATE
+ Check NLM_F_EXCL
+
+which seems to indicate that those flags predate request types.
+``NLM_F_REPLACE`` without ``NLM_F_CREATE`` was initially used instead
+of ``SET`` commands.
+``NLM_F_EXCL`` without ``NLM_F_CREATE`` was used to check if object exists
+without creating it, presumably predating ``GET`` commands.
+
+``NLM_F_APPEND`` indicates that if one key can have multiple objects associated
+with it (e.g. multiple next-hop objects for a route) the new object should be
+added to the list rather than replacing the entire list.
+
+uAPI reference
+==============
+
+.. kernel-doc:: include/uapi/linux/netlink.h
diff --git a/MAINTAINERS b/MAINTAINERS
index 66ef84d8b304..9ca84cb5ab4a 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -877,6 +877,13 @@ L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/altera/
+ALTERA TSE PCS
+M: Maxime Chevallier
+L: netdev@vger.kernel.org
+S: Supported
+F: drivers/net/pcs/pcs-altera-tse.c
+F: include/linux/pcs-altera-tse.h
+
ALTERA UART/JTAG UART SERIAL DRIVERS
M: Tobias Klauser
L: linux-serial@vger.kernel.org
@@ -2392,6 +2399,7 @@ N: atmel
ARM/Microchip Sparx5 SoC support
M: Lars Povlsen
M: Steen Hegelund
+M: Daniel Machon
M: UNGLinuxDriver@microchip.com
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Supported
@@ -3822,6 +3830,7 @@ F: kernel/bpf/dispatcher.c
F: kernel/bpf/trampoline.c
F: include/linux/bpf*
F: include/linux/filter.h
+F: include/linux/tnum.h
BPF [BTF]
M: Martin KaFai Lau
@@ -5720,13 +5729,6 @@ F: include/linux/tfrc.h
F: include/uapi/linux/dccp.h
F: net/dccp/
-DECnet NETWORK LAYER
-L: linux-decnet-user@lists.sourceforge.net
-S: Orphan
-W: http://linux-decnet.sourceforge.net
-F: Documentation/networking/decnet.rst
-F: net/decnet/
-
DECSTATION PLATFORM SUPPORT
M: "Maciej W. Rozycki"
L: linux-mips@vger.kernel.org
@@ -14744,6 +14746,13 @@ F: net/dsa/tag_ocelot.c
F: net/dsa/tag_ocelot_8021q.c
F: tools/testing/selftests/drivers/net/ocelot/*
+OCELOT EXTERNAL SWITCH CONTROL
+M: Colin Foster
+S: Supported
+F: Documentation/devicetree/bindings/mfd/mscc,ocelot.yaml
+F: drivers/mfd/ocelot*
+F: include/linux/mfd/ocelot.h
+
OCXL (Open Coherent Accelerator Processor Interface OpenCAPI) DRIVER
M: Frederic Barrat
M: Andrew Donnellan
@@ -19519,6 +19528,11 @@ L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/dlink/sundance.c
+SUN HAPPY MEAL ETHERNET DRIVER
+M: Sean Anderson
+S: Maintained
+F: drivers/net/ethernet/sun/sunhme.*
+
SUNPLUS ETHERNET DRIVER
M: Wells Lu
L: netdev@vger.kernel.org
@@ -21889,9 +21903,11 @@ F: drivers/input/tablet/wacom_serial4.c
WANGXUN ETHERNET DRIVER
M: Jiawen Wu
+M: Mengyuan Lou
+W: https://www.net-swift.com
L: netdev@vger.kernel.org
S: Maintained
-F: Documentation/networking/device_drivers/ethernet/wangxun/txgbe.rst
+F: Documentation/networking/device_drivers/ethernet/wangxun/*
F: drivers/net/ethernet/wangxun/
WATCHDOG DEVICE DRIVERS
diff --git a/arch/arm/boot/dts/aspeed-bmc-facebook-elbert.dts b/arch/arm/boot/dts/aspeed-bmc-facebook-elbert.dts
index 27b43fe099f1..8e1a1d1b282d 100644
--- a/arch/arm/boot/dts/aspeed-bmc-facebook-elbert.dts
+++ b/arch/arm/boot/dts/aspeed-bmc-facebook-elbert.dts
@@ -183,3 +183,21 @@
&i2c11 {
status = "okay";
};
+
+/*
+ * BMC's "mac3" controller is connected to BCM53134P's IMP_RGMII port
+ * directly (fixed link, no PHY in between).
+ * Note: BMC's "mdio0" controller is connected to BCM53134P's MDIO
+ * interface, and the MDIO channel will be enabled in dts later, when
+ * BCM53134 is added to "bcm53xx" DSA driver.
+ */
+&mac3 {
+ status = "okay";
+ phy-mode = "rgmii";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_rgmii4_default>;
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+};
diff --git a/arch/arm64/boot/dts/apple/t8103-j274.dts b/arch/arm64/boot/dts/apple/t8103-j274.dts
index 2cd429efba5b..c1f3ba9c39f6 100644
--- a/arch/arm64/boot/dts/apple/t8103-j274.dts
+++ b/arch/arm64/boot/dts/apple/t8103-j274.dts
@@ -21,6 +21,10 @@
};
};
+&wifi0 {
+ brcm,board-type = "apple,atlantisb";
+};
+
/*
* Force the bus number assignments so that we can declare some of the
* on-board devices and properties that are populated by the bootloader
diff --git a/arch/arm64/boot/dts/apple/t8103-j293.dts b/arch/arm64/boot/dts/apple/t8103-j293.dts
index 49cdf4b560a3..ecb10d237a05 100644
--- a/arch/arm64/boot/dts/apple/t8103-j293.dts
+++ b/arch/arm64/boot/dts/apple/t8103-j293.dts
@@ -17,6 +17,10 @@
model = "Apple MacBook Pro (13-inch, M1, 2020)";
};
+&wifi0 {
+ brcm,board-type = "apple,honshu";
+};
+
/*
* Remove unused PCIe ports and disable the associated DARTs.
*/
diff --git a/arch/arm64/boot/dts/apple/t8103-j313.dts b/arch/arm64/boot/dts/apple/t8103-j313.dts
index b0ebb45bdb6f..df741737b8e6 100644
--- a/arch/arm64/boot/dts/apple/t8103-j313.dts
+++ b/arch/arm64/boot/dts/apple/t8103-j313.dts
@@ -17,6 +17,10 @@
model = "Apple MacBook Air (M1, 2020)";
};
+&wifi0 {
+ brcm,board-type = "apple,shikoku";
+};
+
/*
* Remove unused PCIe ports and disable the associated DARTs.
*/
diff --git a/arch/arm64/boot/dts/apple/t8103-j456.dts b/arch/arm64/boot/dts/apple/t8103-j456.dts
index 884fddf7d363..8c6bf9592510 100644
--- a/arch/arm64/boot/dts/apple/t8103-j456.dts
+++ b/arch/arm64/boot/dts/apple/t8103-j456.dts
@@ -21,6 +21,10 @@
};
};
+&wifi0 {
+ brcm,board-type = "apple,capri";
+};
+
&i2c0 {
hpm2: usb-pd@3b {
compatible = "apple,cd321x";
diff --git a/arch/arm64/boot/dts/apple/t8103-j457.dts b/arch/arm64/boot/dts/apple/t8103-j457.dts
index d7c622931627..fe7c0aaf7d62 100644
--- a/arch/arm64/boot/dts/apple/t8103-j457.dts
+++ b/arch/arm64/boot/dts/apple/t8103-j457.dts
@@ -21,6 +21,10 @@
};
};
+&wifi0 {
+ brcm,board-type = "apple,santorini";
+};
+
/*
* Force the bus number assignments so that we can declare some of the
* on-board devices and properties that are populated by the bootloader
diff --git a/arch/arm64/boot/dts/apple/t8103-jxxx.dtsi b/arch/arm64/boot/dts/apple/t8103-jxxx.dtsi
index fe2ae40fa9dd..3d15b8e2a6c1 100644
--- a/arch/arm64/boot/dts/apple/t8103-jxxx.dtsi
+++ b/arch/arm64/boot/dts/apple/t8103-jxxx.dtsi
@@ -71,8 +71,10 @@
&port00 {
bus-range = <1 1>;
wifi0: network@0,0 {
+ compatible = "pci14e4,4425";
reg = <0x10000 0x0 0x0 0x0 0x0>;
/* To be filled by the loader */
local-mac-address = [00 00 00 00 00 00];
+ apple,antenna-sku = "XX";
};
};
diff --git a/arch/arm64/boot/dts/mediatek/mt2712e.dtsi b/arch/arm64/boot/dts/mediatek/mt2712e.dtsi
index 4797537cb368..e6d7453e56e0 100644
--- a/arch/arm64/boot/dts/mediatek/mt2712e.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt2712e.dtsi
@@ -756,7 +756,7 @@
snps,mtl-tx-config = <&mtl_tx_setup>;
snps,txpbl = <1>;
snps,rxpbl = <1>;
- clk_csr = <0>;
+ snps,clk-csr = <0>;
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/mediatek/mt7986a.dtsi b/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
index e3a407d03551..692102f6248d 100644
--- a/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt7986a.dtsi
@@ -222,6 +222,28 @@
#reset-cells = <1>;
};
+ wed_pcie: wed-pcie@10003000 {
+ compatible = "mediatek,mt7986-wed-pcie",
+ "syscon";
+ reg = <0 0x10003000 0 0x10>;
+ };
+
+ wed0: wed@15010000 {
+ compatible = "mediatek,mt7986-wed",
+ "syscon";
+ reg = <0 0x15010000 0 0x1000>;
+ interrupt-parent = <&gic>;
+ interrupts = ;
+ };
+
+ wed1: wed@15011000 {
+ compatible = "mediatek,mt7986-wed",
+ "syscon";
+ reg = <0 0x15011000 0 0x1000>;
+ interrupt-parent = <&gic>;
+ interrupts = ;
+ };
+
eth: ethernet@15100000 {
compatible = "mediatek,mt7986-eth";
reg = <0 0x15100000 0 0x80000>;
@@ -256,6 +278,8 @@
<&apmixedsys CLK_APMIXED_SGMPLL>;
mediatek,ethsys = <ðsys>;
mediatek,sgmiisys = <&sgmiisys0>, <&sgmiisys1>;
+ mediatek,wed-pcie = <&wed_pcie>;
+ mediatek,wed = <&wed0>, <&wed1>;
#reset-cells = <1>;
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 389623ae5a91..30f76178608b 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -1970,7 +1970,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image,
u32 flags, struct bpf_tramp_links *tlinks,
void *orig_call)
{
- int ret;
+ int i, ret;
int nargs = m->nr_args;
int max_insns = ((long)image_end - (long)image) / AARCH64_INSN_SIZE;
struct jit_ctx ctx = {
@@ -1982,6 +1982,12 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image,
if (nargs > 8)
return -ENOTSUPP;
+ /* don't support struct argument */
+ for (i = 0; i < MAX_BPF_FUNC_ARGS; i++) {
+ if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG)
+ return -ENOTSUPP;
+ }
+
ret = prepare_trampoline(&ctx, im, tlinks, orig_call, nargs, flags);
if (ret < 0)
return ret;
diff --git a/arch/mips/boot/dts/ralink/mt7621-gnubee-gb-pc1.dts b/arch/mips/boot/dts/ralink/mt7621-gnubee-gb-pc1.dts
index 24eebc5a85b1..0128bd8fa7ed 100644
--- a/arch/mips/boot/dts/ralink/mt7621-gnubee-gb-pc1.dts
+++ b/arch/mips/boot/dts/ralink/mt7621-gnubee-gb-pc1.dts
@@ -20,12 +20,6 @@
bootargs = "console=ttyS0,57600";
};
- palmbus: palmbus@1e000000 {
- i2c@900 {
- status = "okay";
- };
- };
-
gpio-keys {
compatible = "gpio-keys";
@@ -53,7 +47,7 @@
};
};
-&sdhci {
+&mmc {
status = "okay";
};
diff --git a/arch/mips/boot/dts/ralink/mt7621-gnubee-gb-pc2.dts b/arch/mips/boot/dts/ralink/mt7621-gnubee-gb-pc2.dts
index 34006e667780..7515555388ae 100644
--- a/arch/mips/boot/dts/ralink/mt7621-gnubee-gb-pc2.dts
+++ b/arch/mips/boot/dts/ralink/mt7621-gnubee-gb-pc2.dts
@@ -20,12 +20,6 @@
bootargs = "console=ttyS0,57600";
};
- palmbus: palmbus@1e000000 {
- i2c@900 {
- status = "okay";
- };
- };
-
gpio-keys {
compatible = "gpio-keys";
@@ -35,9 +29,45 @@
linux,code = ;
};
};
+
+ gpio-leds {
+ compatible = "gpio-leds";
+
+ ethblack-green {
+ label = "green:ethblack";
+ gpios = <&gpio 3 GPIO_ACTIVE_LOW>;
+ };
+
+ ethblue-green {
+ label = "green:ethblue";
+ gpios = <&gpio 4 GPIO_ACTIVE_LOW>;
+ };
+
+ ethyellow-green {
+ label = "green:ethyellow";
+ gpios = <&gpio 15 GPIO_ACTIVE_LOW>;
+ };
+
+ ethyellow-orange {
+ label = "orange:ethyellow";
+ gpios = <&gpio 13 GPIO_ACTIVE_LOW>;
+ };
+
+ power {
+ label = "green:power";
+ gpios = <&gpio 6 GPIO_ACTIVE_LOW>;
+ linux,default-trigger = "default-on";
+ };
+
+ system {
+ label = "green:system";
+ gpios = <&gpio 8 GPIO_ACTIVE_LOW>;
+ linux,default-trigger = "disk-activity";
+ };
+ };
};
-&sdhci {
+&mmc {
status = "okay";
};
@@ -83,12 +113,12 @@
&gmac1 {
status = "okay";
- phy-handle = <ðphy7>;
+ phy-handle = <ðphy5>;
};
&mdio {
- ethphy7: ethernet-phy@7 {
- reg = <7>;
+ ethphy5: ethernet-phy@5 {
+ reg = <5>;
phy-mode = "rgmii-rxid";
};
};
diff --git a/arch/mips/boot/dts/ralink/mt7621.dtsi b/arch/mips/boot/dts/ralink/mt7621.dtsi
index ee46ace0bcc1..f3f4c1f26e01 100644
--- a/arch/mips/boot/dts/ralink/mt7621.dtsi
+++ b/arch/mips/boot/dts/ralink/mt7621.dtsi
@@ -33,11 +33,6 @@
compatible = "mti,cpu-interrupt-controller";
};
- aliases {
- serial0 = &uartlite;
- };
-
-
mmc_fixed_3v3: regulator-3v3 {
compatible = "regulator-fixed";
regulator-name = "mmc_power";
@@ -110,17 +105,16 @@
pinctrl-0 = <&i2c_pins>;
};
- memc: syscon@5000 {
+ memc: memory-controller@5000 {
compatible = "mediatek,mt7621-memc", "syscon";
reg = <0x5000 0x1000>;
};
- uartlite: uartlite@c00 {
+ serial0: serial@c00 {
compatible = "ns16550a";
reg = <0xc00 0x100>;
clocks = <&sysc MT7621_CLK_UART1>;
- clock-names = "uart1";
interrupt-parent = <&gic>;
interrupts = ;
@@ -236,7 +230,7 @@
};
};
- sdhci: sdhci@1e130000 {
+ mmc: mmc@1e130000 {
status = "disabled";
compatible = "mediatek,mt7620-mmc";
@@ -262,8 +256,8 @@
interrupts = ;
};
- xhci: xhci@1e1c0000 {
- compatible = "mediatek,mt8173-xhci";
+ usb: usb@1e1c0000 {
+ compatible = "mediatek,mt8173-xhci", "mediatek,mtk-xhci";
reg = <0x1e1c0000 0x1000
0x1e1d0700 0x0100>;
reg-names = "mac", "ippc";
@@ -338,23 +332,22 @@
gmac1: mac@1 {
compatible = "mediatek,eth-mac";
reg = <1>;
- status = "off";
- phy-mode = "rgmii-rxid";
+ status = "disabled";
+ phy-mode = "rgmii";
};
mdio: mdio-bus {
#address-cells = <1>;
#size-cells = <0>;
- switch0: switch0@0 {
+ switch0: switch@1f {
compatible = "mediatek,mt7621";
- reg = <0>;
+ reg = <0x1f>;
mediatek,mcm;
resets = <&sysc MT7621_RST_MCM>;
reset-names = "mcm";
interrupt-controller;
#interrupt-cells = <1>;
- interrupt-parent = <&gic>;
interrupts = ;
ports {
@@ -362,31 +355,31 @@
#size-cells = <0>;
port@0 {
- status = "off";
+ status = "disabled";
reg = <0>;
label = "lan0";
};
port@1 {
- status = "off";
+ status = "disabled";
reg = <1>;
label = "lan1";
};
port@2 {
- status = "off";
+ status = "disabled";
reg = <2>;
label = "lan2";
};
port@3 {
- status = "off";
+ status = "disabled";
reg = <3>;
label = "lan3";
};
port@4 {
- status = "off";
+ status = "disabled";
reg = <4>;
label = "lan4";
};
diff --git a/arch/mips/configs/decstation_64_defconfig b/arch/mips/configs/decstation_64_defconfig
index 4f74c4bde9f6..49ec1575234e 100644
--- a/arch/mips/configs/decstation_64_defconfig
+++ b/arch/mips/configs/decstation_64_defconfig
@@ -49,8 +49,6 @@ CONFIG_IPV6_SUBTREES=y
CONFIG_NETWORK_SECMARK=y
CONFIG_IP_SCTP=m
CONFIG_VLAN_8021Q=m
-CONFIG_DECNET=m
-CONFIG_DECNET_ROUTER=y
# CONFIG_WIRELESS is not set
# CONFIG_UEVENT_HELPER is not set
# CONFIG_FW_LOADER is not set
diff --git a/arch/mips/configs/decstation_defconfig b/arch/mips/configs/decstation_defconfig
index f0d0546ea1e5..5cec4c096e2c 100644
--- a/arch/mips/configs/decstation_defconfig
+++ b/arch/mips/configs/decstation_defconfig
@@ -45,8 +45,6 @@ CONFIG_IPV6_SUBTREES=y
CONFIG_NETWORK_SECMARK=y
CONFIG_IP_SCTP=m
CONFIG_VLAN_8021Q=m
-CONFIG_DECNET=m
-CONFIG_DECNET_ROUTER=y
# CONFIG_WIRELESS is not set
# CONFIG_UEVENT_HELPER is not set
# CONFIG_FW_LOADER is not set
diff --git a/arch/mips/configs/decstation_r4k_defconfig b/arch/mips/configs/decstation_r4k_defconfig
index df5ff9ddf8f4..af37e26d9b5b 100644
--- a/arch/mips/configs/decstation_r4k_defconfig
+++ b/arch/mips/configs/decstation_r4k_defconfig
@@ -44,8 +44,6 @@ CONFIG_IPV6_SUBTREES=y
CONFIG_NETWORK_SECMARK=y
CONFIG_IP_SCTP=m
CONFIG_VLAN_8021Q=m
-CONFIG_DECNET=m
-CONFIG_DECNET_ROUTER=y
# CONFIG_WIRELESS is not set
# CONFIG_UEVENT_HELPER is not set
# CONFIG_FW_LOADER is not set
diff --git a/arch/mips/configs/gpr_defconfig b/arch/mips/configs/gpr_defconfig
index 7cd321b47d01..eb755650f821 100644
--- a/arch/mips/configs/gpr_defconfig
+++ b/arch/mips/configs/gpr_defconfig
@@ -66,7 +66,6 @@ CONFIG_IP_NF_RAW=m
CONFIG_IP_NF_ARPTABLES=m
CONFIG_IP_NF_ARPFILTER=m
CONFIG_IP_NF_ARP_MANGLE=m
-CONFIG_DECNET_NF_GRABULATOR=m
CONFIG_BRIDGE_NF_EBTABLES=m
CONFIG_BRIDGE_EBT_BROUTE=m
CONFIG_BRIDGE_EBT_T_FILTER=m
@@ -96,7 +95,6 @@ CONFIG_ATM_MPOA=m
CONFIG_ATM_BR2684=m
CONFIG_BRIDGE=m
CONFIG_VLAN_8021Q=m
-CONFIG_DECNET=m
CONFIG_LLC2=m
CONFIG_ATALK=m
CONFIG_DEV_APPLETALK=m
diff --git a/arch/mips/configs/mtx1_defconfig b/arch/mips/configs/mtx1_defconfig
index efbfaa539938..edf9634aa8ee 100644
--- a/arch/mips/configs/mtx1_defconfig
+++ b/arch/mips/configs/mtx1_defconfig
@@ -112,7 +112,6 @@ CONFIG_IP6_NF_FILTER=m
CONFIG_IP6_NF_TARGET_REJECT=m
CONFIG_IP6_NF_MANGLE=m
CONFIG_IP6_NF_RAW=m
-CONFIG_DECNET_NF_GRABULATOR=m
CONFIG_BRIDGE_NF_EBTABLES=m
CONFIG_BRIDGE_EBT_BROUTE=m
CONFIG_BRIDGE_EBT_T_FILTER=m
@@ -142,7 +141,6 @@ CONFIG_ATM_MPOA=m
CONFIG_ATM_BR2684=m
CONFIG_BRIDGE=m
CONFIG_VLAN_8021Q=m
-CONFIG_DECNET=m
CONFIG_LLC2=m
CONFIG_ATALK=m
CONFIG_DEV_APPLETALK=m
diff --git a/arch/mips/configs/rm200_defconfig b/arch/mips/configs/rm200_defconfig
index d6981855221b..9932a593e3c3 100644
--- a/arch/mips/configs/rm200_defconfig
+++ b/arch/mips/configs/rm200_defconfig
@@ -112,7 +112,6 @@ CONFIG_IP6_NF_FILTER=m
CONFIG_IP6_NF_TARGET_REJECT=m
CONFIG_IP6_NF_MANGLE=m
CONFIG_IP6_NF_RAW=m
-CONFIG_DECNET_NF_GRABULATOR=m
CONFIG_BRIDGE_NF_EBTABLES=m
CONFIG_BRIDGE_EBT_BROUTE=m
CONFIG_BRIDGE_EBT_T_FILTER=m
@@ -133,7 +132,6 @@ CONFIG_BRIDGE_EBT_REDIRECT=m
CONFIG_BRIDGE_EBT_SNAT=m
CONFIG_BRIDGE_EBT_LOG=m
CONFIG_BRIDGE=m
-CONFIG_DECNET=m
CONFIG_NET_SCHED=y
CONFIG_NET_SCH_CBQ=m
CONFIG_NET_SCH_HTB=m
diff --git a/arch/mips/net/bpf_jit_comp32.c b/arch/mips/net/bpf_jit_comp32.c
index 83c975d5cca2..ace5db3fbd17 100644
--- a/arch/mips/net/bpf_jit_comp32.c
+++ b/arch/mips/net/bpf_jit_comp32.c
@@ -1376,12 +1376,20 @@ void build_prologue(struct jit_context *ctx)
const u8 *fp = bpf2mips32[BPF_REG_FP];
int stack, saved, locals, reserved;
+ /*
+ * In the unlikely event that the TCC limit is raised to more
+ * than 16 bits, it is clamped to the maximum value allowed for
+ * the generated code (0xffff). It is better to fail to compile
+ * instead of degrading gracefully.
+ */
+ BUILD_BUG_ON(MAX_TAIL_CALL_CNT > 0xffff);
+
/*
* The first two instructions initialize TCC in the reserved (for us)
* 16-byte area in the parent's stack frame. On a tail call, the
* calling function jumps into the prologue after these instructions.
*/
- emit(ctx, ori, MIPS_R_T9, MIPS_R_ZERO, min(MAX_TAIL_CALL_CNT, 0xffff));
+ emit(ctx, ori, MIPS_R_T9, MIPS_R_ZERO, MAX_TAIL_CALL_CNT);
emit(ctx, sw, MIPS_R_T9, 0, MIPS_R_SP);
/*
diff --git a/arch/mips/net/bpf_jit_comp64.c b/arch/mips/net/bpf_jit_comp64.c
index 6475828ffb36..0e7c1bdcf914 100644
--- a/arch/mips/net/bpf_jit_comp64.c
+++ b/arch/mips/net/bpf_jit_comp64.c
@@ -547,12 +547,20 @@ void build_prologue(struct jit_context *ctx)
u8 zx = bpf2mips64[JIT_REG_ZX];
int stack, saved, locals, reserved;
+ /*
+ * In the unlikely event that the TCC limit is raised to more
+ * than 16 bits, it is clamped to the maximum value allowed for
+ * the generated code (0xffff). It is better to fail to compile
+ * instead of degrading gracefully.
+ */
+ BUILD_BUG_ON(MAX_TAIL_CALL_CNT > 0xffff);
+
/*
* The first instruction initializes the tail call count register.
* On a tail call, the calling function jumps into the prologue
* after this instruction.
*/
- emit(ctx, ori, tc, MIPS_R_ZERO, min(MAX_TAIL_CALL_CNT, 0xffff));
+ emit(ctx, ori, tc, MIPS_R_ZERO, MAX_TAIL_CALL_CNT);
/* === Entry-point for tail calls === */
diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig
index 91967824272e..a24f484bfbd2 100644
--- a/arch/powerpc/configs/ppc6xx_defconfig
+++ b/arch/powerpc/configs/ppc6xx_defconfig
@@ -243,8 +243,6 @@ CONFIG_ATM_LANE=m
CONFIG_ATM_BR2684=m
CONFIG_BRIDGE=m
CONFIG_VLAN_8021Q=m
-CONFIG_DECNET=m
-CONFIG_DECNET_ROUTER=y
CONFIG_ATALK=m
CONFIG_DEV_APPLETALK=m
CONFIG_IPDDP=m
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 5b53ecca1e15..088af7c84e5d 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -287,6 +287,7 @@ config X86
select PROC_PID_ARCH_STATUS if PROC_FS
select HAVE_ARCH_NODE_DEV_GROUP if X86_SGX
imply IMA_SECURE_AND_OR_TRUSTED_BOOT if EFI
+ select HAVE_DYNAMIC_FTRACE_NO_PATCHABLE
config INSTRUCTION_DECODER
def_bool y
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 4922517ddb0d..99620428ad78 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -664,7 +664,7 @@ static void emit_mov_imm64(u8 **pprog, u32 dst_reg,
*/
emit_mov_imm32(&prog, false, dst_reg, imm32_lo);
} else {
- /* movabsq %rax, imm64 */
+ /* movabsq rax, imm64 */
EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg));
EMIT(imm32_lo, 4);
EMIT(imm32_hi, 4);
@@ -1753,34 +1753,60 @@ emit_jmp:
static void save_regs(const struct btf_func_model *m, u8 **prog, int nr_args,
int stack_size)
{
- int i;
+ int i, j, arg_size, nr_regs;
/* Store function arguments to stack.
* For a function that accepts two pointers the sequence will be:
* mov QWORD PTR [rbp-0x10],rdi
* mov QWORD PTR [rbp-0x8],rsi
*/
- for (i = 0; i < min(nr_args, 6); i++)
- emit_stx(prog, bytes_to_bpf_size(m->arg_size[i]),
- BPF_REG_FP,
- i == 5 ? X86_REG_R9 : BPF_REG_1 + i,
- -(stack_size - i * 8));
+ for (i = 0, j = 0; i < min(nr_args, 6); i++) {
+ if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG) {
+ nr_regs = (m->arg_size[i] + 7) / 8;
+ arg_size = 8;
+ } else {
+ nr_regs = 1;
+ arg_size = m->arg_size[i];
+ }
+
+ while (nr_regs) {
+ emit_stx(prog, bytes_to_bpf_size(arg_size),
+ BPF_REG_FP,
+ j == 5 ? X86_REG_R9 : BPF_REG_1 + j,
+ -(stack_size - j * 8));
+ nr_regs--;
+ j++;
+ }
+ }
}
static void restore_regs(const struct btf_func_model *m, u8 **prog, int nr_args,
int stack_size)
{
- int i;
+ int i, j, arg_size, nr_regs;
/* Restore function arguments from stack.
* For a function that accepts two pointers the sequence will be:
* EMIT4(0x48, 0x8B, 0x7D, 0xF0); mov rdi,QWORD PTR [rbp-0x10]
* EMIT4(0x48, 0x8B, 0x75, 0xF8); mov rsi,QWORD PTR [rbp-0x8]
*/
- for (i = 0; i < min(nr_args, 6); i++)
- emit_ldx(prog, bytes_to_bpf_size(m->arg_size[i]),
- i == 5 ? X86_REG_R9 : BPF_REG_1 + i,
- BPF_REG_FP,
- -(stack_size - i * 8));
+ for (i = 0, j = 0; i < min(nr_args, 6); i++) {
+ if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG) {
+ nr_regs = (m->arg_size[i] + 7) / 8;
+ arg_size = 8;
+ } else {
+ nr_regs = 1;
+ arg_size = m->arg_size[i];
+ }
+
+ while (nr_regs) {
+ emit_ldx(prog, bytes_to_bpf_size(arg_size),
+ j == 5 ? X86_REG_R9 : BPF_REG_1 + j,
+ BPF_REG_FP,
+ -(stack_size - j * 8));
+ nr_regs--;
+ j++;
+ }
+ }
}
static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
@@ -1812,6 +1838,9 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
if (p->aux->sleepable) {
enter = __bpf_prog_enter_sleepable;
exit = __bpf_prog_exit_sleepable;
+ } else if (p->type == BPF_PROG_TYPE_STRUCT_OPS) {
+ enter = __bpf_prog_enter_struct_ops;
+ exit = __bpf_prog_exit_struct_ops;
} else if (p->expected_attach_type == BPF_LSM_CGROUP) {
enter = __bpf_prog_enter_lsm_cgroup;
exit = __bpf_prog_exit_lsm_cgroup;
@@ -2015,13 +2044,14 @@ static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
const struct btf_func_model *m, u32 flags,
struct bpf_tramp_links *tlinks,
- void *orig_call)
+ void *func_addr)
{
- int ret, i, nr_args = m->nr_args;
+ int ret, i, nr_args = m->nr_args, extra_nregs = 0;
int regs_off, ip_off, args_off, stack_size = nr_args * 8, run_ctx_off;
struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY];
struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT];
struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
+ void *orig_call = func_addr;
u8 **branches = NULL;
u8 *prog;
bool save_ret;
@@ -2030,6 +2060,14 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
if (nr_args > 6)
return -ENOTSUPP;
+ for (i = 0; i < MAX_BPF_FUNC_ARGS; i++) {
+ if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG)
+ extra_nregs += (m->arg_size[i] + 7) / 8 - 1;
+ }
+ if (nr_args + extra_nregs > 6)
+ return -ENOTSUPP;
+ stack_size += extra_nregs * 8;
+
/* Generated trampoline stack layout:
*
* RBP + 8 [ return address ]
@@ -2042,7 +2080,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
* [ ... ]
* RBP - regs_off [ reg_arg1 ] program's ctx pointer
*
- * RBP - args_off [ args count ] always
+ * RBP - args_off [ arg regs count ] always
*
* RBP - ip_off [ traced function ] BPF_TRAMP_F_IP_ARG flag
*
@@ -2085,21 +2123,19 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
EMIT4(0x48, 0x83, 0xEC, stack_size); /* sub rsp, stack_size */
EMIT1(0x53); /* push rbx */
- /* Store number of arguments of the traced function:
- * mov rax, nr_args
+ /* Store number of argument registers of the traced function:
+ * mov rax, nr_args + extra_nregs
* mov QWORD PTR [rbp - args_off], rax
*/
- emit_mov_imm64(&prog, BPF_REG_0, 0, (u32) nr_args);
+ emit_mov_imm64(&prog, BPF_REG_0, 0, (u32) nr_args + extra_nregs);
emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -args_off);
if (flags & BPF_TRAMP_F_IP_ARG) {
/* Store IP address of the traced function:
- * mov rax, QWORD PTR [rbp + 8]
- * sub rax, X86_PATCH_SIZE
+ * movabsq rax, func_addr
* mov QWORD PTR [rbp - ip_off], rax
*/
- emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, 8);
- EMIT4(0x48, 0x83, 0xe8, X86_PATCH_SIZE);
+ emit_mov_imm64(&prog, BPF_REG_0, (long) func_addr >> 32, (u32) (long) func_addr);
emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -ip_off);
}
@@ -2211,7 +2247,7 @@ cleanup:
return ret;
}
-static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs)
+static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs, u8 *image, u8 *buf)
{
u8 *jg_reloc, *prog = *pprog;
int pivot, err, jg_bytes = 1;
@@ -2227,12 +2263,12 @@ static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs)
EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3),
progs[a]);
err = emit_cond_near_jump(&prog, /* je func */
- (void *)progs[a], prog,
+ (void *)progs[a], image + (prog - buf),
X86_JE);
if (err)
return err;
- emit_indirect_jump(&prog, 2 /* rdx */, prog);
+ emit_indirect_jump(&prog, 2 /* rdx */, image + (prog - buf));
*pprog = prog;
return 0;
@@ -2257,7 +2293,7 @@ static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs)
jg_reloc = prog;
err = emit_bpf_dispatcher(&prog, a, a + pivot, /* emit lower_part */
- progs);
+ progs, image, buf);
if (err)
return err;
@@ -2271,7 +2307,7 @@ static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs)
emit_code(jg_reloc - jg_bytes, jg_offset, jg_bytes);
err = emit_bpf_dispatcher(&prog, a + pivot + 1, /* emit upper_part */
- b, progs);
+ b, progs, image, buf);
if (err)
return err;
@@ -2291,12 +2327,12 @@ static int cmp_ips(const void *a, const void *b)
return 0;
}
-int arch_prepare_bpf_dispatcher(void *image, s64 *funcs, int num_funcs)
+int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_funcs)
{
- u8 *prog = image;
+ u8 *prog = buf;
sort(funcs, num_funcs, sizeof(funcs[0]), cmp_ips, NULL);
- return emit_bpf_dispatcher(&prog, 0, num_funcs - 1, funcs);
+ return emit_bpf_dispatcher(&prog, 0, num_funcs - 1, funcs, image, buf);
}
struct x64_jit_data {
diff --git a/drivers/bcma/driver_mips.c b/drivers/bcma/driver_mips.c
index 12aca34e8db0..4f01e6b17bb9 100644
--- a/drivers/bcma/driver_mips.c
+++ b/drivers/bcma/driver_mips.c
@@ -30,7 +30,7 @@ enum bcma_boot_dev {
BCMA_BOOT_DEV_NAND,
};
-/* The 47162a0 hangs when reading MIPS DMP registers registers */
+/* The 47162a0 hangs when reading MIPS DMP registers */
static inline bool bcma_core_mips_bcm47162a0_quirk(struct bcma_device *dev)
{
return dev->bus->chipinfo.id == BCMA_CHIP_ID_BCM47162 &&
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 2a709daefbc4..6cec9ce23fd3 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -2322,6 +2322,7 @@ static struct genl_family nbd_genl_family __ro_after_init = {
.module = THIS_MODULE,
.small_ops = nbd_connect_genl_ops,
.n_small_ops = ARRAY_SIZE(nbd_connect_genl_ops),
+ .resv_start_op = NBD_CMD_STATUS + 1,
.maxattr = NBD_ATTR_MAX,
.policy = nbd_attr_policy,
.mcgrps = nbd_mcast_grps,
diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
index 818681c89db8..a657e9a3e96a 100644
--- a/drivers/bluetooth/btintel.c
+++ b/drivers/bluetooth/btintel.c
@@ -449,6 +449,7 @@ static int btintel_version_info_tlv(struct hci_dev *hdev,
case 0x17: /* TyP */
case 0x18: /* Slr */
case 0x19: /* Slr-F */
+ case 0x1b: /* Mgr */
break;
default:
bt_dev_err(hdev, "Unsupported Intel hardware variant (0x%x)",
@@ -2330,6 +2331,7 @@ static void btintel_set_msft_opcode(struct hci_dev *hdev, u8 hw_variant)
case 0x17:
case 0x18:
case 0x19:
+ case 0x1b:
hci_set_msft_opcode(hdev, 0xFC1E);
break;
default:
@@ -2439,15 +2441,20 @@ static int btintel_setup_combined(struct hci_dev *hdev)
INTEL_ROM_LEGACY_NO_WBS_SUPPORT))
set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
&hdev->quirks);
+ if (ver.hw_variant == 0x08 && ver.fw_variant == 0x22)
+ set_bit(HCI_QUIRK_VALID_LE_STATES,
+ &hdev->quirks);
err = btintel_legacy_rom_setup(hdev, &ver);
break;
case 0x0b: /* SfP */
- case 0x0c: /* WsP */
case 0x11: /* JfP */
case 0x12: /* ThP */
case 0x13: /* HrP */
case 0x14: /* CcP */
+ set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks);
+ fallthrough;
+ case 0x0c: /* WsP */
/* Apply the device specific HCI quirks
*
* All Legacy bootloader devices support WBS
@@ -2455,11 +2462,6 @@ static int btintel_setup_combined(struct hci_dev *hdev)
set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
&hdev->quirks);
- /* Valid LE States quirk for JfP/ThP familiy */
- if (ver.hw_variant == 0x11 || ver.hw_variant == 0x12)
- set_bit(HCI_QUIRK_VALID_LE_STATES,
- &hdev->quirks);
-
/* Setup MSFT Extension support */
btintel_set_msft_opcode(hdev, ver.hw_variant);
@@ -2530,9 +2532,8 @@ static int btintel_setup_combined(struct hci_dev *hdev)
*/
set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);
- /* Valid LE States quirk for JfP/ThP familiy */
- if (ver.hw_variant == 0x11 || ver.hw_variant == 0x12)
- set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks);
+ /* Set Valid LE States quirk */
+ set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks);
/* Setup MSFT Extension support */
btintel_set_msft_opcode(hdev, ver.hw_variant);
@@ -2542,6 +2543,7 @@ static int btintel_setup_combined(struct hci_dev *hdev)
case 0x17:
case 0x18:
case 0x19:
+ case 0x1b:
/* Display version information of TLV type */
btintel_version_info_tlv(hdev, &ver_tlv);
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 15caa6469538..271963805a38 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -426,6 +426,8 @@ static const struct usb_device_id blacklist_table[] = {
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x04ca, 0x4006), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0cb8, 0xc549), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
/* Realtek 8852CE Bluetooth devices */
{ USB_DEVICE(0x04ca, 0x4007), .driver_info = BTUSB_REALTEK |
@@ -438,6 +440,8 @@ static const struct usb_device_id blacklist_table[] = {
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3586), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x13d3, 0x3592), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
/* Realtek Bluetooth devices */
{ USB_VENDOR_AND_INTERFACE_INFO(0x0bda, 0xe0, 0x01, 0x01),
@@ -466,6 +470,9 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0489, 0xe0c8), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
+ { USB_DEVICE(0x0489, 0xe0e0), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH |
+ BTUSB_VALID_LE_STATES },
{ USB_DEVICE(0x04ca, 0x3802), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
@@ -478,9 +485,18 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x13d3, 0x3567), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
+ { USB_DEVICE(0x13d3, 0x3578), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH |
+ BTUSB_VALID_LE_STATES },
+ { USB_DEVICE(0x13d3, 0x3583), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH |
+ BTUSB_VALID_LE_STATES },
{ USB_DEVICE(0x0489, 0xe0cd), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
+ { USB_DEVICE(0x0e8d, 0x0608), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH |
+ BTUSB_VALID_LE_STATES },
/* MediaTek MT7922A Bluetooth devices */
{ USB_DEVICE(0x0489, 0xe0d8), .driver_info = BTUSB_MEDIATEK |
@@ -516,19 +532,17 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0bda, 0xb009), .driver_info = BTUSB_REALTEK },
{ USB_DEVICE(0x2ff8, 0xb011), .driver_info = BTUSB_REALTEK },
- /* Additional Realtek 8761B Bluetooth devices */
+ /* Additional Realtek 8761BUV Bluetooth devices */
{ USB_DEVICE(0x2357, 0x0604), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
-
- /* Additional Realtek 8761BU Bluetooth devices */
{ USB_DEVICE(0x0b05, 0x190e), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x2550, 0x8761), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
-
- /* Additional Realtek 8761BUV Bluetooth devices */
{ USB_DEVICE(0x0bda, 0x8771), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x7392, 0xc611), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
/* Additional Realtek 8821AE Bluetooth devices */
{ USB_DEVICE(0x0b05, 0x17dc), .driver_info = BTUSB_REALTEK },
@@ -2477,15 +2491,29 @@ static int btusb_mtk_hci_wmt_sync(struct hci_dev *hdev,
set_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags);
+ /* WMT cmd/event doesn't follow the generic HCI cmd/event handling;
+ * the control pipe needs to be polled constantly until the host receives
+ * the WMT event. Thus, we should specifically acquire a PM counter on
+ * the USB to prevent the interface from being auto-suspended while a
+ * WMT cmd/event is in progress.
+ */
+ err = usb_autopm_get_interface(data->intf);
+ if (err < 0)
+ goto err_free_wc;
+
err = __hci_cmd_send(hdev, 0xfc6f, hlen, wc);
if (err < 0) {
clear_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags);
+ usb_autopm_put_interface(data->intf);
goto err_free_wc;
}
/* Submit control IN URB on demand to process the WMT event */
err = btusb_mtk_submit_wmt_recv_urb(hdev);
+
+ usb_autopm_put_interface(data->intf);
+
if (err < 0)
goto err_free_wc;
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index f537673ede17..865112e96ff9 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -493,6 +493,11 @@ static int hci_uart_tty_open(struct tty_struct *tty)
BT_ERR("Can't allocate control structure");
return -ENFILE;
}
+ if (percpu_init_rwsem(&hu->proto_lock)) {
+ BT_ERR("Can't allocate semaphore structure");
+ kfree(hu);
+ return -ENOMEM;
+ }
tty->disc_data = hu;
hu->tty = tty;
@@ -505,8 +510,6 @@ static int hci_uart_tty_open(struct tty_struct *tty)
INIT_WORK(&hu->init_ready, hci_uart_init_work);
INIT_WORK(&hu->write_work, hci_uart_write_work);
- percpu_init_rwsem(&hu->proto_lock);
-
/* Flush any pending characters in the driver */
tty_driver_flush_buffer(tty);
diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c
index c0e5f42ec6b7..f16fd79bc02b 100644
--- a/drivers/bluetooth/hci_serdev.c
+++ b/drivers/bluetooth/hci_serdev.c
@@ -310,11 +310,12 @@ int hci_uart_register_device(struct hci_uart *hu,
serdev_device_set_client_ops(hu->serdev, &hci_serdev_client_ops);
+ if (percpu_init_rwsem(&hu->proto_lock))
+ return -ENOMEM;
+
err = serdev_device_open(hu->serdev);
if (err)
- return err;
-
- percpu_init_rwsem(&hu->proto_lock);
+ goto err_rwsem;
err = p->open(hu);
if (err)
@@ -389,6 +390,8 @@ err_alloc:
p->close(hu);
err_open:
serdev_device_close(hu->serdev);
+err_rwsem:
+ percpu_free_rwsem(&hu->proto_lock);
return err;
}
EXPORT_SYMBOL_GPL(hci_uart_register_device);
@@ -410,5 +413,6 @@ void hci_uart_unregister_device(struct hci_uart *hu)
clear_bit(HCI_UART_PROTO_READY, &hu->flags);
serdev_device_close(hu->serdev);
}
+ percpu_free_rwsem(&hu->proto_lock);
}
EXPORT_SYMBOL_GPL(hci_uart_unregister_device);
diff --git a/drivers/bus/mhi/host/pci_generic.c b/drivers/bus/mhi/host/pci_generic.c
index 9e545f2a5a26..fa2246da63c1 100644
--- a/drivers/bus/mhi/host/pci_generic.c
+++ b/drivers/bus/mhi/host/pci_generic.c
@@ -507,6 +507,8 @@ static const struct mhi_channel_config mhi_telit_fn990_channels[] = {
MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0),
MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0),
MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0),
+ MHI_CHANNEL_CONFIG_UL(92, "DUN2", 32, 1),
+ MHI_CHANNEL_CONFIG_DL(93, "DUN2", 32, 1),
MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2),
MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3),
};
diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
index d1f652802181..ff5cabe70a2b 100644
--- a/drivers/firmware/xilinx/zynqmp.c
+++ b/drivers/firmware/xilinx/zynqmp.c
@@ -1311,6 +1311,37 @@ int zynqmp_pm_get_feature_config(enum pm_feature_config_id id,
id, 0, payload);
}
+/**
+ * zynqmp_pm_set_sd_config - PM call to set value of SD config registers
+ * @node: SD node ID
+ * @config: The config type of SD registers
+ * @value: Value to be set
+ *
+ * Return: Returns 0 on success or error value on failure.
+ */
+int zynqmp_pm_set_sd_config(u32 node, enum pm_sd_config_type config, u32 value)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, node, IOCTL_SET_SD_CONFIG,
+ config, value, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_set_sd_config);
+
+/**
+ * zynqmp_pm_set_gem_config - PM call to set value of GEM config registers
+ * @node: GEM node ID
+ * @config: The config type of GEM registers
+ * @value: Value to be set
+ *
+ * Return: Returns 0 on success or error value on failure.
+ */
+int zynqmp_pm_set_gem_config(u32 node, enum pm_gem_config_type config,
+ u32 value)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, node, IOCTL_SET_GEM_CONFIG,
+ config, value, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_set_gem_config);
+
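A hedged usage sketch of the new SD-config call (the wrapper below is illustrative; only zynqmp_pm_set_sd_config() and its argument types come from this patch):

	static int example_apply_sd_config(struct device *dev, u32 node,
					   enum pm_sd_config_type config, u32 value)
	{
		int ret;

		/* forward the request to the platform firmware */
		ret = zynqmp_pm_set_sd_config(node, config, value);
		if (ret)
			dev_warn(dev, "failed to set SD config: %d\n", ret);

		return ret;
	}
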
/**
* struct zynqmp_pm_shutdown_scope - Struct for shutdown scope
* @subtype: Shutdown subtype
diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
index b4dc52392275..9c8a7b206dcf 100644
--- a/drivers/infiniband/hw/mlx5/mad.c
+++ b/drivers/infiniband/hw/mlx5/mad.c
@@ -147,6 +147,28 @@ static void pma_cnt_assign(struct ib_pma_portcounters *pma_cnt,
vl_15_dropped);
}
+static int query_ib_ppcnt(struct mlx5_core_dev *dev, u8 port_num, void *out,
+ size_t sz)
+{
+ u32 *in;
+ int err;
+
+ in = kvzalloc(sz, GFP_KERNEL);
+ if (!in) {
+ err = -ENOMEM;
+ return err;
+ }
+
+ MLX5_SET(ppcnt_reg, in, local_port, port_num);
+
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_INFINIBAND_PORT_COUNTERS_GROUP);
+ err = mlx5_core_access_reg(dev, in, sz, out,
+ sz, MLX5_REG_PPCNT, 0, 0);
+
+ kvfree(in);
+ return err;
+}
+
static int process_pma_cmd(struct mlx5_ib_dev *dev, u32 port_num,
const struct ib_mad *in_mad, struct ib_mad *out_mad)
{
@@ -208,8 +230,7 @@ static int process_pma_cmd(struct mlx5_ib_dev *dev, u32 port_num,
goto done;
}
- err = mlx5_core_query_ib_ppcnt(mdev, mdev_port_num,
- out_cnt, sz);
+ err = query_ib_ppcnt(mdev, mdev_port_num, out_cnt, sz);
if (!err)
pma_cnt_assign(pma_cnt, out_cnt);
}
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index e66bf72f1f04..62338f44a30e 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -1541,6 +1541,18 @@ int mlx5_ib_test_wc(struct mlx5_ib_dev *dev);
static inline bool mlx5_ib_lag_should_assign_affinity(struct mlx5_ib_dev *dev)
{
+ /*
+ * If the driver is in hash mode and the port_select_flow_table_bypass cap
+ * is supported, the driver no longer needs to assign the port affinity by
+ * default; a user who wants a specific port affinity can set it explicitly
+ * through the dedicated API.
+ */
+ if (dev->lag_active &&
+ mlx5_lag_mode_is_hash(dev->mdev) &&
+ MLX5_CAP_PORT_SELECTION(dev->mdev, port_select_flow_table_bypass))
+ return 0;
+
return dev->lag_active ||
(MLX5_CAP_GEN(dev->mdev, num_lag_ports) > 1 &&
MLX5_CAP_GEN(dev->mdev, lag_tx_port_affinity));
diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c
index e69c4bf557bf..ae24848af233 100644
--- a/drivers/isdn/capi/kcapi.c
+++ b/drivers/isdn/capi/kcapi.c
@@ -798,7 +798,7 @@ u16 capi20_get_serial(u32 contr, u8 serial[CAPI_SERIAL_LEN])
u16 ret;
if (contr == 0) {
- strlcpy(serial, driver_serial, CAPI_SERIAL_LEN);
+ strscpy(serial, driver_serial, CAPI_SERIAL_LEN);
return CAPI_NOERROR;
}
@@ -806,7 +806,7 @@ u16 capi20_get_serial(u32 contr, u8 serial[CAPI_SERIAL_LEN])
ctr = get_capi_ctr_by_nr(contr);
if (ctr && ctr->state == CAPI_CTR_RUNNING) {
- strlcpy(serial, ctr->serial, CAPI_SERIAL_LEN);
+ strscpy(serial, ctr->serial, CAPI_SERIAL_LEN);
ret = CAPI_NOERROR;
} else
ret = CAPI_REGNOTINSTALLED;
diff --git a/drivers/isdn/mISDN/l1oip.h b/drivers/isdn/mISDN/l1oip.h
index 7ea10db20e3a..48133d022812 100644
--- a/drivers/isdn/mISDN/l1oip.h
+++ b/drivers/isdn/mISDN/l1oip.h
@@ -59,6 +59,7 @@ struct l1oip {
int bundle; /* bundle channels in one frm */
int codec; /* codec to use for transmis. */
int limit; /* limit number of bchannels */
+ bool shutdown; /* set when the card is being released */
/* timer */
struct timer_list keep_tl;
diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c
index 2c40412466e6..a77195e378b7 100644
--- a/drivers/isdn/mISDN/l1oip_core.c
+++ b/drivers/isdn/mISDN/l1oip_core.c
@@ -275,7 +275,7 @@ l1oip_socket_send(struct l1oip *hc, u8 localcodec, u8 channel, u32 chanmask,
p = frame;
/* restart timer */
- if (time_before(hc->keep_tl.expires, jiffies + 5 * HZ))
+ if (time_before(hc->keep_tl.expires, jiffies + 5 * HZ) && !hc->shutdown)
mod_timer(&hc->keep_tl, jiffies + L1OIP_KEEPALIVE * HZ);
else
hc->keep_tl.expires = jiffies + L1OIP_KEEPALIVE * HZ;
@@ -601,7 +601,9 @@ multiframe:
goto multiframe;
/* restart timer */
- if (time_before(hc->timeout_tl.expires, jiffies + 5 * HZ) || !hc->timeout_on) {
+ if ((time_before(hc->timeout_tl.expires, jiffies + 5 * HZ) ||
+ !hc->timeout_on) &&
+ !hc->shutdown) {
hc->timeout_on = 1;
mod_timer(&hc->timeout_tl, jiffies + L1OIP_TIMEOUT * HZ);
} else /* only adjust timer */
@@ -1232,11 +1234,10 @@ release_card(struct l1oip *hc)
{
int ch;
- if (timer_pending(&hc->keep_tl))
- del_timer(&hc->keep_tl);
+ hc->shutdown = true;
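+ /* the send and receive paths check this flag and no longer re-arm keep_tl/timeout_tl */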
- if (timer_pending(&hc->timeout_tl))
- del_timer(&hc->timeout_tl);
+ del_timer_sync(&hc->keep_tl);
+ del_timer_sync(&hc->timeout_tl);
cancel_work_sync(&hc->workq);
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index abb58ab1a1a4..c3dd1fe8d8c9 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -963,6 +963,27 @@ config MFD_MENF21BMC
This driver can also be built as a module. If so the module
will be called menf21bmc.
+config MFD_OCELOT
+ tristate "Microsemi Ocelot External Control Support"
+ depends on SPI_MASTER
+ select MFD_CORE
+ select REGMAP_SPI
+ help
+ Ocelot is a family of networking chips that support multiple Ethernet
+ and fibre interfaces. In addition to networking, they contain several
+ other functions, including pinctrl, MDIO, and communication with
+ external chips. While some chips have an internal processor capable of
+ running an OS, others don't. All chips can be controlled externally
+ through different interfaces, including SPI, I2C, and PCIe.
+
+ Say yes here to add support for Ocelot chips (VSC7511, VSC7512,
+ VSC7513, VSC7514) controlled externally.
+
+ To compile this driver as a module, choose M here: the module will be
+ called ocelot-soc.
+
+ If unsure, say N.
+
config EZX_PCAP
bool "Motorola EZXPCAP Support"
depends on SPI_MASTER
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 858cacf659d6..0004b7e86220 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -120,6 +120,9 @@ obj-$(CONFIG_MFD_MC13XXX_I2C) += mc13xxx-i2c.o
obj-$(CONFIG_MFD_CORE) += mfd-core.o
+ocelot-soc-objs := ocelot-core.o ocelot-spi.o
+obj-$(CONFIG_MFD_OCELOT) += ocelot-soc.o
+
obj-$(CONFIG_EZX_PCAP) += ezx-pcap.o
obj-$(CONFIG_MFD_CPCAP) += motorola-cpcap.o
diff --git a/drivers/mfd/ocelot-core.c b/drivers/mfd/ocelot-core.c
new file mode 100644
index 000000000000..1816d52c65c5
--- /dev/null
+++ b/drivers/mfd/ocelot-core.c
@@ -0,0 +1,161 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Core driver for the Ocelot chip family.
+ *
+ * The VSC7511, 7512, 7513, and 7514 can be controlled internally via an
+ * on-chip MIPS processor, or externally via SPI, I2C, PCIe. This core driver is
+ * intended to be the bus-agnostic glue between, for example, the SPI bus and
+ * the child devices.
+ *
+ * Copyright 2021-2022 Innovative Advantage Inc.
+ *
+ * Author: Colin Foster
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+#include "ocelot.h"
+
+#define REG_GCB_SOFT_RST 0x0008
+
+#define BIT_SOFT_CHIP_RST BIT(0)
+
+#define VSC7512_MIIM0_RES_START 0x7107009c
+#define VSC7512_MIIM1_RES_START 0x710700c0
+#define VSC7512_MIIM_RES_SIZE 0x024
+
+#define VSC7512_PHY_RES_START 0x710700f0
+#define VSC7512_PHY_RES_SIZE 0x004
+
+#define VSC7512_GPIO_RES_START 0x71070034
+#define VSC7512_GPIO_RES_SIZE 0x06c
+
+#define VSC7512_SIO_CTRL_RES_START 0x710700f8
+#define VSC7512_SIO_CTRL_RES_SIZE 0x100
+
+#define VSC7512_GCB_RST_SLEEP_US 100
+#define VSC7512_GCB_RST_TIMEOUT_US 100000
+
+static int ocelot_gcb_chip_rst_status(struct ocelot_ddata *ddata)
+{
+ int val, err;
+
+ err = regmap_read(ddata->gcb_regmap, REG_GCB_SOFT_RST, &val);
+ if (err)
+ return err;
+
+ return val;
+}
+
+int ocelot_chip_reset(struct device *dev)
+{
+ struct ocelot_ddata *ddata = dev_get_drvdata(dev);
+ int ret, val;
+
+ /*
+ * Reset the entire chip here to put it into a completely known state.
+ * Other drivers may want to reset their own subsystems. The register
+ * self-clears, so a single write is all that is needed; afterwards, poll
+ * until the bit reads back as zero.
+ */
+ ret = regmap_write(ddata->gcb_regmap, REG_GCB_SOFT_RST, BIT_SOFT_CHIP_RST);
+ if (ret)
+ return ret;
+
+ return readx_poll_timeout(ocelot_gcb_chip_rst_status, ddata, val, !val,
+ VSC7512_GCB_RST_SLEEP_US, VSC7512_GCB_RST_TIMEOUT_US);
+}
+EXPORT_SYMBOL_NS(ocelot_chip_reset, MFD_OCELOT);
+
+static const struct resource vsc7512_miim0_resources[] = {
+ DEFINE_RES_REG_NAMED(VSC7512_MIIM0_RES_START, VSC7512_MIIM_RES_SIZE, "gcb_miim0"),
+ DEFINE_RES_REG_NAMED(VSC7512_PHY_RES_START, VSC7512_PHY_RES_SIZE, "gcb_phy"),
+};
+
+static const struct resource vsc7512_miim1_resources[] = {
+ DEFINE_RES_REG_NAMED(VSC7512_MIIM1_RES_START, VSC7512_MIIM_RES_SIZE, "gcb_miim1"),
+};
+
+static const struct resource vsc7512_pinctrl_resources[] = {
+ DEFINE_RES_REG_NAMED(VSC7512_GPIO_RES_START, VSC7512_GPIO_RES_SIZE, "gcb_gpio"),
+};
+
+static const struct resource vsc7512_sgpio_resources[] = {
+ DEFINE_RES_REG_NAMED(VSC7512_SIO_CTRL_RES_START, VSC7512_SIO_CTRL_RES_SIZE, "gcb_sio"),
+};
+
+static const struct mfd_cell vsc7512_devs[] = {
+ {
+ .name = "ocelot-pinctrl",
+ .of_compatible = "mscc,ocelot-pinctrl",
+ .num_resources = ARRAY_SIZE(vsc7512_pinctrl_resources),
+ .resources = vsc7512_pinctrl_resources,
+ }, {
+ .name = "ocelot-sgpio",
+ .of_compatible = "mscc,ocelot-sgpio",
+ .num_resources = ARRAY_SIZE(vsc7512_sgpio_resources),
+ .resources = vsc7512_sgpio_resources,
+ }, {
+ .name = "ocelot-miim0",
+ .of_compatible = "mscc,ocelot-miim",
+ .of_reg = VSC7512_MIIM0_RES_START,
+ .use_of_reg = true,
+ .num_resources = ARRAY_SIZE(vsc7512_miim0_resources),
+ .resources = vsc7512_miim0_resources,
+ }, {
+ .name = "ocelot-miim1",
+ .of_compatible = "mscc,ocelot-miim",
+ .of_reg = VSC7512_MIIM1_RES_START,
+ .use_of_reg = true,
+ .num_resources = ARRAY_SIZE(vsc7512_miim1_resources),
+ .resources = vsc7512_miim1_resources,
+ },
+};
+
+static void ocelot_core_try_add_regmap(struct device *dev,
+ const struct resource *res)
+{
+ if (dev_get_regmap(dev, res->name))
+ return;
+
+ ocelot_spi_init_regmap(dev, res);
+}
+
+static void ocelot_core_try_add_regmaps(struct device *dev,
+ const struct mfd_cell *cell)
+{
+ int i;
+
+ for (i = 0; i < cell->num_resources; i++)
+ ocelot_core_try_add_regmap(dev, &cell->resources[i]);
+}
+
+int ocelot_core_init(struct device *dev)
+{
+ int i, ndevs;
+
+ ndevs = ARRAY_SIZE(vsc7512_devs);
+
+ for (i = 0; i < ndevs; i++)
+ ocelot_core_try_add_regmaps(dev, &vsc7512_devs[i]);
+
+ return devm_mfd_add_devices(dev, PLATFORM_DEVID_AUTO, vsc7512_devs, ndevs, NULL, 0, NULL);
+}
+EXPORT_SYMBOL_NS(ocelot_core_init, MFD_OCELOT);
+
+MODULE_DESCRIPTION("Externally Controlled Ocelot Chip Driver");
+MODULE_AUTHOR("Colin Foster ");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(MFD_OCELOT_SPI);
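How a child cell picks up one of the regmaps created above is outside this patch; a minimal sketch under that assumption (the probe function is hypothetical, only the "gcb_gpio" resource name comes from the code above):

	static int example_child_probe(struct platform_device *pdev)
	{
		struct regmap *map;

		/* the core driver registered named regmaps on the parent device */
		map = dev_get_regmap(pdev->dev.parent, "gcb_gpio");
		if (!map)
			return -ENODEV;

		return 0;
	}
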
diff --git a/drivers/mfd/ocelot-spi.c b/drivers/mfd/ocelot-spi.c
new file mode 100644
index 000000000000..0f097f4829d1
--- /dev/null
+++ b/drivers/mfd/ocelot-spi.c
@@ -0,0 +1,299 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * SPI core driver for the Ocelot chip family.
+ *
+ * This driver handles everything necessary for communication over SPI with
+ * the VSC7511, VSC7512, VSC7513 and VSC7514 chips. Its main jobs are to
+ * prepare the chip's SPI interface for a specific bus speed and for the host
+ * processor's endianness, and to create and distribute regmaps for any
+ * children.
+ *
+ * Copyright 2021-2022 Innovative Advantage Inc.
+ *
+ * Author: Colin Foster
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "ocelot.h"
+
+#define REG_DEV_CPUORG_IF_CTRL 0x0000
+#define REG_DEV_CPUORG_IF_CFGSTAT 0x0004
+
+#define CFGSTAT_IF_NUM_VCORE (0 << 24)
+#define CFGSTAT_IF_NUM_VRAP (1 << 24)
+#define CFGSTAT_IF_NUM_SI (2 << 24)
+#define CFGSTAT_IF_NUM_MIIM (3 << 24)
+
+#define VSC7512_DEVCPU_ORG_RES_START 0x71000000
+#define VSC7512_DEVCPU_ORG_RES_SIZE 0x38
+
+#define VSC7512_CHIP_REGS_RES_START 0x71070000
+#define VSC7512_CHIP_REGS_RES_SIZE 0x14
+
+static const struct resource vsc7512_dev_cpuorg_resource =
+ DEFINE_RES_REG_NAMED(VSC7512_DEVCPU_ORG_RES_START,
+ VSC7512_DEVCPU_ORG_RES_SIZE,
+ "devcpu_org");
+
+static const struct resource vsc7512_gcb_resource =
+ DEFINE_RES_REG_NAMED(VSC7512_CHIP_REGS_RES_START,
+ VSC7512_CHIP_REGS_RES_SIZE,
+ "devcpu_gcb_chip_regs");
+
+static int ocelot_spi_initialize(struct device *dev)
+{
+ struct ocelot_ddata *ddata = dev_get_drvdata(dev);
+ u32 val, check;
+ int err;
+
+ val = OCELOT_SPI_BYTE_ORDER;
+
+ /*
+ * The SPI address must be big-endian, but we want the payload to match
+ * our CPU. These are two bits (0 and 1), but they are repeated such that
+ * the write will be valid from any current configuration. The four
+ * configurations are:
+ *
+ * 0b00: little-endian, MSB first
+ * | 111111 | 22221111 | 33222222 |
+ * | 76543210 | 54321098 | 32109876 | 10987654 |
+ *
+ * 0b01: big-endian, MSB first
+ * | 33222222 | 22221111 | 111111 | |
+ * | 10987654 | 32109876 | 54321098 | 76543210 |
+ *
+ * 0b10: little-endian, LSB first
+ * | 111111 | 11112222 | 22222233 |
+ * | 01234567 | 89012345 | 67890123 | 45678901 |
+ *
+ * 0b11: big-endian, LSB first
+ * | 22222233 | 11112222 | 111111 | |
+ * | 45678901 | 67890123 | 89012345 | 01234567 |
+ */
+ err = regmap_write(ddata->cpuorg_regmap, REG_DEV_CPUORG_IF_CTRL, val);
+ if (err)
+ return err;
+
+ /*
+ * Configure the number of padding bytes inserted between a read request and
+ * the data payload. Some registers have access times of up to 1us, so if the
+ * first payload bit is shifted out too quickly, the read will fail.
+ */
+ val = ddata->spi_padding_bytes;
+ err = regmap_write(ddata->cpuorg_regmap, REG_DEV_CPUORG_IF_CFGSTAT, val);
+ if (err)
+ return err;
+
+ /*
+ * After we write the interface configuration, read it back here. This
+ * will verify several different things. The first is that the number of
+ * padding bytes actually got written correctly. These are found in bits
+ * 0:3.
+ *
+ * The second is that bit 16 is cleared. Bit 16 is IF_CFGSTAT:IF_STAT,
+ * and will be set if the register access is too fast. This would be in
+ * the condition that the number of padding bytes is insufficient for
+ * the SPI bus frequency.
+ *
+ * The last check is for bits 31:24, which define the interface by which
+ * the registers are being accessed. Since we're accessing them via the
+ * serial interface, it must return IF_NUM_SI.
+ */
+ check = val | CFGSTAT_IF_NUM_SI;
+
+ err = regmap_read(ddata->cpuorg_regmap, REG_DEV_CPUORG_IF_CFGSTAT, &val);
+ if (err)
+ return err;
+
+ if (check != val)
+ return -ENODEV;
+
+ return 0;
+}
+
+static const struct regmap_config ocelot_spi_regmap_config = {
+ .reg_bits = 24,
+ .reg_stride = 4,
+ .reg_downshift = 2,
+ .val_bits = 32,
+
+ .write_flag_mask = 0x80,
+
+ .use_single_write = true,
+ .can_multi_write = false,
+
+ .reg_format_endian = REGMAP_ENDIAN_BIG,
+ .val_format_endian = REGMAP_ENDIAN_NATIVE,
+};
+
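+/*
+ * A register read is built from up to three SPI transfers: the 3-byte
+ * big-endian register address, the configured number of dummy/padding
+ * bytes (if any), and finally the value clocked back in. The padding here
+ * matches what ocelot_spi_initialize() programmed into IF_CFGSTAT.
+ */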
+static int ocelot_spi_regmap_bus_read(void *context, const void *reg, size_t reg_size,
+ void *val, size_t val_size)
+{
+ struct spi_transfer xfers[3] = {0};
+ struct device *dev = context;
+ struct ocelot_ddata *ddata;
+ struct spi_device *spi;
+ struct spi_message msg;
+ unsigned int index = 0;
+
+ ddata = dev_get_drvdata(dev);
+ spi = to_spi_device(dev);
+
+ xfers[index].tx_buf = reg;
+ xfers[index].len = reg_size;
+ index++;
+
+ if (ddata->spi_padding_bytes) {
+ xfers[index].len = ddata->spi_padding_bytes;
+ xfers[index].tx_buf = ddata->dummy_buf;
+ xfers[index].dummy_data = 1;
+ index++;
+ }
+
+ xfers[index].rx_buf = val;
+ xfers[index].len = val_size;
+ index++;
+
+ spi_message_init_with_transfers(&msg, xfers, index);
+
+ return spi_sync(spi, &msg);
+}
+
+static int ocelot_spi_regmap_bus_write(void *context, const void *data, size_t count)
+{
+ struct device *dev = context;
+ struct spi_device *spi = to_spi_device(dev);
+
+ return spi_write(spi, data, count);
+}
+
+static const struct regmap_bus ocelot_spi_regmap_bus = {
+ .write = ocelot_spi_regmap_bus_write,
+ .read = ocelot_spi_regmap_bus_read,
+};
+
+struct regmap *ocelot_spi_init_regmap(struct device *dev, const struct resource *res)
+{
+ struct regmap_config regmap_config;
+
+ memcpy(®map_config, &ocelot_spi_regmap_config, sizeof(regmap_config));
+
+ regmap_config.name = res->name;
+ regmap_config.max_register = resource_size(res) - 1;
+ regmap_config.reg_base = res->start;
+
+ return devm_regmap_init(dev, &ocelot_spi_regmap_bus, dev, ®map_config);
+}
+EXPORT_SYMBOL_NS(ocelot_spi_init_regmap, MFD_OCELOT_SPI);
+
+static int ocelot_spi_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct ocelot_ddata *ddata;
+ struct regmap *r;
+ int err;
+
+ ddata = devm_kzalloc(dev, sizeof(*ddata), GFP_KERNEL);
+ if (!ddata)
+ return -ENOMEM;
+
+ spi_set_drvdata(spi, ddata);
+
+ if (spi->max_speed_hz <= 500000) {
+ ddata->spi_padding_bytes = 0;
+ } else {
+ /*
+ * Calculation taken from the manual for IF_CFGSTAT:IF_CFG.
+ * Register access time is up to 1us, so enough padding bytes must be
+ * configured and sent between the read request and the data transmission
+ * for the gap to last at least 1 microsecond.
+ */
+ ddata->spi_padding_bytes = 1 + (spi->max_speed_hz / HZ_PER_MHZ + 2) / 8;
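+ /*
+ * For illustration (bus speeds chosen arbitrarily): a 10 MHz bus gives
+ * 1 + (10 + 2) / 8 = 2 padding bytes, while a 25 MHz bus gives
+ * 1 + (25 + 2) / 8 = 4.
+ */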
+
+ ddata->dummy_buf = devm_kzalloc(dev, ddata->spi_padding_bytes, GFP_KERNEL);
+ if (!ddata->dummy_buf)
+ return -ENOMEM;
+ }
+
+ spi->bits_per_word = 8;
+
+ err = spi_setup(spi);
+ if (err)
+ return dev_err_probe(&spi->dev, err, "Error performing SPI setup\n");
+
+ r = ocelot_spi_init_regmap(dev, &vsc7512_dev_cpuorg_resource);
+ if (IS_ERR(r))
+ return PTR_ERR(r);
+
+ ddata->cpuorg_regmap = r;
+
+ r = ocelot_spi_init_regmap(dev, &vsc7512_gcb_resource);
+ if (IS_ERR(r))
+ return PTR_ERR(r);
+
+ ddata->gcb_regmap = r;
+
+ /*
+ * The chip must be set up for SPI access before it can be initialized
+ * and reset. The same setup is repeated after the chip reset below,
+ * since the reset clears the SPI configuration.
+ */
+ err = ocelot_spi_initialize(dev);
+ if (err)
+ return dev_err_probe(dev, err, "Error initializing SPI bus\n");
+
+ err = ocelot_chip_reset(dev);
+ if (err)
+ return dev_err_probe(dev, err, "Error resetting device\n");
+
+ /*
+ * A chip reset will clear the SPI configuration, so it needs to be done
+ * again before we can access any registers.
+ */
+ err = ocelot_spi_initialize(dev);
+ if (err)
+ return dev_err_probe(dev, err, "Error initializing SPI bus after reset\n");
+
+ err = ocelot_core_init(dev);
+ if (err)
+ return dev_err_probe(dev, err, "Error initializing Ocelot core\n");
+
+ return 0;
+}
+
+static const struct spi_device_id ocelot_spi_ids[] = {
+ { "vsc7512", 0 },
+ { }
+};
+
+static const struct of_device_id ocelot_spi_of_match[] = {
+ { .compatible = "mscc,vsc7512" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ocelot_spi_of_match);
+
+static struct spi_driver ocelot_spi_driver = {
+ .driver = {
+ .name = "ocelot-soc",
+ .of_match_table = ocelot_spi_of_match,
+ },
+ .id_table = ocelot_spi_ids,
+ .probe = ocelot_spi_probe,
+};
+module_spi_driver(ocelot_spi_driver);
+
+MODULE_DESCRIPTION("SPI Controlled Ocelot Chip Driver");
+MODULE_AUTHOR("Colin Foster ");
+MODULE_LICENSE("Dual MIT/GPL");
+MODULE_IMPORT_NS(MFD_OCELOT);
diff --git a/drivers/mfd/ocelot.h b/drivers/mfd/ocelot.h
new file mode 100644
index 000000000000..b8bc2f1486e2
--- /dev/null
+++ b/drivers/mfd/ocelot.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/* Copyright 2021, 2022 Innovative Advantage Inc. */
+
+#ifndef _MFD_OCELOT_H
+#define _MFD_OCELOT_H
+
+#include
+
+struct device;
+struct regmap;
+struct resource;
+
+/**
+ * struct ocelot_ddata - Private data for an external Ocelot chip
+ * @gcb_regmap: General Configuration Block regmap. Used for
+ * operations like chip reset.
+ * @cpuorg_regmap: CPU Device Origin Block regmap. Used for operations
+ * like SPI bus configuration.
+ * @spi_padding_bytes: Number of padding bytes that must be thrown out before
+ * read data gets returned. This is calculated during
+ * initialization based on bus speed.
+ * @dummy_buf: Zero-filled buffer of spi_padding_bytes size. The dummy
+ * bytes that will be sent out between the address and
+ * data of a SPI read operation.
+ */
+struct ocelot_ddata {
+ struct regmap *gcb_regmap;
+ struct regmap *cpuorg_regmap;
+ int spi_padding_bytes;
+ void *dummy_buf;
+};
+
+int ocelot_chip_reset(struct device *dev);
+int ocelot_core_init(struct device *dev);
+
+/* SPI-specific routines that won't be necessary for other interfaces */
+struct regmap *ocelot_spi_init_regmap(struct device *dev,
+ const struct resource *res);
+
+#define OCELOT_SPI_BYTE_ORDER_LE 0x00000000
+#define OCELOT_SPI_BYTE_ORDER_BE 0x81818181
+
+#ifdef __LITTLE_ENDIAN
+#define OCELOT_SPI_BYTE_ORDER OCELOT_SPI_BYTE_ORDER_LE
+#else
+#define OCELOT_SPI_BYTE_ORDER OCELOT_SPI_BYTE_ORDER_BE
+#endif
+
+#endif
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 94c889802566..15d4a38b1351 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -500,6 +500,8 @@ config NET_SB1000
source "drivers/net/phy/Kconfig"
+source "drivers/net/pse-pd/Kconfig"
+
source "drivers/net/can/Kconfig"
source "drivers/net/mctp/Kconfig"
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 3f1192d3c52d..6ce076462dbf 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_NET) += loopback.o
obj-$(CONFIG_NETDEV_LEGACY_INIT) += Space.o
obj-$(CONFIG_NETCONSOLE) += netconsole.o
obj-y += phy/
+obj-y += pse-pd/
obj-y += mdio/
obj-y += pcs/
obj-$(CONFIG_RIONET) += rionet.o
diff --git a/drivers/net/Space.c b/drivers/net/Space.c
index f475eef14390..83214e2e70ab 100644
--- a/drivers/net/Space.c
+++ b/drivers/net/Space.c
@@ -68,7 +68,7 @@ static int netdev_boot_setup_add(char *name, struct ifmap *map)
for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
memset(s[i].name, 0, sizeof(s[i].name));
- strlcpy(s[i].name, name, IFNAMSIZ);
+ strscpy(s[i].name, name, IFNAMSIZ);
memcpy(&s[i].map, map, sizeof(s[i].map));
break;
}
diff --git a/drivers/net/amt.c b/drivers/net/amt.c
index 9a247eb7679c..2d20be6ffb7e 100644
--- a/drivers/net/amt.c
+++ b/drivers/net/amt.c
@@ -2894,8 +2894,7 @@ static void amt_event_work(struct work_struct *work)
amt_event_send_request(amt);
break;
default:
- if (skb)
- kfree_skb(skb);
+ kfree_skb(skb);
break;
}
}
@@ -3033,8 +3032,7 @@ static int amt_dev_stop(struct net_device *dev)
cancel_work_sync(&amt->event_wq);
for (i = 0; i < AMT_MAX_EVENTS; i++) {
skb = amt->events[i].skb;
- if (skb)
- kfree_skb(skb);
+ kfree_skb(skb);
amt->events[i].event = AMT_EVENT_NONE;
amt->events[i].skb = NULL;
}
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 86d42306aa5e..24bb50dfd362 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -5650,7 +5650,7 @@ static int bond_ethtool_get_link_ksettings(struct net_device *bond_dev,
static void bond_ethtool_get_drvinfo(struct net_device *bond_dev,
struct ethtool_drvinfo *drvinfo)
{
- strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d",
BOND_ABI_VERSION);
}
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 9b5a5df23d21..8996bd0a194a 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -47,10 +47,10 @@ static ssize_t bonding_show_bonds(struct class *cls,
/* not enough space for another interface name */
if ((PAGE_SIZE - res) > 10)
res = PAGE_SIZE - 10;
- res += sprintf(buf + res, "++more++ ");
+ res += sysfs_emit_at(buf, res, "++more++ ");
break;
}
- res += sprintf(buf + res, "%s ", bond->dev->name);
+ res += sysfs_emit_at(buf, res, "%s ", bond->dev->name);
}
if (res)
buf[res-1] = '\n'; /* eat the leftover space */
@@ -178,10 +178,10 @@ static ssize_t bonding_show_slaves(struct device *d,
/* not enough space for another interface name */
if ((PAGE_SIZE - res) > 10)
res = PAGE_SIZE - 10;
- res += sprintf(buf + res, "++more++ ");
+ res += sysfs_emit_at(buf, res, "++more++ ");
break;
}
- res += sprintf(buf + res, "%s ", slave->dev->name);
+ res += sysfs_emit_at(buf, res, "%s ", slave->dev->name);
}
rtnl_unlock();
@@ -203,7 +203,7 @@ static ssize_t bonding_show_mode(struct device *d,
val = bond_opt_get_val(BOND_OPT_MODE, BOND_MODE(bond));
- return sprintf(buf, "%s %d\n", val->string, BOND_MODE(bond));
+ return sysfs_emit(buf, "%s %d\n", val->string, BOND_MODE(bond));
}
static DEVICE_ATTR(mode, 0644, bonding_show_mode, bonding_sysfs_store_option);
@@ -217,7 +217,7 @@ static ssize_t bonding_show_xmit_hash(struct device *d,
val = bond_opt_get_val(BOND_OPT_XMIT_HASH, bond->params.xmit_policy);
- return sprintf(buf, "%s %d\n", val->string, bond->params.xmit_policy);
+ return sysfs_emit(buf, "%s %d\n", val->string, bond->params.xmit_policy);
}
static DEVICE_ATTR(xmit_hash_policy, 0644,
bonding_show_xmit_hash, bonding_sysfs_store_option);
@@ -233,7 +233,7 @@ static ssize_t bonding_show_arp_validate(struct device *d,
val = bond_opt_get_val(BOND_OPT_ARP_VALIDATE,
bond->params.arp_validate);
- return sprintf(buf, "%s %d\n", val->string, bond->params.arp_validate);
+ return sysfs_emit(buf, "%s %d\n", val->string, bond->params.arp_validate);
}
static DEVICE_ATTR(arp_validate, 0644, bonding_show_arp_validate,
bonding_sysfs_store_option);
@@ -248,7 +248,7 @@ static ssize_t bonding_show_arp_all_targets(struct device *d,
val = bond_opt_get_val(BOND_OPT_ARP_ALL_TARGETS,
bond->params.arp_all_targets);
- return sprintf(buf, "%s %d\n",
+ return sysfs_emit(buf, "%s %d\n",
val->string, bond->params.arp_all_targets);
}
static DEVICE_ATTR(arp_all_targets, 0644,
@@ -265,7 +265,7 @@ static ssize_t bonding_show_fail_over_mac(struct device *d,
val = bond_opt_get_val(BOND_OPT_FAIL_OVER_MAC,
bond->params.fail_over_mac);
- return sprintf(buf, "%s %d\n", val->string, bond->params.fail_over_mac);
+ return sysfs_emit(buf, "%s %d\n", val->string, bond->params.fail_over_mac);
}
static DEVICE_ATTR(fail_over_mac, 0644,
bonding_show_fail_over_mac, bonding_sysfs_store_option);
@@ -277,7 +277,7 @@ static ssize_t bonding_show_arp_interval(struct device *d,
{
struct bonding *bond = to_bond(d);
- return sprintf(buf, "%d\n", bond->params.arp_interval);
+ return sysfs_emit(buf, "%d\n", bond->params.arp_interval);
}
static DEVICE_ATTR(arp_interval, 0644,
bonding_show_arp_interval, bonding_sysfs_store_option);
@@ -292,8 +292,8 @@ static ssize_t bonding_show_arp_targets(struct device *d,
for (i = 0; i < BOND_MAX_ARP_TARGETS; i++) {
if (bond->params.arp_targets[i])
- res += sprintf(buf + res, "%pI4 ",
- &bond->params.arp_targets[i]);
+ res += sysfs_emit_at(buf, res, "%pI4 ",
+ &bond->params.arp_targets[i]);
}
if (res)
buf[res-1] = '\n'; /* eat the leftover space */
@@ -310,7 +310,7 @@ static ssize_t bonding_show_missed_max(struct device *d,
{
struct bonding *bond = to_bond(d);
- return sprintf(buf, "%u\n", bond->params.missed_max);
+ return sysfs_emit(buf, "%u\n", bond->params.missed_max);
}
static DEVICE_ATTR(arp_missed_max, 0644,
bonding_show_missed_max, bonding_sysfs_store_option);
@@ -322,7 +322,7 @@ static ssize_t bonding_show_downdelay(struct device *d,
{
struct bonding *bond = to_bond(d);
- return sprintf(buf, "%d\n", bond->params.downdelay * bond->params.miimon);
+ return sysfs_emit(buf, "%d\n", bond->params.downdelay * bond->params.miimon);
}
static DEVICE_ATTR(downdelay, 0644,
bonding_show_downdelay, bonding_sysfs_store_option);
@@ -333,7 +333,7 @@ static ssize_t bonding_show_updelay(struct device *d,
{
struct bonding *bond = to_bond(d);
- return sprintf(buf, "%d\n", bond->params.updelay * bond->params.miimon);
+ return sysfs_emit(buf, "%d\n", bond->params.updelay * bond->params.miimon);
}
static DEVICE_ATTR(updelay, 0644,
@@ -345,8 +345,8 @@ static ssize_t bonding_show_peer_notif_delay(struct device *d,
{
struct bonding *bond = to_bond(d);
- return sprintf(buf, "%d\n",
- bond->params.peer_notif_delay * bond->params.miimon);
+ return sysfs_emit(buf, "%d\n",
+ bond->params.peer_notif_delay * bond->params.miimon);
}
static DEVICE_ATTR(peer_notif_delay, 0644,
bonding_show_peer_notif_delay, bonding_sysfs_store_option);
@@ -361,7 +361,7 @@ static ssize_t bonding_show_lacp_active(struct device *d,
val = bond_opt_get_val(BOND_OPT_LACP_ACTIVE, bond->params.lacp_active);
- return sprintf(buf, "%s %d\n", val->string, bond->params.lacp_active);
+ return sysfs_emit(buf, "%s %d\n", val->string, bond->params.lacp_active);
}
static DEVICE_ATTR(lacp_active, 0644,
bonding_show_lacp_active, bonding_sysfs_store_option);
@@ -375,7 +375,7 @@ static ssize_t bonding_show_lacp_rate(struct device *d,
val = bond_opt_get_val(BOND_OPT_LACP_RATE, bond->params.lacp_fast);
- return sprintf(buf, "%s %d\n", val->string, bond->params.lacp_fast);
+ return sysfs_emit(buf, "%s %d\n", val->string, bond->params.lacp_fast);
}
static DEVICE_ATTR(lacp_rate, 0644,
bonding_show_lacp_rate, bonding_sysfs_store_option);
@@ -386,7 +386,7 @@ static ssize_t bonding_show_min_links(struct device *d,
{
struct bonding *bond = to_bond(d);
- return sprintf(buf, "%u\n", bond->params.min_links);
+ return sysfs_emit(buf, "%u\n", bond->params.min_links);
}
static DEVICE_ATTR(min_links, 0644,
bonding_show_min_links, bonding_sysfs_store_option);
@@ -400,7 +400,7 @@ static ssize_t bonding_show_ad_select(struct device *d,
val = bond_opt_get_val(BOND_OPT_AD_SELECT, bond->params.ad_select);
- return sprintf(buf, "%s %d\n", val->string, bond->params.ad_select);
+ return sysfs_emit(buf, "%s %d\n", val->string, bond->params.ad_select);
}
static DEVICE_ATTR(ad_select, 0644,
bonding_show_ad_select, bonding_sysfs_store_option);
@@ -412,7 +412,7 @@ static ssize_t bonding_show_num_peer_notif(struct device *d,
{
struct bonding *bond = to_bond(d);
- return sprintf(buf, "%d\n", bond->params.num_peer_notif);
+ return sysfs_emit(buf, "%d\n", bond->params.num_peer_notif);
}
static DEVICE_ATTR(num_grat_arp, 0644,
bonding_show_num_peer_notif, bonding_sysfs_store_option);
@@ -426,7 +426,7 @@ static ssize_t bonding_show_miimon(struct device *d,
{
struct bonding *bond = to_bond(d);
- return sprintf(buf, "%d\n", bond->params.miimon);
+ return sysfs_emit(buf, "%d\n", bond->params.miimon);
}
static DEVICE_ATTR(miimon, 0644,
bonding_show_miimon, bonding_sysfs_store_option);
@@ -443,7 +443,7 @@ static ssize_t bonding_show_primary(struct device *d,
rcu_read_lock();
primary = rcu_dereference(bond->primary_slave);
if (primary)
- count = sprintf(buf, "%s\n", primary->dev->name);
+ count = sysfs_emit(buf, "%s\n", primary->dev->name);
rcu_read_unlock();
return count;
@@ -462,8 +462,8 @@ static ssize_t bonding_show_primary_reselect(struct device *d,
val = bond_opt_get_val(BOND_OPT_PRIMARY_RESELECT,
bond->params.primary_reselect);
- return sprintf(buf, "%s %d\n",
- val->string, bond->params.primary_reselect);
+ return sysfs_emit(buf, "%s %d\n",
+ val->string, bond->params.primary_reselect);
}
static DEVICE_ATTR(primary_reselect, 0644,
bonding_show_primary_reselect, bonding_sysfs_store_option);
@@ -475,7 +475,7 @@ static ssize_t bonding_show_carrier(struct device *d,
{
struct bonding *bond = to_bond(d);
- return sprintf(buf, "%d\n", bond->params.use_carrier);
+ return sysfs_emit(buf, "%d\n", bond->params.use_carrier);
}
static DEVICE_ATTR(use_carrier, 0644,
bonding_show_carrier, bonding_sysfs_store_option);
@@ -493,7 +493,7 @@ static ssize_t bonding_show_active_slave(struct device *d,
rcu_read_lock();
slave_dev = bond_option_active_slave_get_rcu(bond);
if (slave_dev)
- count = sprintf(buf, "%s\n", slave_dev->name);
+ count = sysfs_emit(buf, "%s\n", slave_dev->name);
rcu_read_unlock();
return count;
@@ -509,7 +509,7 @@ static ssize_t bonding_show_mii_status(struct device *d,
struct bonding *bond = to_bond(d);
bool active = netif_carrier_ok(bond->dev);
- return sprintf(buf, "%s\n", active ? "up" : "down");
+ return sysfs_emit(buf, "%s\n", active ? "up" : "down");
}
static DEVICE_ATTR(mii_status, 0444, bonding_show_mii_status, NULL);
@@ -524,9 +524,9 @@ static ssize_t bonding_show_ad_aggregator(struct device *d,
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
struct ad_info ad_info;
- count = sprintf(buf, "%d\n",
- bond_3ad_get_active_agg_info(bond, &ad_info)
- ? 0 : ad_info.aggregator_id);
+ count = sysfs_emit(buf, "%d\n",
+ bond_3ad_get_active_agg_info(bond, &ad_info)
+ ? 0 : ad_info.aggregator_id);
}
return count;
@@ -545,9 +545,9 @@ static ssize_t bonding_show_ad_num_ports(struct device *d,
if (BOND_MODE(bond) == BOND_MODE_8023AD) {
struct ad_info ad_info;
- count = sprintf(buf, "%d\n",
- bond_3ad_get_active_agg_info(bond, &ad_info)
- ? 0 : ad_info.ports);
+ count = sysfs_emit(buf, "%d\n",
+ bond_3ad_get_active_agg_info(bond, &ad_info)
+ ? 0 : ad_info.ports);
}
return count;
@@ -566,9 +566,9 @@ static ssize_t bonding_show_ad_actor_key(struct device *d,
if (BOND_MODE(bond) == BOND_MODE_8023AD && capable(CAP_NET_ADMIN)) {
struct ad_info ad_info;
- count = sprintf(buf, "%d\n",
- bond_3ad_get_active_agg_info(bond, &ad_info)
- ? 0 : ad_info.actor_key);
+ count = sysfs_emit(buf, "%d\n",
+ bond_3ad_get_active_agg_info(bond, &ad_info)
+ ? 0 : ad_info.actor_key);
}
return count;
@@ -587,9 +587,9 @@ static ssize_t bonding_show_ad_partner_key(struct device *d,
if (BOND_MODE(bond) == BOND_MODE_8023AD && capable(CAP_NET_ADMIN)) {
struct ad_info ad_info;
- count = sprintf(buf, "%d\n",
- bond_3ad_get_active_agg_info(bond, &ad_info)
- ? 0 : ad_info.partner_key);
+ count = sysfs_emit(buf, "%d\n",
+ bond_3ad_get_active_agg_info(bond, &ad_info)
+ ? 0 : ad_info.partner_key);
}
return count;
@@ -609,7 +609,7 @@ static ssize_t bonding_show_ad_partner_mac(struct device *d,
struct ad_info ad_info;
if (!bond_3ad_get_active_agg_info(bond, &ad_info))
- count = sprintf(buf, "%pM\n", ad_info.partner_system);
+ count = sysfs_emit(buf, "%pM\n", ad_info.partner_system);
}
return count;
@@ -634,11 +634,11 @@ static ssize_t bonding_show_queue_id(struct device *d,
/* not enough space for another interface_name:queue_id pair */
if ((PAGE_SIZE - res) > 10)
res = PAGE_SIZE - 10;
- res += sprintf(buf + res, "++more++ ");
+ res += sysfs_emit_at(buf, res, "++more++ ");
break;
}
- res += sprintf(buf + res, "%s:%d ",
- slave->dev->name, slave->queue_id);
+ res += sysfs_emit_at(buf, res, "%s:%d ",
+ slave->dev->name, slave->queue_id);
}
if (res)
buf[res-1] = '\n'; /* eat the leftover space */
@@ -658,7 +658,7 @@ static ssize_t bonding_show_slaves_active(struct device *d,
{
struct bonding *bond = to_bond(d);
- return sprintf(buf, "%d\n", bond->params.all_slaves_active);
+ return sysfs_emit(buf, "%d\n", bond->params.all_slaves_active);
}
static DEVICE_ATTR(all_slaves_active, 0644,
bonding_show_slaves_active, bonding_sysfs_store_option);
@@ -670,7 +670,7 @@ static ssize_t bonding_show_resend_igmp(struct device *d,
{
struct bonding *bond = to_bond(d);
- return sprintf(buf, "%d\n", bond->params.resend_igmp);
+ return sysfs_emit(buf, "%d\n", bond->params.resend_igmp);
}
static DEVICE_ATTR(resend_igmp, 0644,
bonding_show_resend_igmp, bonding_sysfs_store_option);
@@ -682,7 +682,7 @@ static ssize_t bonding_show_lp_interval(struct device *d,
{
struct bonding *bond = to_bond(d);
- return sprintf(buf, "%d\n", bond->params.lp_interval);
+ return sysfs_emit(buf, "%d\n", bond->params.lp_interval);
}
static DEVICE_ATTR(lp_interval, 0644,
bonding_show_lp_interval, bonding_sysfs_store_option);
@@ -693,7 +693,7 @@ static ssize_t bonding_show_tlb_dynamic_lb(struct device *d,
{
struct bonding *bond = to_bond(d);
- return sprintf(buf, "%d\n", bond->params.tlb_dynamic_lb);
+ return sysfs_emit(buf, "%d\n", bond->params.tlb_dynamic_lb);
}
static DEVICE_ATTR(tlb_dynamic_lb, 0644,
bonding_show_tlb_dynamic_lb, bonding_sysfs_store_option);
@@ -705,7 +705,7 @@ static ssize_t bonding_show_packets_per_slave(struct device *d,
struct bonding *bond = to_bond(d);
unsigned int packets_per_slave = bond->params.packets_per_slave;
- return sprintf(buf, "%u\n", packets_per_slave);
+ return sysfs_emit(buf, "%u\n", packets_per_slave);
}
static DEVICE_ATTR(packets_per_slave, 0644,
bonding_show_packets_per_slave, bonding_sysfs_store_option);
@@ -717,7 +717,7 @@ static ssize_t bonding_show_ad_actor_sys_prio(struct device *d,
struct bonding *bond = to_bond(d);
if (BOND_MODE(bond) == BOND_MODE_8023AD && capable(CAP_NET_ADMIN))
- return sprintf(buf, "%hu\n", bond->params.ad_actor_sys_prio);
+ return sysfs_emit(buf, "%hu\n", bond->params.ad_actor_sys_prio);
return 0;
}
@@ -731,7 +731,7 @@ static ssize_t bonding_show_ad_actor_system(struct device *d,
struct bonding *bond = to_bond(d);
if (BOND_MODE(bond) == BOND_MODE_8023AD && capable(CAP_NET_ADMIN))
- return sprintf(buf, "%pM\n", bond->params.ad_actor_system);
+ return sysfs_emit(buf, "%pM\n", bond->params.ad_actor_system);
return 0;
}
@@ -746,7 +746,7 @@ static ssize_t bonding_show_ad_user_port_key(struct device *d,
struct bonding *bond = to_bond(d);
if (BOND_MODE(bond) == BOND_MODE_8023AD && capable(CAP_NET_ADMIN))
- return sprintf(buf, "%hu\n", bond->params.ad_user_port_key);
+ return sysfs_emit(buf, "%hu\n", bond->params.ad_user_port_key);
return 0;
}
diff --git a/drivers/net/bonding/bond_sysfs_slave.c b/drivers/net/bonding/bond_sysfs_slave.c
index 69b0a3751dff..313866f2c0e4 100644
--- a/drivers/net/bonding/bond_sysfs_slave.c
+++ b/drivers/net/bonding/bond_sysfs_slave.c
@@ -22,30 +22,30 @@ static ssize_t state_show(struct slave *slave, char *buf)
{
switch (bond_slave_state(slave)) {
case BOND_STATE_ACTIVE:
- return sprintf(buf, "active\n");
+ return sysfs_emit(buf, "active\n");
case BOND_STATE_BACKUP:
- return sprintf(buf, "backup\n");
+ return sysfs_emit(buf, "backup\n");
default:
- return sprintf(buf, "UNKNOWN\n");
+ return sysfs_emit(buf, "UNKNOWN\n");
}
}
static SLAVE_ATTR_RO(state);
static ssize_t mii_status_show(struct slave *slave, char *buf)
{
- return sprintf(buf, "%s\n", bond_slave_link_status(slave->link));
+ return sysfs_emit(buf, "%s\n", bond_slave_link_status(slave->link));
}
static SLAVE_ATTR_RO(mii_status);
static ssize_t link_failure_count_show(struct slave *slave, char *buf)
{
- return sprintf(buf, "%d\n", slave->link_failure_count);
+ return sysfs_emit(buf, "%d\n", slave->link_failure_count);
}
static SLAVE_ATTR_RO(link_failure_count);
static ssize_t perm_hwaddr_show(struct slave *slave, char *buf)
{
- return sprintf(buf, "%*phC\n",
+ return sysfs_emit(buf, "%*phC\n",
slave->dev->addr_len,
slave->perm_hwaddr);
}
@@ -53,7 +53,7 @@ static SLAVE_ATTR_RO(perm_hwaddr);
static ssize_t queue_id_show(struct slave *slave, char *buf)
{
- return sprintf(buf, "%d\n", slave->queue_id);
+ return sysfs_emit(buf, "%d\n", slave->queue_id);
}
static SLAVE_ATTR_RO(queue_id);
@@ -64,11 +64,11 @@ static ssize_t ad_aggregator_id_show(struct slave *slave, char *buf)
if (BOND_MODE(slave->bond) == BOND_MODE_8023AD) {
agg = SLAVE_AD_INFO(slave)->port.aggregator;
if (agg)
- return sprintf(buf, "%d\n",
- agg->aggregator_identifier);
+ return sysfs_emit(buf, "%d\n",
+ agg->aggregator_identifier);
}
- return sprintf(buf, "N/A\n");
+ return sysfs_emit(buf, "N/A\n");
}
static SLAVE_ATTR_RO(ad_aggregator_id);
@@ -79,11 +79,11 @@ static ssize_t ad_actor_oper_port_state_show(struct slave *slave, char *buf)
if (BOND_MODE(slave->bond) == BOND_MODE_8023AD) {
ad_port = &SLAVE_AD_INFO(slave)->port;
if (ad_port->aggregator)
- return sprintf(buf, "%u\n",
+ return sysfs_emit(buf, "%u\n",
ad_port->actor_oper_port_state);
}
- return sprintf(buf, "N/A\n");
+ return sysfs_emit(buf, "N/A\n");
}
static SLAVE_ATTR_RO(ad_actor_oper_port_state);
@@ -94,11 +94,11 @@ static ssize_t ad_partner_oper_port_state_show(struct slave *slave, char *buf)
if (BOND_MODE(slave->bond) == BOND_MODE_8023AD) {
ad_port = &SLAVE_AD_INFO(slave)->port;
if (ad_port->aggregator)
- return sprintf(buf, "%u\n",
+ return sysfs_emit(buf, "%u\n",
ad_port->partner_oper.port_state);
}
- return sprintf(buf, "N/A\n");
+ return sysfs_emit(buf, "N/A\n");
}
static SLAVE_ATTR_RO(ad_partner_oper_port_state);
diff --git a/drivers/net/can/ctucanfd/ctucanfd_base.c b/drivers/net/can/ctucanfd/ctucanfd_base.c
index 3c18d028bd8c..b8da15ea6ad9 100644
--- a/drivers/net/can/ctucanfd/ctucanfd_base.c
+++ b/drivers/net/can/ctucanfd/ctucanfd_base.c
@@ -657,7 +657,6 @@ static void ctucan_read_rx_frame(struct ctucan_priv *priv, struct canfd_frame *c
cf->can_id = (idw >> 18) & CAN_SFF_MASK;
/* BRS, ESI, RTR Flags */
- cf->flags = 0;
if (FIELD_GET(REG_FRAME_FORMAT_W_FDF, ffw)) {
if (FIELD_GET(REG_FRAME_FORMAT_W_BRS, ffw))
cf->flags |= CANFD_BRS;
@@ -1425,7 +1424,7 @@ int ctucan_probe_common(struct device *dev, void __iomem *addr, int irq, unsigne
priv->can.clock.freq = can_clk_rate;
- netif_napi_add(ndev, &priv->napi, ctucan_rx_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(ndev, &priv->napi, ctucan_rx_poll);
ret = register_candev(ndev);
if (ret) {
diff --git a/drivers/net/can/ctucanfd/ctucanfd_platform.c b/drivers/net/can/ctucanfd/ctucanfd_platform.c
index 89d54c2151e1..f83684f006ea 100644
--- a/drivers/net/can/ctucanfd/ctucanfd_platform.c
+++ b/drivers/net/can/ctucanfd/ctucanfd_platform.c
@@ -58,7 +58,6 @@ static int ctucan_platform_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
addr = devm_ioremap_resource(dev, res);
if (IS_ERR(addr)) {
- dev_err(dev, "Cannot remap address.\n");
ret = PTR_ERR(addr);
goto err;
}
diff --git a/drivers/net/can/dev/rx-offload.c b/drivers/net/can/dev/rx-offload.c
index a32a01c172d4..81ebf0562c89 100644
--- a/drivers/net/can/dev/rx-offload.c
+++ b/drivers/net/can/dev/rx-offload.c
@@ -247,7 +247,7 @@ unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
struct net_device *dev = offload->dev;
struct net_device_stats *stats = &dev->stats;
struct sk_buff *skb;
- u8 len;
+ unsigned int len;
int err;
skb = __can_get_echo_skb(dev, idx, &len, frame_len_ptr);
@@ -329,7 +329,7 @@ static int can_rx_offload_init_queue(struct net_device *dev,
{
offload->dev = dev;
- /* Limit queue len to 4x the weight (rounted to next power of two) */
+ /* Limit queue len to 4x the weight (rounded to next power of two) */
offload->skb_queue_len_max = 2 << fls(weight);
offload->skb_queue_len_max *= 4;
skb_queue_head_init(&offload->skb_queue);
diff --git a/drivers/net/can/dev/skb.c b/drivers/net/can/dev/skb.c
index 07e0feac8629..791a51e2f5d6 100644
--- a/drivers/net/can/dev/skb.c
+++ b/drivers/net/can/dev/skb.c
@@ -91,8 +91,8 @@ int can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
EXPORT_SYMBOL_GPL(can_put_echo_skb);
struct sk_buff *
-__can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr,
- unsigned int *frame_len_ptr)
+__can_get_echo_skb(struct net_device *dev, unsigned int idx,
+ unsigned int *len_ptr, unsigned int *frame_len_ptr)
{
struct can_priv *priv = netdev_priv(dev);
@@ -108,16 +108,12 @@ __can_get_echo_skb(struct net_device *dev, unsigned int idx, u8 *len_ptr,
*/
struct sk_buff *skb = priv->echo_skb[idx];
struct can_skb_priv *can_skb_priv = can_skb_prv(skb);
- struct canfd_frame *cf = (struct canfd_frame *)skb->data;
if (skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)
skb_tstamp_tx(skb, skb_hwtstamps(skb));
/* get the real payload length for netdev statistics */
- if (cf->can_id & CAN_RTR_FLAG)
- *len_ptr = 0;
- else
- *len_ptr = cf->len;
+ *len_ptr = can_skb_get_data_len(skb);
if (frame_len_ptr)
*frame_len_ptr = can_skb_priv->frame_len;
@@ -147,7 +143,7 @@ unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx,
unsigned int *frame_len_ptr)
{
struct sk_buff *skb;
- u8 len;
+ unsigned int len;
skb = __can_get_echo_skb(dev, idx, &len, frame_len_ptr);
if (!skb)
@@ -191,6 +187,20 @@ void can_free_echo_skb(struct net_device *dev, unsigned int idx,
}
EXPORT_SYMBOL_GPL(can_free_echo_skb);
+/* fill common values for CAN sk_buffs */
+static void init_can_skb_reserve(struct sk_buff *skb)
+{
+ skb->pkt_type = PACKET_BROADCAST;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ skb_reset_mac_header(skb);
+ skb_reset_network_header(skb);
+ skb_reset_transport_header(skb);
+
+ can_skb_reserve(skb);
+ can_skb_prv(skb)->skbcnt = 0;
+}
+
struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
{
struct sk_buff *skb;
@@ -204,16 +214,8 @@ struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
}
skb->protocol = htons(ETH_P_CAN);
- skb->pkt_type = PACKET_BROADCAST;
- skb->ip_summed = CHECKSUM_UNNECESSARY;
-
- skb_reset_mac_header(skb);
- skb_reset_network_header(skb);
- skb_reset_transport_header(skb);
-
- can_skb_reserve(skb);
+ init_can_skb_reserve(skb);
can_skb_prv(skb)->ifindex = dev->ifindex;
- can_skb_prv(skb)->skbcnt = 0;
*cf = skb_put_zero(skb, sizeof(struct can_frame));
@@ -235,23 +237,51 @@ struct sk_buff *alloc_canfd_skb(struct net_device *dev,
}
skb->protocol = htons(ETH_P_CANFD);
- skb->pkt_type = PACKET_BROADCAST;
- skb->ip_summed = CHECKSUM_UNNECESSARY;
-
- skb_reset_mac_header(skb);
- skb_reset_network_header(skb);
- skb_reset_transport_header(skb);
-
- can_skb_reserve(skb);
+ init_can_skb_reserve(skb);
can_skb_prv(skb)->ifindex = dev->ifindex;
- can_skb_prv(skb)->skbcnt = 0;
*cfd = skb_put_zero(skb, sizeof(struct canfd_frame));
+ /* set CAN FD flag by default */
+ (*cfd)->flags = CANFD_FDF;
+
return skb;
}
EXPORT_SYMBOL_GPL(alloc_canfd_skb);
+struct sk_buff *alloc_canxl_skb(struct net_device *dev,
+ struct canxl_frame **cxl,
+ unsigned int data_len)
+{
+ struct sk_buff *skb;
+
+ if (data_len < CANXL_MIN_DLEN || data_len > CANXL_MAX_DLEN)
+ goto out_error;
+
+ skb = netdev_alloc_skb(dev, sizeof(struct can_skb_priv) +
+ CANXL_HDR_SIZE + data_len);
+ if (unlikely(!skb))
+ goto out_error;
+
+ skb->protocol = htons(ETH_P_CANXL);
+ init_can_skb_reserve(skb);
+ can_skb_prv(skb)->ifindex = dev->ifindex;
+
+ *cxl = skb_put_zero(skb, CANXL_HDR_SIZE + data_len);
+
+ /* set CAN XL flag and length information by default */
+ (*cxl)->flags = CANXL_XLF;
+ (*cxl)->len = data_len;
+
+ return skb;
+
+out_error:
+ *cxl = NULL;
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(alloc_canxl_skb);
+
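A hedged sketch of how a driver receive path might use the new helper (the device pointer, payload buffer and the 12-byte length are purely illustrative):

	struct canxl_frame *cxl;
	struct sk_buff *skb;

	skb = alloc_canxl_skb(dev, &cxl, 12);
	if (!skb)
		return;

	/* CANXL_XLF and cxl->len have already been set by the helper */
	memcpy(cxl->data, payload, 12);
	netif_rx(skb);
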
struct sk_buff *alloc_can_err_skb(struct net_device *dev, struct can_frame **cf)
{
struct sk_buff *skb;
@@ -291,6 +321,14 @@ static bool can_skb_headroom_valid(struct net_device *dev, struct sk_buff *skb)
skb_reset_mac_header(skb);
skb_reset_network_header(skb);
skb_reset_transport_header(skb);
+
+ /* set CANFD_FDF flag for CAN FD frames */
+ if (can_is_canfd_skb(skb)) {
+ struct canfd_frame *cfd;
+
+ cfd = (struct canfd_frame *)skb->data;
+ cfd->flags |= CANFD_FDF;
+ }
}
return true;
@@ -299,18 +337,25 @@ static bool can_skb_headroom_valid(struct net_device *dev, struct sk_buff *skb)
/* Drop a given socketbuffer if it does not contain a valid CAN frame. */
bool can_dropped_invalid_skb(struct net_device *dev, struct sk_buff *skb)
{
- const struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
struct can_priv *priv = netdev_priv(dev);
- if (skb->protocol == htons(ETH_P_CAN)) {
- if (unlikely(skb->len != CAN_MTU ||
- cfd->len > CAN_MAX_DLEN))
+ switch (ntohs(skb->protocol)) {
+ case ETH_P_CAN:
+ if (!can_is_can_skb(skb))
goto inval_skb;
- } else if (skb->protocol == htons(ETH_P_CANFD)) {
- if (unlikely(skb->len != CANFD_MTU ||
- cfd->len > CANFD_MAX_DLEN))
+ break;
+
+ case ETH_P_CANFD:
+ if (!can_is_canfd_skb(skb))
goto inval_skb;
- } else {
+ break;
+
+ case ETH_P_CANXL:
+ if (!can_is_canxl_skb(skb))
+ goto inval_skb;
+ break;
+
+ default:
goto inval_skb;
}
diff --git a/drivers/net/can/flexcan/flexcan-core.c b/drivers/net/can/flexcan/flexcan-core.c
index ccb438eca517..5ee38e586fd8 100644
--- a/drivers/net/can/flexcan/flexcan-core.c
+++ b/drivers/net/can/flexcan/flexcan-core.c
@@ -295,45 +295,45 @@ static_assert(sizeof(struct flexcan_regs) == 0x4 * 18 + 0xfb8);
static const struct flexcan_devtype_data fsl_mcf5441x_devtype_data = {
.quirks = FLEXCAN_QUIRK_BROKEN_PERR_STATE |
FLEXCAN_QUIRK_NR_IRQ_3 | FLEXCAN_QUIRK_NR_MB_16 |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX |
- FLEXCAN_QUIRK_SUPPPORT_RX_FIFO,
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_FIFO,
};
static const struct flexcan_devtype_data fsl_p1010_devtype_data = {
.quirks = FLEXCAN_QUIRK_BROKEN_WERR_STATE |
FLEXCAN_QUIRK_BROKEN_PERR_STATE |
FLEXCAN_QUIRK_DEFAULT_BIG_ENDIAN |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX |
- FLEXCAN_QUIRK_SUPPPORT_RX_FIFO,
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_FIFO,
};
static const struct flexcan_devtype_data fsl_imx25_devtype_data = {
.quirks = FLEXCAN_QUIRK_BROKEN_WERR_STATE |
FLEXCAN_QUIRK_BROKEN_PERR_STATE |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX |
- FLEXCAN_QUIRK_SUPPPORT_RX_FIFO,
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_FIFO,
};
static const struct flexcan_devtype_data fsl_imx28_devtype_data = {
.quirks = FLEXCAN_QUIRK_BROKEN_PERR_STATE |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX |
- FLEXCAN_QUIRK_SUPPPORT_RX_FIFO,
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_FIFO,
};
static const struct flexcan_devtype_data fsl_imx6q_devtype_data = {
.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
FLEXCAN_QUIRK_USE_RX_MAILBOX | FLEXCAN_QUIRK_BROKEN_PERR_STATE |
FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR,
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR,
};
static const struct flexcan_devtype_data fsl_imx8qm_devtype_data = {
.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
FLEXCAN_QUIRK_USE_RX_MAILBOX | FLEXCAN_QUIRK_BROKEN_PERR_STATE |
FLEXCAN_QUIRK_SUPPORT_FD | FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR,
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR,
};
static struct flexcan_devtype_data fsl_imx8mp_devtype_data = {
@@ -341,23 +341,23 @@ static struct flexcan_devtype_data fsl_imx8mp_devtype_data = {
FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_RX_MAILBOX |
FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR |
FLEXCAN_QUIRK_SUPPORT_FD | FLEXCAN_QUIRK_SUPPORT_ECC |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR,
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR,
};
static const struct flexcan_devtype_data fsl_vf610_devtype_data = {
.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_RX_MAILBOX |
FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_SUPPORT_ECC |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR,
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR,
};
static const struct flexcan_devtype_data fsl_ls1021a_r2_devtype_data = {
.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_USE_RX_MAILBOX |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR,
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR,
};
static const struct flexcan_devtype_data fsl_lx2160a_r1_devtype_data = {
@@ -365,8 +365,8 @@ static const struct flexcan_devtype_data fsl_lx2160a_r1_devtype_data = {
FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_BROKEN_PERR_STATE |
FLEXCAN_QUIRK_USE_RX_MAILBOX | FLEXCAN_QUIRK_SUPPORT_FD |
FLEXCAN_QUIRK_SUPPORT_ECC |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR,
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR,
};
static const struct can_bittiming_const flexcan_bittiming_const = {
@@ -2085,20 +2085,20 @@ static int flexcan_probe(struct platform_device *pdev)
if ((devtype_data->quirks & FLEXCAN_QUIRK_SUPPORT_FD) &&
!((devtype_data->quirks &
(FLEXCAN_QUIRK_USE_RX_MAILBOX |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR |
- FLEXCAN_QUIRK_SUPPPORT_RX_FIFO)) ==
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR |
+ FLEXCAN_QUIRK_SUPPORT_RX_FIFO)) ==
(FLEXCAN_QUIRK_USE_RX_MAILBOX |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR))) {
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR))) {
dev_err(&pdev->dev, "CAN-FD mode doesn't work in RX-FIFO mode!\n");
return -EINVAL;
}
if ((devtype_data->quirks &
- (FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR)) ==
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR) {
+ (FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR)) ==
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR) {
dev_err(&pdev->dev,
"Quirks (0x%08x) inconsistent: RX_MAILBOX_RX supported but not RX_MAILBOX\n",
devtype_data->quirks);
@@ -2177,8 +2177,7 @@ static int flexcan_probe(struct platform_device *pdev)
err = flexcan_setup_stop_mode(pdev);
if (err < 0) {
- if (err != -EPROBE_DEFER)
- dev_err(&pdev->dev, "setup stop mode failed\n");
+ dev_err_probe(&pdev->dev, err, "setup stop mode failed\n");
goto failed_setup_stop_mode;
}
diff --git a/drivers/net/can/flexcan/flexcan.h b/drivers/net/can/flexcan/flexcan.h
index 8621a8ea1dea..025c3417031f 100644
--- a/drivers/net/can/flexcan/flexcan.h
+++ b/drivers/net/can/flexcan/flexcan.h
@@ -63,11 +63,11 @@
/* Setup 16 mailboxes */
#define FLEXCAN_QUIRK_NR_MB_16 BIT(13)
/* Device supports RX via mailboxes */
-#define FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX BIT(14)
+#define FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX BIT(14)
/* Device supports RTR reception via mailboxes */
-#define FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR BIT(15)
+#define FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR BIT(15)
/* Device supports RX via FIFO */
-#define FLEXCAN_QUIRK_SUPPPORT_RX_FIFO BIT(16)
+#define FLEXCAN_QUIRK_SUPPORT_RX_FIFO BIT(16)
struct flexcan_devtype_data {
u32 quirks; /* quirks needed for different IP cores */
@@ -121,7 +121,7 @@ flexcan_supports_rx_mailbox(const struct flexcan_priv *priv)
{
const u32 quirks = priv->devtype_data.quirks;
- return quirks & FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX;
+ return quirks & FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX;
}
static inline bool
@@ -129,10 +129,10 @@ flexcan_supports_rx_mailbox_rtr(const struct flexcan_priv *priv)
{
const u32 quirks = priv->devtype_data.quirks;
- return (quirks & (FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR)) ==
- (FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX |
- FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR);
+ return (quirks & (FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR)) ==
+ (FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX |
+ FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR);
}
static inline bool
@@ -140,7 +140,7 @@ flexcan_supports_rx_fifo(const struct flexcan_priv *priv)
{
const u32 quirks = priv->devtype_data.quirks;
- return quirks & FLEXCAN_QUIRK_SUPPPORT_RX_FIFO;
+ return quirks & FLEXCAN_QUIRK_SUPPORT_RX_FIFO;
}
static inline bool
@@ -149,7 +149,7 @@ flexcan_active_rx_rtr(const struct flexcan_priv *priv)
const u32 quirks = priv->devtype_data.quirks;
if (quirks & FLEXCAN_QUIRK_USE_RX_MAILBOX) {
- if (quirks & FLEXCAN_QUIRK_SUPPPORT_RX_MAILBOX_RTR)
+ if (quirks & FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR)
return true;
} else {
/* RX-FIFO is always RTR capable */
diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c
index ad7a89b95da7..8d42b7e6661f 100644
--- a/drivers/net/can/ifi_canfd/ifi_canfd.c
+++ b/drivers/net/can/ifi_canfd/ifi_canfd.c
@@ -973,7 +973,7 @@ static int ifi_canfd_plat_probe(struct platform_device *pdev)
priv->ndev = ndev;
priv->base = addr;
- netif_napi_add(ndev, &priv->napi, ifi_canfd_poll, 64);
+ netif_napi_add(ndev, &priv->napi, ifi_canfd_poll);
priv->can.state = CAN_STATE_STOPPED;
diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c
index ed54c0b3c7d4..4e9680c8eb34 100644
--- a/drivers/net/can/kvaser_pciefd.c
+++ b/drivers/net/can/kvaser_pciefd.c
@@ -329,12 +329,9 @@ MODULE_DEVICE_TABLE(pci, kvaser_pciefd_id_table);
static int kvaser_pciefd_spi_wait_loop(struct kvaser_pciefd *pcie, int msk)
{
u32 res;
- int ret;
- ret = readl_poll_timeout(pcie->reg_base + KVASER_PCIEFD_SPI_STATUS_REG,
- res, res & msk, 0, 10);
-
- return ret;
+ return readl_poll_timeout(pcie->reg_base + KVASER_PCIEFD_SPI_STATUS_REG,
+ res, res & msk, 0, 10);
}
static int kvaser_pciefd_spi_cmd(struct kvaser_pciefd *pcie, const u8 *tx,
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index 4709c012b1dc..dcb582563d5e 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -1467,8 +1467,7 @@ static int m_can_dev_setup(struct m_can_classdev *cdev)
}
if (!cdev->is_peripheral)
- netif_napi_add(dev, &cdev->napi,
- m_can_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(dev, &cdev->napi, m_can_poll);
/* Shared properties of all M_CAN versions */
cdev->version = m_can_version;
diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
index 27085b796e75..567620d215f8 100644
--- a/drivers/net/can/rcar/rcar_canfd.c
+++ b/drivers/net/can/rcar/rcar_canfd.c
@@ -1880,10 +1880,9 @@ static int rcar_canfd_probe(struct platform_device *pdev)
/* Global controller context */
gpriv = devm_kzalloc(&pdev->dev, sizeof(*gpriv), GFP_KERNEL);
- if (!gpriv) {
- err = -ENOMEM;
- goto fail_dev;
- }
+ if (!gpriv)
+ return -ENOMEM;
+
gpriv->pdev = pdev;
gpriv->channels_mask = channels_mask;
gpriv->fdmode = fdmode;
@@ -1904,12 +1903,9 @@ static int rcar_canfd_probe(struct platform_device *pdev)
/* Peripheral clock */
gpriv->clkp = devm_clk_get(&pdev->dev, "fck");
- if (IS_ERR(gpriv->clkp)) {
- err = PTR_ERR(gpriv->clkp);
- dev_err(&pdev->dev, "cannot get peripheral clock, error %d\n",
- err);
- goto fail_dev;
- }
+ if (IS_ERR(gpriv->clkp))
+ return dev_err_probe(&pdev->dev, PTR_ERR(gpriv->clkp),
+ "cannot get peripheral clock\n");
/* fCAN clock: Pick External clock. If not available fallback to
* CANFD clock
@@ -1917,12 +1913,10 @@ static int rcar_canfd_probe(struct platform_device *pdev)
gpriv->can_clk = devm_clk_get(&pdev->dev, "can_clk");
if (IS_ERR(gpriv->can_clk) || (clk_get_rate(gpriv->can_clk) == 0)) {
gpriv->can_clk = devm_clk_get(&pdev->dev, "canfd");
- if (IS_ERR(gpriv->can_clk)) {
- err = PTR_ERR(gpriv->can_clk);
- dev_err(&pdev->dev,
- "cannot get canfd clock, error %d\n", err);
- goto fail_dev;
- }
+ if (IS_ERR(gpriv->can_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(gpriv->can_clk),
+ "cannot get canfd clock\n");
+
gpriv->fcan = RCANFD_CANFDCLK;
} else {
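The rcar_canfd probe conversion leans on dev_err_probe(): it logs the message (staying silent for -EPROBE_DEFER, which it records for later inspection instead) and returns the error code, so the "err = PTR_ERR(); dev_err(); goto fail" sequence collapses into one statement. A minimal sketch of the pattern, assuming a clock named "fck":

    struct clk *clk;

    clk = devm_clk_get(&pdev->dev, "fck");
    if (IS_ERR(clk))
            /* logs (unless deferring) and returns PTR_ERR(clk) */
            return dev_err_probe(&pdev->dev, PTR_ERR(clk),
                                 "cannot get peripheral clock\n");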
diff --git a/drivers/net/can/sja1000/peak_pcmcia.c b/drivers/net/can/sja1000/peak_pcmcia.c
index 131a084c3535..ebd5941c3f53 100644
--- a/drivers/net/can/sja1000/peak_pcmcia.c
+++ b/drivers/net/can/sja1000/peak_pcmcia.c
@@ -478,7 +478,7 @@ static void pcan_free_channels(struct pcan_pccard *card)
if (!netdev)
continue;
- strlcpy(name, netdev->name, IFNAMSIZ);
+ strscpy(name, netdev->name, IFNAMSIZ);
unregister_sja1000dev(netdev);
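peak_pcmcia, peak_usb and the DSA drivers later in the series move from strlcpy() to strscpy(). Both always NUL-terminate, but strscpy() never reads the source beyond the destination size and returns the number of bytes copied, or -E2BIG on truncation, instead of the source length. A small sketch of the truncation check the new return value allows:

    char name[IFNAMSIZ];

    if (strscpy(name, netdev->name, sizeof(name)) < 0)
            pr_warn("interface name was truncated\n");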
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index 98dfd5f295a7..1bb1129b0450 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -661,8 +661,6 @@ static const struct ethtool_ops sja1000_ethtool_ops = {
int register_sja1000dev(struct net_device *dev)
{
- int ret;
-
if (!sja1000_probe_chip(dev))
return -ENODEV;
@@ -673,9 +671,7 @@ int register_sja1000dev(struct net_device *dev)
set_reset_mode(dev);
chipset_init(dev);
- ret = register_candev(dev);
-
- return ret;
+ return register_candev(dev);
}
EXPORT_SYMBOL_GPL(register_sja1000dev);
diff --git a/drivers/net/can/sja1000/sja1000_platform.c b/drivers/net/can/sja1000/sja1000_platform.c
index 81bc741905fd..6779d5357069 100644
--- a/drivers/net/can/sja1000/sja1000_platform.c
+++ b/drivers/net/can/sja1000/sja1000_platform.c
@@ -14,6 +14,7 @@
#include
#include
#include
+#include
#include
#include
#include
@@ -103,6 +104,11 @@ static void sp_technologic_init(struct sja1000_priv *priv, struct device_node *o
spin_lock_init(&tp->io_lock);
}
+static void sp_rzn1_init(struct sja1000_priv *priv, struct device_node *of)
+{
+ priv->flags = SJA1000_QUIRK_NO_CDR_REG;
+}
+
static void sp_populate(struct sja1000_priv *priv,
struct sja1000_platform_data *pdata,
unsigned long resource_mem_flags)
@@ -153,11 +159,13 @@ static void sp_populate_of(struct sja1000_priv *priv, struct device_node *of)
priv->write_reg = sp_write_reg8;
}
- err = of_property_read_u32(of, "nxp,external-clock-frequency", &prop);
- if (!err)
- priv->can.clock.freq = prop / 2;
- else
- priv->can.clock.freq = SP_CAN_CLOCK; /* default */
+ if (!priv->can.clock.freq) {
+ err = of_property_read_u32(of, "nxp,external-clock-frequency", &prop);
+ if (!err)
+ priv->can.clock.freq = prop / 2;
+ else
+ priv->can.clock.freq = SP_CAN_CLOCK; /* default */
+ }
err = of_property_read_u32(of, "nxp,tx-output-mode", &prop);
if (!err)
@@ -192,8 +200,13 @@ static struct sja1000_of_data technologic_data = {
.init = sp_technologic_init,
};
+static struct sja1000_of_data renesas_data = {
+ .init = sp_rzn1_init,
+};
+
static const struct of_device_id sp_of_table[] = {
{ .compatible = "nxp,sja1000", .data = NULL, },
+ { .compatible = "renesas,rzn1-sja1000", .data = &renesas_data, },
{ .compatible = "technologic,sja1000", .data = &technologic_data, },
{ /* sentinel */ },
};
@@ -210,6 +223,7 @@ static int sp_probe(struct platform_device *pdev)
struct device_node *of = pdev->dev.of_node;
const struct sja1000_of_data *of_data = NULL;
size_t priv_sz = 0;
+ struct clk *clk;
pdata = dev_get_platdata(&pdev->dev);
if (!pdata && !of) {
@@ -234,6 +248,11 @@ static int sp_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
+
+ clk = devm_clk_get_optional_enabled(&pdev->dev, NULL);
+ if (IS_ERR(clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(clk),
+ "CAN clk operation failed");
} else {
res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!res_irq)
@@ -262,6 +281,15 @@ static int sp_probe(struct platform_device *pdev)
priv->reg_base = addr;
if (of) {
+ if (clk) {
+ priv->can.clock.freq = clk_get_rate(clk) / 2;
+ if (!priv->can.clock.freq) {
+ err = -EINVAL;
+ dev_err(&pdev->dev, "Zero CAN clk rate");
+ goto exit_free;
+ }
+ }
+
sp_populate_of(priv, of);
if (of_data && of_data->init)
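The sja1000_platform change relies on devm_clk_get_optional_enabled(), which returns NULL rather than an error when the device tree describes no clock, so existing "nxp,sja1000" users are unaffected while "renesas,rzn1-sja1000" picks up its rate from the clock tree; as with the nxp,external-clock-frequency property, the CAN core runs at half that input, hence the division by two. A minimal sketch of the probe-side logic under those assumptions:

    struct clk *clk;

    /* NULL when the node has no clock, error pointer on real failures */
    clk = devm_clk_get_optional_enabled(&pdev->dev, NULL);
    if (IS_ERR(clk))
            return dev_err_probe(&pdev->dev, PTR_ERR(clk),
                                 "CAN clk operation failed");

    if (clk)
            priv->can.clock.freq = clk_get_rate(clk) / 2;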
diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.h b/drivers/net/can/usb/etas_es58x/es58x_core.h
index d769bdf740b7..640fe0a1df63 100644
--- a/drivers/net/can/usb/etas_es58x/es58x_core.h
+++ b/drivers/net/can/usb/etas_es58x/es58x_core.h
@@ -222,7 +222,7 @@ union es58x_urb_cmd {
u8 cmd_type;
u8 cmd_id;
} __packed;
- u8 raw_cmd[0];
+ DECLARE_FLEX_ARRAY(u8, raw_cmd);
};
/**
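es58x swaps the old zero-length `u8 raw_cmd[0]` for DECLARE_FLEX_ARRAY(), the <linux/stddef.h> helper that lets a flexible array member live inside a union (or be a struct's only member) without tripping compiler and FORTIFY warnings. A hedged sketch of the same construct with illustrative field names:

    #include <linux/stddef.h>
    #include <linux/types.h>

    union msg {
            struct {
                    u8 type;
                    u8 id;
            } __packed header;
            /* flexible array overlaying the whole message */
            DECLARE_FLEX_ARRAY(u8, raw);
    };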
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index c1ff3c046d62..f0065d40eb24 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -10,20 +10,24 @@
*/
#include
+#include
#include
#include
#include
#include
#include
+#include
+#include
#include
+#include
#include
#include
#include
/* Device specific constants */
-#define USB_GSUSB_1_VENDOR_ID 0x1d50
-#define USB_GSUSB_1_PRODUCT_ID 0x606f
+#define USB_GS_USB_1_VENDOR_ID 0x1d50
+#define USB_GS_USB_1_PRODUCT_ID 0x606f
#define USB_CANDLELIGHT_VENDOR_ID 0x1209
#define USB_CANDLELIGHT_PRODUCT_ID 0x2323
@@ -34,8 +38,16 @@
#define USB_ABE_CANDEBUGGER_FD_VENDOR_ID 0x16d0
#define USB_ABE_CANDEBUGGER_FD_PRODUCT_ID 0x10b8
-#define GSUSB_ENDPOINT_IN 1
-#define GSUSB_ENDPOINT_OUT 2
+#define GS_USB_ENDPOINT_IN 1
+#define GS_USB_ENDPOINT_OUT 2
+
+/* Timestamp 32 bit timer runs at 1 MHz (1 µs tick). Worker accounts
+ * for timer overflow (will be after ~71 minutes)
+ */
+#define GS_USB_TIMESTAMP_TIMER_HZ (1 * HZ_PER_MHZ)
+#define GS_USB_TIMESTAMP_WORK_DELAY_SEC 1800
+static_assert(GS_USB_TIMESTAMP_WORK_DELAY_SEC <
+ CYCLECOUNTER_MASK(32) / GS_USB_TIMESTAMP_TIMER_HZ / 2);
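A quick check of the numbers behind that static_assert: a 32-bit counter ticking at 1 MHz wraps after 2^32 µs ≈ 4295 s, roughly 71.6 minutes, so the timecounter has to be refreshed at least once per half period, about 2147 s. The 1800 s (30 minute) work delay stays below that with some margin, which is exactly the relation the assertion encodes at build time.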
/* Device specific constants */
enum gs_usb_breq {
@@ -52,6 +64,8 @@ enum gs_usb_breq {
GS_USB_BREQ_SET_USER_ID,
GS_USB_BREQ_DATA_BITTIMING,
GS_USB_BREQ_BT_CONST_EXT,
+ GS_USB_BREQ_SET_TERMINATION,
+ GS_USB_BREQ_GET_TERMINATION,
};
enum gs_can_mode {
@@ -75,6 +89,14 @@ enum gs_can_identify_mode {
GS_CAN_IDENTIFY_ON
};
+enum gs_can_termination_state {
+ GS_CAN_TERMINATION_STATE_OFF = 0,
+ GS_CAN_TERMINATION_STATE_ON
+};
+
+#define GS_USB_TERMINATION_DISABLED CAN_TERMINATION_DISABLED
+#define GS_USB_TERMINATION_ENABLED 120
+
/* data types passed between host and device */
/* The firmware on the original USB2CAN by Geschwister Schneider
@@ -111,6 +133,7 @@ struct gs_device_config {
#define GS_CAN_MODE_FD BIT(8)
/* GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX BIT(9) */
/* GS_CAN_FEATURE_BT_CONST_EXT BIT(10) */
+/* GS_CAN_FEATURE_TERMINATION BIT(11) */
struct gs_device_mode {
__le32 mode;
@@ -135,6 +158,10 @@ struct gs_identify_mode {
__le32 mode;
} __packed;
+struct gs_device_termination_state {
+ __le32 state;
+} __packed;
+
#define GS_CAN_FEATURE_LISTEN_ONLY BIT(0)
#define GS_CAN_FEATURE_LOOP_BACK BIT(1)
#define GS_CAN_FEATURE_TRIPLE_SAMPLE BIT(2)
@@ -146,7 +173,8 @@ struct gs_identify_mode {
#define GS_CAN_FEATURE_FD BIT(8)
#define GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX BIT(9)
#define GS_CAN_FEATURE_BT_CONST_EXT BIT(10)
-#define GS_CAN_FEATURE_MASK GENMASK(10, 0)
+#define GS_CAN_FEATURE_TERMINATION BIT(11)
+#define GS_CAN_FEATURE_MASK GENMASK(11, 0)
/* internal quirks - keep in GS_CAN_FEATURE space for now */
@@ -199,6 +227,11 @@ struct classic_can {
u8 data[8];
} __packed;
+struct classic_can_ts {
+ u8 data[8];
+ __le32 timestamp_us;
+} __packed;
+
struct classic_can_quirk {
u8 data[8];
u8 quirk;
@@ -208,6 +241,11 @@ struct canfd {
u8 data[64];
} __packed;
+struct canfd_ts {
+ u8 data[64];
+ __le32 timestamp_us;
+} __packed;
+
struct canfd_quirk {
u8 data[64];
u8 quirk;
@@ -224,8 +262,10 @@ struct gs_host_frame {
union {
DECLARE_FLEX_ARRAY(struct classic_can, classic_can);
+ DECLARE_FLEX_ARRAY(struct classic_can_ts, classic_can_ts);
DECLARE_FLEX_ARRAY(struct classic_can_quirk, classic_can_quirk);
DECLARE_FLEX_ARRAY(struct canfd, canfd);
+ DECLARE_FLEX_ARRAY(struct canfd_ts, canfd_ts);
DECLARE_FLEX_ARRAY(struct canfd_quirk, canfd_quirk);
};
} __packed;
@@ -259,6 +299,12 @@ struct gs_can {
struct can_bittiming_const bt_const, data_bt_const;
unsigned int channel; /* channel number */
+ /* time counter for hardware timestamps */
+ struct cyclecounter cc;
+ struct timecounter tc;
+ spinlock_t tc_lock; /* spinlock to guard access tc->cycle_last */
+ struct delayed_work timestamp;
+
u32 feature;
unsigned int hf_size_tx;
@@ -268,8 +314,6 @@ struct gs_can {
struct usb_anchor tx_submitted;
atomic_t active_tx_urbs;
- void *rxbuf[GS_MAX_RX_URBS];
- dma_addr_t rxbuf_dma[GS_MAX_RX_URBS];
};
/* usb interface struct */
@@ -328,27 +372,109 @@ static struct gs_tx_context *gs_get_tx_context(struct gs_can *dev,
return NULL;
}
-static int gs_cmd_reset(struct gs_can *gsdev)
+static int gs_cmd_reset(struct gs_can *dev)
{
- struct gs_device_mode *dm;
- struct usb_interface *intf = gsdev->iface;
+ struct gs_device_mode dm = {
+ .mode = GS_CAN_MODE_RESET,
+ };
+
+ return usb_control_msg_send(interface_to_usbdev(dev->iface), 0,
+ GS_USB_BREQ_MODE,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ dev->channel, 0, &dm, sizeof(dm), 1000,
+ GFP_KERNEL);
+}
+
+static inline int gs_usb_get_timestamp(const struct gs_can *dev,
+ u32 *timestamp_p)
+{
+ __le32 timestamp;
int rc;
- dm = kzalloc(sizeof(*dm), GFP_KERNEL);
- if (!dm)
- return -ENOMEM;
+ rc = usb_control_msg_recv(interface_to_usbdev(dev->iface), 0,
+ GS_USB_BREQ_TIMESTAMP,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ dev->channel, 0,
+ &timestamp, sizeof(timestamp),
+ USB_CTRL_GET_TIMEOUT,
+ GFP_KERNEL);
+ if (rc)
+ return rc;
- dm->mode = GS_CAN_MODE_RESET;
+ *timestamp_p = le32_to_cpu(timestamp);
- rc = usb_control_msg(interface_to_usbdev(intf),
- usb_sndctrlpipe(interface_to_usbdev(intf), 0),
- GS_USB_BREQ_MODE,
- USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
- gsdev->channel, 0, dm, sizeof(*dm), 1000);
+ return 0;
+}
- kfree(dm);
+static u64 gs_usb_timestamp_read(const struct cyclecounter *cc) __must_hold(&dev->tc_lock)
+{
+ struct gs_can *dev = container_of(cc, struct gs_can, cc);
+ u32 timestamp = 0;
+ int err;
- return rc;
+ lockdep_assert_held(&dev->tc_lock);
+
+ /* drop lock for synchronous USB transfer */
+ spin_unlock_bh(&dev->tc_lock);
+ err = gs_usb_get_timestamp(dev, &timestamp);
+ spin_lock_bh(&dev->tc_lock);
+ if (err)
+ netdev_err(dev->netdev,
+ "Error %d while reading timestamp. HW timestamps may be inaccurate.",
+ err);
+
+ return timestamp;
+}
+
+static void gs_usb_timestamp_work(struct work_struct *work)
+{
+ struct delayed_work *delayed_work = to_delayed_work(work);
+ struct gs_can *dev;
+
+ dev = container_of(delayed_work, struct gs_can, timestamp);
+ spin_lock_bh(&dev->tc_lock);
+ timecounter_read(&dev->tc);
+ spin_unlock_bh(&dev->tc_lock);
+
+ schedule_delayed_work(&dev->timestamp,
+ GS_USB_TIMESTAMP_WORK_DELAY_SEC * HZ);
+}
+
+static void gs_usb_skb_set_timestamp(struct gs_can *dev,
+ struct sk_buff *skb, u32 timestamp)
+{
+ struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
+ u64 ns;
+
+ spin_lock_bh(&dev->tc_lock);
+ ns = timecounter_cyc2time(&dev->tc, timestamp);
+ spin_unlock_bh(&dev->tc_lock);
+
+ hwtstamps->hwtstamp = ns_to_ktime(ns);
+}
+
+static void gs_usb_timestamp_init(struct gs_can *dev)
+{
+ struct cyclecounter *cc = &dev->cc;
+
+ cc->read = gs_usb_timestamp_read;
+ cc->mask = CYCLECOUNTER_MASK(32);
+ cc->shift = 32 - bits_per(NSEC_PER_SEC / GS_USB_TIMESTAMP_TIMER_HZ);
+ cc->mult = clocksource_hz2mult(GS_USB_TIMESTAMP_TIMER_HZ, cc->shift);
+
+ spin_lock_init(&dev->tc_lock);
+ spin_lock_bh(&dev->tc_lock);
+ timecounter_init(&dev->tc, &dev->cc, ktime_get_real_ns());
+ spin_unlock_bh(&dev->tc_lock);
+
+ INIT_DELAYED_WORK(&dev->timestamp, gs_usb_timestamp_work);
+ schedule_delayed_work(&dev->timestamp,
+ GS_USB_TIMESTAMP_WORK_DELAY_SEC * HZ);
+}
+
+static void gs_usb_timestamp_stop(struct gs_can *dev)
+{
+ cancel_delayed_work_sync(&dev->timestamp);
}
static void gs_update_state(struct gs_can *dev, struct can_frame *cf)
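The timestamp helpers above follow the stock cyclecounter/timecounter recipe: the device's free-running 32-bit microsecond counter is wrapped in a struct cyclecounter whose ->read() fetches the raw value, and a struct timecounter extends it to 64-bit nanoseconds as long as it is sampled more often than half the wrap period (the delayed work's job). A minimal sketch of the moving parts, with locking omitted and read_hw_counter() standing in for the real USB query:

    static u64 my_cc_read(const struct cyclecounter *cc)
    {
            return read_hw_counter();       /* raw 32-bit µs value */
    }

    static struct cyclecounter cc = {
            .read = my_cc_read,
            .mask = CYCLECOUNTER_MASK(32),
    };
    static struct timecounter tc;

    static void ts_init(void)
    {
            cc.shift = 10;                          /* example scaling */
            cc.mult = clocksource_hz2mult(1000000, cc.shift);
            timecounter_init(&tc, &cc, ktime_get_real_ns());
    }

    static u64 ts_to_ns(u32 hw_timestamp)
    {
            /* extend the wrapped 32-bit counter to 64-bit nanoseconds */
            return timecounter_cyc2time(&tc, hw_timestamp);
    }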
@@ -376,6 +502,24 @@ static void gs_update_state(struct gs_can *dev, struct can_frame *cf)
}
}
+static void gs_usb_set_timestamp(struct gs_can *dev, struct sk_buff *skb,
+ const struct gs_host_frame *hf)
+{
+ u32 timestamp;
+
+ if (!(dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP))
+ return;
+
+ if (hf->flags & GS_CAN_FLAG_FD)
+ timestamp = le32_to_cpu(hf->canfd_ts->timestamp_us);
+ else
+ timestamp = le32_to_cpu(hf->classic_can_ts->timestamp_us);
+
+ gs_usb_skb_set_timestamp(dev, skb, timestamp);
+
+ return;
+}
+
static void gs_usb_receive_bulk_callback(struct urb *urb)
{
struct gs_usb *usbcan = urb->context;
@@ -443,6 +587,8 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
gs_update_state(dev, cf);
}
+ gs_usb_set_timestamp(dev, skb, hf);
+
netdev->stats.rx_packets++;
netdev->stats.rx_bytes += hf->can_dlc;
@@ -465,6 +611,9 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
goto resubmit_urb;
}
+ skb = dev->can.echo_skb[hf->echo_id];
+ gs_usb_set_timestamp(dev, skb, hf);
+
netdev->stats.tx_packets++;
netdev->stats.tx_bytes += can_get_echo_skb(netdev, hf->echo_id,
NULL);
@@ -491,7 +640,7 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
resubmit_urb:
usb_fill_bulk_urb(urb, usbcan->udev,
- usb_rcvbulkpipe(usbcan->udev, GSUSB_ENDPOINT_IN),
+ usb_rcvbulkpipe(usbcan->udev, GS_USB_ENDPOINT_IN),
hf, dev->parent->hf_size_rx,
gs_usb_receive_bulk_callback, usbcan);
@@ -511,72 +660,44 @@ static int gs_usb_set_bittiming(struct net_device *netdev)
{
struct gs_can *dev = netdev_priv(netdev);
struct can_bittiming *bt = &dev->can.bittiming;
- struct usb_interface *intf = dev->iface;
- int rc;
- struct gs_device_bittiming *dbt;
-
- dbt = kmalloc(sizeof(*dbt), GFP_KERNEL);
- if (!dbt)
- return -ENOMEM;
-
- dbt->prop_seg = cpu_to_le32(bt->prop_seg);
- dbt->phase_seg1 = cpu_to_le32(bt->phase_seg1);
- dbt->phase_seg2 = cpu_to_le32(bt->phase_seg2);
- dbt->sjw = cpu_to_le32(bt->sjw);
- dbt->brp = cpu_to_le32(bt->brp);
+ struct gs_device_bittiming dbt = {
+ .prop_seg = cpu_to_le32(bt->prop_seg),
+ .phase_seg1 = cpu_to_le32(bt->phase_seg1),
+ .phase_seg2 = cpu_to_le32(bt->phase_seg2),
+ .sjw = cpu_to_le32(bt->sjw),
+ .brp = cpu_to_le32(bt->brp),
+ };
/* request bit timings */
- rc = usb_control_msg(interface_to_usbdev(intf),
- usb_sndctrlpipe(interface_to_usbdev(intf), 0),
- GS_USB_BREQ_BITTIMING,
- USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
- dev->channel, 0, dbt, sizeof(*dbt), 1000);
-
- kfree(dbt);
-
- if (rc < 0)
- dev_err(netdev->dev.parent, "Couldn't set bittimings (err=%d)",
- rc);
-
- return (rc > 0) ? 0 : rc;
+ return usb_control_msg_send(interface_to_usbdev(dev->iface), 0,
+ GS_USB_BREQ_BITTIMING,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ dev->channel, 0, &dbt, sizeof(dbt), 1000,
+ GFP_KERNEL);
}
static int gs_usb_set_data_bittiming(struct net_device *netdev)
{
struct gs_can *dev = netdev_priv(netdev);
struct can_bittiming *bt = &dev->can.data_bittiming;
- struct usb_interface *intf = dev->iface;
- struct gs_device_bittiming *dbt;
+ struct gs_device_bittiming dbt = {
+ .prop_seg = cpu_to_le32(bt->prop_seg),
+ .phase_seg1 = cpu_to_le32(bt->phase_seg1),
+ .phase_seg2 = cpu_to_le32(bt->phase_seg2),
+ .sjw = cpu_to_le32(bt->sjw),
+ .brp = cpu_to_le32(bt->brp),
+ };
u8 request = GS_USB_BREQ_DATA_BITTIMING;
- int rc;
-
- dbt = kmalloc(sizeof(*dbt), GFP_KERNEL);
- if (!dbt)
- return -ENOMEM;
-
- dbt->prop_seg = cpu_to_le32(bt->prop_seg);
- dbt->phase_seg1 = cpu_to_le32(bt->phase_seg1);
- dbt->phase_seg2 = cpu_to_le32(bt->phase_seg2);
- dbt->sjw = cpu_to_le32(bt->sjw);
- dbt->brp = cpu_to_le32(bt->brp);
if (dev->feature & GS_CAN_FEATURE_QUIRK_BREQ_CANTACT_PRO)
request = GS_USB_BREQ_QUIRK_CANTACT_PRO_DATA_BITTIMING;
- /* request bit timings */
- rc = usb_control_msg(interface_to_usbdev(intf),
- usb_sndctrlpipe(interface_to_usbdev(intf), 0),
- request,
- USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
- dev->channel, 0, dbt, sizeof(*dbt), 1000);
-
- kfree(dbt);
-
- if (rc < 0)
- dev_err(netdev->dev.parent,
- "Couldn't set data bittimings (err=%d)", rc);
-
- return (rc > 0) ? 0 : rc;
+ /* request data bit timings */
+ return usb_control_msg_send(interface_to_usbdev(dev->iface), 0,
+ request,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ dev->channel, 0, &dbt, sizeof(dbt), 1000,
+ GFP_KERNEL);
}
static void gs_usb_xmit_callback(struct urb *urb)
@@ -587,9 +708,6 @@ static void gs_usb_xmit_callback(struct urb *urb)
if (urb->status)
netdev_info(netdev, "usb xmit fail %u\n", txc->echo_id);
-
- usb_free_coherent(urb->dev, urb->transfer_buffer_length,
- urb->transfer_buffer, urb->transfer_dma);
}
static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
@@ -618,8 +736,7 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
if (!urb)
goto nomem_urb;
- hf = usb_alloc_coherent(dev->udev, dev->hf_size_tx, GFP_ATOMIC,
- &urb->transfer_dma);
+ hf = kmalloc(dev->hf_size_tx, GFP_ATOMIC);
if (!hf) {
netdev_err(netdev, "No memory left for USB buffer\n");
goto nomem_hf;
@@ -659,11 +776,11 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
}
usb_fill_bulk_urb(urb, dev->udev,
- usb_sndbulkpipe(dev->udev, GSUSB_ENDPOINT_OUT),
+ usb_sndbulkpipe(dev->udev, GS_USB_ENDPOINT_OUT),
hf, dev->hf_size_tx,
gs_usb_xmit_callback, txc);
- urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+ urb->transfer_flags |= URB_FREE_BUFFER;
usb_anchor_urb(urb, &dev->tx_submitted);
can_put_echo_skb(skb, netdev, idx, 0);
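The TX path (and, further down, the RX path) drops usb_alloc_coherent() plus URB_NO_TRANSFER_DMA_MAP in favour of plain kmalloc() plus URB_FREE_BUFFER: the USB core maps the buffer for DMA itself and kfree()s it when the URB is released, so the driver no longer has to keep the coherent buffers and their DMA handles around. A minimal sketch of the submit side, with error handling elided and BUF_SIZE/EP_OUT as placeholders:

    struct urb *urb = usb_alloc_urb(0, GFP_ATOMIC);
    void *buf = kmalloc(BUF_SIZE, GFP_ATOMIC);

    usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, EP_OUT),
                      buf, BUF_SIZE, my_complete, ctx);
    /* the core frees "buf" together with the URB */
    urb->transfer_flags |= URB_FREE_BUFFER;

    if (usb_submit_urb(urb, GFP_ATOMIC))
            usb_free_urb(urb);      /* also frees buf via URB_FREE_BUFFER */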
@@ -678,8 +795,6 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
gs_free_tx_context(txc);
usb_unanchor_urb(urb);
- usb_free_coherent(dev->udev, urb->transfer_buffer_length,
- urb->transfer_buffer, urb->transfer_dma);
if (rc == -ENODEV) {
netif_device_detach(netdev);
@@ -699,8 +814,7 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
return NETDEV_TX_OK;
badidx:
- usb_free_coherent(dev->udev, urb->transfer_buffer_length,
- urb->transfer_buffer, urb->transfer_dma);
+ kfree(hf);
nomem_hf:
usb_free_urb(urb);
@@ -715,11 +829,13 @@ static int gs_can_open(struct net_device *netdev)
{
struct gs_can *dev = netdev_priv(netdev);
struct gs_usb *parent = dev->parent;
- int rc, i;
- struct gs_device_mode *dm;
+ struct gs_device_mode dm = {
+ .mode = cpu_to_le32(GS_CAN_MODE_START),
+ };
struct gs_host_frame *hf;
u32 ctrlmode;
u32 flags = 0;
+ int rc, i;
rc = open_candev(netdev);
if (rc)
@@ -744,7 +860,6 @@ static int gs_can_open(struct net_device *netdev)
for (i = 0; i < GS_MAX_RX_URBS; i++) {
struct urb *urb;
u8 *buf;
- dma_addr_t buf_dma;
/* alloc rx urb */
urb = usb_alloc_urb(0, GFP_KERNEL);
@@ -752,10 +867,8 @@ static int gs_can_open(struct net_device *netdev)
return -ENOMEM;
/* alloc rx buffer */
- buf = usb_alloc_coherent(dev->udev,
- dev->parent->hf_size_rx,
- GFP_KERNEL,
- &buf_dma);
+ buf = kmalloc(dev->parent->hf_size_rx,
+ GFP_KERNEL);
if (!buf) {
netdev_err(netdev,
"No memory left for USB buffer\n");
@@ -763,17 +876,15 @@ static int gs_can_open(struct net_device *netdev)
return -ENOMEM;
}
- urb->transfer_dma = buf_dma;
-
/* fill, anchor, and submit rx urb */
usb_fill_bulk_urb(urb,
dev->udev,
usb_rcvbulkpipe(dev->udev,
- GSUSB_ENDPOINT_IN),
+ GS_USB_ENDPOINT_IN),
buf,
dev->parent->hf_size_rx,
gs_usb_receive_bulk_callback, parent);
- urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+ urb->transfer_flags |= URB_FREE_BUFFER;
usb_anchor_urb(urb, &parent->rx_submitted);
@@ -786,17 +897,10 @@ static int gs_can_open(struct net_device *netdev)
"usb_submit failed (err=%d)\n", rc);
usb_unanchor_urb(urb);
- usb_free_coherent(dev->udev,
- sizeof(struct gs_host_frame),
- buf,
- buf_dma);
usb_free_urb(urb);
break;
}
- dev->rxbuf[i] = buf;
- dev->rxbuf_dma[i] = buf_dma;
-
/* Drop reference,
* USB core will take care of freeing it
*/
@@ -804,10 +908,6 @@ static int gs_can_open(struct net_device *netdev)
}
}
- dm = kmalloc(sizeof(*dm), GFP_KERNEL);
- if (!dm)
- return -ENOMEM;
-
/* flags */
if (ctrlmode & CAN_CTRLMODE_LOOPBACK)
flags |= GS_CAN_MODE_LOOP_BACK;
@@ -823,25 +923,30 @@ static int gs_can_open(struct net_device *netdev)
if (ctrlmode & CAN_CTRLMODE_3_SAMPLES)
flags |= GS_CAN_MODE_TRIPLE_SAMPLE;
+ /* if hardware supports timestamps, enable it */
+ if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
+ flags |= GS_CAN_MODE_HW_TIMESTAMP;
+
+ /* start polling timestamp */
+ if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
+ gs_usb_timestamp_init(dev);
+
/* finally start device */
dev->can.state = CAN_STATE_ERROR_ACTIVE;
- dm->mode = cpu_to_le32(GS_CAN_MODE_START);
- dm->flags = cpu_to_le32(flags);
- rc = usb_control_msg(interface_to_usbdev(dev->iface),
- usb_sndctrlpipe(interface_to_usbdev(dev->iface), 0),
- GS_USB_BREQ_MODE,
- USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
- dev->channel, 0, dm, sizeof(*dm), 1000);
-
- if (rc < 0) {
+ dm.flags = cpu_to_le32(flags);
+ rc = usb_control_msg_send(interface_to_usbdev(dev->iface), 0,
+ GS_USB_BREQ_MODE,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ dev->channel, 0, &dm, sizeof(dm), 1000,
+ GFP_KERNEL);
+ if (rc) {
netdev_err(netdev, "Couldn't start device (err=%d)\n", rc);
- kfree(dm);
+ if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
+ gs_usb_timestamp_stop(dev);
dev->can.state = CAN_STATE_STOPPED;
return rc;
}
- kfree(dm);
-
parent->active_channels++;
if (!(dev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY))
netif_start_queue(netdev);
@@ -854,19 +959,17 @@ static int gs_can_close(struct net_device *netdev)
int rc;
struct gs_can *dev = netdev_priv(netdev);
struct gs_usb *parent = dev->parent;
- unsigned int i;
netif_stop_queue(netdev);
+ /* stop polling timestamp */
+ if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
+ gs_usb_timestamp_stop(dev);
+
/* Stop polling */
parent->active_channels--;
if (!parent->active_channels) {
usb_kill_anchored_urbs(&parent->rx_submitted);
- for (i = 0; i < GS_MAX_RX_URBS; i++)
- usb_free_coherent(dev->udev,
- sizeof(struct gs_host_frame),
- dev->rxbuf[i],
- dev->rxbuf_dma[i]);
}
/* Stop sending URBs */
@@ -890,38 +993,39 @@ static int gs_can_close(struct net_device *netdev)
return 0;
}
+static int gs_can_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+ const struct gs_can *dev = netdev_priv(netdev);
+
+ if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
+ return can_eth_ioctl_hwts(netdev, ifr, cmd);
+
+ return -EOPNOTSUPP;
+}
+
static const struct net_device_ops gs_usb_netdev_ops = {
.ndo_open = gs_can_open,
.ndo_stop = gs_can_close,
.ndo_start_xmit = gs_can_start_xmit,
.ndo_change_mtu = can_change_mtu,
+ .ndo_eth_ioctl = gs_can_eth_ioctl,
};
static int gs_usb_set_identify(struct net_device *netdev, bool do_identify)
{
struct gs_can *dev = netdev_priv(netdev);
- struct gs_identify_mode *imode;
- int rc;
-
- imode = kmalloc(sizeof(*imode), GFP_KERNEL);
-
- if (!imode)
- return -ENOMEM;
+ struct gs_identify_mode imode;
if (do_identify)
- imode->mode = cpu_to_le32(GS_CAN_IDENTIFY_ON);
+ imode.mode = cpu_to_le32(GS_CAN_IDENTIFY_ON);
else
- imode->mode = cpu_to_le32(GS_CAN_IDENTIFY_OFF);
+ imode.mode = cpu_to_le32(GS_CAN_IDENTIFY_OFF);
- rc = usb_control_msg(interface_to_usbdev(dev->iface),
- usb_sndctrlpipe(interface_to_usbdev(dev->iface), 0),
- GS_USB_BREQ_IDENTIFY,
- USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
- dev->channel, 0, imode, sizeof(*imode), 100);
-
- kfree(imode);
-
- return (rc > 0) ? 0 : rc;
+ return usb_control_msg_send(interface_to_usbdev(dev->iface), 0,
+ GS_USB_BREQ_IDENTIFY,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ dev->channel, 0, &imode, sizeof(imode), 100,
+ GFP_KERNEL);
}
/* blink LED's for finding the this interface */
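The conversion that recurs throughout gs_usb, in gs_cmd_reset(), both bittiming setters, the identify helper and the probe path, replaces usb_control_msg() with usb_control_msg_send()/usb_control_msg_recv(). The wrappers accept data on the stack (they bounce through an internal allocation), treat short transfers as errors and return 0 or a negative errno, which removes both the kmalloc/kfree boilerplate and the "(rc > 0) ? 0 : rc" translation. A minimal sketch of a vendor request with a stack buffer; MY_BREQ, channel and udev are placeholders:

    __le32 mode = cpu_to_le32(1);   /* example payload, lives on the stack */
    int rc;

    rc = usb_control_msg_send(udev, 0, MY_BREQ,
                              USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
                              channel, 0, &mode, sizeof(mode), 1000, GFP_KERNEL);
    if (rc)
            return rc;      /* already 0 or a negative errno */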
@@ -948,9 +1052,67 @@ static int gs_usb_set_phys_id(struct net_device *netdev,
return rc;
}
+static int gs_usb_get_ts_info(struct net_device *netdev,
+ struct ethtool_ts_info *info)
+{
+ struct gs_can *dev = netdev_priv(netdev);
+
+ /* report if device supports HW timestamps */
+ if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
+ return can_ethtool_op_get_ts_info_hwts(netdev, info);
+
+ return ethtool_op_get_ts_info(netdev, info);
+}
+
static const struct ethtool_ops gs_usb_ethtool_ops = {
.set_phys_id = gs_usb_set_phys_id,
- .get_ts_info = ethtool_op_get_ts_info,
+ .get_ts_info = gs_usb_get_ts_info,
+};
+
+static int gs_usb_get_termination(struct net_device *netdev, u16 *term)
+{
+ struct gs_can *dev = netdev_priv(netdev);
+ struct gs_device_termination_state term_state;
+ int rc;
+
+ rc = usb_control_msg_recv(interface_to_usbdev(dev->iface), 0,
+ GS_USB_BREQ_GET_TERMINATION,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ dev->channel, 0,
+ &term_state, sizeof(term_state), 1000,
+ GFP_KERNEL);
+ if (rc)
+ return rc;
+
+ if (term_state.state == cpu_to_le32(GS_CAN_TERMINATION_STATE_ON))
+ *term = GS_USB_TERMINATION_ENABLED;
+ else
+ *term = GS_USB_TERMINATION_DISABLED;
+
+ return 0;
+}
+
+static int gs_usb_set_termination(struct net_device *netdev, u16 term)
+{
+ struct gs_can *dev = netdev_priv(netdev);
+ struct gs_device_termination_state term_state;
+
+ if (term == GS_USB_TERMINATION_ENABLED)
+ term_state.state = cpu_to_le32(GS_CAN_TERMINATION_STATE_ON);
+ else
+ term_state.state = cpu_to_le32(GS_CAN_TERMINATION_STATE_OFF);
+
+ return usb_control_msg_send(interface_to_usbdev(dev->iface), 0,
+ GS_USB_BREQ_SET_TERMINATION,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ dev->channel, 0,
+ &term_state, sizeof(term_state), 1000,
+ GFP_KERNEL);
+}
+
+static const u16 gs_usb_termination_const[] = {
+ GS_USB_TERMINATION_DISABLED,
+ GS_USB_TERMINATION_ENABLED
+};
static struct gs_can *gs_make_candev(unsigned int channel,
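The termination support plugs into the CAN device core: termination_const/termination_const_cnt advertise the selectable resistor values (here just 0 Ω for "off" and the standard 120 Ω bus termination), and do_set_termination() is called when the value is changed from user space. Assuming the channel shows up as can0, that is roughly:

    ip link set can0 type can termination 120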
@@ -960,26 +1122,21 @@ static struct gs_can *gs_make_candev(unsigned int channel,
struct gs_can *dev;
struct net_device *netdev;
int rc;
- struct gs_device_bt_const *bt_const;
- struct gs_device_bt_const_extended *bt_const_extended;
+ struct gs_device_bt_const_extended bt_const_extended;
+ struct gs_device_bt_const bt_const;
u32 feature;
- bt_const = kmalloc(sizeof(*bt_const), GFP_KERNEL);
- if (!bt_const)
- return ERR_PTR(-ENOMEM);
-
/* fetch bit timing constants */
- rc = usb_control_msg(interface_to_usbdev(intf),
- usb_rcvctrlpipe(interface_to_usbdev(intf), 0),
- GS_USB_BREQ_BT_CONST,
- USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
- channel, 0, bt_const, sizeof(*bt_const), 1000);
+ rc = usb_control_msg_recv(interface_to_usbdev(intf), 0,
+ GS_USB_BREQ_BT_CONST,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ channel, 0, &bt_const, sizeof(bt_const), 1000,
+ GFP_KERNEL);
- if (rc < 0) {
+ if (rc) {
dev_err(&intf->dev,
- "Couldn't get bit timing const for channel (err=%d)\n",
- rc);
- kfree(bt_const);
+ "Couldn't get bit timing const for channel %d (%pe)\n",
+ channel, ERR_PTR(rc));
return ERR_PTR(rc);
}
@@ -987,7 +1144,6 @@ static struct gs_can *gs_make_candev(unsigned int channel,
netdev = alloc_candev(sizeof(struct gs_can), GS_MAX_TX_URBS);
if (!netdev) {
dev_err(&intf->dev, "Couldn't allocate candev\n");
- kfree(bt_const);
return ERR_PTR(-ENOMEM);
}
@@ -1000,14 +1156,14 @@ static struct gs_can *gs_make_candev(unsigned int channel,
/* dev setup */
strcpy(dev->bt_const.name, KBUILD_MODNAME);
- dev->bt_const.tseg1_min = le32_to_cpu(bt_const->tseg1_min);
- dev->bt_const.tseg1_max = le32_to_cpu(bt_const->tseg1_max);
- dev->bt_const.tseg2_min = le32_to_cpu(bt_const->tseg2_min);
- dev->bt_const.tseg2_max = le32_to_cpu(bt_const->tseg2_max);
- dev->bt_const.sjw_max = le32_to_cpu(bt_const->sjw_max);
- dev->bt_const.brp_min = le32_to_cpu(bt_const->brp_min);
- dev->bt_const.brp_max = le32_to_cpu(bt_const->brp_max);
- dev->bt_const.brp_inc = le32_to_cpu(bt_const->brp_inc);
+ dev->bt_const.tseg1_min = le32_to_cpu(bt_const.tseg1_min);
+ dev->bt_const.tseg1_max = le32_to_cpu(bt_const.tseg1_max);
+ dev->bt_const.tseg2_min = le32_to_cpu(bt_const.tseg2_min);
+ dev->bt_const.tseg2_max = le32_to_cpu(bt_const.tseg2_max);
+ dev->bt_const.sjw_max = le32_to_cpu(bt_const.sjw_max);
+ dev->bt_const.brp_min = le32_to_cpu(bt_const.brp_min);
+ dev->bt_const.brp_max = le32_to_cpu(bt_const.brp_max);
+ dev->bt_const.brp_inc = le32_to_cpu(bt_const.brp_inc);
dev->udev = interface_to_usbdev(intf);
dev->iface = intf;
@@ -1024,13 +1180,13 @@ static struct gs_can *gs_make_candev(unsigned int channel,
/* can setup */
dev->can.state = CAN_STATE_STOPPED;
- dev->can.clock.freq = le32_to_cpu(bt_const->fclk_can);
+ dev->can.clock.freq = le32_to_cpu(bt_const.fclk_can);
dev->can.bittiming_const = &dev->bt_const;
dev->can.do_set_bittiming = gs_usb_set_bittiming;
dev->can.ctrlmode_supported = CAN_CTRLMODE_CC_LEN8_DLC;
- feature = le32_to_cpu(bt_const->feature);
+ feature = le32_to_cpu(bt_const.feature);
dev->feature = FIELD_GET(GS_CAN_FEATURE_MASK, feature);
if (feature & GS_CAN_FEATURE_LISTEN_ONLY)
dev->can.ctrlmode_supported |= CAN_CTRLMODE_LISTENONLY;
@@ -1053,6 +1209,21 @@ static struct gs_can *gs_make_candev(unsigned int channel,
dev->can.do_set_data_bittiming = gs_usb_set_data_bittiming;
}
+ if (feature & GS_CAN_FEATURE_TERMINATION) {
+ rc = gs_usb_get_termination(netdev, &dev->can.termination);
+ if (rc) {
+ dev->feature &= ~GS_CAN_FEATURE_TERMINATION;
+
+ dev_info(&intf->dev,
+ "Disabling termination support for channel %d (%pe)\n",
+ channel, ERR_PTR(rc));
+ } else {
+ dev->can.termination_const = gs_usb_termination_const;
+ dev->can.termination_const_cnt = ARRAY_SIZE(gs_usb_termination_const);
+ dev->can.do_set_termination = gs_usb_set_termination;
+ }
+ }
+
/* The CANtact Pro from LinkLayer Labs is based on the
* LPC54616 µC, which is affected by the NXP LPC USB transfer
* erratum. However, the current firmware (version 2) doesn't
@@ -1067,8 +1238,8 @@ static struct gs_can *gs_make_candev(unsigned int channel,
* GS_CAN_FEATURE_QUIRK_BREQ_CANTACT_PRO to workaround this
* issue.
*/
- if (dev->udev->descriptor.idVendor == cpu_to_le16(USB_GSUSB_1_VENDOR_ID) &&
- dev->udev->descriptor.idProduct == cpu_to_le16(USB_GSUSB_1_PRODUCT_ID) &&
+ if (dev->udev->descriptor.idVendor == cpu_to_le16(USB_GS_USB_1_VENDOR_ID) &&
+ dev->udev->descriptor.idProduct == cpu_to_le16(USB_GS_USB_1_PRODUCT_ID) &&
dev->udev->manufacturer && dev->udev->product &&
!strcmp(dev->udev->manufacturer, "LinkLayer Labs") &&
!strcmp(dev->udev->product, "CANtact Pro") &&
@@ -1081,57 +1252,52 @@ static struct gs_can *gs_make_candev(unsigned int channel,
feature & GS_CAN_FEATURE_IDENTIFY))
dev->feature &= ~GS_CAN_FEATURE_IDENTIFY;
- kfree(bt_const);
-
/* fetch extended bit timing constants if device has feature
* GS_CAN_FEATURE_FD and GS_CAN_FEATURE_BT_CONST_EXT
*/
if (feature & GS_CAN_FEATURE_FD &&
feature & GS_CAN_FEATURE_BT_CONST_EXT) {
- bt_const_extended = kmalloc(sizeof(*bt_const_extended), GFP_KERNEL);
- if (!bt_const_extended)
- return ERR_PTR(-ENOMEM);
-
- rc = usb_control_msg(interface_to_usbdev(intf),
- usb_rcvctrlpipe(interface_to_usbdev(intf), 0),
- GS_USB_BREQ_BT_CONST_EXT,
- USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
- channel, 0, bt_const_extended,
- sizeof(*bt_const_extended),
- 1000);
- if (rc < 0) {
+ rc = usb_control_msg_recv(interface_to_usbdev(intf), 0,
+ GS_USB_BREQ_BT_CONST_EXT,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ channel, 0, &bt_const_extended,
+ sizeof(bt_const_extended),
+ 1000, GFP_KERNEL);
+ if (rc) {
dev_err(&intf->dev,
- "Couldn't get extended bit timing const for channel (err=%d)\n",
- rc);
- kfree(bt_const_extended);
- return ERR_PTR(rc);
+ "Couldn't get extended bit timing const for channel %d (%pe)\n",
+ channel, ERR_PTR(rc));
+ goto out_free_candev;
}
strcpy(dev->data_bt_const.name, KBUILD_MODNAME);
- dev->data_bt_const.tseg1_min = le32_to_cpu(bt_const_extended->dtseg1_min);
- dev->data_bt_const.tseg1_max = le32_to_cpu(bt_const_extended->dtseg1_max);
- dev->data_bt_const.tseg2_min = le32_to_cpu(bt_const_extended->dtseg2_min);
- dev->data_bt_const.tseg2_max = le32_to_cpu(bt_const_extended->dtseg2_max);
- dev->data_bt_const.sjw_max = le32_to_cpu(bt_const_extended->dsjw_max);
- dev->data_bt_const.brp_min = le32_to_cpu(bt_const_extended->dbrp_min);
- dev->data_bt_const.brp_max = le32_to_cpu(bt_const_extended->dbrp_max);
- dev->data_bt_const.brp_inc = le32_to_cpu(bt_const_extended->dbrp_inc);
+ dev->data_bt_const.tseg1_min = le32_to_cpu(bt_const_extended.dtseg1_min);
+ dev->data_bt_const.tseg1_max = le32_to_cpu(bt_const_extended.dtseg1_max);
+ dev->data_bt_const.tseg2_min = le32_to_cpu(bt_const_extended.dtseg2_min);
+ dev->data_bt_const.tseg2_max = le32_to_cpu(bt_const_extended.dtseg2_max);
+ dev->data_bt_const.sjw_max = le32_to_cpu(bt_const_extended.dsjw_max);
+ dev->data_bt_const.brp_min = le32_to_cpu(bt_const_extended.dbrp_min);
+ dev->data_bt_const.brp_max = le32_to_cpu(bt_const_extended.dbrp_max);
+ dev->data_bt_const.brp_inc = le32_to_cpu(bt_const_extended.dbrp_inc);
dev->can.data_bittiming_const = &dev->data_bt_const;
-
- kfree(bt_const_extended);
}
SET_NETDEV_DEV(netdev, &intf->dev);
rc = register_candev(dev->netdev);
if (rc) {
- free_candev(dev->netdev);
- dev_err(&intf->dev, "Couldn't register candev (err=%d)\n", rc);
- return ERR_PTR(rc);
+ dev_err(&intf->dev,
+ "Couldn't register candev for channel %d (%pe)\n",
+ channel, ERR_PTR(rc));
+ goto out_free_candev;
}
return dev;
+
+ out_free_candev:
+ free_candev(dev->netdev);
+ return ERR_PTR(rc);
}
static void gs_destroy_candev(struct gs_can *dev)
@@ -1147,76 +1313,61 @@ static int gs_usb_probe(struct usb_interface *intf,
struct usb_device *udev = interface_to_usbdev(intf);
struct gs_host_frame *hf;
struct gs_usb *dev;
- int rc = -ENOMEM;
+ struct gs_host_config hconf = {
+ .byte_order = cpu_to_le32(0x0000beef),
+ };
+ struct gs_device_config dconf;
unsigned int icount, i;
- struct gs_host_config *hconf;
- struct gs_device_config *dconf;
-
- hconf = kmalloc(sizeof(*hconf), GFP_KERNEL);
- if (!hconf)
- return -ENOMEM;
-
- hconf->byte_order = cpu_to_le32(0x0000beef);
+ int rc;
/* send host config */
- rc = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
- GS_USB_BREQ_HOST_FORMAT,
- USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
- 1, intf->cur_altsetting->desc.bInterfaceNumber,
- hconf, sizeof(*hconf), 1000);
-
- kfree(hconf);
-
- if (rc < 0) {
+ rc = usb_control_msg_send(udev, 0,
+ GS_USB_BREQ_HOST_FORMAT,
+ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ 1, intf->cur_altsetting->desc.bInterfaceNumber,
+ &hconf, sizeof(hconf), 1000,
+ GFP_KERNEL);
+ if (rc) {
dev_err(&intf->dev, "Couldn't send data format (err=%d)\n", rc);
return rc;
}
- dconf = kmalloc(sizeof(*dconf), GFP_KERNEL);
- if (!dconf)
- return -ENOMEM;
-
/* read device config */
- rc = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
- GS_USB_BREQ_DEVICE_CONFIG,
- USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
- 1, intf->cur_altsetting->desc.bInterfaceNumber,
- dconf, sizeof(*dconf), 1000);
- if (rc < 0) {
+ rc = usb_control_msg_recv(udev, 0,
+ GS_USB_BREQ_DEVICE_CONFIG,
+ USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
+ 1, intf->cur_altsetting->desc.bInterfaceNumber,
+ &dconf, sizeof(dconf), 1000,
+ GFP_KERNEL);
+ if (rc) {
dev_err(&intf->dev, "Couldn't get device config: (err=%d)\n",
rc);
- kfree(dconf);
return rc;
}
- icount = dconf->icount + 1;
+ icount = dconf.icount + 1;
dev_info(&intf->dev, "Configuring for %u interfaces\n", icount);
if (icount > GS_MAX_INTF) {
dev_err(&intf->dev,
"Driver cannot handle more that %u CAN interfaces\n",
GS_MAX_INTF);
- kfree(dconf);
return -EINVAL;
}
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev) {
- kfree(dconf);
+ if (!dev)
return -ENOMEM;
- }
init_usb_anchor(&dev->rx_submitted);
- /* default to classic CAN, switch to CAN-FD if at least one of
- * our channels support CAN-FD.
- */
- dev->hf_size_rx = struct_size(hf, classic_can, 1);
usb_set_intfdata(intf, dev);
dev->udev = udev;
for (i = 0; i < icount; i++) {
- dev->canch[i] = gs_make_candev(i, intf, dconf);
+ unsigned int hf_size_rx = 0;
+
+ dev->canch[i] = gs_make_candev(i, intf, &dconf);
if (IS_ERR_OR_NULL(dev->canch[i])) {
/* save error code to return later */
rc = PTR_ERR(dev->canch[i]);
@@ -1227,18 +1378,28 @@ static int gs_usb_probe(struct usb_interface *intf,
gs_destroy_candev(dev->canch[i]);
usb_kill_anchored_urbs(&dev->rx_submitted);
- kfree(dconf);
kfree(dev);
return rc;
}
dev->canch[i]->parent = dev;
- if (dev->canch[i]->can.ctrlmode_supported & CAN_CTRLMODE_FD)
- dev->hf_size_rx = struct_size(hf, canfd, 1);
+ /* set RX packet size based on FD and if hardware
+ * timestamps are supported.
+ */
+ if (dev->canch[i]->can.ctrlmode_supported & CAN_CTRLMODE_FD) {
+ if (dev->canch[i]->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
+ hf_size_rx = struct_size(hf, canfd_ts, 1);
+ else
+ hf_size_rx = struct_size(hf, canfd, 1);
+ } else {
+ if (dev->canch[i]->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
+ hf_size_rx = struct_size(hf, classic_can_ts, 1);
+ else
+ hf_size_rx = struct_size(hf, classic_can, 1);
+ }
+ dev->hf_size_rx = max(dev->hf_size_rx, hf_size_rx);
}
- kfree(dconf);
-
return 0;
}
@@ -1263,8 +1424,8 @@ static void gs_usb_disconnect(struct usb_interface *intf)
}
static const struct usb_device_id gs_usb_table[] = {
- { USB_DEVICE_INTERFACE_NUMBER(USB_GSUSB_1_VENDOR_ID,
- USB_GSUSB_1_PRODUCT_ID, 0) },
+ { USB_DEVICE_INTERFACE_NUMBER(USB_GS_USB_1_VENDOR_ID,
+ USB_GS_USB_1_PRODUCT_ID, 0) },
{ USB_DEVICE_INTERFACE_NUMBER(USB_CANDLELIGHT_VENDOR_ID,
USB_CANDLELIGHT_PRODUCT_ID, 0) },
{ USB_DEVICE_INTERFACE_NUMBER(USB_CES_CANEXT_FD_VENDOR_ID,
diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
index dd65c101bfb8..6871d474dabf 100644
--- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
+++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
@@ -534,7 +534,7 @@ static int kvaser_usb_hydra_send_simple_cmd(struct kvaser_usb *dev,
struct kvaser_cmd *cmd;
int err;
- cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
@@ -573,7 +573,7 @@ kvaser_usb_hydra_send_simple_cmd_async(struct kvaser_usb_net_priv *priv,
struct kvaser_usb *dev = priv->dev;
int err;
- cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_ATOMIC);
+ cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
if (!cmd)
return -ENOMEM;
@@ -694,7 +694,7 @@ static int kvaser_usb_hydra_map_channel(struct kvaser_usb *dev, u16 transid,
struct kvaser_cmd *cmd;
int err;
- cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
@@ -735,7 +735,7 @@ static int kvaser_usb_hydra_get_single_capability(struct kvaser_usb *dev,
int err;
int i;
- cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
@@ -1394,7 +1394,7 @@ kvaser_usb_hydra_frame_to_cmd_ext(const struct kvaser_usb_net_priv *priv,
u32 kcan_id;
u32 kcan_header;
- cmd = kcalloc(1, sizeof(struct kvaser_cmd_ext), GFP_ATOMIC);
+ cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
if (!cmd)
return NULL;
@@ -1468,7 +1468,7 @@ kvaser_usb_hydra_frame_to_cmd_std(const struct kvaser_usb_net_priv *priv,
u32 flags;
u32 id;
- cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_ATOMIC);
+ cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
if (!cmd)
return NULL;
@@ -1533,7 +1533,7 @@ static int kvaser_usb_hydra_set_bittiming(struct net_device *netdev)
int sjw = bt->sjw;
int err;
- cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
@@ -1567,7 +1567,7 @@ static int kvaser_usb_hydra_set_data_bittiming(struct net_device *netdev)
int sjw = dbt->sjw;
int err;
- cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
@@ -1711,7 +1711,7 @@ static int kvaser_usb_hydra_get_software_details(struct kvaser_usb *dev)
u32 flags;
struct kvaser_usb_dev_card_data *card_data = &dev->card_data;
- cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
@@ -1851,7 +1851,7 @@ static int kvaser_usb_hydra_set_opt_mode(const struct kvaser_usb_net_priv *priv)
return -EINVAL;
}
- cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL);
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
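The kvaser_usb_hydra hunks are purely idiomatic: kcalloc(1, size, ...) allocates a single zeroed element, which is exactly what kzalloc() is for, and sizeof(*cmd) keeps the allocation tied to the pointer's type if it ever changes:

    cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); /* zeroed, sized from the pointee */
    if (!cmd)
            return -ENOMEM;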
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index 8c9d53f6e24c..225697d70a9a 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -962,7 +962,7 @@ static void peak_usb_disconnect(struct usb_interface *intf)
dev_prev_siblings = dev->prev_siblings;
dev->state &= ~PCAN_USB_STATE_CONNECTED;
- strlcpy(name, netdev->name, IFNAMSIZ);
+ strscpy(name, netdev->name, IFNAMSIZ);
unregister_netdev(netdev);
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
index 36b6310a2e5b..285635c23443 100644
--- a/drivers/net/can/vcan.c
+++ b/drivers/net/can/vcan.c
@@ -71,11 +71,10 @@ MODULE_PARM_DESC(echo, "Echo sent frames (for testing). Default: 0 (Off)");
static void vcan_rx(struct sk_buff *skb, struct net_device *dev)
{
- struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
struct net_device_stats *stats = &dev->stats;
stats->rx_packets++;
- stats->rx_bytes += cfd->len;
+ stats->rx_bytes += can_skb_get_data_len(skb);
skb->pkt_type = PACKET_BROADCAST;
skb->dev = dev;
@@ -86,14 +85,14 @@ static void vcan_rx(struct sk_buff *skb, struct net_device *dev)
static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
{
- struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
struct net_device_stats *stats = &dev->stats;
- int loop, len;
+ unsigned int len;
+ int loop;
if (can_dropped_invalid_skb(dev, skb))
return NETDEV_TX_OK;
- len = cfd->can_id & CAN_RTR_FLAG ? 0 : cfd->len;
+ len = can_skb_get_data_len(skb);
stats->tx_packets++;
stats->tx_bytes += len;
@@ -137,7 +136,8 @@ static int vcan_change_mtu(struct net_device *dev, int new_mtu)
if (dev->flags & IFF_UP)
return -EBUSY;
- if (new_mtu != CAN_MTU && new_mtu != CANFD_MTU)
+ if (new_mtu != CAN_MTU && new_mtu != CANFD_MTU &&
+ !can_is_canxl_dev_mtu(new_mtu))
return -EINVAL;
dev->mtu = new_mtu;
diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c
index cffd107d8b28..26a472d2ea58 100644
--- a/drivers/net/can/vxcan.c
+++ b/drivers/net/can/vxcan.c
@@ -38,10 +38,9 @@ static netdev_tx_t vxcan_xmit(struct sk_buff *oskb, struct net_device *dev)
{
struct vxcan_priv *priv = netdev_priv(dev);
struct net_device *peer;
- struct canfd_frame *cfd = (struct canfd_frame *)oskb->data;
struct net_device_stats *peerstats, *srcstats = &dev->stats;
struct sk_buff *skb;
- u8 len;
+ unsigned int len;
if (can_dropped_invalid_skb(dev, oskb))
return NETDEV_TX_OK;
@@ -70,7 +69,7 @@ static netdev_tx_t vxcan_xmit(struct sk_buff *oskb, struct net_device *dev)
skb->dev = peer;
skb->ip_summed = CHECKSUM_UNNECESSARY;
- len = cfd->can_id & CAN_RTR_FLAG ? 0 : cfd->len;
+ len = can_skb_get_data_len(skb);
if (netif_rx(skb) == NET_RX_SUCCESS) {
srcstats->tx_packets++;
srcstats->tx_bytes += len;
@@ -132,7 +131,8 @@ static int vxcan_change_mtu(struct net_device *dev, int new_mtu)
if (dev->flags & IFF_UP)
return -EBUSY;
- if (new_mtu != CAN_MTU && new_mtu != CANFD_MTU)
+ if (new_mtu != CAN_MTU && new_mtu != CANFD_MTU &&
+ !can_is_canxl_dev_mtu(new_mtu))
return -EINVAL;
dev->mtu = new_mtu;
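vcan and vxcan stop dereferencing struct canfd_frame directly and count bytes via can_skb_get_data_len(), which returns the payload length for Classical CAN, CAN FD and CAN XL frames alike (and 0 for RTR frames), while the MTU check gains can_is_canxl_dev_mtu() so the virtual devices can also carry CAN XL. The statistics update then reduces to:

    /* one helper covers CC, FD and XL and returns 0 for RTR frames */
    stats->tx_bytes += can_skb_get_data_len(skb);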
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index d8ae0e8af2a0..07507b4820d7 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -76,7 +76,7 @@ config NET_DSA_SMSC_LAN9303
select NET_DSA_TAG_LAN9303
select REGMAP
help
- This enables support for the SMSC/Microchip LAN9303 3 port ethernet
+ This enables support for the Microchip LAN9303/LAN9354 3 port ethernet
switch chips.
config NET_DSA_SMSC_LAN9303_I2C
@@ -90,11 +90,11 @@ config NET_DSA_SMSC_LAN9303_I2C
for I2C managed mode.
config NET_DSA_SMSC_LAN9303_MDIO
- tristate "SMSC/Microchip LAN9303 3-ports 10/100 ethernet switch in MDIO managed mode"
+ tristate "Microchip LAN9303/LAN9354 3-ports 10/100 ethernet switch in MDIO managed mode"
select NET_DSA_SMSC_LAN9303
depends on VLAN_8021Q || VLAN_8021Q=n
help
- Enable access functions if the SMSC/Microchip LAN9303 is configured
+ Enable access functions if the Microchip LAN9303/LAN9354 is configured
for MDIO managed mode.
config NET_DSA_VITESSE_VSC73XX
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 48cf344750ff..59cdfc51ce06 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -972,7 +972,7 @@ void b53_get_strings(struct dsa_switch *ds, int port, u32 stringset,
if (stringset == ETH_SS_STATS) {
for (i = 0; i < mib_size; i++)
- strlcpy(data + i * ETH_GSTRING_LEN,
+ strscpy(data + i * ETH_GSTRING_LEN,
mibs[i].name, ETH_GSTRING_LEN);
} else if (stringset == ETH_SS_PHY_STATS) {
phydev = b53_get_phy_device(ds, port);
diff --git a/drivers/net/dsa/b53/b53_mdio.c b/drivers/net/dsa/b53/b53_mdio.c
index a7aeb3c132c9..6ddc03b58b28 100644
--- a/drivers/net/dsa/b53/b53_mdio.c
+++ b/drivers/net/dsa/b53/b53_mdio.c
@@ -356,8 +356,6 @@ static void b53_mdio_remove(struct mdio_device *mdiodev)
return;
b53_switch_remove(dev);
-
- dev_set_drvdata(&mdiodev->dev, NULL);
}
static void b53_mdio_shutdown(struct mdio_device *mdiodev)
diff --git a/drivers/net/dsa/b53/b53_mmap.c b/drivers/net/dsa/b53/b53_mmap.c
index ae4c79d39bc0..e968322dfbf0 100644
--- a/drivers/net/dsa/b53/b53_mmap.c
+++ b/drivers/net/dsa/b53/b53_mmap.c
@@ -316,8 +316,6 @@ static int b53_mmap_remove(struct platform_device *pdev)
if (dev)
b53_switch_remove(dev);
- platform_set_drvdata(pdev, NULL);
-
return 0;
}
diff --git a/drivers/net/dsa/b53/b53_srab.c b/drivers/net/dsa/b53/b53_srab.c
index da0b889880f6..bcb44034404d 100644
--- a/drivers/net/dsa/b53/b53_srab.c
+++ b/drivers/net/dsa/b53/b53_srab.c
@@ -667,8 +667,6 @@ static int b53_srab_remove(struct platform_device *pdev)
b53_srab_intr_set(dev->priv, false);
b53_switch_remove(dev);
- platform_set_drvdata(pdev, NULL);
-
return 0;
}
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index be0edfa093d0..cde253d27bd0 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -94,6 +94,24 @@ static u16 bcm_sf2_reg_led_base(struct bcm_sf2_priv *priv, int port)
return REG_SWITCH_STATUS;
}
+static u32 bcm_sf2_port_override_offset(struct bcm_sf2_priv *priv, int port)
+{
+ switch (priv->type) {
+ case BCM4908_DEVICE_ID:
+ case BCM7445_DEVICE_ID:
+ return port == 8 ? CORE_STS_OVERRIDE_IMP :
+ CORE_STS_OVERRIDE_GMIIP_PORT(port);
+ case BCM7278_DEVICE_ID:
+ return port == 8 ? CORE_STS_OVERRIDE_IMP2 :
+ CORE_STS_OVERRIDE_GMIIP2_PORT(port);
+ default:
+ WARN_ONCE(1, "Unsupported device: %d\n", priv->type);
+ }
+
+ /* RO fallback register */
+ return REG_SWITCH_STATUS;
+}
+
/* Return the number of active ports, not counting the IMP (CPU) port */
static unsigned int bcm_sf2_num_active_ports(struct dsa_switch *ds)
{
@@ -141,7 +159,7 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
unsigned int i;
- u32 reg, offset;
+ u32 reg;
/* Enable the port memories */
reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
@@ -167,21 +185,6 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
b53_brcm_hdr_setup(ds, port);
if (port == 8) {
- if (priv->type == BCM4908_DEVICE_ID ||
- priv->type == BCM7445_DEVICE_ID)
- offset = CORE_STS_OVERRIDE_IMP;
- else
- offset = CORE_STS_OVERRIDE_IMP2;
-
- /* Force link status for IMP port */
- reg = core_readl(priv, offset);
- reg |= (MII_SW_OR | LINK_STS);
- if (priv->type == BCM4908_DEVICE_ID)
- reg |= GMII_SPEED_UP_2G;
- else
- reg &= ~GMII_SPEED_UP_2G;
- core_writel(priv, reg, offset);
-
/* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
reg = core_readl(priv, CORE_IMP_CTL);
reg |= (RX_BCST_EN | RX_MCST_EN | RX_UCST_EN);
@@ -812,17 +815,10 @@ static void bcm_sf2_sw_mac_link_down(struct dsa_switch *ds, int port,
if (priv->wol_ports_mask & BIT(port))
return;
- if (port != core_readl(priv, CORE_IMP0_PRT_ID)) {
- if (priv->type == BCM4908_DEVICE_ID ||
- priv->type == BCM7445_DEVICE_ID)
- offset = CORE_STS_OVERRIDE_GMIIP_PORT(port);
- else
- offset = CORE_STS_OVERRIDE_GMIIP2_PORT(port);
-
- reg = core_readl(priv, offset);
- reg &= ~LINK_STS;
- core_writel(priv, reg, offset);
- }
+ offset = bcm_sf2_port_override_offset(priv, port);
+ reg = core_readl(priv, offset);
+ reg &= ~LINK_STS;
+ core_writel(priv, reg, offset);
bcm_sf2_sw_mac_link_set(ds, port, interface, false);
}
@@ -836,56 +832,56 @@ static void bcm_sf2_sw_mac_link_up(struct dsa_switch *ds, int port,
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
struct ethtool_eee *p = &priv->dev->ports[port].eee;
+ u32 reg_rgmii_ctrl = 0;
+ u32 reg, offset;
bcm_sf2_sw_mac_link_set(ds, port, interface, true);
- if (port != core_readl(priv, CORE_IMP0_PRT_ID)) {
- u32 reg_rgmii_ctrl = 0;
- u32 reg, offset;
+ offset = bcm_sf2_port_override_offset(priv, port);
- if (priv->type == BCM4908_DEVICE_ID ||
- priv->type == BCM7445_DEVICE_ID)
- offset = CORE_STS_OVERRIDE_GMIIP_PORT(port);
- else
- offset = CORE_STS_OVERRIDE_GMIIP2_PORT(port);
-
- if (interface == PHY_INTERFACE_MODE_RGMII ||
- interface == PHY_INTERFACE_MODE_RGMII_TXID ||
- interface == PHY_INTERFACE_MODE_MII ||
- interface == PHY_INTERFACE_MODE_REVMII) {
- reg_rgmii_ctrl = bcm_sf2_reg_rgmii_cntrl(priv, port);
- reg = reg_readl(priv, reg_rgmii_ctrl);
- reg &= ~(RX_PAUSE_EN | TX_PAUSE_EN);
-
- if (tx_pause)
- reg |= TX_PAUSE_EN;
- if (rx_pause)
- reg |= RX_PAUSE_EN;
-
- reg_writel(priv, reg, reg_rgmii_ctrl);
- }
-
- reg = SW_OVERRIDE | LINK_STS;
- switch (speed) {
- case SPEED_1000:
- reg |= SPDSTS_1000 << SPEED_SHIFT;
- break;
- case SPEED_100:
- reg |= SPDSTS_100 << SPEED_SHIFT;
- break;
- }
-
- if (duplex == DUPLEX_FULL)
- reg |= DUPLX_MODE;
+ if (phy_interface_mode_is_rgmii(interface) ||
+ interface == PHY_INTERFACE_MODE_MII ||
+ interface == PHY_INTERFACE_MODE_REVMII) {
+ reg_rgmii_ctrl = bcm_sf2_reg_rgmii_cntrl(priv, port);
+ reg = reg_readl(priv, reg_rgmii_ctrl);
+ reg &= ~(RX_PAUSE_EN | TX_PAUSE_EN);
if (tx_pause)
- reg |= TXFLOW_CNTL;
+ reg |= TX_PAUSE_EN;
if (rx_pause)
- reg |= RXFLOW_CNTL;
+ reg |= RX_PAUSE_EN;
- core_writel(priv, reg, offset);
+ reg_writel(priv, reg, reg_rgmii_ctrl);
}
+ reg = LINK_STS;
+ if (port == 8) {
+ if (priv->type == BCM4908_DEVICE_ID)
+ reg |= GMII_SPEED_UP_2G;
+ reg |= MII_SW_OR;
+ } else {
+ reg |= SW_OVERRIDE;
+ }
+
+ switch (speed) {
+ case SPEED_1000:
+ reg |= SPDSTS_1000 << SPEED_SHIFT;
+ break;
+ case SPEED_100:
+ reg |= SPDSTS_100 << SPEED_SHIFT;
+ break;
+ }
+
+ if (duplex == DUPLEX_FULL)
+ reg |= DUPLX_MODE;
+
+ if (tx_pause)
+ reg |= TXFLOW_CNTL;
+ if (rx_pause)
+ reg |= RXFLOW_CNTL;
+
+ core_writel(priv, reg, offset);
+
if (mode == MLO_AN_PHY && phydev)
p->eee_enabled = b53_eee_init(ds, port, phydev);
}
@@ -987,7 +983,7 @@ static int bcm_sf2_sw_resume(struct dsa_switch *ds)
static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port,
struct ethtool_wolinfo *wol)
{
- struct net_device *p = dsa_to_port(ds, port)->cpu_dp->master;
+ struct net_device *p = dsa_port_to_master(dsa_to_port(ds, port));
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
struct ethtool_wolinfo pwol = { };
@@ -1011,7 +1007,7 @@ static void bcm_sf2_sw_get_wol(struct dsa_switch *ds, int port,
static int bcm_sf2_sw_set_wol(struct dsa_switch *ds, int port,
struct ethtool_wolinfo *wol)
{
- struct net_device *p = dsa_to_port(ds, port)->cpu_dp->master;
+ struct net_device *p = dsa_port_to_master(dsa_to_port(ds, port));
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
s8 cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
struct ethtool_wolinfo pwol = { };
@@ -1555,8 +1551,6 @@ static int bcm_sf2_sw_remove(struct platform_device *pdev)
if (priv->type == BCM7278_DEVICE_ID)
reset_control_assert(priv->rcdev);
- platform_set_drvdata(pdev, NULL);
-
return 0;
}
diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c
index edbe5e7f1cb6..c4010b7bf089 100644
--- a/drivers/net/dsa/bcm_sf2_cfp.c
+++ b/drivers/net/dsa/bcm_sf2_cfp.c
@@ -1102,7 +1102,7 @@ static int bcm_sf2_cfp_rule_get_all(struct bcm_sf2_priv *priv,
int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
struct ethtool_rxnfc *nfc, u32 *rule_locs)
{
- struct net_device *p = dsa_to_port(ds, port)->cpu_dp->master;
+ struct net_device *p = dsa_port_to_master(dsa_to_port(ds, port));
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
int ret = 0;
@@ -1145,7 +1145,7 @@ int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
int bcm_sf2_set_rxnfc(struct dsa_switch *ds, int port,
struct ethtool_rxnfc *nfc)
{
- struct net_device *p = dsa_to_port(ds, port)->cpu_dp->master;
+ struct net_device *p = dsa_port_to_master(dsa_to_port(ds, port));
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
int ret = 0;
@@ -1296,7 +1296,7 @@ void bcm_sf2_cfp_get_strings(struct dsa_switch *ds, int port,
"CFP%03d_%sCntr",
i, bcm_sf2_cfp_stats[j].name);
iter = (i - 1) * s + j;
- strlcpy(data + iter * ETH_GSTRING_LEN,
+ strscpy(data + iter * ETH_GSTRING_LEN,
buf, ETH_GSTRING_LEN);
}
}
diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c
index 263e41191c29..b9107fe40023 100644
--- a/drivers/net/dsa/dsa_loop.c
+++ b/drivers/net/dsa/dsa_loop.c
@@ -351,8 +351,6 @@ static void dsa_loop_drv_remove(struct mdio_device *mdiodev)
dsa_unregister_switch(ds);
dev_put(ps->netdev);
-
- dev_set_drvdata(&mdiodev->dev, NULL);
}
static void dsa_loop_drv_shutdown(struct mdio_device *mdiodev)
diff --git a/drivers/net/dsa/hirschmann/hellcreek.c b/drivers/net/dsa/hirschmann/hellcreek.c
index 01f90994dedd..951f7935c872 100644
--- a/drivers/net/dsa/hirschmann/hellcreek.c
+++ b/drivers/net/dsa/hirschmann/hellcreek.c
@@ -128,6 +128,16 @@ static void hellcreek_select_prio(struct hellcreek *hellcreek, int prio)
hellcreek_write(hellcreek, val, HR_PSEL);
}
+static void hellcreek_select_port_prio(struct hellcreek *hellcreek, int port,
+ int prio)
+{
+ u16 val = port << HR_PSEL_PTWSEL_SHIFT;
+
+ val |= prio << HR_PSEL_PRTCWSEL_SHIFT;
+
+ hellcreek_write(hellcreek, val, HR_PSEL);
+}
+
static void hellcreek_select_counter(struct hellcreek *hellcreek, int counter)
{
u16 val = counter << HR_CSEL_SHIFT;
@@ -288,7 +298,7 @@ static void hellcreek_get_strings(struct dsa_switch *ds, int port,
for (i = 0; i < ARRAY_SIZE(hellcreek_counter); ++i) {
const struct hellcreek_counter *counter = &hellcreek_counter[i];
- strlcpy(data + i * ETH_GSTRING_LEN,
+ strscpy(data + i * ETH_GSTRING_LEN,
counter->name, ETH_GSTRING_LEN);
}
}
@@ -1537,6 +1547,45 @@ out:
return ret;
}
+static void hellcreek_setup_maxsdu(struct hellcreek *hellcreek, int port,
+ const struct tc_taprio_qopt_offload *schedule)
+{
+ int tc;
+
+ for (tc = 0; tc < 8; ++tc) {
+ u32 max_sdu = schedule->max_sdu[tc] + VLAN_ETH_HLEN - ETH_FCS_LEN;
+ u16 val;
+
+ if (!schedule->max_sdu[tc])
+ continue;
+
+ dev_dbg(hellcreek->dev, "Configure max-sdu %u for tc %d on port %d\n",
+ max_sdu, tc, port);
+
+ hellcreek_select_port_prio(hellcreek, port, tc);
+
+ val = (max_sdu & HR_PTPRTCCFG_MAXSDU_MASK) << HR_PTPRTCCFG_MAXSDU_SHIFT;
+
+ hellcreek_write(hellcreek, val, HR_PTPRTCCFG);
+ }
+}
+
+static void hellcreek_reset_maxsdu(struct hellcreek *hellcreek, int port)
+{
+ int tc;
+
+ for (tc = 0; tc < 8; ++tc) {
+ u16 val;
+
+ hellcreek_select_port_prio(hellcreek, port, tc);
+
+ val = (HELLCREEK_DEFAULT_MAX_SDU & HR_PTPRTCCFG_MAXSDU_MASK)
+ << HR_PTPRTCCFG_MAXSDU_SHIFT;
+
+ hellcreek_write(hellcreek, val, HR_PTPRTCCFG);
+ }
+}
+
static void hellcreek_setup_gcl(struct hellcreek *hellcreek, int port,
const struct tc_taprio_qopt_offload *schedule)
{
@@ -1720,7 +1769,10 @@ static int hellcreek_port_set_schedule(struct dsa_switch *ds, int port,
}
hellcreek_port->current_schedule = taprio_offload_get(taprio);
- /* Then select port */
+ /* Configure max sdu */
+ hellcreek_setup_maxsdu(hellcreek, port, hellcreek_port->current_schedule);
+
+ /* Select tdg */
hellcreek_select_tgd(hellcreek, port);
/* Enable gating and keep defaults */
@@ -1772,7 +1824,10 @@ static int hellcreek_port_del_schedule(struct dsa_switch *ds, int port)
hellcreek_port->current_schedule = NULL;
}
- /* Then select port */
+ /* Reset max sdu */
+ hellcreek_reset_maxsdu(hellcreek, port);
+
+ /* Select tgd */
hellcreek_select_tgd(hellcreek, port);
/* Disable gating and return to regular switching flow */
@@ -1809,22 +1864,43 @@ static bool hellcreek_validate_schedule(struct hellcreek *hellcreek,
return true;
}
+static int hellcreek_tc_query_caps(struct tc_query_caps_base *base)
+{
+ switch (base->type) {
+ case TC_SETUP_QDISC_TAPRIO: {
+ struct tc_taprio_caps *caps = base->caps;
+
+ caps->supports_queue_max_sdu = true;
+
+ return 0;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static int hellcreek_port_setup_tc(struct dsa_switch *ds, int port,
enum tc_setup_type type, void *type_data)
{
- struct tc_taprio_qopt_offload *taprio = type_data;
struct hellcreek *hellcreek = ds->priv;
- if (type != TC_SETUP_QDISC_TAPRIO)
+ switch (type) {
+ case TC_QUERY_CAPS:
+ return hellcreek_tc_query_caps(type_data);
+ case TC_SETUP_QDISC_TAPRIO: {
+ struct tc_taprio_qopt_offload *taprio = type_data;
+
+ if (!hellcreek_validate_schedule(hellcreek, taprio))
+ return -EOPNOTSUPP;
+
+ if (taprio->enable)
+ return hellcreek_port_set_schedule(ds, port, taprio);
+
+ return hellcreek_port_del_schedule(ds, port);
+ }
+ default:
return -EOPNOTSUPP;
-
- if (!hellcreek_validate_schedule(hellcreek, taprio))
- return -EOPNOTSUPP;
-
- if (taprio->enable)
- return hellcreek_port_set_schedule(ds, port, taprio);
-
- return hellcreek_port_del_schedule(ds, port);
+ }
}
static const struct dsa_switch_ops hellcreek_ds_ops = {
@@ -1996,7 +2072,6 @@ static int hellcreek_remove(struct platform_device *pdev)
hellcreek_hwtstamp_free(hellcreek);
hellcreek_ptp_free(hellcreek);
dsa_unregister_switch(hellcreek->ds);
- platform_set_drvdata(pdev, NULL);
return 0;
}
diff --git a/drivers/net/dsa/hirschmann/hellcreek.h b/drivers/net/dsa/hirschmann/hellcreek.h
index 9e303b8ab13c..4a678f7d61ae 100644
--- a/drivers/net/dsa/hirschmann/hellcreek.h
+++ b/drivers/net/dsa/hirschmann/hellcreek.h
@@ -37,6 +37,7 @@
#define HELLCREEK_VLAN_UNTAGGED_MEMBER 0x1
#define HELLCREEK_VLAN_TAGGED_MEMBER 0x3
#define HELLCREEK_NUM_EGRESS_QUEUES 8
+#define HELLCREEK_DEFAULT_MAX_SDU 1536
/* Register definitions */
#define HR_MODID_C (0 * 2)
@@ -72,6 +73,12 @@
#define HR_PRTCCFG_PCP_TC_MAP_SHIFT 0
#define HR_PRTCCFG_PCP_TC_MAP_MASK GENMASK(2, 0)
+#define HR_PTPRTCCFG (0xa9 * 2)
+#define HR_PTPRTCCFG_SET_QTRACK BIT(15)
+#define HR_PTPRTCCFG_REJECT BIT(14)
+#define HR_PTPRTCCFG_MAXSDU_SHIFT 0
+#define HR_PTPRTCCFG_MAXSDU_MASK GENMASK(10, 0)
+
#define HR_CSEL (0x8d * 2)
#define HR_CSEL_SHIFT 0
#define HR_CSEL_MASK GENMASK(7, 0)
diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c
index e03ff1f267bb..438e46af03e9 100644
--- a/drivers/net/dsa/lan9303-core.c
+++ b/drivers/net/dsa/lan9303-core.c
@@ -22,6 +22,10 @@
*/
#define LAN9303_CHIP_REV 0x14
# define LAN9303_CHIP_ID 0x9303
+# define LAN9352_CHIP_ID 0x9352
+# define LAN9353_CHIP_ID 0x9353
+# define LAN9354_CHIP_ID 0x9354
+# define LAN9355_CHIP_ID 0x9355
#define LAN9303_IRQ_CFG 0x15
# define LAN9303_IRQ_CFG_IRQ_ENABLE BIT(8)
# define LAN9303_IRQ_CFG_IRQ_POL BIT(4)
@@ -32,6 +36,7 @@
#define LAN9303_INT_EN 0x17
# define LAN9303_INT_EN_PHY_INT2_EN BIT(27)
# define LAN9303_INT_EN_PHY_INT1_EN BIT(26)
+#define LAN9303_BYTE_ORDER 0x19
#define LAN9303_HW_CFG 0x1D
# define LAN9303_HW_CFG_READY BIT(27)
# define LAN9303_HW_CFG_AMDX_EN_PORT2 BIT(26)
@@ -851,15 +856,12 @@ static int lan9303_check_device(struct lan9303 *chip)
if (ret) {
dev_err(chip->dev, "failed to read chip revision register: %d\n",
ret);
- if (!chip->reset_gpio) {
- dev_dbg(chip->dev,
- "hint: maybe failed due to missing reset GPIO\n");
- }
return ret;
}
- if ((reg >> 16) != LAN9303_CHIP_ID) {
- dev_err(chip->dev, "expecting LAN9303 chip, but found: %X\n",
+ if (((reg >> 16) != LAN9303_CHIP_ID) &&
+ ((reg >> 16) != LAN9354_CHIP_ID)) {
+ dev_err(chip->dev, "unexpected device found: LAN%4.4X\n",
reg >> 16);
return -ENODEV;
}
@@ -875,7 +877,7 @@ static int lan9303_check_device(struct lan9303 *chip)
if (ret)
dev_warn(chip->dev, "failed to disable switching %d\n", ret);
- dev_info(chip->dev, "Found LAN9303 rev. %u\n", reg & 0xffff);
+ dev_info(chip->dev, "Found LAN%4.4X rev. %u\n", (reg >> 16), reg & 0xffff);
ret = lan9303_detect_phy_setup(chip);
if (ret) {
@@ -1090,7 +1092,7 @@ static int lan9303_port_enable(struct dsa_switch *ds, int port,
if (!dsa_port_is_user(dp))
return 0;
- vlan_vid_add(dp->cpu_dp->master, htons(ETH_P_8021Q), port);
+ vlan_vid_add(dsa_port_to_master(dp), htons(ETH_P_8021Q), port);
return lan9303_enable_processing_port(chip, port);
}
@@ -1103,7 +1105,7 @@ static void lan9303_port_disable(struct dsa_switch *ds, int port)
if (!dsa_port_is_user(dp))
return;
- vlan_vid_del(dp->cpu_dp->master, htons(ETH_P_8021Q), port);
+ vlan_vid_del(dsa_port_to_master(dp), htons(ETH_P_8021Q), port);
lan9303_disable_processing_port(chip, port);
lan9303_phy_write(ds, chip->phy_addr_base + port, MII_BMCR, BMCR_PDOWN);
@@ -1349,6 +1351,7 @@ static int lan9303_probe_reset_gpio(struct lan9303 *chip,
int lan9303_probe(struct lan9303 *chip, struct device_node *np)
{
int ret;
+ u32 reg;
mutex_init(&chip->indirect_mutex);
mutex_init(&chip->alr_mutex);
@@ -1359,6 +1362,19 @@ int lan9303_probe(struct lan9303 *chip, struct device_node *np)
lan9303_handle_reset(chip);
+	/* First read from the device. This is a dummy read to ensure MDIO */
+	/* access is in 32-bit sync. */
+	ret = lan9303_read(chip->regmap, LAN9303_BYTE_ORDER, &reg);
+ if (ret) {
+ dev_err(chip->dev, "failed to access the device: %d\n",
+ ret);
+ if (!chip->reset_gpio) {
+ dev_dbg(chip->dev,
+ "hint: maybe failed due to missing reset GPIO\n");
+ }
+ return ret;
+ }
+
ret = lan9303_check_device(chip);
if (ret)
return ret;
diff --git a/drivers/net/dsa/lan9303_i2c.c b/drivers/net/dsa/lan9303_i2c.c
index 8ca4713310fa..79be5fc044bd 100644
--- a/drivers/net/dsa/lan9303_i2c.c
+++ b/drivers/net/dsa/lan9303_i2c.c
@@ -74,8 +74,6 @@ static int lan9303_i2c_remove(struct i2c_client *client)
lan9303_remove(&sw_dev->chip);
- i2c_set_clientdata(client, NULL);
-
return 0;
}
diff --git a/drivers/net/dsa/lan9303_mdio.c b/drivers/net/dsa/lan9303_mdio.c
index bbb7032409ba..4f33369a2de5 100644
--- a/drivers/net/dsa/lan9303_mdio.c
+++ b/drivers/net/dsa/lan9303_mdio.c
@@ -138,8 +138,6 @@ static void lan9303_mdio_remove(struct mdio_device *mdiodev)
return;
lan9303_remove(&sw_dev->chip);
-
- dev_set_drvdata(&mdiodev->dev, NULL);
}
static void lan9303_mdio_shutdown(struct mdio_device *mdiodev)
@@ -158,6 +156,7 @@ static void lan9303_mdio_shutdown(struct mdio_device *mdiodev)
static const struct of_device_id lan9303_mdio_of_match[] = {
{ .compatible = "smsc,lan9303-mdio" },
+ { .compatible = "microchip,lan9354-mdio" },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, lan9303_mdio_of_match);
diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
index e531b93f3cb2..05ecaa007ab1 100644
--- a/drivers/net/dsa/lantiq_gswip.c
+++ b/drivers/net/dsa/lantiq_gswip.c
@@ -1989,11 +1989,9 @@ static int gswip_gphy_fw_probe(struct gswip_priv *priv,
}
gphy_fw->reset = of_reset_control_array_get_exclusive(gphy_fw_np);
- if (IS_ERR(gphy_fw->reset)) {
- if (PTR_ERR(gphy_fw->reset) != -EPROBE_DEFER)
- dev_err(dev, "Failed to lookup gphy reset\n");
- return PTR_ERR(gphy_fw->reset);
- }
+ if (IS_ERR(gphy_fw->reset))
+ return dev_err_probe(dev, PTR_ERR(gphy_fw->reset),
+ "Failed to lookup gphy reset\n");
return gswip_gphy_fw_load(priv, gphy_fw);
}
@@ -2231,8 +2229,6 @@ static int gswip_remove(struct platform_device *pdev)
for (i = 0; i < priv->num_gphy_fw; i++)
gswip_gphy_fw_remove(priv, &priv->gphy_fw[i]);
- platform_set_drvdata(pdev, NULL);
-
return 0;
}
diff --git a/drivers/net/dsa/microchip/ksz8.h b/drivers/net/dsa/microchip/ksz8.h
index 42c50cc4d853..8582b4b67d98 100644
--- a/drivers/net/dsa/microchip/ksz8.h
+++ b/drivers/net/dsa/microchip/ksz8.h
@@ -17,8 +17,8 @@ u32 ksz8_get_port_addr(int port, int offset);
void ksz8_cfg_port_member(struct ksz_device *dev, int port, u8 member);
void ksz8_flush_dyn_mac_table(struct ksz_device *dev, int port);
void ksz8_port_setup(struct ksz_device *dev, int port, bool cpu_port);
-void ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val);
-void ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val);
+int ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val);
+int ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val);
int ksz8_r_dyn_mac_table(struct ksz_device *dev, u16 addr, u8 *mac_addr,
u8 *fid, u8 *src_port, u8 *timestamp, u16 *entries);
int ksz8_r_sta_mac_table(struct ksz_device *dev, u16 addr,
diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c
index c79a5128235f..bd3b133e7085 100644
--- a/drivers/net/dsa/microchip/ksz8795.c
+++ b/drivers/net/dsa/microchip/ksz8795.c
@@ -552,7 +552,7 @@ static void ksz8_w_vlan_table(struct ksz_device *dev, u16 vid, u16 vlan)
ksz8_w_table(dev, TABLE_VLAN, addr, buf);
}
-void ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val)
+int ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val)
{
u8 restart, speed, ctrl, link;
int processed = true;
@@ -560,14 +560,24 @@ void ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val)
u8 val1, val2;
u16 data = 0;
u8 p = phy;
+ int ret;
regs = dev->info->regs;
switch (reg) {
case MII_BMCR:
- ksz_pread8(dev, p, regs[P_NEG_RESTART_CTRL], &restart);
- ksz_pread8(dev, p, regs[P_SPEED_STATUS], &speed);
- ksz_pread8(dev, p, regs[P_FORCE_CTRL], &ctrl);
+ ret = ksz_pread8(dev, p, regs[P_NEG_RESTART_CTRL], &restart);
+ if (ret)
+ return ret;
+
+ ret = ksz_pread8(dev, p, regs[P_SPEED_STATUS], &speed);
+ if (ret)
+ return ret;
+
+ ret = ksz_pread8(dev, p, regs[P_FORCE_CTRL], &ctrl);
+ if (ret)
+ return ret;
+
if (restart & PORT_PHY_LOOPBACK)
data |= BMCR_LOOPBACK;
if (ctrl & PORT_FORCE_100_MBIT)
@@ -597,7 +607,10 @@ void ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val)
data |= KSZ886X_BMCR_DISABLE_LED;
break;
case MII_BMSR:
- ksz_pread8(dev, p, regs[P_LINK_STATUS], &link);
+ ret = ksz_pread8(dev, p, regs[P_LINK_STATUS], &link);
+ if (ret)
+ return ret;
+
data = BMSR_100FULL |
BMSR_100HALF |
BMSR_10FULL |
@@ -618,7 +631,10 @@ void ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val)
data = KSZ8795_ID_LO;
break;
case MII_ADVERTISE:
- ksz_pread8(dev, p, regs[P_LOCAL_CTRL], &ctrl);
+ ret = ksz_pread8(dev, p, regs[P_LOCAL_CTRL], &ctrl);
+ if (ret)
+ return ret;
+
data = ADVERTISE_CSMA;
if (ctrl & PORT_AUTO_NEG_SYM_PAUSE)
data |= ADVERTISE_PAUSE_CAP;
@@ -632,7 +648,10 @@ void ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val)
data |= ADVERTISE_10HALF;
break;
case MII_LPA:
- ksz_pread8(dev, p, regs[P_REMOTE_STATUS], &link);
+ ret = ksz_pread8(dev, p, regs[P_REMOTE_STATUS], &link);
+ if (ret)
+ return ret;
+
data = LPA_SLCT;
if (link & PORT_REMOTE_SYM_PAUSE)
data |= LPA_PAUSE_CAP;
@@ -648,8 +667,14 @@ void ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val)
data |= LPA_LPACK;
break;
case PHY_REG_LINK_MD:
- ksz_pread8(dev, p, REG_PORT_LINK_MD_CTRL, &val1);
- ksz_pread8(dev, p, REG_PORT_LINK_MD_RESULT, &val2);
+ ret = ksz_pread8(dev, p, REG_PORT_LINK_MD_CTRL, &val1);
+ if (ret)
+ return ret;
+
+ ret = ksz_pread8(dev, p, REG_PORT_LINK_MD_RESULT, &val2);
+ if (ret)
+ return ret;
+
if (val1 & PORT_START_CABLE_DIAG)
data |= PHY_START_CABLE_DIAG;
@@ -664,7 +689,10 @@ void ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val)
FIELD_GET(PORT_CABLE_FAULT_COUNTER_L, val2));
break;
case PHY_REG_PHY_CTRL:
- ksz_pread8(dev, p, regs[P_LINK_STATUS], &link);
+ ret = ksz_pread8(dev, p, regs[P_LINK_STATUS], &link);
+ if (ret)
+ return ret;
+
if (link & PORT_MDIX_STATUS)
data |= KSZ886X_CTRL_MDIX_STAT;
break;
@@ -674,13 +702,16 @@ void ksz8_r_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 *val)
}
if (processed)
*val = data;
+
+ return 0;
}
-void ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
+int ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
{
u8 restart, speed, ctrl, data;
const u16 *regs;
u8 p = phy;
+ int ret;
regs = dev->info->regs;
@@ -690,15 +721,26 @@ void ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
/* Do not support PHY reset function. */
if (val & BMCR_RESET)
break;
- ksz_pread8(dev, p, regs[P_SPEED_STATUS], &speed);
+ ret = ksz_pread8(dev, p, regs[P_SPEED_STATUS], &speed);
+ if (ret)
+ return ret;
+
data = speed;
if (val & KSZ886X_BMCR_HP_MDIX)
data |= PORT_HP_MDIX;
else
data &= ~PORT_HP_MDIX;
- if (data != speed)
- ksz_pwrite8(dev, p, regs[P_SPEED_STATUS], data);
- ksz_pread8(dev, p, regs[P_FORCE_CTRL], &ctrl);
+
+ if (data != speed) {
+ ret = ksz_pwrite8(dev, p, regs[P_SPEED_STATUS], data);
+ if (ret)
+ return ret;
+ }
+
+ ret = ksz_pread8(dev, p, regs[P_FORCE_CTRL], &ctrl);
+ if (ret)
+ return ret;
+
data = ctrl;
if (ksz_is_ksz88x3(dev)) {
if ((val & BMCR_ANENABLE))
@@ -724,9 +766,17 @@ void ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
data |= PORT_FORCE_FULL_DUPLEX;
else
data &= ~PORT_FORCE_FULL_DUPLEX;
- if (data != ctrl)
- ksz_pwrite8(dev, p, regs[P_FORCE_CTRL], data);
- ksz_pread8(dev, p, regs[P_NEG_RESTART_CTRL], &restart);
+
+ if (data != ctrl) {
+ ret = ksz_pwrite8(dev, p, regs[P_FORCE_CTRL], data);
+ if (ret)
+ return ret;
+ }
+
+ ret = ksz_pread8(dev, p, regs[P_NEG_RESTART_CTRL], &restart);
+ if (ret)
+ return ret;
+
data = restart;
if (val & KSZ886X_BMCR_DISABLE_LED)
data |= PORT_LED_OFF;
@@ -756,11 +806,19 @@ void ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
data |= PORT_PHY_LOOPBACK;
else
data &= ~PORT_PHY_LOOPBACK;
- if (data != restart)
- ksz_pwrite8(dev, p, regs[P_NEG_RESTART_CTRL], data);
+
+ if (data != restart) {
+ ret = ksz_pwrite8(dev, p, regs[P_NEG_RESTART_CTRL],
+ data);
+ if (ret)
+ return ret;
+ }
break;
case MII_ADVERTISE:
- ksz_pread8(dev, p, regs[P_LOCAL_CTRL], &ctrl);
+ ret = ksz_pread8(dev, p, regs[P_LOCAL_CTRL], &ctrl);
+ if (ret)
+ return ret;
+
data = ctrl;
data &= ~(PORT_AUTO_NEG_SYM_PAUSE |
PORT_AUTO_NEG_100BTX_FD |
@@ -777,8 +835,12 @@ void ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
data |= PORT_AUTO_NEG_10BT_FD;
if (val & ADVERTISE_10HALF)
data |= PORT_AUTO_NEG_10BT;
- if (data != ctrl)
- ksz_pwrite8(dev, p, regs[P_LOCAL_CTRL], data);
+
+ if (data != ctrl) {
+ ret = ksz_pwrite8(dev, p, regs[P_LOCAL_CTRL], data);
+ if (ret)
+ return ret;
+ }
break;
case PHY_REG_LINK_MD:
if (val & PHY_START_CABLE_DIAG)
@@ -787,6 +849,8 @@ void ksz8_w_phy(struct ksz_device *dev, u16 phy, u16 reg, u16 val)
default:
break;
}
+
+ return 0;
}
void ksz8_cfg_port_member(struct ksz_device *dev, int port, u8 member)
@@ -1187,7 +1251,6 @@ void ksz8_config_cpu_port(struct dsa_switch *ds)
if (i == dev->phy_port_cnt)
break;
p->on = 1;
- p->phy = 1;
}
for (i = 0; i < dev->phy_port_cnt; i++) {
p = &dev->ports[i];
diff --git a/drivers/net/dsa/microchip/ksz8863_smi.c b/drivers/net/dsa/microchip/ksz8863_smi.c
index 5247fdfb964d..ddb40838181e 100644
--- a/drivers/net/dsa/microchip/ksz8863_smi.c
+++ b/drivers/net/dsa/microchip/ksz8863_smi.c
@@ -180,8 +180,6 @@ static void ksz8863_smi_remove(struct mdio_device *mdiodev)
if (dev)
ksz_switch_remove(dev);
-
- dev_set_drvdata(&mdiodev->dev, NULL);
}
static void ksz8863_smi_shutdown(struct mdio_device *mdiodev)
diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c
index e4f446db0ca1..a6a0321a8931 100644
--- a/drivers/net/dsa/microchip/ksz9477.c
+++ b/drivers/net/dsa/microchip/ksz9477.c
@@ -193,6 +193,11 @@ int ksz9477_reset_switch(struct ksz_device *dev)
ksz_write32(dev, REG_SW_PORT_INT_MASK__4, 0x7F);
ksz_read32(dev, REG_SW_PORT_INT_STATUS__4, &data32);
+ /* KSZ9893 compatible chips do not support refclk configuration */
+ if (dev->chip_id == KSZ9893_CHIP_ID ||
+ dev->chip_id == KSZ8563_CHIP_ID)
+ return 0;
+
data8 = SW_ENABLE_REFCLKO;
if (dev->synclko_disable)
data8 = 0;
@@ -264,9 +269,20 @@ void ksz9477_port_init_cnt(struct ksz_device *dev, int port)
mutex_unlock(&mib->cnt_mutex);
}
-void ksz9477_r_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 *data)
+static void ksz9477_r_phy_quirks(struct ksz_device *dev, u16 addr, u16 reg,
+ u16 *data)
+{
+	/* The KSZ8563R does not have extended registers, but the BMSR_ESTATEN
+	 * and BMSR_ERCAP bits are set.
+ */
+ if (dev->chip_id == KSZ8563_CHIP_ID && reg == MII_BMSR)
+ *data &= ~(BMSR_ESTATEN | BMSR_ERCAP);
+}
+
+int ksz9477_r_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 *data)
{
u16 val = 0xffff;
+ int ret;
/* No real PHY after this. Simulate the PHY.
* A fixed PHY can be setup in the device tree, but this function is
@@ -274,7 +290,7 @@ void ksz9477_r_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 *data)
* For RGMII PHY there is no way to access it so the fixed PHY should
* be used. For SGMII PHY the supporting code will be added later.
*/
- if (addr >= dev->phy_port_cnt) {
+ if (!dev->info->internal_phy[addr]) {
struct ksz_port *p = &dev->ports[addr];
switch (reg) {
@@ -307,23 +323,25 @@ void ksz9477_r_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 *data)
break;
}
} else {
- ksz_pread16(dev, addr, 0x100 + (reg << 1), &val);
+ ret = ksz_pread16(dev, addr, 0x100 + (reg << 1), &val);
+ if (ret)
+ return ret;
+
+ ksz9477_r_phy_quirks(dev, addr, reg, &val);
}
*data = val;
+
+ return 0;
}
-void ksz9477_w_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 val)
+int ksz9477_w_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 val)
{
/* No real PHY after this. */
- if (addr >= dev->phy_port_cnt)
- return;
+ if (!dev->info->internal_phy[addr])
+ return 0;
- /* No gigabit support. Do not write to this register. */
- if (!(dev->features & GBIT_SUPPORT) && reg == MII_CTRL1000)
- return;
-
- ksz_pwrite16(dev, addr, 0x100 + (reg << 1), val);
+ return ksz_pwrite16(dev, addr, 0x100 + (reg << 1), val);
}
void ksz9477_cfg_port_member(struct ksz_device *dev, int port, u8 member)
@@ -869,7 +887,7 @@ static phy_interface_t ksz9477_get_interface(struct ksz_device *dev, int port)
phy_interface_t interface;
bool gbit;
- if (port < dev->phy_port_cnt)
+ if (dev->info->internal_phy[port])
return PHY_INTERFACE_MODE_NA;
gbit = ksz_get_gbit(dev, port);
@@ -914,7 +932,7 @@ static void ksz9477_phy_errata_setup(struct ksz_device *dev, int port)
/* Energy Efficient Ethernet (EEE) feature select must
* be manually disabled (except on KSZ8565 which is 100Mbit)
*/
- if (dev->features & GBIT_SUPPORT)
+ if (dev->info->gbit_capable[port])
ksz9477_port_mmd_write(dev, port, 0x07, 0x3c, 0x0000);
/* Register settings are required to meet data sheet
@@ -941,10 +959,35 @@ void ksz9477_get_caps(struct ksz_device *dev, int port,
config->mac_capabilities = MAC_10 | MAC_100 | MAC_ASYM_PAUSE |
MAC_SYM_PAUSE;
- if (dev->features & GBIT_SUPPORT)
+ if (dev->info->gbit_capable[port])
config->mac_capabilities |= MAC_1000FD;
}
+int ksz9477_set_ageing_time(struct ksz_device *dev, unsigned int msecs)
+{
+ u32 secs = msecs / 1000;
+ u8 value;
+ u8 data;
+ int ret;
+
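+	/* The ageing period (in seconds) is an 11-bit value split across two
+	 * registers: bits 7:0 go into REG_SW_LUE_CTRL_3, bits 10:8 into the
+	 * SW_AGE_CNT field of REG_SW_LUE_CTRL_0.
+	 */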
+ value = FIELD_GET(SW_AGE_PERIOD_7_0_M, secs);
+
+ ret = ksz_write8(dev, REG_SW_LUE_CTRL_3, value);
+ if (ret < 0)
+ return ret;
+
+ data = FIELD_GET(SW_AGE_PERIOD_10_8_M, secs);
+
+ ret = ksz_read8(dev, REG_SW_LUE_CTRL_0, &value);
+ if (ret < 0)
+ return ret;
+
+ value &= ~SW_AGE_CNT_M;
+ value |= FIELD_PREP(SW_AGE_CNT_M, data);
+
+ return ksz_write8(dev, REG_SW_LUE_CTRL_0, value);
+}
+
void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
{
struct dsa_switch *ds = dev->ds;
@@ -976,7 +1019,7 @@ void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
/* enable 802.1p priority */
ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_802_1P_PRIO_ENABLE, true);
- if (port < dev->phy_port_cnt) {
+ if (dev->info->internal_phy[port]) {
/* do not force flow control */
ksz_port_cfg(dev, port, REG_PORT_CTRL_0,
PORT_FORCE_TX_FLOW_CTRL | PORT_FORCE_RX_FLOW_CTRL,
@@ -999,7 +1042,7 @@ void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port)
ksz9477_cfg_port_member(dev, port, member);
/* clear pending interrupts */
- if (port < dev->phy_port_cnt)
+ if (dev->info->internal_phy[port])
ksz_pread16(dev, port, REG_PORT_PHY_INT_ENABLE, &data16);
}
@@ -1051,25 +1094,13 @@ void ksz9477_config_cpu_port(struct dsa_switch *ds)
/* enable cpu port */
ksz9477_port_setup(dev, i, true);
- p->on = 1;
}
}
for (i = 0; i < dev->info->port_cnt; i++) {
if (i == dev->cpu_port)
continue;
- p = &dev->ports[i];
-
ksz_port_stp_state_set(ds, i, BR_STATE_DISABLED);
- p->on = 1;
- if (i < dev->phy_port_cnt)
- p->phy = 1;
- if (dev->chip_id == 0x00947700 && i == 6) {
- p->sgmii = 1;
-
- /* SGMII PHY detection code is not implemented yet. */
- p->phy = 0;
- }
}
}
@@ -1158,29 +1189,6 @@ int ksz9477_switch_init(struct ksz_device *dev)
if (ret)
return ret;
- ret = ksz_read8(dev, REG_GLOBAL_OPTIONS, &data8);
- if (ret)
- return ret;
-
- /* Number of ports can be reduced depending on chip. */
- dev->phy_port_cnt = 5;
-
- /* Default capability is gigabit capable. */
- dev->features = GBIT_SUPPORT;
-
- if (dev->chip_id == KSZ9893_CHIP_ID) {
- dev->features |= IS_9893;
-
- /* Chip does not support gigabit. */
- if (data8 & SW_QW_ABLE)
- dev->features &= ~GBIT_SUPPORT;
- dev->phy_port_cnt = 2;
- } else {
- /* Chip does not support gigabit. */
- if (!(data8 & SW_GIGABIT_ABLE))
- dev->features &= ~GBIT_SUPPORT;
- }
-
return 0;
}
diff --git a/drivers/net/dsa/microchip/ksz9477.h b/drivers/net/dsa/microchip/ksz9477.h
index cd278b307b3c..00862c4cfb7f 100644
--- a/drivers/net/dsa/microchip/ksz9477.h
+++ b/drivers/net/dsa/microchip/ksz9477.h
@@ -16,8 +16,9 @@ u32 ksz9477_get_port_addr(int port, int offset);
void ksz9477_cfg_port_member(struct ksz_device *dev, int port, u8 member);
void ksz9477_flush_dyn_mac_table(struct ksz_device *dev, int port);
void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port);
-void ksz9477_r_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 *data);
-void ksz9477_w_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 val);
+int ksz9477_set_ageing_time(struct ksz_device *dev, unsigned int msecs);
+int ksz9477_r_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 *data);
+int ksz9477_w_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 val);
void ksz9477_r_mib_cnt(struct ksz_device *dev, int port, u16 addr, u64 *cnt);
void ksz9477_r_mib_pkt(struct ksz_device *dev, int port, u16 addr,
u64 *dropped, u64 *cnt);
diff --git a/drivers/net/dsa/microchip/ksz9477_i2c.c b/drivers/net/dsa/microchip/ksz9477_i2c.c
index 99966514d444..e111756f6473 100644
--- a/drivers/net/dsa/microchip/ksz9477_i2c.c
+++ b/drivers/net/dsa/microchip/ksz9477_i2c.c
@@ -59,8 +59,6 @@ static int ksz9477_i2c_remove(struct i2c_client *i2c)
if (dev)
ksz_switch_remove(dev);
- i2c_set_clientdata(i2c, NULL);
-
return 0;
}
@@ -91,6 +89,10 @@ static const struct of_device_id ksz9477_dt_ids[] = {
.compatible = "microchip,ksz9477",
.data = &ksz_switch_chips[KSZ9477]
},
+ {
+ .compatible = "microchip,ksz9896",
+ .data = &ksz_switch_chips[KSZ9896]
+ },
{
.compatible = "microchip,ksz9897",
.data = &ksz_switch_chips[KSZ9897]
diff --git a/drivers/net/dsa/microchip/ksz9477_reg.h b/drivers/net/dsa/microchip/ksz9477_reg.h
index ddf99d1e4bbd..53c68d286dd3 100644
--- a/drivers/net/dsa/microchip/ksz9477_reg.h
+++ b/drivers/net/dsa/microchip/ksz9477_reg.h
@@ -189,8 +189,9 @@
#define SW_VLAN_ENABLE BIT(7)
#define SW_DROP_INVALID_VID BIT(6)
-#define SW_AGE_CNT_M 0x7
+#define SW_AGE_CNT_M GENMASK(5, 3)
#define SW_AGE_CNT_S 3
+#define SW_AGE_PERIOD_10_8_M GENMASK(10, 8)
#define SW_RESV_MCAST_ENABLE BIT(2)
#define SW_HASH_OPTION_M 0x03
#define SW_HASH_OPTION_CRC 1
@@ -225,6 +226,7 @@
#define SW_PRIO_LOWEST_DA_SA 3
#define REG_SW_LUE_CTRL_3 0x0313
+#define SW_AGE_PERIOD_7_0_M GENMASK(7, 0)
#define REG_SW_LUE_INT_STATUS 0x0314
#define REG_SW_LUE_INT_ENABLE 0x0315
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 872aba63e7d4..d612181b3226 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -14,6 +14,9 @@
#include
#include
#include
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of_mdio.h>
#include
#include
#include
@@ -183,6 +186,7 @@ static const struct ksz_dev_ops ksz9477_dev_ops = {
.cfg_port_member = ksz9477_cfg_port_member,
.flush_dyn_mac_table = ksz9477_flush_dyn_mac_table,
.port_setup = ksz9477_port_setup,
+ .set_ageing_time = ksz9477_set_ageing_time,
.r_phy = ksz9477_r_phy,
.w_phy = ksz9477_w_phy,
.r_mib_cnt = ksz9477_r_mib_cnt,
@@ -213,10 +217,12 @@ static const struct ksz_dev_ops ksz9477_dev_ops = {
static const struct ksz_dev_ops lan937x_dev_ops = {
.setup = lan937x_setup,
+ .teardown = lan937x_teardown,
.get_port_addr = ksz9477_get_port_addr,
.cfg_port_member = ksz9477_cfg_port_member,
.flush_dyn_mac_table = ksz9477_flush_dyn_mac_table,
.port_setup = lan937x_port_setup,
+ .set_ageing_time = lan937x_set_ageing_time,
.r_phy = lan937x_r_phy,
.w_phy = lan937x_w_phy,
.r_mib_cnt = ksz9477_r_mib_cnt,
@@ -421,7 +427,636 @@ static const u8 lan937x_shifts[] = {
[ALU_STAT_INDEX] = 8,
};
+static const struct regmap_range ksz8563_valid_regs[] = {
+ regmap_reg_range(0x0000, 0x0003),
+ regmap_reg_range(0x0006, 0x0006),
+ regmap_reg_range(0x000f, 0x001f),
+ regmap_reg_range(0x0100, 0x0100),
+ regmap_reg_range(0x0104, 0x0107),
+ regmap_reg_range(0x010d, 0x010d),
+ regmap_reg_range(0x0110, 0x0113),
+ regmap_reg_range(0x0120, 0x012b),
+ regmap_reg_range(0x0201, 0x0201),
+ regmap_reg_range(0x0210, 0x0213),
+ regmap_reg_range(0x0300, 0x0300),
+ regmap_reg_range(0x0302, 0x031b),
+ regmap_reg_range(0x0320, 0x032b),
+ regmap_reg_range(0x0330, 0x0336),
+ regmap_reg_range(0x0338, 0x033e),
+ regmap_reg_range(0x0340, 0x035f),
+ regmap_reg_range(0x0370, 0x0370),
+ regmap_reg_range(0x0378, 0x0378),
+ regmap_reg_range(0x037c, 0x037d),
+ regmap_reg_range(0x0390, 0x0393),
+ regmap_reg_range(0x0400, 0x040e),
+ regmap_reg_range(0x0410, 0x042f),
+ regmap_reg_range(0x0500, 0x0519),
+ regmap_reg_range(0x0520, 0x054b),
+ regmap_reg_range(0x0550, 0x05b3),
+
+ /* port 1 */
+ regmap_reg_range(0x1000, 0x1001),
+ regmap_reg_range(0x1004, 0x100b),
+ regmap_reg_range(0x1013, 0x1013),
+ regmap_reg_range(0x1017, 0x1017),
+ regmap_reg_range(0x101b, 0x101b),
+ regmap_reg_range(0x101f, 0x1021),
+ regmap_reg_range(0x1030, 0x1030),
+ regmap_reg_range(0x1100, 0x1111),
+ regmap_reg_range(0x111a, 0x111d),
+ regmap_reg_range(0x1122, 0x1127),
+ regmap_reg_range(0x112a, 0x112b),
+ regmap_reg_range(0x1136, 0x1139),
+ regmap_reg_range(0x113e, 0x113f),
+ regmap_reg_range(0x1400, 0x1401),
+ regmap_reg_range(0x1403, 0x1403),
+ regmap_reg_range(0x1410, 0x1417),
+ regmap_reg_range(0x1420, 0x1423),
+ regmap_reg_range(0x1500, 0x1507),
+ regmap_reg_range(0x1600, 0x1612),
+ regmap_reg_range(0x1800, 0x180f),
+ regmap_reg_range(0x1900, 0x1907),
+ regmap_reg_range(0x1914, 0x191b),
+ regmap_reg_range(0x1a00, 0x1a03),
+ regmap_reg_range(0x1a04, 0x1a08),
+ regmap_reg_range(0x1b00, 0x1b01),
+ regmap_reg_range(0x1b04, 0x1b04),
+ regmap_reg_range(0x1c00, 0x1c05),
+ regmap_reg_range(0x1c08, 0x1c1b),
+
+ /* port 2 */
+ regmap_reg_range(0x2000, 0x2001),
+ regmap_reg_range(0x2004, 0x200b),
+ regmap_reg_range(0x2013, 0x2013),
+ regmap_reg_range(0x2017, 0x2017),
+ regmap_reg_range(0x201b, 0x201b),
+ regmap_reg_range(0x201f, 0x2021),
+ regmap_reg_range(0x2030, 0x2030),
+ regmap_reg_range(0x2100, 0x2111),
+ regmap_reg_range(0x211a, 0x211d),
+ regmap_reg_range(0x2122, 0x2127),
+ regmap_reg_range(0x212a, 0x212b),
+ regmap_reg_range(0x2136, 0x2139),
+ regmap_reg_range(0x213e, 0x213f),
+ regmap_reg_range(0x2400, 0x2401),
+ regmap_reg_range(0x2403, 0x2403),
+ regmap_reg_range(0x2410, 0x2417),
+ regmap_reg_range(0x2420, 0x2423),
+ regmap_reg_range(0x2500, 0x2507),
+ regmap_reg_range(0x2600, 0x2612),
+ regmap_reg_range(0x2800, 0x280f),
+ regmap_reg_range(0x2900, 0x2907),
+ regmap_reg_range(0x2914, 0x291b),
+ regmap_reg_range(0x2a00, 0x2a03),
+ regmap_reg_range(0x2a04, 0x2a08),
+ regmap_reg_range(0x2b00, 0x2b01),
+ regmap_reg_range(0x2b04, 0x2b04),
+ regmap_reg_range(0x2c00, 0x2c05),
+ regmap_reg_range(0x2c08, 0x2c1b),
+
+ /* port 3 */
+ regmap_reg_range(0x3000, 0x3001),
+ regmap_reg_range(0x3004, 0x300b),
+ regmap_reg_range(0x3013, 0x3013),
+ regmap_reg_range(0x3017, 0x3017),
+ regmap_reg_range(0x301b, 0x301b),
+ regmap_reg_range(0x301f, 0x3021),
+ regmap_reg_range(0x3030, 0x3030),
+ regmap_reg_range(0x3300, 0x3301),
+ regmap_reg_range(0x3303, 0x3303),
+ regmap_reg_range(0x3400, 0x3401),
+ regmap_reg_range(0x3403, 0x3403),
+ regmap_reg_range(0x3410, 0x3417),
+ regmap_reg_range(0x3420, 0x3423),
+ regmap_reg_range(0x3500, 0x3507),
+ regmap_reg_range(0x3600, 0x3612),
+ regmap_reg_range(0x3800, 0x380f),
+ regmap_reg_range(0x3900, 0x3907),
+ regmap_reg_range(0x3914, 0x391b),
+ regmap_reg_range(0x3a00, 0x3a03),
+ regmap_reg_range(0x3a04, 0x3a08),
+ regmap_reg_range(0x3b00, 0x3b01),
+ regmap_reg_range(0x3b04, 0x3b04),
+ regmap_reg_range(0x3c00, 0x3c05),
+ regmap_reg_range(0x3c08, 0x3c1b),
+};
+
+static const struct regmap_access_table ksz8563_register_set = {
+ .yes_ranges = ksz8563_valid_regs,
+ .n_yes_ranges = ARRAY_SIZE(ksz8563_valid_regs),
+};
+
+static const struct regmap_range ksz9477_valid_regs[] = {
+ regmap_reg_range(0x0000, 0x0003),
+ regmap_reg_range(0x0006, 0x0006),
+ regmap_reg_range(0x0010, 0x001f),
+ regmap_reg_range(0x0100, 0x0100),
+ regmap_reg_range(0x0103, 0x0107),
+ regmap_reg_range(0x010d, 0x010d),
+ regmap_reg_range(0x0110, 0x0113),
+ regmap_reg_range(0x0120, 0x012b),
+ regmap_reg_range(0x0201, 0x0201),
+ regmap_reg_range(0x0210, 0x0213),
+ regmap_reg_range(0x0300, 0x0300),
+ regmap_reg_range(0x0302, 0x031b),
+ regmap_reg_range(0x0320, 0x032b),
+ regmap_reg_range(0x0330, 0x0336),
+ regmap_reg_range(0x0338, 0x033b),
+ regmap_reg_range(0x033e, 0x033e),
+ regmap_reg_range(0x0340, 0x035f),
+ regmap_reg_range(0x0370, 0x0370),
+ regmap_reg_range(0x0378, 0x0378),
+ regmap_reg_range(0x037c, 0x037d),
+ regmap_reg_range(0x0390, 0x0393),
+ regmap_reg_range(0x0400, 0x040e),
+ regmap_reg_range(0x0410, 0x042f),
+ regmap_reg_range(0x0444, 0x044b),
+ regmap_reg_range(0x0450, 0x046f),
+ regmap_reg_range(0x0500, 0x0519),
+ regmap_reg_range(0x0520, 0x054b),
+ regmap_reg_range(0x0550, 0x05b3),
+ regmap_reg_range(0x0604, 0x060b),
+ regmap_reg_range(0x0610, 0x0612),
+ regmap_reg_range(0x0614, 0x062c),
+ regmap_reg_range(0x0640, 0x0645),
+ regmap_reg_range(0x0648, 0x064d),
+
+ /* port 1 */
+ regmap_reg_range(0x1000, 0x1001),
+ regmap_reg_range(0x1013, 0x1013),
+ regmap_reg_range(0x1017, 0x1017),
+ regmap_reg_range(0x101b, 0x101b),
+ regmap_reg_range(0x101f, 0x1020),
+ regmap_reg_range(0x1030, 0x1030),
+ regmap_reg_range(0x1100, 0x1115),
+ regmap_reg_range(0x111a, 0x111f),
+ regmap_reg_range(0x1122, 0x1127),
+ regmap_reg_range(0x112a, 0x112b),
+ regmap_reg_range(0x1136, 0x1139),
+ regmap_reg_range(0x113e, 0x113f),
+ regmap_reg_range(0x1400, 0x1401),
+ regmap_reg_range(0x1403, 0x1403),
+ regmap_reg_range(0x1410, 0x1417),
+ regmap_reg_range(0x1420, 0x1423),
+ regmap_reg_range(0x1500, 0x1507),
+ regmap_reg_range(0x1600, 0x1613),
+ regmap_reg_range(0x1800, 0x180f),
+ regmap_reg_range(0x1820, 0x1827),
+ regmap_reg_range(0x1830, 0x1837),
+ regmap_reg_range(0x1840, 0x184b),
+ regmap_reg_range(0x1900, 0x1907),
+ regmap_reg_range(0x1914, 0x191b),
+ regmap_reg_range(0x1920, 0x1920),
+ regmap_reg_range(0x1923, 0x1927),
+ regmap_reg_range(0x1a00, 0x1a03),
+ regmap_reg_range(0x1a04, 0x1a07),
+ regmap_reg_range(0x1b00, 0x1b01),
+ regmap_reg_range(0x1b04, 0x1b04),
+ regmap_reg_range(0x1c00, 0x1c05),
+ regmap_reg_range(0x1c08, 0x1c1b),
+
+ /* port 2 */
+ regmap_reg_range(0x2000, 0x2001),
+ regmap_reg_range(0x2013, 0x2013),
+ regmap_reg_range(0x2017, 0x2017),
+ regmap_reg_range(0x201b, 0x201b),
+ regmap_reg_range(0x201f, 0x2020),
+ regmap_reg_range(0x2030, 0x2030),
+ regmap_reg_range(0x2100, 0x2115),
+ regmap_reg_range(0x211a, 0x211f),
+ regmap_reg_range(0x2122, 0x2127),
+ regmap_reg_range(0x212a, 0x212b),
+ regmap_reg_range(0x2136, 0x2139),
+ regmap_reg_range(0x213e, 0x213f),
+ regmap_reg_range(0x2400, 0x2401),
+ regmap_reg_range(0x2403, 0x2403),
+ regmap_reg_range(0x2410, 0x2417),
+ regmap_reg_range(0x2420, 0x2423),
+ regmap_reg_range(0x2500, 0x2507),
+ regmap_reg_range(0x2600, 0x2613),
+ regmap_reg_range(0x2800, 0x280f),
+ regmap_reg_range(0x2820, 0x2827),
+ regmap_reg_range(0x2830, 0x2837),
+ regmap_reg_range(0x2840, 0x284b),
+ regmap_reg_range(0x2900, 0x2907),
+ regmap_reg_range(0x2914, 0x291b),
+ regmap_reg_range(0x2920, 0x2920),
+ regmap_reg_range(0x2923, 0x2927),
+ regmap_reg_range(0x2a00, 0x2a03),
+ regmap_reg_range(0x2a04, 0x2a07),
+ regmap_reg_range(0x2b00, 0x2b01),
+ regmap_reg_range(0x2b04, 0x2b04),
+ regmap_reg_range(0x2c00, 0x2c05),
+ regmap_reg_range(0x2c08, 0x2c1b),
+
+ /* port 3 */
+ regmap_reg_range(0x3000, 0x3001),
+ regmap_reg_range(0x3013, 0x3013),
+ regmap_reg_range(0x3017, 0x3017),
+ regmap_reg_range(0x301b, 0x301b),
+ regmap_reg_range(0x301f, 0x3020),
+ regmap_reg_range(0x3030, 0x3030),
+ regmap_reg_range(0x3100, 0x3115),
+ regmap_reg_range(0x311a, 0x311f),
+ regmap_reg_range(0x3122, 0x3127),
+ regmap_reg_range(0x312a, 0x312b),
+ regmap_reg_range(0x3136, 0x3139),
+ regmap_reg_range(0x313e, 0x313f),
+ regmap_reg_range(0x3400, 0x3401),
+ regmap_reg_range(0x3403, 0x3403),
+ regmap_reg_range(0x3410, 0x3417),
+ regmap_reg_range(0x3420, 0x3423),
+ regmap_reg_range(0x3500, 0x3507),
+ regmap_reg_range(0x3600, 0x3613),
+ regmap_reg_range(0x3800, 0x380f),
+ regmap_reg_range(0x3820, 0x3827),
+ regmap_reg_range(0x3830, 0x3837),
+ regmap_reg_range(0x3840, 0x384b),
+ regmap_reg_range(0x3900, 0x3907),
+ regmap_reg_range(0x3914, 0x391b),
+ regmap_reg_range(0x3920, 0x3920),
+ regmap_reg_range(0x3923, 0x3927),
+ regmap_reg_range(0x3a00, 0x3a03),
+ regmap_reg_range(0x3a04, 0x3a07),
+ regmap_reg_range(0x3b00, 0x3b01),
+ regmap_reg_range(0x3b04, 0x3b04),
+ regmap_reg_range(0x3c00, 0x3c05),
+ regmap_reg_range(0x3c08, 0x3c1b),
+
+ /* port 4 */
+ regmap_reg_range(0x4000, 0x4001),
+ regmap_reg_range(0x4013, 0x4013),
+ regmap_reg_range(0x4017, 0x4017),
+ regmap_reg_range(0x401b, 0x401b),
+ regmap_reg_range(0x401f, 0x4020),
+ regmap_reg_range(0x4030, 0x4030),
+ regmap_reg_range(0x4100, 0x4115),
+ regmap_reg_range(0x411a, 0x411f),
+ regmap_reg_range(0x4122, 0x4127),
+ regmap_reg_range(0x412a, 0x412b),
+ regmap_reg_range(0x4136, 0x4139),
+ regmap_reg_range(0x413e, 0x413f),
+ regmap_reg_range(0x4400, 0x4401),
+ regmap_reg_range(0x4403, 0x4403),
+ regmap_reg_range(0x4410, 0x4417),
+ regmap_reg_range(0x4420, 0x4423),
+ regmap_reg_range(0x4500, 0x4507),
+ regmap_reg_range(0x4600, 0x4613),
+ regmap_reg_range(0x4800, 0x480f),
+ regmap_reg_range(0x4820, 0x4827),
+ regmap_reg_range(0x4830, 0x4837),
+ regmap_reg_range(0x4840, 0x484b),
+ regmap_reg_range(0x4900, 0x4907),
+ regmap_reg_range(0x4914, 0x491b),
+ regmap_reg_range(0x4920, 0x4920),
+ regmap_reg_range(0x4923, 0x4927),
+ regmap_reg_range(0x4a00, 0x4a03),
+ regmap_reg_range(0x4a04, 0x4a07),
+ regmap_reg_range(0x4b00, 0x4b01),
+ regmap_reg_range(0x4b04, 0x4b04),
+ regmap_reg_range(0x4c00, 0x4c05),
+ regmap_reg_range(0x4c08, 0x4c1b),
+
+ /* port 5 */
+ regmap_reg_range(0x5000, 0x5001),
+ regmap_reg_range(0x5013, 0x5013),
+ regmap_reg_range(0x5017, 0x5017),
+ regmap_reg_range(0x501b, 0x501b),
+ regmap_reg_range(0x501f, 0x5020),
+ regmap_reg_range(0x5030, 0x5030),
+ regmap_reg_range(0x5100, 0x5115),
+ regmap_reg_range(0x511a, 0x511f),
+ regmap_reg_range(0x5122, 0x5127),
+ regmap_reg_range(0x512a, 0x512b),
+ regmap_reg_range(0x5136, 0x5139),
+ regmap_reg_range(0x513e, 0x513f),
+ regmap_reg_range(0x5400, 0x5401),
+ regmap_reg_range(0x5403, 0x5403),
+ regmap_reg_range(0x5410, 0x5417),
+ regmap_reg_range(0x5420, 0x5423),
+ regmap_reg_range(0x5500, 0x5507),
+ regmap_reg_range(0x5600, 0x5613),
+ regmap_reg_range(0x5800, 0x580f),
+ regmap_reg_range(0x5820, 0x5827),
+ regmap_reg_range(0x5830, 0x5837),
+ regmap_reg_range(0x5840, 0x584b),
+ regmap_reg_range(0x5900, 0x5907),
+ regmap_reg_range(0x5914, 0x591b),
+ regmap_reg_range(0x5920, 0x5920),
+ regmap_reg_range(0x5923, 0x5927),
+ regmap_reg_range(0x5a00, 0x5a03),
+ regmap_reg_range(0x5a04, 0x5a07),
+ regmap_reg_range(0x5b00, 0x5b01),
+ regmap_reg_range(0x5b04, 0x5b04),
+ regmap_reg_range(0x5c00, 0x5c05),
+ regmap_reg_range(0x5c08, 0x5c1b),
+
+ /* port 6 */
+ regmap_reg_range(0x6000, 0x6001),
+ regmap_reg_range(0x6013, 0x6013),
+ regmap_reg_range(0x6017, 0x6017),
+ regmap_reg_range(0x601b, 0x601b),
+ regmap_reg_range(0x601f, 0x6020),
+ regmap_reg_range(0x6030, 0x6030),
+ regmap_reg_range(0x6300, 0x6301),
+ regmap_reg_range(0x6400, 0x6401),
+ regmap_reg_range(0x6403, 0x6403),
+ regmap_reg_range(0x6410, 0x6417),
+ regmap_reg_range(0x6420, 0x6423),
+ regmap_reg_range(0x6500, 0x6507),
+ regmap_reg_range(0x6600, 0x6613),
+ regmap_reg_range(0x6800, 0x680f),
+ regmap_reg_range(0x6820, 0x6827),
+ regmap_reg_range(0x6830, 0x6837),
+ regmap_reg_range(0x6840, 0x684b),
+ regmap_reg_range(0x6900, 0x6907),
+ regmap_reg_range(0x6914, 0x691b),
+ regmap_reg_range(0x6920, 0x6920),
+ regmap_reg_range(0x6923, 0x6927),
+ regmap_reg_range(0x6a00, 0x6a03),
+ regmap_reg_range(0x6a04, 0x6a07),
+ regmap_reg_range(0x6b00, 0x6b01),
+ regmap_reg_range(0x6b04, 0x6b04),
+ regmap_reg_range(0x6c00, 0x6c05),
+ regmap_reg_range(0x6c08, 0x6c1b),
+
+ /* port 7 */
+ regmap_reg_range(0x7000, 0x7001),
+ regmap_reg_range(0x7013, 0x7013),
+ regmap_reg_range(0x7017, 0x7017),
+ regmap_reg_range(0x701b, 0x701b),
+ regmap_reg_range(0x701f, 0x7020),
+ regmap_reg_range(0x7030, 0x7030),
+ regmap_reg_range(0x7200, 0x7203),
+ regmap_reg_range(0x7206, 0x7207),
+ regmap_reg_range(0x7300, 0x7301),
+ regmap_reg_range(0x7400, 0x7401),
+ regmap_reg_range(0x7403, 0x7403),
+ regmap_reg_range(0x7410, 0x7417),
+ regmap_reg_range(0x7420, 0x7423),
+ regmap_reg_range(0x7500, 0x7507),
+ regmap_reg_range(0x7600, 0x7613),
+ regmap_reg_range(0x7800, 0x780f),
+ regmap_reg_range(0x7820, 0x7827),
+ regmap_reg_range(0x7830, 0x7837),
+ regmap_reg_range(0x7840, 0x784b),
+ regmap_reg_range(0x7900, 0x7907),
+ regmap_reg_range(0x7914, 0x791b),
+ regmap_reg_range(0x7920, 0x7920),
+ regmap_reg_range(0x7923, 0x7927),
+ regmap_reg_range(0x7a00, 0x7a03),
+ regmap_reg_range(0x7a04, 0x7a07),
+ regmap_reg_range(0x7b00, 0x7b01),
+ regmap_reg_range(0x7b04, 0x7b04),
+ regmap_reg_range(0x7c00, 0x7c05),
+ regmap_reg_range(0x7c08, 0x7c1b),
+};
+
+static const struct regmap_access_table ksz9477_register_set = {
+ .yes_ranges = ksz9477_valid_regs,
+ .n_yes_ranges = ARRAY_SIZE(ksz9477_valid_regs),
+};
+
+static const struct regmap_range ksz9896_valid_regs[] = {
+ regmap_reg_range(0x0000, 0x0003),
+ regmap_reg_range(0x0006, 0x0006),
+ regmap_reg_range(0x0010, 0x001f),
+ regmap_reg_range(0x0100, 0x0100),
+ regmap_reg_range(0x0103, 0x0107),
+ regmap_reg_range(0x010d, 0x010d),
+ regmap_reg_range(0x0110, 0x0113),
+ regmap_reg_range(0x0120, 0x0127),
+ regmap_reg_range(0x0201, 0x0201),
+ regmap_reg_range(0x0210, 0x0213),
+ regmap_reg_range(0x0300, 0x0300),
+ regmap_reg_range(0x0302, 0x030b),
+ regmap_reg_range(0x0310, 0x031b),
+ regmap_reg_range(0x0320, 0x032b),
+ regmap_reg_range(0x0330, 0x0336),
+ regmap_reg_range(0x0338, 0x033b),
+ regmap_reg_range(0x033e, 0x033e),
+ regmap_reg_range(0x0340, 0x035f),
+ regmap_reg_range(0x0370, 0x0370),
+ regmap_reg_range(0x0378, 0x0378),
+ regmap_reg_range(0x037c, 0x037d),
+ regmap_reg_range(0x0390, 0x0393),
+ regmap_reg_range(0x0400, 0x040e),
+ regmap_reg_range(0x0410, 0x042f),
+
+ /* port 1 */
+ regmap_reg_range(0x1000, 0x1001),
+ regmap_reg_range(0x1013, 0x1013),
+ regmap_reg_range(0x1017, 0x1017),
+ regmap_reg_range(0x101b, 0x101b),
+ regmap_reg_range(0x101f, 0x1020),
+ regmap_reg_range(0x1030, 0x1030),
+ regmap_reg_range(0x1100, 0x1115),
+ regmap_reg_range(0x111a, 0x111f),
+ regmap_reg_range(0x1122, 0x1127),
+ regmap_reg_range(0x112a, 0x112b),
+ regmap_reg_range(0x1136, 0x1139),
+ regmap_reg_range(0x113e, 0x113f),
+ regmap_reg_range(0x1400, 0x1401),
+ regmap_reg_range(0x1403, 0x1403),
+ regmap_reg_range(0x1410, 0x1417),
+ regmap_reg_range(0x1420, 0x1423),
+ regmap_reg_range(0x1500, 0x1507),
+ regmap_reg_range(0x1600, 0x1612),
+ regmap_reg_range(0x1800, 0x180f),
+ regmap_reg_range(0x1820, 0x1827),
+ regmap_reg_range(0x1830, 0x1837),
+ regmap_reg_range(0x1840, 0x184b),
+ regmap_reg_range(0x1900, 0x1907),
+ regmap_reg_range(0x1914, 0x1915),
+ regmap_reg_range(0x1a00, 0x1a03),
+ regmap_reg_range(0x1a04, 0x1a07),
+ regmap_reg_range(0x1b00, 0x1b01),
+ regmap_reg_range(0x1b04, 0x1b04),
+
+ /* port 2 */
+ regmap_reg_range(0x2000, 0x2001),
+ regmap_reg_range(0x2013, 0x2013),
+ regmap_reg_range(0x2017, 0x2017),
+ regmap_reg_range(0x201b, 0x201b),
+ regmap_reg_range(0x201f, 0x2020),
+ regmap_reg_range(0x2030, 0x2030),
+ regmap_reg_range(0x2100, 0x2115),
+ regmap_reg_range(0x211a, 0x211f),
+ regmap_reg_range(0x2122, 0x2127),
+ regmap_reg_range(0x212a, 0x212b),
+ regmap_reg_range(0x2136, 0x2139),
+ regmap_reg_range(0x213e, 0x213f),
+ regmap_reg_range(0x2400, 0x2401),
+ regmap_reg_range(0x2403, 0x2403),
+ regmap_reg_range(0x2410, 0x2417),
+ regmap_reg_range(0x2420, 0x2423),
+ regmap_reg_range(0x2500, 0x2507),
+ regmap_reg_range(0x2600, 0x2612),
+ regmap_reg_range(0x2800, 0x280f),
+ regmap_reg_range(0x2820, 0x2827),
+ regmap_reg_range(0x2830, 0x2837),
+ regmap_reg_range(0x2840, 0x284b),
+ regmap_reg_range(0x2900, 0x2907),
+ regmap_reg_range(0x2914, 0x2915),
+ regmap_reg_range(0x2a00, 0x2a03),
+ regmap_reg_range(0x2a04, 0x2a07),
+ regmap_reg_range(0x2b00, 0x2b01),
+ regmap_reg_range(0x2b04, 0x2b04),
+
+ /* port 3 */
+ regmap_reg_range(0x3000, 0x3001),
+ regmap_reg_range(0x3013, 0x3013),
+ regmap_reg_range(0x3017, 0x3017),
+ regmap_reg_range(0x301b, 0x301b),
+ regmap_reg_range(0x301f, 0x3020),
+ regmap_reg_range(0x3030, 0x3030),
+ regmap_reg_range(0x3100, 0x3115),
+ regmap_reg_range(0x311a, 0x311f),
+ regmap_reg_range(0x3122, 0x3127),
+ regmap_reg_range(0x312a, 0x312b),
+ regmap_reg_range(0x3136, 0x3139),
+ regmap_reg_range(0x313e, 0x313f),
+ regmap_reg_range(0x3400, 0x3401),
+ regmap_reg_range(0x3403, 0x3403),
+ regmap_reg_range(0x3410, 0x3417),
+ regmap_reg_range(0x3420, 0x3423),
+ regmap_reg_range(0x3500, 0x3507),
+ regmap_reg_range(0x3600, 0x3612),
+ regmap_reg_range(0x3800, 0x380f),
+ regmap_reg_range(0x3820, 0x3827),
+ regmap_reg_range(0x3830, 0x3837),
+ regmap_reg_range(0x3840, 0x384b),
+ regmap_reg_range(0x3900, 0x3907),
+ regmap_reg_range(0x3914, 0x3915),
+ regmap_reg_range(0x3a00, 0x3a03),
+ regmap_reg_range(0x3a04, 0x3a07),
+ regmap_reg_range(0x3b00, 0x3b01),
+ regmap_reg_range(0x3b04, 0x3b04),
+
+ /* port 4 */
+ regmap_reg_range(0x4000, 0x4001),
+ regmap_reg_range(0x4013, 0x4013),
+ regmap_reg_range(0x4017, 0x4017),
+ regmap_reg_range(0x401b, 0x401b),
+ regmap_reg_range(0x401f, 0x4020),
+ regmap_reg_range(0x4030, 0x4030),
+ regmap_reg_range(0x4100, 0x4115),
+ regmap_reg_range(0x411a, 0x411f),
+ regmap_reg_range(0x4122, 0x4127),
+ regmap_reg_range(0x412a, 0x412b),
+ regmap_reg_range(0x4136, 0x4139),
+ regmap_reg_range(0x413e, 0x413f),
+ regmap_reg_range(0x4400, 0x4401),
+ regmap_reg_range(0x4403, 0x4403),
+ regmap_reg_range(0x4410, 0x4417),
+ regmap_reg_range(0x4420, 0x4423),
+ regmap_reg_range(0x4500, 0x4507),
+ regmap_reg_range(0x4600, 0x4612),
+ regmap_reg_range(0x4800, 0x480f),
+ regmap_reg_range(0x4820, 0x4827),
+ regmap_reg_range(0x4830, 0x4837),
+ regmap_reg_range(0x4840, 0x484b),
+ regmap_reg_range(0x4900, 0x4907),
+ regmap_reg_range(0x4914, 0x4915),
+ regmap_reg_range(0x4a00, 0x4a03),
+ regmap_reg_range(0x4a04, 0x4a07),
+ regmap_reg_range(0x4b00, 0x4b01),
+ regmap_reg_range(0x4b04, 0x4b04),
+
+ /* port 5 */
+ regmap_reg_range(0x5000, 0x5001),
+ regmap_reg_range(0x5013, 0x5013),
+ regmap_reg_range(0x5017, 0x5017),
+ regmap_reg_range(0x501b, 0x501b),
+ regmap_reg_range(0x501f, 0x5020),
+ regmap_reg_range(0x5030, 0x5030),
+ regmap_reg_range(0x5100, 0x5115),
+ regmap_reg_range(0x511a, 0x511f),
+ regmap_reg_range(0x5122, 0x5127),
+ regmap_reg_range(0x512a, 0x512b),
+ regmap_reg_range(0x5136, 0x5139),
+ regmap_reg_range(0x513e, 0x513f),
+ regmap_reg_range(0x5400, 0x5401),
+ regmap_reg_range(0x5403, 0x5403),
+ regmap_reg_range(0x5410, 0x5417),
+ regmap_reg_range(0x5420, 0x5423),
+ regmap_reg_range(0x5500, 0x5507),
+ regmap_reg_range(0x5600, 0x5612),
+ regmap_reg_range(0x5800, 0x580f),
+ regmap_reg_range(0x5820, 0x5827),
+ regmap_reg_range(0x5830, 0x5837),
+ regmap_reg_range(0x5840, 0x584b),
+ regmap_reg_range(0x5900, 0x5907),
+ regmap_reg_range(0x5914, 0x5915),
+ regmap_reg_range(0x5a00, 0x5a03),
+ regmap_reg_range(0x5a04, 0x5a07),
+ regmap_reg_range(0x5b00, 0x5b01),
+ regmap_reg_range(0x5b04, 0x5b04),
+
+ /* port 6 */
+ regmap_reg_range(0x6000, 0x6001),
+ regmap_reg_range(0x6013, 0x6013),
+ regmap_reg_range(0x6017, 0x6017),
+ regmap_reg_range(0x601b, 0x601b),
+ regmap_reg_range(0x601f, 0x6020),
+ regmap_reg_range(0x6030, 0x6030),
+ regmap_reg_range(0x6100, 0x6115),
+ regmap_reg_range(0x611a, 0x611f),
+ regmap_reg_range(0x6122, 0x6127),
+ regmap_reg_range(0x612a, 0x612b),
+ regmap_reg_range(0x6136, 0x6139),
+ regmap_reg_range(0x613e, 0x613f),
+ regmap_reg_range(0x6300, 0x6301),
+ regmap_reg_range(0x6400, 0x6401),
+ regmap_reg_range(0x6403, 0x6403),
+ regmap_reg_range(0x6410, 0x6417),
+ regmap_reg_range(0x6420, 0x6423),
+ regmap_reg_range(0x6500, 0x6507),
+ regmap_reg_range(0x6600, 0x6612),
+ regmap_reg_range(0x6800, 0x680f),
+ regmap_reg_range(0x6820, 0x6827),
+ regmap_reg_range(0x6830, 0x6837),
+ regmap_reg_range(0x6840, 0x684b),
+ regmap_reg_range(0x6900, 0x6907),
+ regmap_reg_range(0x6914, 0x6915),
+ regmap_reg_range(0x6a00, 0x6a03),
+ regmap_reg_range(0x6a04, 0x6a07),
+ regmap_reg_range(0x6b00, 0x6b01),
+ regmap_reg_range(0x6b04, 0x6b04),
+};
+
+static const struct regmap_access_table ksz9896_register_set = {
+ .yes_ranges = ksz9896_valid_regs,
+ .n_yes_ranges = ARRAY_SIZE(ksz9896_valid_regs),
+};
+
const struct ksz_chip_data ksz_switch_chips[] = {
+ [KSZ8563] = {
+ .chip_id = KSZ8563_CHIP_ID,
+ .dev_name = "KSZ8563",
+ .num_vlans = 4096,
+ .num_alus = 4096,
+ .num_statics = 16,
+ .cpu_ports = 0x07, /* can be configured as cpu port */
+ .port_cnt = 3, /* total port count */
+ .ops = &ksz9477_dev_ops,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = ksz9477_masks,
+ .shifts = ksz9477_shifts,
+ .xmii_ctrl0 = ksz9477_xmii_ctrl0,
+ .xmii_ctrl1 = ksz8795_xmii_ctrl1, /* Same as ksz8795 */
+ .supports_mii = {false, false, true},
+ .supports_rmii = {false, false, true},
+ .supports_rgmii = {false, false, true},
+ .internal_phy = {true, true, false},
+ .gbit_capable = {false, false, true},
+ .wr_table = &ksz8563_register_set,
+ .rd_table = &ksz8563_register_set,
+ },
+
[KSZ8795] = {
.chip_id = KSZ8795_CHIP_ID,
.dev_name = "KSZ8795",
@@ -536,6 +1171,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.num_statics = 16,
.cpu_ports = 0x7F, /* can be configured as cpu port */
.port_cnt = 7, /* total physical port count */
+ .port_nirqs = 4,
.ops = &ksz9477_dev_ops,
.phy_errata_9477 = true,
.mib_names = ksz9477_mib_names,
@@ -554,6 +1190,41 @@ const struct ksz_chip_data ksz_switch_chips[] = {
false, true, false},
.internal_phy = {true, true, true, true,
true, false, false},
+ .gbit_capable = {true, true, true, true, true, true, true},
+ .wr_table = &ksz9477_register_set,
+ .rd_table = &ksz9477_register_set,
+ },
+
+ [KSZ9896] = {
+ .chip_id = KSZ9896_CHIP_ID,
+ .dev_name = "KSZ9896",
+ .num_vlans = 4096,
+ .num_alus = 4096,
+ .num_statics = 16,
+ .cpu_ports = 0x3F, /* can be configured as cpu port */
+ .port_cnt = 6, /* total physical port count */
+ .port_nirqs = 2,
+ .ops = &ksz9477_dev_ops,
+ .phy_errata_9477 = true,
+ .mib_names = ksz9477_mib_names,
+ .mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
+ .reg_mib_cnt = MIB_COUNTER_NUM,
+ .regs = ksz9477_regs,
+ .masks = ksz9477_masks,
+ .shifts = ksz9477_shifts,
+ .xmii_ctrl0 = ksz9477_xmii_ctrl0,
+ .xmii_ctrl1 = ksz9477_xmii_ctrl1,
+ .supports_mii = {false, false, false, false,
+ false, true},
+ .supports_rmii = {false, false, false, false,
+ false, true},
+ .supports_rgmii = {false, false, false, false,
+ false, true},
+ .internal_phy = {true, true, true, true,
+ true, false},
+ .gbit_capable = {true, true, true, true, true, true},
+ .wr_table = &ksz9896_register_set,
+ .rd_table = &ksz9896_register_set,
},
[KSZ9897] = {
@@ -564,6 +1235,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.num_statics = 16,
.cpu_ports = 0x7F, /* can be configured as cpu port */
.port_cnt = 7, /* total physical port count */
+ .port_nirqs = 2,
.ops = &ksz9477_dev_ops,
.phy_errata_9477 = true,
.mib_names = ksz9477_mib_names,
@@ -582,6 +1254,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
false, true, true},
.internal_phy = {true, true, true, true,
true, false, false},
+ .gbit_capable = {true, true, true, true, true, true, true},
},
[KSZ9893] = {
@@ -592,6 +1265,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.num_statics = 16,
.cpu_ports = 0x07, /* can be configured as cpu port */
.port_cnt = 3, /* total port count */
+ .port_nirqs = 2,
.ops = &ksz9477_dev_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
@@ -605,6 +1279,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.supports_rmii = {false, false, true},
.supports_rgmii = {false, false, true},
.internal_phy = {true, true, false},
+ .gbit_capable = {true, true, true},
},
[KSZ9567] = {
@@ -615,6 +1290,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.num_statics = 16,
.cpu_ports = 0x7F, /* can be configured as cpu port */
.port_cnt = 7, /* total physical port count */
+ .port_nirqs = 3,
.ops = &ksz9477_dev_ops,
.phy_errata_9477 = true,
.mib_names = ksz9477_mib_names,
@@ -633,6 +1309,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
false, true, true},
.internal_phy = {true, true, true, true,
true, false, false},
+ .gbit_capable = {true, true, true, true, true, true, true},
},
[LAN9370] = {
@@ -643,6 +1320,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.num_statics = 256,
.cpu_ports = 0x10, /* can be configured as cpu port */
.port_cnt = 5, /* total physical port count */
+ .port_nirqs = 6,
.ops = &lan937x_dev_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
@@ -666,6 +1344,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.num_statics = 256,
.cpu_ports = 0x30, /* can be configured as cpu port */
.port_cnt = 6, /* total physical port count */
+ .port_nirqs = 6,
.ops = &lan937x_dev_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
@@ -689,6 +1368,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.num_statics = 256,
.cpu_ports = 0x30, /* can be configured as cpu port */
.port_cnt = 8, /* total physical port count */
+ .port_nirqs = 6,
.ops = &lan937x_dev_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
@@ -716,6 +1396,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.num_statics = 256,
.cpu_ports = 0x38, /* can be configured as cpu port */
.port_cnt = 5, /* total physical port count */
+ .port_nirqs = 6,
.ops = &lan937x_dev_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
@@ -743,6 +1424,7 @@ const struct ksz_chip_data ksz_switch_chips[] = {
.num_statics = 256,
.cpu_ports = 0x30, /* can be configured as cpu port */
.port_cnt = 8, /* total physical port count */
+ .port_nirqs = 6,
.ops = &lan937x_dev_ops,
.mib_names = ksz9477_mib_names,
.mib_cnt = ARRAY_SIZE(ksz9477_mib_names),
@@ -974,9 +1656,280 @@ static void ksz_update_port_member(struct ksz_device *dev, int port)
dev->dev_ops->cfg_port_member(dev, port, port_member | cpu_port);
}
+static int ksz_sw_mdio_read(struct mii_bus *bus, int addr, int regnum)
+{
+ struct ksz_device *dev = bus->priv;
+ u16 val;
+ int ret;
+
+ if (regnum & MII_ADDR_C45)
+ return -EOPNOTSUPP;
+
+ ret = dev->dev_ops->r_phy(dev, addr, regnum, &val);
+ if (ret < 0)
+ return ret;
+
+ return val;
+}
+
+static int ksz_sw_mdio_write(struct mii_bus *bus, int addr, int regnum,
+ u16 val)
+{
+ struct ksz_device *dev = bus->priv;
+
+ if (regnum & MII_ADDR_C45)
+ return -EOPNOTSUPP;
+
+ return dev->dev_ops->w_phy(dev, addr, regnum, val);
+}
+
+static int ksz_irq_phy_setup(struct ksz_device *dev)
+{
+ struct dsa_switch *ds = dev->ds;
+ int phy;
+ int irq;
+ int ret;
+
+ for (phy = 0; phy < KSZ_MAX_NUM_PORTS; phy++) {
+ if (BIT(phy) & ds->phys_mii_mask) {
+ irq = irq_find_mapping(dev->ports[phy].pirq.domain,
+ PORT_SRC_PHY_INT);
+ if (irq < 0) {
+ ret = irq;
+ goto out;
+ }
+ ds->slave_mii_bus->irq[phy] = irq;
+ }
+ }
+ return 0;
+out:
+ while (phy--)
+ if (BIT(phy) & ds->phys_mii_mask)
+ irq_dispose_mapping(ds->slave_mii_bus->irq[phy]);
+
+ return ret;
+}
+
+static void ksz_irq_phy_free(struct ksz_device *dev)
+{
+ struct dsa_switch *ds = dev->ds;
+ int phy;
+
+ for (phy = 0; phy < KSZ_MAX_NUM_PORTS; phy++)
+ if (BIT(phy) & ds->phys_mii_mask)
+ irq_dispose_mapping(ds->slave_mii_bus->irq[phy]);
+}
+
+static int ksz_mdio_register(struct ksz_device *dev)
+{
+ struct dsa_switch *ds = dev->ds;
+ struct device_node *mdio_np;
+ struct mii_bus *bus;
+ int ret;
+
+ mdio_np = of_get_child_by_name(dev->dev->of_node, "mdio");
+ if (!mdio_np)
+ return 0;
+
+ bus = devm_mdiobus_alloc(ds->dev);
+ if (!bus) {
+ of_node_put(mdio_np);
+ return -ENOMEM;
+ }
+
+ bus->priv = dev;
+ bus->read = ksz_sw_mdio_read;
+ bus->write = ksz_sw_mdio_write;
+ bus->name = "ksz slave smi";
+ snprintf(bus->id, MII_BUS_ID_SIZE, "SMI-%d", ds->index);
+ bus->parent = ds->dev;
+ bus->phy_mask = ~ds->phys_mii_mask;
+
+ ds->slave_mii_bus = bus;
+
+ if (dev->irq > 0) {
+ ret = ksz_irq_phy_setup(dev);
+ if (ret) {
+ of_node_put(mdio_np);
+ return ret;
+ }
+ }
+
+ ret = devm_of_mdiobus_register(ds->dev, bus, mdio_np);
+ if (ret) {
+ dev_err(ds->dev, "unable to register MDIO bus %s\n",
+ bus->id);
+ if (dev->irq > 0)
+ ksz_irq_phy_free(dev);
+ }
+
+ of_node_put(mdio_np);
+
+ return ret;
+}
+
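+/* The irqchip callbacks below only update a cached mask; the hardware
+ * register is written back in ksz_irq_bus_sync_unlock(), since the
+ * underlying register bus (SPI/I2C) cannot be accessed from atomic context.
+ */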
+static void ksz_irq_mask(struct irq_data *d)
+{
+ struct ksz_irq *kirq = irq_data_get_irq_chip_data(d);
+
+ kirq->masked |= BIT(d->hwirq);
+}
+
+static void ksz_irq_unmask(struct irq_data *d)
+{
+ struct ksz_irq *kirq = irq_data_get_irq_chip_data(d);
+
+ kirq->masked &= ~BIT(d->hwirq);
+}
+
+static void ksz_irq_bus_lock(struct irq_data *d)
+{
+ struct ksz_irq *kirq = irq_data_get_irq_chip_data(d);
+
+ mutex_lock(&kirq->dev->lock_irq);
+}
+
+static void ksz_irq_bus_sync_unlock(struct irq_data *d)
+{
+ struct ksz_irq *kirq = irq_data_get_irq_chip_data(d);
+ struct ksz_device *dev = kirq->dev;
+ int ret;
+
+ ret = ksz_write32(dev, kirq->reg_mask, kirq->masked);
+ if (ret)
+ dev_err(dev->dev, "failed to change IRQ mask\n");
+
+ mutex_unlock(&dev->lock_irq);
+}
+
+static const struct irq_chip ksz_irq_chip = {
+ .name = "ksz-irq",
+ .irq_mask = ksz_irq_mask,
+ .irq_unmask = ksz_irq_unmask,
+ .irq_bus_lock = ksz_irq_bus_lock,
+ .irq_bus_sync_unlock = ksz_irq_bus_sync_unlock,
+};
+
+static int ksz_irq_domain_map(struct irq_domain *d,
+ unsigned int irq, irq_hw_number_t hwirq)
+{
+ irq_set_chip_data(irq, d->host_data);
+ irq_set_chip_and_handler(irq, &ksz_irq_chip, handle_level_irq);
+ irq_set_noprobe(irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops ksz_irq_domain_ops = {
+ .map = ksz_irq_domain_map,
+ .xlate = irq_domain_xlate_twocell,
+};
+
+static void ksz_irq_free(struct ksz_irq *kirq)
+{
+ int irq, virq;
+
+ free_irq(kirq->irq_num, kirq);
+
+ for (irq = 0; irq < kirq->nirqs; irq++) {
+ virq = irq_find_mapping(kirq->domain, irq);
+ irq_dispose_mapping(virq);
+ }
+
+ irq_domain_remove(kirq->domain);
+}
+
+static irqreturn_t ksz_irq_thread_fn(int irq, void *dev_id)
+{
+ struct ksz_irq *kirq = dev_id;
+ unsigned int nhandled = 0;
+ struct ksz_device *dev;
+ unsigned int sub_irq;
+ u8 data;
+ int ret;
+ u8 n;
+
+ dev = kirq->dev;
+
+ /* Read interrupt status register */
+ ret = ksz_read8(dev, kirq->reg_status, &data);
+ if (ret)
+ goto out;
+
+ for (n = 0; n < kirq->nirqs; ++n) {
+ if (data & BIT(n)) {
+ sub_irq = irq_find_mapping(kirq->domain, n);
+ handle_nested_irq(sub_irq);
+ ++nhandled;
+ }
+ }
+out:
+ return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE);
+}
+
+static int ksz_irq_common_setup(struct ksz_device *dev, struct ksz_irq *kirq)
+{
+ int ret, n;
+
+ kirq->dev = dev;
+ kirq->masked = ~0;
+
+ kirq->domain = irq_domain_add_simple(dev->dev->of_node, kirq->nirqs, 0,
+ &ksz_irq_domain_ops, kirq);
+ if (!kirq->domain)
+ return -ENOMEM;
+
+ for (n = 0; n < kirq->nirqs; n++)
+ irq_create_mapping(kirq->domain, n);
+
+ ret = request_threaded_irq(kirq->irq_num, NULL, ksz_irq_thread_fn,
+ IRQF_ONESHOT | IRQF_TRIGGER_FALLING,
+ kirq->name, kirq);
+ if (ret)
+ goto out;
+
+ return 0;
+
+out:
+ ksz_irq_free(kirq);
+
+ return ret;
+}
+
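+/* The switch exposes a two-level interrupt hierarchy: the global interrupt
+ * (girq) demultiplexes one source per port, and each port interrupt (pirq)
+ * demultiplexes the per-port sources, e.g. the PHY interrupt used by the
+ * slave MDIO bus above.
+ */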
+static int ksz_girq_setup(struct ksz_device *dev)
+{
+ struct ksz_irq *girq = &dev->girq;
+
+ girq->nirqs = dev->info->port_cnt;
+ girq->reg_mask = REG_SW_PORT_INT_MASK__1;
+ girq->reg_status = REG_SW_PORT_INT_STATUS__1;
+ snprintf(girq->name, sizeof(girq->name), "global_port_irq");
+
+ girq->irq_num = dev->irq;
+
+ return ksz_irq_common_setup(dev, girq);
+}
+
+static int ksz_pirq_setup(struct ksz_device *dev, u8 p)
+{
+ struct ksz_irq *pirq = &dev->ports[p].pirq;
+
+ pirq->nirqs = dev->info->port_nirqs;
+ pirq->reg_mask = dev->dev_ops->get_port_addr(p, REG_PORT_INT_MASK);
+ pirq->reg_status = dev->dev_ops->get_port_addr(p, REG_PORT_INT_STATUS);
+ snprintf(pirq->name, sizeof(pirq->name), "port_irq-%d", p);
+
+ pirq->irq_num = irq_find_mapping(dev->girq.domain, p);
+ if (pirq->irq_num < 0)
+ return pirq->irq_num;
+
+ return ksz_irq_common_setup(dev, pirq);
+}
+
static int ksz_setup(struct dsa_switch *ds)
{
struct ksz_device *dev = ds->priv;
+ struct dsa_port *dp;
struct ksz_port *p;
const u16 *regs;
int ret;
@@ -1025,11 +1978,55 @@ static int ksz_setup(struct dsa_switch *ds)
p = &dev->ports[dev->cpu_port];
p->learning = true;
+ if (dev->irq > 0) {
+ ret = ksz_girq_setup(dev);
+ if (ret)
+ return ret;
+
+ dsa_switch_for_each_user_port(dp, dev->ds) {
+ ret = ksz_pirq_setup(dev, dp->index);
+ if (ret)
+ goto out_girq;
+ }
+ }
+
+ ret = ksz_mdio_register(dev);
+ if (ret < 0) {
+ dev_err(dev->dev, "failed to register the mdio");
+ goto out_pirq;
+ }
+
/* start switch */
regmap_update_bits(dev->regmap[0], regs[S_START_CTRL],
SW_START, SW_START);
return 0;
+
+out_pirq:
+ if (dev->irq > 0)
+ dsa_switch_for_each_user_port(dp, dev->ds)
+ ksz_irq_free(&dev->ports[dp->index].pirq);
+out_girq:
+ if (dev->irq > 0)
+ ksz_irq_free(&dev->girq);
+
+ return ret;
+}
+
+static void ksz_teardown(struct dsa_switch *ds)
+{
+ struct ksz_device *dev = ds->priv;
+ struct dsa_port *dp;
+
+ if (dev->irq > 0) {
+ dsa_switch_for_each_user_port(dp, dev->ds)
+ ksz_irq_free(&dev->ports[dp->index].pirq);
+
+ ksz_irq_free(&dev->girq);
+ }
+
+ if (dev->dev_ops->teardown)
+ dev->dev_ops->teardown(ds);
}
static void port_r_cnt(struct ksz_device *dev, int port)
@@ -1113,8 +2110,11 @@ static int ksz_phy_read16(struct dsa_switch *ds, int addr, int reg)
{
struct ksz_device *dev = ds->priv;
u16 val = 0xffff;
+ int ret;
- dev->dev_ops->r_phy(dev, addr, reg, &val);
+ ret = dev->dev_ops->r_phy(dev, addr, reg, &val);
+ if (ret)
+ return ret;
return val;
}
@@ -1122,8 +2122,11 @@ static int ksz_phy_read16(struct dsa_switch *ds, int addr, int reg)
static int ksz_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val)
{
struct ksz_device *dev = ds->priv;
+ int ret;
- dev->dev_ops->w_phy(dev, addr, reg, val);
+ ret = dev->dev_ops->w_phy(dev, addr, reg, val);
+ if (ret)
+ return ret;
return 0;
}
@@ -1212,6 +2215,16 @@ static void ksz_port_fast_age(struct dsa_switch *ds, int port)
dev->dev_ops->flush_dyn_mac_table(dev, port);
}
+static int ksz_set_ageing_time(struct dsa_switch *ds, unsigned int msecs)
+{
+ struct ksz_device *dev = ds->priv;
+
+ if (!dev->dev_ops->set_ageing_time)
+ return -EOPNOTSUPP;
+
+ return dev->dev_ops->set_ageing_time(dev, msecs);
+}
+
static int ksz_port_fdb_add(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid,
struct dsa_db db)
@@ -1375,10 +2388,12 @@ static enum dsa_tag_protocol ksz_get_tag_protocol(struct dsa_switch *ds,
proto = DSA_TAG_PROTO_KSZ8795;
if (dev->chip_id == KSZ8830_CHIP_ID ||
+ dev->chip_id == KSZ8563_CHIP_ID ||
dev->chip_id == KSZ9893_CHIP_ID)
proto = DSA_TAG_PROTO_KSZ9893;
if (dev->chip_id == KSZ9477_CHIP_ID ||
+ dev->chip_id == KSZ9896_CHIP_ID ||
dev->chip_id == KSZ9897_CHIP_ID ||
dev->chip_id == KSZ9567_CHIP_ID)
proto = DSA_TAG_PROTO_KSZ9477;
@@ -1493,7 +2508,8 @@ static void ksz_set_xmii(struct ksz_device *dev, int port,
case PHY_INTERFACE_MODE_RGMII_RXID:
data8 |= bitval[P_RGMII_SEL];
/* On KSZ9893, disable RGMII in-band status support */
- if (dev->features & IS_9893)
+ if (dev->chip_id == KSZ9893_CHIP_ID ||
+ dev->chip_id == KSZ8563_CHIP_ID)
data8 &= ~P_MII_MAC_MODE;
break;
default:
@@ -1703,7 +2719,7 @@ static void ksz_phylink_mac_link_up(struct dsa_switch *ds, int port,
static int ksz_switch_detect(struct ksz_device *dev)
{
- u8 id1, id2;
+ u8 id1, id2, id4;
u16 id16;
u32 id32;
int ret;
@@ -1748,8 +2764,8 @@ static int ksz_switch_detect(struct ksz_device *dev)
switch (id32) {
case KSZ9477_CHIP_ID:
+ case KSZ9896_CHIP_ID:
case KSZ9897_CHIP_ID:
- case KSZ9893_CHIP_ID:
case KSZ9567_CHIP_ID:
case LAN9370_CHIP_ID:
case LAN9371_CHIP_ID:
@@ -1757,6 +2773,18 @@ static int ksz_switch_detect(struct ksz_device *dev)
case LAN9373_CHIP_ID:
case LAN9374_CHIP_ID:
dev->chip_id = id32;
+ break;
+ case KSZ9893_CHIP_ID:
+ ret = ksz_read8(dev, REG_CHIP_ID4,
+ &id4);
+ if (ret)
+ return ret;
+
+ if (id4 == SKU_ID_KSZ8563)
+ dev->chip_id = KSZ8563_CHIP_ID;
+ else
+ dev->chip_id = KSZ9893_CHIP_ID;
+
break;
default:
dev_err(dev->dev,
@@ -1771,6 +2799,7 @@ static const struct dsa_switch_ops ksz_switch_ops = {
.get_tag_protocol = ksz_get_tag_protocol,
.get_phy_flags = ksz_get_phy_flags,
.setup = ksz_setup,
+ .teardown = ksz_teardown,
.phy_read = ksz_phy_read16,
.phy_write = ksz_phy_write16,
.phylink_get_caps = ksz_phylink_get_caps,
@@ -1778,6 +2807,7 @@ static const struct dsa_switch_ops ksz_switch_ops = {
.phylink_mac_link_up = ksz_phylink_mac_link_up,
.phylink_mac_link_down = ksz_mac_link_down,
.port_enable = ksz_enable_port,
+ .set_ageing_time = ksz_set_ageing_time,
.get_strings = ksz_get_strings,
.get_ethtool_stats = ksz_get_ethtool_stats,
.get_sset_count = ksz_sset_count,
@@ -1935,6 +2965,9 @@ int ksz_switch_register(struct ksz_device *dev)
GFP_KERNEL);
if (!dev->ports[i].mib.counters)
return -ENOMEM;
+
+ dev->ports[i].ksz_dev = dev;
+ dev->ports[i].num = i;
}
/* set the real number of ports */
diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h
index 0d9520dc6d2d..9cfa179575ce 100644
--- a/drivers/net/dsa/microchip/ksz_common.h
+++ b/drivers/net/dsa/microchip/ksz_common.h
@@ -13,9 +13,12 @@
#include
#include
#include
+#include
#define KSZ_MAX_NUM_PORTS 8
+struct ksz_device;
+
struct vlan_table {
u32 table[3];
};
@@ -42,6 +45,7 @@ struct ksz_chip_data {
int num_statics;
int cpu_ports;
int port_cnt;
+ u8 port_nirqs;
const struct ksz_dev_ops *ops;
bool phy_errata_9477;
bool ksz87xx_eee_link_erratum;
@@ -61,6 +65,20 @@ struct ksz_chip_data {
bool supports_rmii[KSZ_MAX_NUM_PORTS];
bool supports_rgmii[KSZ_MAX_NUM_PORTS];
bool internal_phy[KSZ_MAX_NUM_PORTS];
+ bool gbit_capable[KSZ_MAX_NUM_PORTS];
+ const struct regmap_access_table *wr_table;
+ const struct regmap_access_table *rd_table;
+};
+
+struct ksz_irq {
+ u16 masked;
+ u16 reg_mask;
+ u16 reg_status;
+ struct irq_domain *domain;
+ int nirqs;
+ int irq_num;
+ char name[16];
+ struct ksz_device *dev;
};
struct ksz_port {
@@ -70,9 +88,7 @@ struct ksz_port {
struct phy_device phydev;
u32 on:1; /* port is not disabled by hardware */
- u32 phy:1; /* port has a PHY */
u32 fiber:1; /* port is fiber */
- u32 sgmii:1; /* port is SGMII */
u32 force:1;
u32 read:1; /* read MIB counters in background */
u32 freeze:1; /* MIB counter freeze is enabled */
@@ -82,6 +98,9 @@ struct ksz_port {
u16 max_frame;
u32 rgmii_tx_val;
u32 rgmii_rx_val;
+ struct ksz_device *ksz_dev;
+ struct ksz_irq pirq;
+ u8 num;
};
struct ksz_device {
@@ -99,6 +118,7 @@ struct ksz_device {
struct regmap *regmap[3];
void *priv;
+ int irq;
struct gpio_desc *reset_gpio; /* Optional reset GPIO */
@@ -118,17 +138,20 @@ struct ksz_device {
unsigned long mib_read_interval;
u16 mirror_rx;
u16 mirror_tx;
- u32 features; /* chip specific features */
u16 port_mask;
+ struct mutex lock_irq; /* IRQ Access */
+ struct ksz_irq girq;
};
/* List of supported models */
enum ksz_model {
+ KSZ8563,
KSZ8795,
KSZ8794,
KSZ8765,
KSZ8830,
KSZ9477,
+ KSZ9896,
KSZ9897,
KSZ9893,
KSZ9567,
@@ -140,11 +163,13 @@ enum ksz_model {
};
enum ksz_chip_id {
+ KSZ8563_CHIP_ID = 0x8563,
KSZ8795_CHIP_ID = 0x8795,
KSZ8794_CHIP_ID = 0x8794,
KSZ8765_CHIP_ID = 0x8765,
KSZ8830_CHIP_ID = 0x8830,
KSZ9477_CHIP_ID = 0x00947700,
+ KSZ9896_CHIP_ID = 0x00989600,
KSZ9897_CHIP_ID = 0x00989700,
KSZ9893_CHIP_ID = 0x00989300,
KSZ9567_CHIP_ID = 0x00956700,
@@ -254,13 +279,15 @@ struct alu_struct {
struct ksz_dev_ops {
int (*setup)(struct dsa_switch *ds);
+ void (*teardown)(struct dsa_switch *ds);
u32 (*get_port_addr)(int port, int offset);
void (*cfg_port_member)(struct ksz_device *dev, int port, u8 member);
void (*flush_dyn_mac_table)(struct ksz_device *dev, int port);
void (*port_cleanup)(struct ksz_device *dev, int port);
void (*port_setup)(struct ksz_device *dev, int port, bool cpu_port);
- void (*r_phy)(struct ksz_device *dev, u16 phy, u16 reg, u16 *val);
- void (*w_phy)(struct ksz_device *dev, u16 phy, u16 reg, u16 val);
+ int (*set_ageing_time)(struct ksz_device *dev, unsigned int msecs);
+ int (*r_phy)(struct ksz_device *dev, u16 phy, u16 reg, u16 *val);
+ int (*w_phy)(struct ksz_device *dev, u16 phy, u16 reg, u16 val);
void (*r_mib_cnt)(struct ksz_device *dev, int port, u16 addr,
u64 *cnt);
void (*r_mib_pkt)(struct ksz_device *dev, int port, u16 addr,
@@ -330,6 +357,10 @@ static inline int ksz_read8(struct ksz_device *dev, u32 reg, u8 *val)
unsigned int value;
int ret = regmap_read(dev->regmap[0], reg, &value);
+ if (ret)
+ dev_err(dev->dev, "can't read 8bit reg: 0x%x %pe\n", reg,
+ ERR_PTR(ret));
+
*val = value;
return ret;
}
@@ -339,6 +370,10 @@ static inline int ksz_read16(struct ksz_device *dev, u32 reg, u16 *val)
unsigned int value;
int ret = regmap_read(dev->regmap[1], reg, &value);
+ if (ret)
+ dev_err(dev->dev, "can't read 16bit reg: 0x%x %pe\n", reg,
+ ERR_PTR(ret));
+
*val = value;
return ret;
}
@@ -348,6 +383,10 @@ static inline int ksz_read32(struct ksz_device *dev, u32 reg, u32 *val)
unsigned int value;
int ret = regmap_read(dev->regmap[2], reg, &value);
+ if (ret)
+ dev_err(dev->dev, "can't read 32bit reg: 0x%x %pe\n", reg,
+ ERR_PTR(ret));
+
*val = value;
return ret;
}
@@ -358,7 +397,10 @@ static inline int ksz_read64(struct ksz_device *dev, u32 reg, u64 *val)
int ret;
ret = regmap_bulk_read(dev->regmap[2], reg, value, 2);
- if (!ret)
+ if (ret)
+ dev_err(dev->dev, "can't read 64bit reg: 0x%x %pe\n", reg,
+ ERR_PTR(ret));
+ else
*val = (u64)value[0] << 32 | value[1];
return ret;
@@ -366,17 +408,38 @@ static inline int ksz_read64(struct ksz_device *dev, u32 reg, u64 *val)
static inline int ksz_write8(struct ksz_device *dev, u32 reg, u8 value)
{
- return regmap_write(dev->regmap[0], reg, value);
+ int ret;
+
+ ret = regmap_write(dev->regmap[0], reg, value);
+ if (ret)
+ dev_err(dev->dev, "can't write 8bit reg: 0x%x %pe\n", reg,
+ ERR_PTR(ret));
+
+ return ret;
}
static inline int ksz_write16(struct ksz_device *dev, u32 reg, u16 value)
{
- return regmap_write(dev->regmap[1], reg, value);
+ int ret;
+
+ ret = regmap_write(dev->regmap[1], reg, value);
+ if (ret)
+ dev_err(dev->dev, "can't write 16bit reg: 0x%x %pe\n", reg,
+ ERR_PTR(ret));
+
+ return ret;
}
static inline int ksz_write32(struct ksz_device *dev, u32 reg, u32 value)
{
- return regmap_write(dev->regmap[2], reg, value);
+ int ret;
+
+ ret = regmap_write(dev->regmap[2], reg, value);
+ if (ret)
+ dev_err(dev->dev, "can't write 32bit reg: 0x%x %pe\n", reg,
+ ERR_PTR(ret));
+
+ return ret;
}
static inline int ksz_write64(struct ksz_device *dev, u32 reg, u64 value)
@@ -391,40 +454,42 @@ static inline int ksz_write64(struct ksz_device *dev, u32 reg, u64 value)
return regmap_bulk_write(dev->regmap[2], reg, val, 2);
}
-static inline void ksz_pread8(struct ksz_device *dev, int port, int offset,
- u8 *data)
+static inline int ksz_pread8(struct ksz_device *dev, int port, int offset,
+ u8 *data)
{
- ksz_read8(dev, dev->dev_ops->get_port_addr(port, offset), data);
+ return ksz_read8(dev, dev->dev_ops->get_port_addr(port, offset), data);
}
-static inline void ksz_pread16(struct ksz_device *dev, int port, int offset,
- u16 *data)
+static inline int ksz_pread16(struct ksz_device *dev, int port, int offset,
+ u16 *data)
{
- ksz_read16(dev, dev->dev_ops->get_port_addr(port, offset), data);
+ return ksz_read16(dev, dev->dev_ops->get_port_addr(port, offset), data);
}
-static inline void ksz_pread32(struct ksz_device *dev, int port, int offset,
- u32 *data)
+static inline int ksz_pread32(struct ksz_device *dev, int port, int offset,
+ u32 *data)
{
- ksz_read32(dev, dev->dev_ops->get_port_addr(port, offset), data);
+ return ksz_read32(dev, dev->dev_ops->get_port_addr(port, offset), data);
}
-static inline void ksz_pwrite8(struct ksz_device *dev, int port, int offset,
- u8 data)
+static inline int ksz_pwrite8(struct ksz_device *dev, int port, int offset,
+ u8 data)
{
- ksz_write8(dev, dev->dev_ops->get_port_addr(port, offset), data);
+ return ksz_write8(dev, dev->dev_ops->get_port_addr(port, offset), data);
}
-static inline void ksz_pwrite16(struct ksz_device *dev, int port, int offset,
- u16 data)
+static inline int ksz_pwrite16(struct ksz_device *dev, int port, int offset,
+ u16 data)
{
- ksz_write16(dev, dev->dev_ops->get_port_addr(port, offset), data);
+ return ksz_write16(dev, dev->dev_ops->get_port_addr(port, offset),
+ data);
}
-static inline void ksz_pwrite32(struct ksz_device *dev, int port, int offset,
- u32 data)
+static inline int ksz_pwrite32(struct ksz_device *dev, int port, int offset,
+ u32 data)
{
- ksz_write32(dev, dev->dev_ops->get_port_addr(port, offset), data);
+ return ksz_write32(dev, dev->dev_ops->get_port_addr(port, offset),
+ data);
}
static inline void ksz_prmw8(struct ksz_device *dev, int port, int offset,
@@ -483,6 +548,10 @@ static inline int is_lan937x(struct ksz_device *dev)
#define SW_REV_ID_M GENMASK(7, 4)
+/* KSZ9893, KSZ9563, KSZ8563 specific register */
+#define REG_CHIP_ID4 0x0f
+#define SKU_ID_KSZ8563 0x3c
+
/* Driver set switch broadcast storm protection at 10% rate. */
#define BROADCAST_STORM_PROT_RATE 10
@@ -497,10 +566,6 @@ static inline int is_lan937x(struct ksz_device *dev)
#define SW_START 0x01
-/* Used with variable features to indicate capabilities. */
-#define GBIT_SUPPORT BIT(0)
-#define IS_9893 BIT(2)
-
/* xMII configuration */
#define P_MII_DUPLEX_M BIT(6)
#define P_MII_100MBIT_M BIT(4)
@@ -511,6 +576,15 @@ static inline int is_lan937x(struct ksz_device *dev)
#define P_MII_MAC_MODE BIT(2)
#define P_MII_SEL_M 0x3
+/* Interrupt */
+#define REG_SW_PORT_INT_STATUS__1 0x001B
+#define REG_SW_PORT_INT_MASK__1 0x001F
+
+#define REG_PORT_INT_STATUS 0x001B
+#define REG_PORT_INT_MASK 0x001F
+
+#define PORT_SRC_PHY_INT 1
+
/* Regmap tables generation */
#define KSZ_SPI_OP_RD 3
#define KSZ_SPI_OP_WR 2
diff --git a/drivers/net/dsa/microchip/ksz_spi.c b/drivers/net/dsa/microchip/ksz_spi.c
index 05bd089795f8..1b6ab891b986 100644
--- a/drivers/net/dsa/microchip/ksz_spi.c
+++ b/drivers/net/dsa/microchip/ksz_spi.c
@@ -66,7 +66,10 @@ static int ksz_spi_probe(struct spi_device *spi)
for (i = 0; i < ARRAY_SIZE(ksz8795_regmap_config); i++) {
rc = regmap_config[i];
rc.lock_arg = &dev->regmap_mutex;
+ rc.wr_table = chip->wr_table;
+ rc.rd_table = chip->rd_table;
dev->regmap[i] = devm_regmap_init_spi(spi, &rc);
+
if (IS_ERR(dev->regmap[i])) {
ret = PTR_ERR(dev->regmap[i]);
dev_err(&spi->dev,
@@ -85,6 +88,8 @@ static int ksz_spi_probe(struct spi_device *spi)
if (ret)
return ret;
+ dev->irq = spi->irq;
+
ret = ksz_switch_register(dev);
/* Main DSA driver may not be started yet. */
@@ -102,8 +107,6 @@ static void ksz_spi_remove(struct spi_device *spi)
if (dev)
ksz_switch_remove(dev);
-
- spi_set_drvdata(spi, NULL);
}
static void ksz_spi_shutdown(struct spi_device *spi)
@@ -146,6 +149,10 @@ static const struct of_device_id ksz_dt_ids[] = {
.compatible = "microchip,ksz9477",
.data = &ksz_switch_chips[KSZ9477]
},
+ {
+ .compatible = "microchip,ksz9896",
+ .data = &ksz_switch_chips[KSZ9896]
+ },
{
.compatible = "microchip,ksz9897",
.data = &ksz_switch_chips[KSZ9897]
@@ -160,7 +167,7 @@ static const struct of_device_id ksz_dt_ids[] = {
},
{
.compatible = "microchip,ksz8563",
- .data = &ksz_switch_chips[KSZ9893]
+ .data = &ksz_switch_chips[KSZ8563]
},
{
.compatible = "microchip,ksz9567",
@@ -197,6 +204,7 @@ static const struct spi_device_id ksz_spi_ids[] = {
{ "ksz8863" },
{ "ksz8873" },
{ "ksz9477" },
+ { "ksz9896" },
{ "ksz9897" },
{ "ksz9893" },
{ "ksz9563" },
@@ -226,6 +234,7 @@ static struct spi_driver ksz_spi_driver = {
module_spi_driver(ksz_spi_driver);
MODULE_ALIAS("spi:ksz9477");
+MODULE_ALIAS("spi:ksz9896");
MODULE_ALIAS("spi:ksz9897");
MODULE_ALIAS("spi:ksz9893");
MODULE_ALIAS("spi:ksz9563");
diff --git a/drivers/net/dsa/microchip/lan937x.h b/drivers/net/dsa/microchip/lan937x.h
index 4e0b1dccec27..8e9e66d6728d 100644
--- a/drivers/net/dsa/microchip/lan937x.h
+++ b/drivers/net/dsa/microchip/lan937x.h
@@ -8,14 +8,16 @@
int lan937x_reset_switch(struct ksz_device *dev);
int lan937x_setup(struct dsa_switch *ds);
+void lan937x_teardown(struct dsa_switch *ds);
void lan937x_port_setup(struct ksz_device *dev, int port, bool cpu_port);
void lan937x_config_cpu_port(struct dsa_switch *ds);
int lan937x_switch_init(struct ksz_device *dev);
void lan937x_switch_exit(struct ksz_device *dev);
-void lan937x_r_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 *data);
-void lan937x_w_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 val);
+int lan937x_r_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 *data);
+int lan937x_w_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 val);
int lan937x_change_mtu(struct ksz_device *dev, int port, int new_mtu);
void lan937x_phylink_get_caps(struct ksz_device *dev, int port,
struct phylink_config *config);
void lan937x_setup_rgmii_delay(struct ksz_device *dev, int port);
+int lan937x_set_ageing_time(struct ksz_device *dev, unsigned int msecs);
#endif
diff --git a/drivers/net/dsa/microchip/lan937x_main.c b/drivers/net/dsa/microchip/lan937x_main.c
index 5579644e8fde..7e4f307a0387 100644
--- a/drivers/net/dsa/microchip/lan937x_main.c
+++ b/drivers/net/dsa/microchip/lan937x_main.c
@@ -7,7 +7,6 @@
#include
#include
#include
-#include <linux/of_mdio.h>
#include
#include
#include
@@ -128,81 +127,14 @@ static int lan937x_internal_phy_read(struct ksz_device *dev, int addr, int reg,
return ksz_read16(dev, REG_VPHY_IND_DATA__2, val);
}
-void lan937x_r_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 *data)
+int lan937x_r_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 *data)
{
- lan937x_internal_phy_read(dev, addr, reg, data);
+ return lan937x_internal_phy_read(dev, addr, reg, data);
}
-void lan937x_w_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 val)
+int lan937x_w_phy(struct ksz_device *dev, u16 addr, u16 reg, u16 val)
{
- lan937x_internal_phy_write(dev, addr, reg, val);
-}
-
-static int lan937x_sw_mdio_read(struct mii_bus *bus, int addr, int regnum)
-{
- struct ksz_device *dev = bus->priv;
- u16 val;
- int ret;
-
- if (regnum & MII_ADDR_C45)
- return -EOPNOTSUPP;
-
- ret = lan937x_internal_phy_read(dev, addr, regnum, &val);
- if (ret < 0)
- return ret;
-
- return val;
-}
-
-static int lan937x_sw_mdio_write(struct mii_bus *bus, int addr, int regnum,
- u16 val)
-{
- struct ksz_device *dev = bus->priv;
-
- if (regnum & MII_ADDR_C45)
- return -EOPNOTSUPP;
-
- return lan937x_internal_phy_write(dev, addr, regnum, val);
-}
-
-static int lan937x_mdio_register(struct ksz_device *dev)
-{
- struct dsa_switch *ds = dev->ds;
- struct device_node *mdio_np;
- struct mii_bus *bus;
- int ret;
-
- mdio_np = of_get_child_by_name(dev->dev->of_node, "mdio");
- if (!mdio_np) {
- dev_err(ds->dev, "no MDIO bus node\n");
- return -ENODEV;
- }
-
- bus = devm_mdiobus_alloc(ds->dev);
- if (!bus) {
- of_node_put(mdio_np);
- return -ENOMEM;
- }
-
- bus->priv = dev;
- bus->read = lan937x_sw_mdio_read;
- bus->write = lan937x_sw_mdio_write;
- bus->name = "lan937x slave smi";
- snprintf(bus->id, MII_BUS_ID_SIZE, "SMI-%d", ds->index);
- bus->parent = ds->dev;
- bus->phy_mask = ~ds->phys_mii_mask;
-
- ds->slave_mii_bus = bus;
-
- ret = devm_of_mdiobus_register(ds->dev, bus, mdio_np);
- if (ret) {
- dev_err(ds->dev, "unable to register MDIO bus %s\n",
- bus->id);
- }
-
- of_node_put(mdio_np);
-
- return ret;
+ return lan937x_internal_phy_write(dev, addr, reg, val);
}
int lan937x_reset_switch(struct ksz_device *dev)
@@ -225,6 +157,10 @@ int lan937x_reset_switch(struct ksz_device *dev)
if (ret < 0)
return ret;
+ ret = ksz_write32(dev, REG_SW_INT_STATUS__4, POR_READY_INT);
+ if (ret < 0)
+ return ret;
+
ret = ksz_write32(dev, REG_SW_PORT_INT_MASK__4, 0xFF);
if (ret < 0)
return ret;
@@ -311,6 +247,23 @@ int lan937x_change_mtu(struct ksz_device *dev, int port, int new_mtu)
return 0;
}
+int lan937x_set_ageing_time(struct ksz_device *dev, unsigned int msecs)
+{
+ u32 secs = msecs / 1000;
+ u32 value;
+ int ret;
+
+ value = FIELD_GET(SW_AGE_PERIOD_7_0_M, secs);
+
+ ret = ksz_write8(dev, REG_SW_AGE_PERIOD__1, value);
+ if (ret < 0)
+ return ret;
+
+ value = FIELD_GET(SW_AGE_PERIOD_19_8_M, secs);
+
+ return ksz_write16(dev, REG_SW_AGE_PERIOD__2, value);
+}
+
static void lan937x_set_tune_adj(struct ksz_device *dev, int port,
u16 reg, u8 val)
{
@@ -379,6 +332,13 @@ void lan937x_setup_rgmii_delay(struct ksz_device *dev, int port)
}
}
+int lan937x_switch_init(struct ksz_device *dev)
+{
+ dev->port_mask = (1 << dev->info->port_cnt) - 1;
+
+ return 0;
+}
+
int lan937x_setup(struct dsa_switch *ds)
{
struct ksz_device *dev = ds->priv;
@@ -391,12 +351,6 @@ int lan937x_setup(struct dsa_switch *ds)
return ret;
}
- ret = lan937x_mdio_register(dev);
- if (ret < 0) {
- dev_err(dev->dev, "failed to register the mdio");
- return ret;
- }
-
/* The VLAN aware is a global setting. Mixed vlan
* filterings are not supported.
*/
@@ -422,11 +376,9 @@ int lan937x_setup(struct dsa_switch *ds)
return 0;
}
-int lan937x_switch_init(struct ksz_device *dev)
+void lan937x_teardown(struct dsa_switch *ds)
{
- dev->port_mask = (1 << dev->info->port_cnt) - 1;
- return 0;
}
void lan937x_switch_exit(struct ksz_device *dev)
diff --git a/drivers/net/dsa/microchip/lan937x_reg.h b/drivers/net/dsa/microchip/lan937x_reg.h
index ba4adaddb3ec..5bc16a4c4441 100644
--- a/drivers/net/dsa/microchip/lan937x_reg.h
+++ b/drivers/net/dsa/microchip/lan937x_reg.h
@@ -62,6 +62,12 @@
#define SW_FAST_AGING BIT(1)
#define SW_LINK_AUTO_AGING BIT(0)
+#define REG_SW_AGE_PERIOD__1 0x0313
+#define SW_AGE_PERIOD_7_0_M GENMASK(7, 0)
+
+#define REG_SW_AGE_PERIOD__2 0x0320
+#define SW_AGE_PERIOD_19_8_M GENMASK(19, 8)
+
#define REG_SW_MAC_CTRL_0 0x0330
#define SW_NEW_BACKOFF BIT(7)
#define SW_PAUSE_UNH_MODE BIT(1)
@@ -118,6 +124,18 @@
/* Port Registers */
/* 0 - Operation */
+#define REG_PORT_INT_STATUS 0x001B
+#define REG_PORT_INT_MASK 0x001F
+
+#define PORT_TAS_INT BIT(5)
+#define PORT_QCI_INT BIT(4)
+#define PORT_SGMII_INT BIT(3)
+#define PORT_PTP_INT BIT(2)
+#define PORT_PHY_INT BIT(1)
+#define PORT_ACL_INT BIT(0)
+
+#define PORT_SRC_PHY_INT 1
+
#define REG_PORT_CTRL_0 0x0020
#define PORT_MAC_LOOPBACK BIT(7)
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 409d5c3d76ea..e74c6b406172 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -2708,9 +2708,6 @@ mt7531_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
case PHY_INTERFACE_MODE_NA:
case PHY_INTERFACE_MODE_1000BASEX:
case PHY_INTERFACE_MODE_2500BASEX:
- if (phylink_autoneg_inband(mode))
- return -EINVAL;
-
return mt7531_sgmii_setup_mode_force(priv, port, interface);
default:
return -EINVAL;
@@ -2786,13 +2783,6 @@ unsupported:
return;
}
- if (phylink_autoneg_inband(mode) &&
- state->interface != PHY_INTERFACE_MODE_SGMII) {
- dev_err(ds->dev, "%s: in-band negotiation unsupported\n",
- __func__);
- return;
- }
-
mcr_cur = mt7530_read(priv, MT7530_PMCR_P(port));
mcr_new = mcr_cur;
mcr_new &= ~PMCR_LINK_SETTINGS_MASK;
@@ -2929,6 +2919,9 @@ static void mt753x_phylink_get_caps(struct dsa_switch *ds, int port,
config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
MAC_10 | MAC_100 | MAC_1000FD;
+ if ((priv->id == ID_MT7531) && mt753x_is_mac_port(port))
+ config->mac_capabilities |= MAC_2500FD;
+
/* This driver does not make use of the speed, duplex, pause or the
* advertisement in its mac_config, so it is safe to mark this driver
* as non-legacy.
@@ -2994,6 +2987,7 @@ mt7531_sgmii_pcs_get_state_an(struct mt7530_priv *priv, int port,
status = mt7530_read(priv, MT7531_PCS_CONTROL_1(port));
state->link = !!(status & MT7531_SGMII_LINK_STATUS);
+ state->an_complete = !!(status & MT7531_SGMII_AN_COMPLETE);
if (state->interface == PHY_INTERFACE_MODE_SGMII &&
(status & MT7531_SGMII_AN_ENABLE)) {
val = mt7530_read(priv, MT7531_PCS_SPEED_ABILITY(port));
@@ -3024,16 +3018,44 @@ mt7531_sgmii_pcs_get_state_an(struct mt7530_priv *priv, int port,
return 0;
}
+static void
+mt7531_sgmii_pcs_get_state_inband(struct mt7530_priv *priv, int port,
+ struct phylink_link_state *state)
+{
+ unsigned int val;
+
+ val = mt7530_read(priv, MT7531_PCS_CONTROL_1(port));
+ state->link = !!(val & MT7531_SGMII_LINK_STATUS);
+ if (!state->link)
+ return;
+
+ state->an_complete = state->link;
+
+ if (state->interface == PHY_INTERFACE_MODE_2500BASEX)
+ state->speed = SPEED_2500;
+ else
+ state->speed = SPEED_1000;
+
+ state->duplex = DUPLEX_FULL;
+ state->pause = MLO_PAUSE_NONE;
+}
+
static void mt7531_pcs_get_state(struct phylink_pcs *pcs,
struct phylink_link_state *state)
{
struct mt7530_priv *priv = pcs_to_mt753x_pcs(pcs)->priv;
int port = pcs_to_mt753x_pcs(pcs)->port;
- if (state->interface == PHY_INTERFACE_MODE_SGMII)
+ if (state->interface == PHY_INTERFACE_MODE_SGMII) {
mt7531_sgmii_pcs_get_state_an(priv, port, state);
- else
- state->link = false;
+ return;
+ } else if ((state->interface == PHY_INTERFACE_MODE_1000BASEX) ||
+ (state->interface == PHY_INTERFACE_MODE_2500BASEX)) {
+ mt7531_sgmii_pcs_get_state_inband(priv, port, state);
+ return;
+ }
+
+ state->link = false;
}
static int mt753x_pcs_config(struct phylink_pcs *pcs, unsigned int mode,
@@ -3074,6 +3096,8 @@ mt753x_setup(struct dsa_switch *ds)
priv->pcs[i].pcs.ops = priv->info->pcs_ops;
priv->pcs[i].priv = priv;
priv->pcs[i].port = i;
+ if (mt753x_is_mac_port(i))
+ priv->pcs[i].pcs.poll = 1;
}
ret = priv->info->sw_setup(ds);
@@ -3307,8 +3331,6 @@ mt7530_remove(struct mdio_device *mdiodev)
dsa_unregister_switch(priv->ds);
mutex_destroy(&priv->reg_mutex);
-
- dev_set_drvdata(&mdiodev->dev, NULL);
}
static void mt7530_shutdown(struct mdio_device *mdiodev)
diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h
index e509af95c354..e8d966435350 100644
--- a/drivers/net/dsa/mt7530.h
+++ b/drivers/net/dsa/mt7530.h
@@ -373,6 +373,7 @@ enum mt7530_vlan_port_acc_frm {
#define MT7531_SGMII_LINK_STATUS BIT(18)
#define MT7531_SGMII_AN_ENABLE BIT(12)
#define MT7531_SGMII_AN_RESTART BIT(9)
+#define MT7531_SGMII_AN_COMPLETE BIT(21)
/* Register for SGMII PCS_SPPED_ABILITY */
#define MT7531_PCS_SPEED_ABILITY(p) MT7531_SGMII_REG(p, 0x08)
diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c
index 83dca9179aa0..fdda62d6eb16 100644
--- a/drivers/net/dsa/mv88e6060.c
+++ b/drivers/net/dsa/mv88e6060.c
@@ -297,8 +297,6 @@ static void mv88e6060_remove(struct mdio_device *mdiodev)
return;
dsa_unregister_switch(ds);
-
- dev_set_drvdata(&mdiodev->dev, NULL);
}
static void mv88e6060_shutdown(struct mdio_device *mdiodev)
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 07e9a4da924c..2479be3a1e35 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -816,6 +816,14 @@ static void mv88e6393x_phylink_get_caps(struct mv88e6xxx_chip *chip, int port,
MAC_10000FD;
}
}
+
+ if (port == 0) {
+ __set_bit(PHY_INTERFACE_MODE_RMII, supported);
+ __set_bit(PHY_INTERFACE_MODE_RGMII, supported);
+ __set_bit(PHY_INTERFACE_MODE_RGMII_ID, supported);
+ __set_bit(PHY_INTERFACE_MODE_RGMII_RXID, supported);
+ __set_bit(PHY_INTERFACE_MODE_RGMII_TXID, supported);
+ }
}
static void mv88e6xxx_get_caps(struct dsa_switch *ds, int port,
@@ -1128,7 +1136,7 @@ static void mv88e6xxx_atu_vtu_get_strings(uint8_t *data)
unsigned int i;
for (i = 0; i < ARRAY_SIZE(mv88e6xxx_atu_vtu_stats_strings); i++)
- strlcpy(data + i * ETH_GSTRING_LEN,
+ strscpy(data + i * ETH_GSTRING_LEN,
mv88e6xxx_atu_vtu_stats_strings[i],
ETH_GSTRING_LEN);
}
@@ -6585,14 +6593,17 @@ out:
static bool mv88e6xxx_lag_can_offload(struct dsa_switch *ds,
struct dsa_lag lag,
- struct netdev_lag_upper_info *info)
+ struct netdev_lag_upper_info *info,
+ struct netlink_ext_ack *extack)
{
struct mv88e6xxx_chip *chip = ds->priv;
struct dsa_port *dp;
int members = 0;
- if (!mv88e6xxx_has_lag(chip))
+ if (!mv88e6xxx_has_lag(chip)) {
+ NL_SET_ERR_MSG_MOD(extack, "Chip does not support LAG offload");
return false;
+ }
if (!lag.id)
return false;
@@ -6601,14 +6612,20 @@ static bool mv88e6xxx_lag_can_offload(struct dsa_switch *ds,
/* Includes the port joining the LAG */
members++;
- if (members > 8)
+ if (members > 8) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload more than 8 LAG ports");
return false;
+ }
/* We could potentially relax this to include active
* backup in the future.
*/
- if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
+ if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can only offload LAG using hash TX type");
return false;
+ }
/* Ideally we would also validate that the hash type matches
* the hardware. Alas, this is always set to unknown on team
@@ -6761,12 +6778,13 @@ static int mv88e6xxx_port_lag_change(struct dsa_switch *ds, int port)
static int mv88e6xxx_port_lag_join(struct dsa_switch *ds, int port,
struct dsa_lag lag,
- struct netdev_lag_upper_info *info)
+ struct netdev_lag_upper_info *info,
+ struct netlink_ext_ack *extack)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err, id;
- if (!mv88e6xxx_lag_can_offload(ds, lag, info))
+ if (!mv88e6xxx_lag_can_offload(ds, lag, info, extack))
return -EOPNOTSUPP;
/* DSA LAG IDs are one-based */
@@ -6819,12 +6837,13 @@ static int mv88e6xxx_crosschip_lag_change(struct dsa_switch *ds, int sw_index,
static int mv88e6xxx_crosschip_lag_join(struct dsa_switch *ds, int sw_index,
int port, struct dsa_lag lag,
- struct netdev_lag_upper_info *info)
+ struct netdev_lag_upper_info *info,
+ struct netlink_ext_ack *extack)
{
struct mv88e6xxx_chip *chip = ds->priv;
int err;
- if (!mv88e6xxx_lag_can_offload(ds, lag, info))
+ if (!mv88e6xxx_lag_can_offload(ds, lag, info, extack))
return -EOPNOTSUPP;
mv88e6xxx_reg_lock(chip);
@@ -7166,8 +7185,6 @@ static void mv88e6xxx_remove(struct mdio_device *mdiodev)
mv88e6xxx_g1_irq_free(chip);
else
mv88e6xxx_irq_poll_free(chip);
-
- dev_set_drvdata(&mdiodev->dev, NULL);
}
static void mv88e6xxx_shutdown(struct mdio_device *mdiodev)
diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h
index 807aeaad9830..7536b8b0ad01 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.h
+++ b/drivers/net/dsa/mv88e6xxx/global2.h
@@ -298,7 +298,7 @@
#define MV88E6352_G2_SCRATCH_CONFIG_DATA1 0x71
#define MV88E6352_G2_SCRATCH_CONFIG_DATA1_NO_CPU BIT(2)
#define MV88E6352_G2_SCRATCH_CONFIG_DATA2 0x72
-#define MV88E6352_G2_SCRATCH_CONFIG_DATA2_P0_MODE_MASK 0x3
+#define MV88E6352_G2_SCRATCH_CONFIG_DATA2_P0_MODE_MASK 0xf
#define MV88E6352_G2_SCRATCH_CONFIG_DATA3 0x73
#define MV88E6352_G2_SCRATCH_CONFIG_DATA3_S_SEL BIT(1)
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
index 90c55f23b7c9..5c4195c635b0 100644
--- a/drivers/net/dsa/mv88e6xxx/port.c
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -517,6 +517,12 @@ static int mv88e6xxx_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
case PHY_INTERFACE_MODE_RMII:
cmode = MV88E6XXX_PORT_STS_CMODE_RMII;
break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ cmode = MV88E6XXX_PORT_STS_CMODE_RGMII;
+ break;
case PHY_INTERFACE_MODE_1000BASEX:
cmode = MV88E6XXX_PORT_STS_CMODE_1000BASEX;
break;
@@ -634,6 +640,19 @@ int mv88e6393x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
if (port != 0 && port != 9 && port != 10)
return -EOPNOTSUPP;
+ if (port == 9 || port == 10) {
+ switch (mode) {
+ case PHY_INTERFACE_MODE_RMII:
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ return -EINVAL;
+ default:
+ break;
+ }
+ }
+
/* mv88e6393x errata 4.5: EEE should be disabled on SERDES ports */
err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_MAC_CTL, &reg);
if (err)
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index aadb0bd7c24f..dd3a18cc89dd 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -42,6 +42,25 @@ static struct net_device *felix_classify_db(struct dsa_db db)
}
}
+static int felix_cpu_port_for_master(struct dsa_switch *ds,
+ struct net_device *master)
+{
+ struct ocelot *ocelot = ds->priv;
+ struct dsa_port *cpu_dp;
+ int lag;
+
+ if (netif_is_lag_master(master)) {
+ mutex_lock(&ocelot->fwd_domain_lock);
+ lag = ocelot_bond_get_id(ocelot, master);
+ mutex_unlock(&ocelot->fwd_domain_lock);
+
+ return lag;
+ }
+
+ cpu_dp = master->dsa_ptr;
+ return cpu_dp->index;
+}
+
/* Set up VCAP ES0 rules for pushing a tag_8021q VLAN towards the CPU such that
* the tagger can perform RX source port identification.
*/
@@ -422,6 +441,40 @@ static unsigned long felix_tag_npi_get_host_fwd_mask(struct dsa_switch *ds)
return BIT(ocelot->num_phys_ports);
}
+static int felix_tag_npi_change_master(struct dsa_switch *ds, int port,
+ struct net_device *master,
+ struct netlink_ext_ack *extack)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port), *other_dp;
+ struct ocelot *ocelot = ds->priv;
+
+ if (netif_is_lag_master(master)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "LAG DSA master only supported using ocelot-8021q");
+ return -EOPNOTSUPP;
+ }
+
+ /* Changing the NPI port breaks user ports still assigned to the old
+ * one, so only allow it while they're down, and don't allow them to
+ * come back up until they're all changed to the new one.
+ */
+ dsa_switch_for_each_user_port(other_dp, ds) {
+ struct net_device *slave = other_dp->slave;
+
+ if (other_dp != dp && (slave->flags & IFF_UP) &&
+ dsa_port_to_master(other_dp) != master) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot change while old master still has users");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ felix_npi_port_deinit(ocelot, ocelot->npi);
+ felix_npi_port_init(ocelot, felix_cpu_port_for_master(ds, master));
+
+ return 0;
+}
+
/* Alternatively to using the NPI functionality, that same hardware MAC
* connected internally to the enetc or fman DSA master can be configured to
* use the software-defined tag_8021q frame format. As far as the hardware is
@@ -433,6 +486,7 @@ static const struct felix_tag_proto_ops felix_tag_npi_proto_ops = {
.setup = felix_tag_npi_setup,
.teardown = felix_tag_npi_teardown,
.get_host_fwd_mask = felix_tag_npi_get_host_fwd_mask,
+ .change_master = felix_tag_npi_change_master,
};
static int felix_tag_8021q_setup(struct dsa_switch *ds)
@@ -445,6 +499,9 @@ static int felix_tag_8021q_setup(struct dsa_switch *ds)
if (err)
return err;
+ dsa_switch_for_each_cpu_port(dp, ds)
+ ocelot_port_setup_dsa_8021q_cpu(ocelot, dp->index);
+
dsa_switch_for_each_user_port(dp, ds)
ocelot_port_assign_dsa_8021q_cpu(ocelot, dp->index,
dp->cpu_dp->index);
@@ -493,6 +550,9 @@ static void felix_tag_8021q_teardown(struct dsa_switch *ds)
dsa_switch_for_each_user_port(dp, ds)
ocelot_port_unassign_dsa_8021q_cpu(ocelot, dp->index);
+ dsa_switch_for_each_cpu_port(dp, ds)
+ ocelot_port_teardown_dsa_8021q_cpu(ocelot, dp->index);
+
dsa_tag_8021q_unregister(ds);
}
@@ -501,10 +561,24 @@ static unsigned long felix_tag_8021q_get_host_fwd_mask(struct dsa_switch *ds)
return dsa_cpu_ports(ds);
}
+static int felix_tag_8021q_change_master(struct dsa_switch *ds, int port,
+ struct net_device *master,
+ struct netlink_ext_ack *extack)
+{
+ int cpu = felix_cpu_port_for_master(ds, master);
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_unassign_dsa_8021q_cpu(ocelot, port);
+ ocelot_port_assign_dsa_8021q_cpu(ocelot, port, cpu);
+
+ return felix_update_trapping_destinations(ds, true);
+}
+
static const struct felix_tag_proto_ops felix_tag_8021q_proto_ops = {
.setup = felix_tag_8021q_setup,
.teardown = felix_tag_8021q_teardown,
.get_host_fwd_mask = felix_tag_8021q_get_host_fwd_mask,
+ .change_master = felix_tag_8021q_change_master,
};
static void felix_set_host_flood(struct dsa_switch *ds, unsigned long mask,
@@ -667,6 +741,16 @@ static void felix_port_set_host_flood(struct dsa_switch *ds, int port,
!!felix->host_flood_mc_mask, true);
}
+static int felix_port_change_master(struct dsa_switch *ds, int port,
+ struct net_device *master,
+ struct netlink_ext_ack *extack)
+{
+ struct ocelot *ocelot = ds->priv;
+ struct felix *felix = ocelot_to_felix(ocelot);
+
+ return felix->tag_proto_ops->change_master(ds, port, master, extack);
+}
+
static int felix_set_ageing_time(struct dsa_switch *ds,
unsigned int ageing_time)
{
@@ -855,11 +939,21 @@ static void felix_bridge_leave(struct dsa_switch *ds, int port,
static int felix_lag_join(struct dsa_switch *ds, int port,
struct dsa_lag lag,
- struct netdev_lag_upper_info *info)
+ struct netdev_lag_upper_info *info,
+ struct netlink_ext_ack *extack)
{
struct ocelot *ocelot = ds->priv;
+ int err;
- return ocelot_port_lag_join(ocelot, port, lag.dev, info);
+ err = ocelot_port_lag_join(ocelot, port, lag.dev, info, extack);
+ if (err)
+ return err;
+
+ /* Update the logical LAG port that serves as tag_8021q CPU port */
+ if (!dsa_is_cpu_port(ds, port))
+ return 0;
+
+ return felix_port_change_master(ds, port, lag.dev, extack);
}
static int felix_lag_leave(struct dsa_switch *ds, int port,
@@ -869,7 +963,11 @@ static int felix_lag_leave(struct dsa_switch *ds, int port,
ocelot_port_lag_leave(ocelot, port, lag.dev);
- return 0;
+ /* Update the logical LAG port that serves as tag_8021q CPU port */
+ if (!dsa_is_cpu_port(ds, port))
+ return 0;
+
+ return felix_port_change_master(ds, port, lag.dev, NULL);
}
static int felix_lag_change(struct dsa_switch *ds, int port)
@@ -1007,6 +1105,27 @@ static void felix_phylink_mac_link_up(struct dsa_switch *ds, int port,
felix->info->port_sched_speed_set(ocelot, port, speed);
}
+static int felix_port_enable(struct dsa_switch *ds, int port,
+ struct phy_device *phydev)
+{
+ struct dsa_port *dp = dsa_to_port(ds, port);
+ struct ocelot *ocelot = ds->priv;
+
+ if (!dsa_port_is_user(dp))
+ return 0;
+
+ if (ocelot->npi >= 0) {
+ struct net_device *master = dsa_port_to_master(dp);
+
+ if (felix_cpu_port_for_master(ds, master) != ocelot->npi) {
+ dev_err(ds->dev, "Multiple masters are not allowed\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
static void felix_port_qos_map_init(struct ocelot *ocelot, int port)
{
int i;
@@ -1028,6 +1147,55 @@ static void felix_port_qos_map_init(struct ocelot *ocelot, int port)
}
}
+static void felix_get_stats64(struct dsa_switch *ds, int port,
+ struct rtnl_link_stats64 *stats)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_get_stats64(ocelot, port, stats);
+}
+
+static void felix_get_pause_stats(struct dsa_switch *ds, int port,
+ struct ethtool_pause_stats *pause_stats)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_get_pause_stats(ocelot, port, pause_stats);
+}
+
+static void felix_get_rmon_stats(struct dsa_switch *ds, int port,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_get_rmon_stats(ocelot, port, rmon_stats, ranges);
+}
+
+static void felix_get_eth_ctrl_stats(struct dsa_switch *ds, int port,
+ struct ethtool_eth_ctrl_stats *ctrl_stats)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_get_eth_ctrl_stats(ocelot, port, ctrl_stats);
+}
+
+static void felix_get_eth_mac_stats(struct dsa_switch *ds, int port,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_get_eth_mac_stats(ocelot, port, mac_stats);
+}
+
+static void felix_get_eth_phy_stats(struct dsa_switch *ds, int port,
+ struct ethtool_eth_phy_stats *phy_stats)
+{
+ struct ocelot *ocelot = ds->priv;
+
+ ocelot_port_get_eth_phy_stats(ocelot, port, phy_stats);
+}
+
static void felix_get_strings(struct dsa_switch *ds, int port,
u32 stringset, u8 *data)
{
@@ -1144,11 +1312,55 @@ static int felix_parse_dt(struct felix *felix, phy_interface_t *port_phy_modes)
return err;
}
+static struct regmap *felix_request_regmap_by_name(struct felix *felix,
+ const char *resource_name)
+{
+ struct ocelot *ocelot = &felix->ocelot;
+ struct resource res;
+ int i;
+
+ for (i = 0; i < felix->info->num_resources; i++) {
+ if (strcmp(resource_name, felix->info->resources[i].name))
+ continue;
+
+ memcpy(&res, &felix->info->resources[i], sizeof(res));
+ res.start += felix->switch_base;
+ res.end += felix->switch_base;
+
+ return ocelot_regmap_init(ocelot, &res);
+ }
+
+ return ERR_PTR(-ENOENT);
+}
+
+static struct regmap *felix_request_regmap(struct felix *felix,
+ enum ocelot_target target)
+{
+ const char *resource_name = felix->info->resource_names[target];
+
+ /* If the driver didn't provide a resource name for the target,
+ * the resource is optional.
+ */
+ if (!resource_name)
+ return NULL;
+
+ return felix_request_regmap_by_name(felix, resource_name);
+}
+
+static struct regmap *felix_request_port_regmap(struct felix *felix, int port)
+{
+ char resource_name[32];
+
+ sprintf(resource_name, "port%d", port);
+
+ return felix_request_regmap_by_name(felix, resource_name);
+}
+
static int felix_init_structs(struct felix *felix, int num_phys_ports)
{
struct ocelot *ocelot = &felix->ocelot;
phy_interface_t *port_phy_modes;
- struct resource res;
+ struct regmap *target;
int port, i, err;
ocelot->num_phys_ports = num_phys_ports;
@@ -1182,20 +1394,11 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
}
for (i = 0; i < TARGET_MAX; i++) {
- struct regmap *target;
-
- if (!felix->info->target_io_res[i].name)
- continue;
-
- memcpy(&res, &felix->info->target_io_res[i], sizeof(res));
- res.flags = IORESOURCE_MEM;
- res.start += felix->switch_base;
- res.end += felix->switch_base;
-
- target = felix->info->init_regmap(ocelot, &res);
+ target = felix_request_regmap(felix, i);
if (IS_ERR(target)) {
dev_err(ocelot->dev,
- "Failed to map device memory space\n");
+ "Failed to map device memory space: %pe\n",
+ target);
kfree(port_phy_modes);
return PTR_ERR(target);
}
@@ -1212,7 +1415,6 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
for (port = 0; port < num_phys_ports; port++) {
struct ocelot_port *ocelot_port;
- struct regmap *target;
ocelot_port = devm_kzalloc(ocelot->dev,
sizeof(struct ocelot_port),
@@ -1224,16 +1426,11 @@ static int felix_init_structs(struct felix *felix, int num_phys_ports)
return -ENOMEM;
}
- memcpy(&res, &felix->info->port_io_res[port], sizeof(res));
- res.flags = IORESOURCE_MEM;
- res.start += felix->switch_base;
- res.end += felix->switch_base;
-
- target = felix->info->init_regmap(ocelot, &res);
+ target = felix_request_port_regmap(felix, port);
if (IS_ERR(target)) {
dev_err(ocelot->dev,
- "Failed to map memory space for port %d\n",
- port);
+ "Failed to map memory space for port %d: %pe\n",
+ port, target);
kfree(port_phy_modes);
return PTR_ERR(target);
}
@@ -1842,6 +2039,12 @@ const struct dsa_switch_ops felix_switch_ops = {
.setup = felix_setup,
.teardown = felix_teardown,
.set_ageing_time = felix_set_ageing_time,
+ .get_stats64 = felix_get_stats64,
+ .get_pause_stats = felix_get_pause_stats,
+ .get_rmon_stats = felix_get_rmon_stats,
+ .get_eth_ctrl_stats = felix_get_eth_ctrl_stats,
+ .get_eth_mac_stats = felix_get_eth_mac_stats,
+ .get_eth_phy_stats = felix_get_eth_phy_stats,
.get_strings = felix_get_strings,
.get_ethtool_stats = felix_get_ethtool_stats,
.get_sset_count = felix_get_sset_count,
@@ -1851,6 +2054,7 @@ const struct dsa_switch_ops felix_switch_ops = {
.phylink_mac_select_pcs = felix_phylink_mac_select_pcs,
.phylink_mac_link_down = felix_phylink_mac_link_down,
.phylink_mac_link_up = felix_phylink_mac_link_up,
+ .port_enable = felix_port_enable,
.port_fast_age = felix_port_fast_age,
.port_fdb_dump = felix_fdb_dump,
.port_fdb_add = felix_fdb_add,
@@ -1906,6 +2110,7 @@ const struct dsa_switch_ops felix_switch_ops = {
.port_add_dscp_prio = felix_port_add_dscp_prio,
.port_del_dscp_prio = felix_port_del_dscp_prio,
.port_set_host_flood = felix_port_set_host_flood,
+ .port_change_master = felix_port_change_master,
};
struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port)
diff --git a/drivers/net/dsa/ocelot/felix.h b/drivers/net/dsa/ocelot/felix.h
index deb8dde1fc19..c9c29999c336 100644
--- a/drivers/net/dsa/ocelot/felix.h
+++ b/drivers/net/dsa/ocelot/felix.h
@@ -16,9 +16,13 @@
/* Platform-specific information */
struct felix_info {
- const struct resource *target_io_res;
- const struct resource *port_io_res;
- const struct resource *imdio_res;
+ /* Hardcoded resources provided by the hardware instantiation. */
+ const struct resource *resources;
+ size_t num_resources;
+ /* Names of the mandatory resources that will be requested during
+ * probe. Must have TARGET_MAX elements, since it is indexed by target.
+ */
+ const char *const *resource_names;
const struct reg_field *regfields;
const u32 *const *map;
const struct ocelot_ops *ops;
@@ -56,8 +60,6 @@ struct felix_info {
void (*tas_guard_bands_update)(struct ocelot *ocelot, int port);
void (*port_sched_speed_set)(struct ocelot *ocelot, int port,
u32 speed);
- struct regmap *(*init_regmap)(struct ocelot *ocelot,
- struct resource *res);
};
/* Methods for initializing the hardware resources specific to a tagging
@@ -71,6 +73,9 @@ struct felix_tag_proto_ops {
int (*setup)(struct dsa_switch *ds);
void (*teardown)(struct dsa_switch *ds);
unsigned long (*get_host_fwd_mask)(struct dsa_switch *ds);
+ int (*change_master)(struct dsa_switch *ds, int port,
+ struct net_device *master,
+ struct netlink_ext_ack *extack);
};
extern const struct dsa_switch_ops felix_switch_ops;
@@ -83,7 +88,6 @@ struct felix {
struct mii_bus *imdio;
struct phylink_pcs **pcs;
resource_size_t switch_base;
- resource_size_t imdio_base;
enum dsa_tag_protocol tag_proto;
const struct felix_tag_proto_ops *tag_proto_ops;
struct kthread_worker *xmit_worker;
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
index f8f19a85744c..26a35ae322d1 100644
--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -348,7 +348,7 @@ static const u32 vsc9959_sys_regmap[] = {
REG(SYS_COUNT_TX_GREEN_PRIO_5, 0x00026c),
REG(SYS_COUNT_TX_GREEN_PRIO_6, 0x000270),
REG(SYS_COUNT_TX_GREEN_PRIO_7, 0x000274),
- REG(SYS_COUNT_TX_AGING, 0x000278),
+ REG(SYS_COUNT_TX_AGED, 0x000278),
REG(SYS_COUNT_DROP_LOCAL, 0x000400),
REG(SYS_COUNT_DROP_TAIL, 0x000404),
REG(SYS_COUNT_DROP_YELLOW_PRIO_0, 0x000408),
@@ -367,6 +367,10 @@ static const u32 vsc9959_sys_regmap[] = {
REG(SYS_COUNT_DROP_GREEN_PRIO_5, 0x00043c),
REG(SYS_COUNT_DROP_GREEN_PRIO_6, 0x000440),
REG(SYS_COUNT_DROP_GREEN_PRIO_7, 0x000444),
+ REG(SYS_COUNT_SF_MATCHING_FRAMES, 0x000800),
+ REG(SYS_COUNT_SF_NOT_PASSING_FRAMES, 0x000804),
+ REG(SYS_COUNT_SF_NOT_PASSING_SDU, 0x000808),
+ REG(SYS_COUNT_SF_RED_FRAMES, 0x00080c),
REG(SYS_RESET_CFG, 0x000e00),
REG(SYS_SR_ETYPE_CFG, 0x000e04),
REG(SYS_VLAN_ETYPE_CFG, 0x000e08),
@@ -388,7 +392,6 @@ static const u32 vsc9959_sys_regmap[] = {
REG_RESERVED(SYS_MMGT_FAST),
REG_RESERVED(SYS_EVENTS_DIF),
REG_RESERVED(SYS_EVENTS_CORE),
- REG(SYS_CNT, 0x000000),
REG(SYS_PTP_STATUS, 0x000f14),
REG(SYS_PTP_TXSTAMP, 0x000f18),
REG(SYS_PTP_NXT, 0x000f1c),
@@ -474,100 +477,43 @@ static const u32 *vsc9959_regmap[TARGET_MAX] = {
};
/* Addresses are relative to the PCI device's base address */
-static const struct resource vsc9959_target_io_res[TARGET_MAX] = {
- [ANA] = {
- .start = 0x0280000,
- .end = 0x028ffff,
- .name = "ana",
- },
- [QS] = {
- .start = 0x0080000,
- .end = 0x00800ff,
- .name = "qs",
- },
- [QSYS] = {
- .start = 0x0200000,
- .end = 0x021ffff,
- .name = "qsys",
- },
- [REW] = {
- .start = 0x0030000,
- .end = 0x003ffff,
- .name = "rew",
- },
- [SYS] = {
- .start = 0x0010000,
- .end = 0x001ffff,
- .name = "sys",
- },
- [S0] = {
- .start = 0x0040000,
- .end = 0x00403ff,
- .name = "s0",
- },
- [S1] = {
- .start = 0x0050000,
- .end = 0x00503ff,
- .name = "s1",
- },
- [S2] = {
- .start = 0x0060000,
- .end = 0x00603ff,
- .name = "s2",
- },
- [PTP] = {
- .start = 0x0090000,
- .end = 0x00900cb,
- .name = "ptp",
- },
- [GCB] = {
- .start = 0x0070000,
- .end = 0x00701ff,
- .name = "devcpu_gcb",
- },
+static const struct resource vsc9959_resources[] = {
+ DEFINE_RES_MEM_NAMED(0x0010000, 0x0010000, "sys"),
+ DEFINE_RES_MEM_NAMED(0x0030000, 0x0010000, "rew"),
+ DEFINE_RES_MEM_NAMED(0x0040000, 0x0000400, "s0"),
+ DEFINE_RES_MEM_NAMED(0x0050000, 0x0000400, "s1"),
+ DEFINE_RES_MEM_NAMED(0x0060000, 0x0000400, "s2"),
+ DEFINE_RES_MEM_NAMED(0x0070000, 0x0000200, "devcpu_gcb"),
+ DEFINE_RES_MEM_NAMED(0x0080000, 0x0000100, "qs"),
+ DEFINE_RES_MEM_NAMED(0x0090000, 0x00000cc, "ptp"),
+ DEFINE_RES_MEM_NAMED(0x0100000, 0x0010000, "port0"),
+ DEFINE_RES_MEM_NAMED(0x0110000, 0x0010000, "port1"),
+ DEFINE_RES_MEM_NAMED(0x0120000, 0x0010000, "port2"),
+ DEFINE_RES_MEM_NAMED(0x0130000, 0x0010000, "port3"),
+ DEFINE_RES_MEM_NAMED(0x0140000, 0x0010000, "port4"),
+ DEFINE_RES_MEM_NAMED(0x0150000, 0x0010000, "port5"),
+ DEFINE_RES_MEM_NAMED(0x0200000, 0x0020000, "qsys"),
+ DEFINE_RES_MEM_NAMED(0x0280000, 0x0010000, "ana"),
};
-static const struct resource vsc9959_port_io_res[] = {
- {
- .start = 0x0100000,
- .end = 0x010ffff,
- .name = "port0",
- },
- {
- .start = 0x0110000,
- .end = 0x011ffff,
- .name = "port1",
- },
- {
- .start = 0x0120000,
- .end = 0x012ffff,
- .name = "port2",
- },
- {
- .start = 0x0130000,
- .end = 0x013ffff,
- .name = "port3",
- },
- {
- .start = 0x0140000,
- .end = 0x014ffff,
- .name = "port4",
- },
- {
- .start = 0x0150000,
- .end = 0x015ffff,
- .name = "port5",
- },
+static const char * const vsc9959_resource_names[TARGET_MAX] = {
+ [SYS] = "sys",
+ [REW] = "rew",
+ [S0] = "s0",
+ [S1] = "s1",
+ [S2] = "s2",
+ [GCB] = "devcpu_gcb",
+ [QS] = "qs",
+ [PTP] = "ptp",
+ [QSYS] = "qsys",
+ [ANA] = "ana",
};
/* Port MAC 0 Internal MDIO bus through which the SerDes acting as an
* SGMII/QSGMII MAC PCS can be found.
*/
-static const struct resource vsc9959_imdio_res = {
- .start = 0x8030,
- .end = 0x8040,
- .name = "imdio",
-};
+static const struct resource vsc9959_imdio_res =
+ DEFINE_RES_MEM_NAMED(0x8030, 0x8040, "imdio");
static const struct reg_field vsc9959_regfields[REGFIELD_MAX] = {
[ANA_ADVLEARN_VLAN_CHK] = REG_FIELD(ANA_ADVLEARN, 6, 6),
@@ -620,378 +566,7 @@ static const struct reg_field vsc9959_regfields[REGFIELD_MAX] = {
};
static const struct ocelot_stat_layout vsc9959_stats_layout[OCELOT_NUM_STATS] = {
- [OCELOT_STAT_RX_OCTETS] = {
- .name = "rx_octets",
- .reg = SYS_COUNT_RX_OCTETS,
- },
- [OCELOT_STAT_RX_UNICAST] = {
- .name = "rx_unicast",
- .reg = SYS_COUNT_RX_UNICAST,
- },
- [OCELOT_STAT_RX_MULTICAST] = {
- .name = "rx_multicast",
- .reg = SYS_COUNT_RX_MULTICAST,
- },
- [OCELOT_STAT_RX_BROADCAST] = {
- .name = "rx_broadcast",
- .reg = SYS_COUNT_RX_BROADCAST,
- },
- [OCELOT_STAT_RX_SHORTS] = {
- .name = "rx_shorts",
- .reg = SYS_COUNT_RX_SHORTS,
- },
- [OCELOT_STAT_RX_FRAGMENTS] = {
- .name = "rx_fragments",
- .reg = SYS_COUNT_RX_FRAGMENTS,
- },
- [OCELOT_STAT_RX_JABBERS] = {
- .name = "rx_jabbers",
- .reg = SYS_COUNT_RX_JABBERS,
- },
- [OCELOT_STAT_RX_CRC_ALIGN_ERRS] = {
- .name = "rx_crc_align_errs",
- .reg = SYS_COUNT_RX_CRC_ALIGN_ERRS,
- },
- [OCELOT_STAT_RX_SYM_ERRS] = {
- .name = "rx_sym_errs",
- .reg = SYS_COUNT_RX_SYM_ERRS,
- },
- [OCELOT_STAT_RX_64] = {
- .name = "rx_frames_below_65_octets",
- .reg = SYS_COUNT_RX_64,
- },
- [OCELOT_STAT_RX_65_127] = {
- .name = "rx_frames_65_to_127_octets",
- .reg = SYS_COUNT_RX_65_127,
- },
- [OCELOT_STAT_RX_128_255] = {
- .name = "rx_frames_128_to_255_octets",
- .reg = SYS_COUNT_RX_128_255,
- },
- [OCELOT_STAT_RX_256_511] = {
- .name = "rx_frames_256_to_511_octets",
- .reg = SYS_COUNT_RX_256_511,
- },
- [OCELOT_STAT_RX_512_1023] = {
- .name = "rx_frames_512_to_1023_octets",
- .reg = SYS_COUNT_RX_512_1023,
- },
- [OCELOT_STAT_RX_1024_1526] = {
- .name = "rx_frames_1024_to_1526_octets",
- .reg = SYS_COUNT_RX_1024_1526,
- },
- [OCELOT_STAT_RX_1527_MAX] = {
- .name = "rx_frames_over_1526_octets",
- .reg = SYS_COUNT_RX_1527_MAX,
- },
- [OCELOT_STAT_RX_PAUSE] = {
- .name = "rx_pause",
- .reg = SYS_COUNT_RX_PAUSE,
- },
- [OCELOT_STAT_RX_CONTROL] = {
- .name = "rx_control",
- .reg = SYS_COUNT_RX_CONTROL,
- },
- [OCELOT_STAT_RX_LONGS] = {
- .name = "rx_longs",
- .reg = SYS_COUNT_RX_LONGS,
- },
- [OCELOT_STAT_RX_CLASSIFIED_DROPS] = {
- .name = "rx_classified_drops",
- .reg = SYS_COUNT_RX_CLASSIFIED_DROPS,
- },
- [OCELOT_STAT_RX_RED_PRIO_0] = {
- .name = "rx_red_prio_0",
- .reg = SYS_COUNT_RX_RED_PRIO_0,
- },
- [OCELOT_STAT_RX_RED_PRIO_1] = {
- .name = "rx_red_prio_1",
- .reg = SYS_COUNT_RX_RED_PRIO_1,
- },
- [OCELOT_STAT_RX_RED_PRIO_2] = {
- .name = "rx_red_prio_2",
- .reg = SYS_COUNT_RX_RED_PRIO_2,
- },
- [OCELOT_STAT_RX_RED_PRIO_3] = {
- .name = "rx_red_prio_3",
- .reg = SYS_COUNT_RX_RED_PRIO_3,
- },
- [OCELOT_STAT_RX_RED_PRIO_4] = {
- .name = "rx_red_prio_4",
- .reg = SYS_COUNT_RX_RED_PRIO_4,
- },
- [OCELOT_STAT_RX_RED_PRIO_5] = {
- .name = "rx_red_prio_5",
- .reg = SYS_COUNT_RX_RED_PRIO_5,
- },
- [OCELOT_STAT_RX_RED_PRIO_6] = {
- .name = "rx_red_prio_6",
- .reg = SYS_COUNT_RX_RED_PRIO_6,
- },
- [OCELOT_STAT_RX_RED_PRIO_7] = {
- .name = "rx_red_prio_7",
- .reg = SYS_COUNT_RX_RED_PRIO_7,
- },
- [OCELOT_STAT_RX_YELLOW_PRIO_0] = {
- .name = "rx_yellow_prio_0",
- .reg = SYS_COUNT_RX_YELLOW_PRIO_0,
- },
- [OCELOT_STAT_RX_YELLOW_PRIO_1] = {
- .name = "rx_yellow_prio_1",
- .reg = SYS_COUNT_RX_YELLOW_PRIO_1,
- },
- [OCELOT_STAT_RX_YELLOW_PRIO_2] = {
- .name = "rx_yellow_prio_2",
- .reg = SYS_COUNT_RX_YELLOW_PRIO_2,
- },
- [OCELOT_STAT_RX_YELLOW_PRIO_3] = {
- .name = "rx_yellow_prio_3",
- .reg = SYS_COUNT_RX_YELLOW_PRIO_3,
- },
- [OCELOT_STAT_RX_YELLOW_PRIO_4] = {
- .name = "rx_yellow_prio_4",
- .reg = SYS_COUNT_RX_YELLOW_PRIO_4,
- },
- [OCELOT_STAT_RX_YELLOW_PRIO_5] = {
- .name = "rx_yellow_prio_5",
- .reg = SYS_COUNT_RX_YELLOW_PRIO_5,
- },
- [OCELOT_STAT_RX_YELLOW_PRIO_6] = {
- .name = "rx_yellow_prio_6",
- .reg = SYS_COUNT_RX_YELLOW_PRIO_6,
- },
- [OCELOT_STAT_RX_YELLOW_PRIO_7] = {
- .name = "rx_yellow_prio_7",
- .reg = SYS_COUNT_RX_YELLOW_PRIO_7,
- },
- [OCELOT_STAT_RX_GREEN_PRIO_0] = {
- .name = "rx_green_prio_0",
- .reg = SYS_COUNT_RX_GREEN_PRIO_0,
- },
- [OCELOT_STAT_RX_GREEN_PRIO_1] = {
- .name = "rx_green_prio_1",
- .reg = SYS_COUNT_RX_GREEN_PRIO_1,
- },
- [OCELOT_STAT_RX_GREEN_PRIO_2] = {
- .name = "rx_green_prio_2",
- .reg = SYS_COUNT_RX_GREEN_PRIO_2,
- },
- [OCELOT_STAT_RX_GREEN_PRIO_3] = {
- .name = "rx_green_prio_3",
- .reg = SYS_COUNT_RX_GREEN_PRIO_3,
- },
- [OCELOT_STAT_RX_GREEN_PRIO_4] = {
- .name = "rx_green_prio_4",
- .reg = SYS_COUNT_RX_GREEN_PRIO_4,
- },
- [OCELOT_STAT_RX_GREEN_PRIO_5] = {
- .name = "rx_green_prio_5",
- .reg = SYS_COUNT_RX_GREEN_PRIO_5,
- },
- [OCELOT_STAT_RX_GREEN_PRIO_6] = {
- .name = "rx_green_prio_6",
- .reg = SYS_COUNT_RX_GREEN_PRIO_6,
- },
- [OCELOT_STAT_RX_GREEN_PRIO_7] = {
- .name = "rx_green_prio_7",
- .reg = SYS_COUNT_RX_GREEN_PRIO_7,
- },
- [OCELOT_STAT_TX_OCTETS] = {
- .name = "tx_octets",
- .reg = SYS_COUNT_TX_OCTETS,
- },
- [OCELOT_STAT_TX_UNICAST] = {
- .name = "tx_unicast",
- .reg = SYS_COUNT_TX_UNICAST,
- },
- [OCELOT_STAT_TX_MULTICAST] = {
- .name = "tx_multicast",
- .reg = SYS_COUNT_TX_MULTICAST,
- },
- [OCELOT_STAT_TX_BROADCAST] = {
- .name = "tx_broadcast",
- .reg = SYS_COUNT_TX_BROADCAST,
- },
- [OCELOT_STAT_TX_COLLISION] = {
- .name = "tx_collision",
- .reg = SYS_COUNT_TX_COLLISION,
- },
- [OCELOT_STAT_TX_DROPS] = {
- .name = "tx_drops",
- .reg = SYS_COUNT_TX_DROPS,
- },
- [OCELOT_STAT_TX_PAUSE] = {
- .name = "tx_pause",
- .reg = SYS_COUNT_TX_PAUSE,
- },
- [OCELOT_STAT_TX_64] = {
- .name = "tx_frames_below_65_octets",
- .reg = SYS_COUNT_TX_64,
- },
- [OCELOT_STAT_TX_65_127] = {
- .name = "tx_frames_65_to_127_octets",
- .reg = SYS_COUNT_TX_65_127,
- },
- [OCELOT_STAT_TX_128_255] = {
- .name = "tx_frames_128_255_octets",
- .reg = SYS_COUNT_TX_128_255,
- },
- [OCELOT_STAT_TX_256_511] = {
- .name = "tx_frames_256_511_octets",
- .reg = SYS_COUNT_TX_256_511,
- },
- [OCELOT_STAT_TX_512_1023] = {
- .name = "tx_frames_512_1023_octets",
- .reg = SYS_COUNT_TX_512_1023,
- },
- [OCELOT_STAT_TX_1024_1526] = {
- .name = "tx_frames_1024_1526_octets",
- .reg = SYS_COUNT_TX_1024_1526,
- },
- [OCELOT_STAT_TX_1527_MAX] = {
- .name = "tx_frames_over_1526_octets",
- .reg = SYS_COUNT_TX_1527_MAX,
- },
- [OCELOT_STAT_TX_YELLOW_PRIO_0] = {
- .name = "tx_yellow_prio_0",
- .reg = SYS_COUNT_TX_YELLOW_PRIO_0,
- },
- [OCELOT_STAT_TX_YELLOW_PRIO_1] = {
- .name = "tx_yellow_prio_1",
- .reg = SYS_COUNT_TX_YELLOW_PRIO_1,
- },
- [OCELOT_STAT_TX_YELLOW_PRIO_2] = {
- .name = "tx_yellow_prio_2",
- .reg = SYS_COUNT_TX_YELLOW_PRIO_2,
- },
- [OCELOT_STAT_TX_YELLOW_PRIO_3] = {
- .name = "tx_yellow_prio_3",
- .reg = SYS_COUNT_TX_YELLOW_PRIO_3,
- },
- [OCELOT_STAT_TX_YELLOW_PRIO_4] = {
- .name = "tx_yellow_prio_4",
- .reg = SYS_COUNT_TX_YELLOW_PRIO_4,
- },
- [OCELOT_STAT_TX_YELLOW_PRIO_5] = {
- .name = "tx_yellow_prio_5",
- .reg = SYS_COUNT_TX_YELLOW_PRIO_5,
- },
- [OCELOT_STAT_TX_YELLOW_PRIO_6] = {
- .name = "tx_yellow_prio_6",
- .reg = SYS_COUNT_TX_YELLOW_PRIO_6,
- },
- [OCELOT_STAT_TX_YELLOW_PRIO_7] = {
- .name = "tx_yellow_prio_7",
- .reg = SYS_COUNT_TX_YELLOW_PRIO_7,
- },
- [OCELOT_STAT_TX_GREEN_PRIO_0] = {
- .name = "tx_green_prio_0",
- .reg = SYS_COUNT_TX_GREEN_PRIO_0,
- },
- [OCELOT_STAT_TX_GREEN_PRIO_1] = {
- .name = "tx_green_prio_1",
- .reg = SYS_COUNT_TX_GREEN_PRIO_1,
- },
- [OCELOT_STAT_TX_GREEN_PRIO_2] = {
- .name = "tx_green_prio_2",
- .reg = SYS_COUNT_TX_GREEN_PRIO_2,
- },
- [OCELOT_STAT_TX_GREEN_PRIO_3] = {
- .name = "tx_green_prio_3",
- .reg = SYS_COUNT_TX_GREEN_PRIO_3,
- },
- [OCELOT_STAT_TX_GREEN_PRIO_4] = {
- .name = "tx_green_prio_4",
- .reg = SYS_COUNT_TX_GREEN_PRIO_4,
- },
- [OCELOT_STAT_TX_GREEN_PRIO_5] = {
- .name = "tx_green_prio_5",
- .reg = SYS_COUNT_TX_GREEN_PRIO_5,
- },
- [OCELOT_STAT_TX_GREEN_PRIO_6] = {
- .name = "tx_green_prio_6",
- .reg = SYS_COUNT_TX_GREEN_PRIO_6,
- },
- [OCELOT_STAT_TX_GREEN_PRIO_7] = {
- .name = "tx_green_prio_7",
- .reg = SYS_COUNT_TX_GREEN_PRIO_7,
- },
- [OCELOT_STAT_TX_AGED] = {
- .name = "tx_aged",
- .reg = SYS_COUNT_TX_AGING,
- },
- [OCELOT_STAT_DROP_LOCAL] = {
- .name = "drop_local",
- .reg = SYS_COUNT_DROP_LOCAL,
- },
- [OCELOT_STAT_DROP_TAIL] = {
- .name = "drop_tail",
- .reg = SYS_COUNT_DROP_TAIL,
- },
- [OCELOT_STAT_DROP_YELLOW_PRIO_0] = {
- .name = "drop_yellow_prio_0",
- .reg = SYS_COUNT_DROP_YELLOW_PRIO_0,
- },
- [OCELOT_STAT_DROP_YELLOW_PRIO_1] = {
- .name = "drop_yellow_prio_1",
- .reg = SYS_COUNT_DROP_YELLOW_PRIO_1,
- },
- [OCELOT_STAT_DROP_YELLOW_PRIO_2] = {
- .name = "drop_yellow_prio_2",
- .reg = SYS_COUNT_DROP_YELLOW_PRIO_2,
- },
- [OCELOT_STAT_DROP_YELLOW_PRIO_3] = {
- .name = "drop_yellow_prio_3",
- .reg = SYS_COUNT_DROP_YELLOW_PRIO_3,
- },
- [OCELOT_STAT_DROP_YELLOW_PRIO_4] = {
- .name = "drop_yellow_prio_4",
- .reg = SYS_COUNT_DROP_YELLOW_PRIO_4,
- },
- [OCELOT_STAT_DROP_YELLOW_PRIO_5] = {
- .name = "drop_yellow_prio_5",
- .reg = SYS_COUNT_DROP_YELLOW_PRIO_5,
- },
- [OCELOT_STAT_DROP_YELLOW_PRIO_6] = {
- .name = "drop_yellow_prio_6",
- .reg = SYS_COUNT_DROP_YELLOW_PRIO_6,
- },
- [OCELOT_STAT_DROP_YELLOW_PRIO_7] = {
- .name = "drop_yellow_prio_7",
- .reg = SYS_COUNT_DROP_YELLOW_PRIO_7,
- },
- [OCELOT_STAT_DROP_GREEN_PRIO_0] = {
- .name = "drop_green_prio_0",
- .reg = SYS_COUNT_DROP_GREEN_PRIO_0,
- },
- [OCELOT_STAT_DROP_GREEN_PRIO_1] = {
- .name = "drop_green_prio_1",
- .reg = SYS_COUNT_DROP_GREEN_PRIO_1,
- },
- [OCELOT_STAT_DROP_GREEN_PRIO_2] = {
- .name = "drop_green_prio_2",
- .reg = SYS_COUNT_DROP_GREEN_PRIO_2,
- },
- [OCELOT_STAT_DROP_GREEN_PRIO_3] = {
- .name = "drop_green_prio_3",
- .reg = SYS_COUNT_DROP_GREEN_PRIO_3,
- },
- [OCELOT_STAT_DROP_GREEN_PRIO_4] = {
- .name = "drop_green_prio_4",
- .reg = SYS_COUNT_DROP_GREEN_PRIO_4,
- },
- [OCELOT_STAT_DROP_GREEN_PRIO_5] = {
- .name = "drop_green_prio_5",
- .reg = SYS_COUNT_DROP_GREEN_PRIO_5,
- },
- [OCELOT_STAT_DROP_GREEN_PRIO_6] = {
- .name = "drop_green_prio_6",
- .reg = SYS_COUNT_DROP_GREEN_PRIO_6,
- },
- [OCELOT_STAT_DROP_GREEN_PRIO_7] = {
- .name = "drop_green_prio_7",
- .reg = SYS_COUNT_DROP_GREEN_PRIO_7,
- },
+ OCELOT_COMMON_STATS,
};
static const struct vcap_field vsc9959_vcap_es0_keys[] = {
@@ -1371,9 +946,11 @@ static void vsc9959_wm_stat(u32 val, u32 *inuse, u32 *maxuse)
static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot)
{
+ struct pci_dev *pdev = to_pci_dev(ocelot->dev);
struct felix *felix = ocelot_to_felix(ocelot);
struct enetc_mdio_priv *mdio_priv;
struct device *dev = ocelot->dev;
+ resource_size_t imdio_base;
void __iomem *imdio_regs;
struct resource res;
struct enetc_hw *hw;
@@ -1389,10 +966,11 @@ static int vsc9959_mdio_bus_alloc(struct ocelot *ocelot)
return -ENOMEM;
}
- memcpy(&res, felix->info->imdio_res, sizeof(res));
- res.flags = IORESOURCE_MEM;
- res.start += felix->imdio_base;
- res.end += felix->imdio_base;
+ imdio_base = pci_resource_start(pdev, VSC9959_IMDIO_PCI_BAR);
+
+ memcpy(&res, &vsc9959_imdio_res, sizeof(res));
+ res.start += imdio_base;
+ res.end += imdio_base;
imdio_regs = devm_ioremap_resource(dev, &res);
if (IS_ERR(imdio_regs))
@@ -1616,6 +1194,14 @@ static u32 vsc9959_port_qmaxsdu_get(struct ocelot *ocelot, int port, int tc)
}
}
+static u32 vsc9959_tas_tc_max_sdu(struct tc_taprio_qopt_offload *taprio, int tc)
+{
+ if (!taprio || !taprio->max_sdu[tc])
+ return 0;
+
+ return taprio->max_sdu[tc] + ETH_HLEN + 2 * VLAN_HLEN + ETH_FCS_LEN;
+}
+
/* Update QSYS_PORT_MAX_SDU to make sure the static guard bands added by the
* switch (see the ALWAYS_GUARD_BAND_SCH_Q comment) are correct at all MTU
* values (the default value is 1518). Also, for traffic class windows smaller
@@ -1625,6 +1211,7 @@ static u32 vsc9959_port_qmaxsdu_get(struct ocelot *ocelot, int port, int tc)
static void vsc9959_tas_guard_bands_update(struct ocelot *ocelot, int port)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
+ struct tc_taprio_qopt_offload *taprio;
u64 min_gate_len[OCELOT_NUM_TC];
int speed, picos_per_byte;
u64 needed_bit_time_ps;
@@ -1634,6 +1221,8 @@ static void vsc9959_tas_guard_bands_update(struct ocelot *ocelot, int port)
lockdep_assert_held(&ocelot->tas_lock);
+ taprio = ocelot_port->taprio;
+
val = ocelot_read_rix(ocelot, QSYS_TAG_CONFIG, port);
tas_speed = QSYS_TAG_CONFIG_LINK_SPEED_X(val);
@@ -1670,11 +1259,12 @@ static void vsc9959_tas_guard_bands_update(struct ocelot *ocelot, int port)
"port %d: max frame size %d needs %llu ps at speed %d\n",
port, maxlen, needed_bit_time_ps, speed);
- vsc9959_tas_min_gate_lengths(ocelot_port->taprio, min_gate_len);
+ vsc9959_tas_min_gate_lengths(taprio, min_gate_len);
mutex_lock(&ocelot->fwd_domain_lock);
for (tc = 0; tc < OCELOT_NUM_TC; tc++) {
+ u32 requested_max_sdu = vsc9959_tas_tc_max_sdu(taprio, tc);
u64 remaining_gate_len_ps;
u32 max_sdu;
@@ -1685,7 +1275,7 @@ static void vsc9959_tas_guard_bands_update(struct ocelot *ocelot, int port)
/* Setting QMAXSDU_CFG to 0 disables oversized frame
* dropping.
*/
- max_sdu = 0;
+ max_sdu = requested_max_sdu;
dev_dbg(ocelot->dev,
"port %d tc %d min gate len %llu"
", sending all frames\n",
@@ -1716,6 +1306,10 @@ static void vsc9959_tas_guard_bands_update(struct ocelot *ocelot, int port)
*/
if (max_sdu > 20)
max_sdu -= 20;
+
+ if (requested_max_sdu && requested_max_sdu < max_sdu)
+ max_sdu = requested_max_sdu;
+
dev_info(ocelot->dev,
"port %d tc %d min gate length %llu"
" ns not enough for max frame size %d at %d"
@@ -2005,6 +1599,21 @@ static int vsc9959_qos_port_cbs_set(struct dsa_switch *ds, int port,
return 0;
}
+static int vsc9959_qos_query_caps(struct tc_query_caps_base *base)
+{
+ switch (base->type) {
+ case TC_SETUP_QDISC_TAPRIO: {
+ struct tc_taprio_caps *caps = base->caps;
+
+ caps->supports_queue_max_sdu = true;
+
+ return 0;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static int vsc9959_port_setup_tc(struct dsa_switch *ds, int port,
enum tc_setup_type type,
void *type_data)
@@ -2012,6 +1621,8 @@ static int vsc9959_port_setup_tc(struct dsa_switch *ds, int port,
struct ocelot *ocelot = ds->priv;
switch (type) {
+ case TC_QUERY_CAPS:
+ return vsc9959_qos_query_caps(type_data);
case TC_SETUP_QDISC_TAPRIO:
return vsc9959_qos_port_tas_set(ocelot, port, type_data);
case TC_SETUP_QDISC_CBS:
@@ -2043,7 +1654,15 @@ struct felix_stream {
u32 ssid;
};
+struct felix_stream_filter_counters {
+ u64 match;
+ u64 not_pass_gate;
+ u64 not_pass_sdu;
+ u64 red;
+};
+
struct felix_stream_filter {
+ struct felix_stream_filter_counters stats;
struct list_head list;
refcount_t refcount;
u32 index;
@@ -2058,13 +1677,6 @@ struct felix_stream_filter {
u32 maxsdu;
};
-struct felix_stream_filter_counters {
- u32 match;
- u32 not_pass_gate;
- u32 not_pass_sdu;
- u32 red;
-};
-
struct felix_stream_gate {
u32 index;
u8 enable;
@@ -2568,29 +2180,6 @@ static void vsc9959_psfp_sgi_table_del(struct ocelot *ocelot,
}
}
-static void vsc9959_psfp_counters_get(struct ocelot *ocelot, u32 index,
- struct felix_stream_filter_counters *counters)
-{
- spin_lock(&ocelot->stats_lock);
-
- ocelot_rmw(ocelot, SYS_STAT_CFG_STAT_VIEW(index),
- SYS_STAT_CFG_STAT_VIEW_M,
- SYS_STAT_CFG);
-
- counters->match = ocelot_read_gix(ocelot, SYS_CNT, 0x200);
- counters->not_pass_gate = ocelot_read_gix(ocelot, SYS_CNT, 0x201);
- counters->not_pass_sdu = ocelot_read_gix(ocelot, SYS_CNT, 0x202);
- counters->red = ocelot_read_gix(ocelot, SYS_CNT, 0x203);
-
- /* Clear the PSFP counter. */
- ocelot_write(ocelot,
- SYS_STAT_CFG_STAT_VIEW(index) |
- SYS_STAT_CFG_STAT_CLEAR_SHOT(0x10),
- SYS_STAT_CFG);
-
- spin_unlock(&ocelot->stats_lock);
-}
-
static int vsc9959_psfp_filter_add(struct ocelot *ocelot, int port,
struct flow_cls_offload *f)
{
@@ -2615,6 +2204,8 @@ static int vsc9959_psfp_filter_add(struct ocelot *ocelot, int port,
return ret;
}
+ mutex_lock(&psfp->lock);
+
flow_action_for_each(i, a, &f->rule->action) {
switch (a->id) {
case FLOW_ACTION_GATE:
@@ -2656,6 +2247,7 @@ static int vsc9959_psfp_filter_add(struct ocelot *ocelot, int port,
sfi.maxsdu = a->police.mtu;
break;
default:
+ mutex_unlock(&psfp->lock);
return -EOPNOTSUPP;
}
}
@@ -2725,6 +2317,8 @@ static int vsc9959_psfp_filter_add(struct ocelot *ocelot, int port,
goto err;
}
+ mutex_unlock(&psfp->lock);
+
return 0;
err:
@@ -2734,6 +2328,8 @@ err:
if (sfi.fm_valid)
ocelot_vcap_policer_del(ocelot, sfi.fmid);
+ mutex_unlock(&psfp->lock);
+
return ret;
}
@@ -2741,18 +2337,22 @@ static int vsc9959_psfp_filter_del(struct ocelot *ocelot,
struct flow_cls_offload *f)
{
struct felix_stream *stream, tmp, *stream_entry;
+ struct ocelot_psfp_list *psfp = &ocelot->psfp;
static struct felix_stream_filter *sfi;
- struct ocelot_psfp_list *psfp;
- psfp = &ocelot->psfp;
+ mutex_lock(&psfp->lock);
stream = vsc9959_stream_table_get(&psfp->stream_list, f->cookie);
- if (!stream)
+ if (!stream) {
+ mutex_unlock(&psfp->lock);
return -ENOMEM;
+ }
sfi = vsc9959_psfp_sfi_table_get(&psfp->sfi_list, stream->sfid);
- if (!sfi)
+ if (!sfi) {
+ mutex_unlock(&psfp->lock);
return -ENOMEM;
+ }
if (sfi->sg_valid)
vsc9959_psfp_sgi_table_del(ocelot, sfi->sgid);
@@ -2778,27 +2378,83 @@ static int vsc9959_psfp_filter_del(struct ocelot *ocelot,
stream_entry->ports);
}
+ mutex_unlock(&psfp->lock);
+
return 0;
}
+static void vsc9959_update_sfid_stats(struct ocelot *ocelot,
+ struct felix_stream_filter *sfi)
+{
+ struct felix_stream_filter_counters *s = &sfi->stats;
+ u32 match, not_pass_gate, not_pass_sdu, red;
+ u32 sfid = sfi->index;
+
+ lockdep_assert_held(&ocelot->stat_view_lock);
+
+ ocelot_rmw(ocelot, SYS_STAT_CFG_STAT_VIEW(sfid),
+ SYS_STAT_CFG_STAT_VIEW_M,
+ SYS_STAT_CFG);
+
+ match = ocelot_read(ocelot, SYS_COUNT_SF_MATCHING_FRAMES);
+ not_pass_gate = ocelot_read(ocelot, SYS_COUNT_SF_NOT_PASSING_FRAMES);
+ not_pass_sdu = ocelot_read(ocelot, SYS_COUNT_SF_NOT_PASSING_SDU);
+ red = ocelot_read(ocelot, SYS_COUNT_SF_RED_FRAMES);
+
+ /* Clear the PSFP counter. */
+ ocelot_write(ocelot,
+ SYS_STAT_CFG_STAT_VIEW(sfid) |
+ SYS_STAT_CFG_STAT_CLEAR_SHOT(0x10),
+ SYS_STAT_CFG);
+
+ s->match += match;
+ s->not_pass_gate += not_pass_gate;
+ s->not_pass_sdu += not_pass_sdu;
+ s->red += red;
+}
+
+/* Caller must hold &ocelot->stat_view_lock */
+static void vsc9959_update_stats(struct ocelot *ocelot)
+{
+ struct ocelot_psfp_list *psfp = &ocelot->psfp;
+ struct felix_stream_filter *sfi;
+
+ mutex_lock(&psfp->lock);
+
+ list_for_each_entry(sfi, &psfp->sfi_list, list)
+ vsc9959_update_sfid_stats(ocelot, sfi);
+
+ mutex_unlock(&psfp->lock);
+}
+
static int vsc9959_psfp_stats_get(struct ocelot *ocelot,
struct flow_cls_offload *f,
struct flow_stats *stats)
{
- struct felix_stream_filter_counters counters;
- struct ocelot_psfp_list *psfp;
+ struct ocelot_psfp_list *psfp = &ocelot->psfp;
+ struct felix_stream_filter_counters *s;
+ static struct felix_stream_filter *sfi;
struct felix_stream *stream;
- psfp = &ocelot->psfp;
stream = vsc9959_stream_table_get(&psfp->stream_list, f->cookie);
if (!stream)
return -ENOMEM;
- vsc9959_psfp_counters_get(ocelot, stream->sfid, &counters);
+ sfi = vsc9959_psfp_sfi_table_get(&psfp->sfi_list, stream->sfid);
+ if (!sfi)
+ return -EINVAL;
- stats->pkts = counters.match;
- stats->drops = counters.not_pass_gate + counters.not_pass_sdu +
- counters.red;
+ mutex_lock(&ocelot->stat_view_lock);
+
+ vsc9959_update_sfid_stats(ocelot, sfi);
+
+ s = &sfi->stats;
+ stats->pkts = s->match;
+ stats->drops = s->not_pass_gate + s->not_pass_sdu + s->red;
+
+ memset(s, 0, sizeof(*s));
+
+ mutex_unlock(&ocelot->stat_view_lock);
return 0;
}
@@ -2810,6 +2466,7 @@ static void vsc9959_psfp_init(struct ocelot *ocelot)
INIT_LIST_HEAD(&psfp->stream_list);
INIT_LIST_HEAD(&psfp->sfi_list);
INIT_LIST_HEAD(&psfp->sgi_list);
+ mutex_init(&psfp->lock);
}
/* When using cut-through forwarding and the egress port runs at a higher data
@@ -2908,12 +2565,13 @@ static const struct ocelot_ops vsc9959_ops = {
.psfp_stats_get = vsc9959_psfp_stats_get,
.cut_through_fwd = vsc9959_cut_through_fwd,
.tas_clock_adjust = vsc9959_tas_clock_adjust,
+ .update_stats = vsc9959_update_stats,
};
static const struct felix_info felix_info_vsc9959 = {
- .target_io_res = vsc9959_target_io_res,
- .port_io_res = vsc9959_port_io_res,
- .imdio_res = &vsc9959_imdio_res,
+ .resources = vsc9959_resources,
+ .num_resources = ARRAY_SIZE(vsc9959_resources),
+ .resource_names = vsc9959_resource_names,
.regfields = vsc9959_regfields,
.map = vsc9959_regmap,
.ops = &vsc9959_ops,
@@ -2935,7 +2593,6 @@ static const struct felix_info felix_info_vsc9959 = {
.port_setup_tc = vsc9959_port_setup_tc,
.port_sched_speed_set = vsc9959_sched_speed_set,
.tas_guard_bands_update = vsc9959_tas_guard_bands_update,
- .init_regmap = ocelot_regmap_init,
};
static irqreturn_t felix_irq_handler(int irq, void *data)
@@ -2987,7 +2644,6 @@ static int felix_pci_probe(struct pci_dev *pdev,
ocelot->num_flooding_pgids = OCELOT_NUM_TC;
felix->info = &felix_info_vsc9959;
felix->switch_base = pci_resource_start(pdev, VSC9959_SWITCH_PCI_BAR);
- felix->imdio_base = pci_resource_start(pdev, VSC9959_IMDIO_PCI_BAR);
pci_set_master(pdev);
@@ -3048,8 +2704,6 @@ static void felix_pci_remove(struct pci_dev *pdev)
kfree(felix);
pci_disable_device(pdev);
-
- pci_set_drvdata(pdev, NULL);
}
static void felix_pci_shutdown(struct pci_dev *pdev)
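Note: the new vsc9959_tas_tc_max_sdu() helper above converts the per-queue
max-sdu requested through tc-taprio (an L2 payload size) into an on-wire
frame length before it is combined with the guard-band limit. A worked
example, with an illustrative 200-byte limit::

    /* queueMaxSDU = 200 for a traffic class yields a frame length cap of
     *   200 + ETH_HLEN (14) + 2 * VLAN_HLEN (2 * 4) + ETH_FCS_LEN (4) = 226
     * bytes, leaving room for a double VLAN tag. A max-sdu of 0 (or no
     * taprio offload at all) keeps the returned value at 0, i.e. no
     * additional per-class limit.
     */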
diff --git a/drivers/net/dsa/ocelot/seville_vsc9953.c b/drivers/net/dsa/ocelot/seville_vsc9953.c
index b34f4cdfe814..7af33b2c685d 100644
--- a/drivers/net/dsa/ocelot/seville_vsc9953.c
+++ b/drivers/net/dsa/ocelot/seville_vsc9953.c
@@ -343,7 +343,7 @@ static const u32 vsc9953_sys_regmap[] = {
REG(SYS_COUNT_TX_GREEN_PRIO_5, 0x00016c),
REG(SYS_COUNT_TX_GREEN_PRIO_6, 0x000170),
REG(SYS_COUNT_TX_GREEN_PRIO_7, 0x000174),
- REG(SYS_COUNT_TX_AGING, 0x000178),
+ REG(SYS_COUNT_TX_AGED, 0x000178),
REG(SYS_COUNT_DROP_LOCAL, 0x000200),
REG(SYS_COUNT_DROP_TAIL, 0x000204),
REG(SYS_COUNT_DROP_YELLOW_PRIO_0, 0x000208),
@@ -383,7 +383,6 @@ static const u32 vsc9953_sys_regmap[] = {
REG_RESERVED(SYS_MMGT_FAST),
REG_RESERVED(SYS_EVENTS_DIF),
REG_RESERVED(SYS_EVENTS_CORE),
- REG_RESERVED(SYS_CNT),
REG_RESERVED(SYS_PTP_STATUS),
REG_RESERVED(SYS_PTP_TXSTAMP),
REG_RESERVED(SYS_PTP_NXT),
@@ -459,110 +458,40 @@ static const u32 *vsc9953_regmap[TARGET_MAX] = {
};
/* Addresses are relative to the device's base address */
-static const struct resource vsc9953_target_io_res[TARGET_MAX] = {
- [ANA] = {
- .start = 0x0280000,
- .end = 0x028ffff,
- .name = "ana",
- },
- [QS] = {
- .start = 0x0080000,
- .end = 0x00800ff,
- .name = "qs",
- },
- [QSYS] = {
- .start = 0x0200000,
- .end = 0x021ffff,
- .name = "qsys",
- },
- [REW] = {
- .start = 0x0030000,
- .end = 0x003ffff,
- .name = "rew",
- },
- [SYS] = {
- .start = 0x0010000,
- .end = 0x001ffff,
- .name = "sys",
- },
- [S0] = {
- .start = 0x0040000,
- .end = 0x00403ff,
- .name = "s0",
- },
- [S1] = {
- .start = 0x0050000,
- .end = 0x00503ff,
- .name = "s1",
- },
- [S2] = {
- .start = 0x0060000,
- .end = 0x00603ff,
- .name = "s2",
- },
- [PTP] = {
- .start = 0x0090000,
- .end = 0x00900cb,
- .name = "ptp",
- },
- [GCB] = {
- .start = 0x0070000,
- .end = 0x00701ff,
- .name = "devcpu_gcb",
- },
+static const struct resource vsc9953_resources[] = {
+ DEFINE_RES_MEM_NAMED(0x0010000, 0x0010000, "sys"),
+ DEFINE_RES_MEM_NAMED(0x0030000, 0x0010000, "rew"),
+ DEFINE_RES_MEM_NAMED(0x0040000, 0x0000400, "s0"),
+ DEFINE_RES_MEM_NAMED(0x0050000, 0x0000400, "s1"),
+ DEFINE_RES_MEM_NAMED(0x0060000, 0x0000400, "s2"),
+ DEFINE_RES_MEM_NAMED(0x0070000, 0x0000200, "devcpu_gcb"),
+ DEFINE_RES_MEM_NAMED(0x0080000, 0x0000100, "qs"),
+ DEFINE_RES_MEM_NAMED(0x0090000, 0x00000cc, "ptp"),
+ DEFINE_RES_MEM_NAMED(0x0100000, 0x0010000, "port0"),
+ DEFINE_RES_MEM_NAMED(0x0110000, 0x0010000, "port1"),
+ DEFINE_RES_MEM_NAMED(0x0120000, 0x0010000, "port2"),
+ DEFINE_RES_MEM_NAMED(0x0130000, 0x0010000, "port3"),
+ DEFINE_RES_MEM_NAMED(0x0140000, 0x0010000, "port4"),
+ DEFINE_RES_MEM_NAMED(0x0150000, 0x0010000, "port5"),
+ DEFINE_RES_MEM_NAMED(0x0160000, 0x0010000, "port6"),
+ DEFINE_RES_MEM_NAMED(0x0170000, 0x0010000, "port7"),
+ DEFINE_RES_MEM_NAMED(0x0180000, 0x0010000, "port8"),
+ DEFINE_RES_MEM_NAMED(0x0190000, 0x0010000, "port9"),
+ DEFINE_RES_MEM_NAMED(0x0200000, 0x0020000, "qsys"),
+ DEFINE_RES_MEM_NAMED(0x0280000, 0x0010000, "ana"),
};
-static const struct resource vsc9953_port_io_res[] = {
- {
- .start = 0x0100000,
- .end = 0x010ffff,
- .name = "port0",
- },
- {
- .start = 0x0110000,
- .end = 0x011ffff,
- .name = "port1",
- },
- {
- .start = 0x0120000,
- .end = 0x012ffff,
- .name = "port2",
- },
- {
- .start = 0x0130000,
- .end = 0x013ffff,
- .name = "port3",
- },
- {
- .start = 0x0140000,
- .end = 0x014ffff,
- .name = "port4",
- },
- {
- .start = 0x0150000,
- .end = 0x015ffff,
- .name = "port5",
- },
- {
- .start = 0x0160000,
- .end = 0x016ffff,
- .name = "port6",
- },
- {
- .start = 0x0170000,
- .end = 0x017ffff,
- .name = "port7",
- },
- {
- .start = 0x0180000,
- .end = 0x018ffff,
- .name = "port8",
- },
- {
- .start = 0x0190000,
- .end = 0x019ffff,
- .name = "port9",
- },
+static const char * const vsc9953_resource_names[TARGET_MAX] = {
+ [SYS] = "sys",
+ [REW] = "rew",
+ [S0] = "s0",
+ [S1] = "s1",
+ [S2] = "s2",
+ [GCB] = "devcpu_gcb",
+ [QS] = "qs",
+ [PTP] = "ptp",
+ [QSYS] = "qsys",
+ [ANA] = "ana",
};
static const struct reg_field vsc9953_regfields[REGFIELD_MAX] = {
@@ -615,378 +544,7 @@ static const struct reg_field vsc9953_regfields[REGFIELD_MAX] = {
};
static const struct ocelot_stat_layout vsc9953_stats_layout[OCELOT_NUM_STATS] = {
- [OCELOT_STAT_RX_OCTETS] = {
- .name = "rx_octets",
- .reg = SYS_COUNT_RX_OCTETS,
- },
- [OCELOT_STAT_RX_UNICAST] = {
- .name = "rx_unicast",
- .reg = SYS_COUNT_RX_UNICAST,
- },
- [OCELOT_STAT_RX_MULTICAST] = {
- .name = "rx_multicast",
- .reg = SYS_COUNT_RX_MULTICAST,
- },
- [OCELOT_STAT_RX_BROADCAST] = {
- .name = "rx_broadcast",
- .reg = SYS_COUNT_RX_BROADCAST,
- },
- [OCELOT_STAT_RX_SHORTS] = {
- .name = "rx_shorts",
- .reg = SYS_COUNT_RX_SHORTS,
- },
- [OCELOT_STAT_RX_FRAGMENTS] = {
- .name = "rx_fragments",
- .reg = SYS_COUNT_RX_FRAGMENTS,
- },
- [OCELOT_STAT_RX_JABBERS] = {
- .name = "rx_jabbers",
- .reg = SYS_COUNT_RX_JABBERS,
- },
- [OCELOT_STAT_RX_CRC_ALIGN_ERRS] = {
- .name = "rx_crc_align_errs",
- .reg = SYS_COUNT_RX_CRC_ALIGN_ERRS,
- },
- [OCELOT_STAT_RX_SYM_ERRS] = {
- .name = "rx_sym_errs",
- .reg = SYS_COUNT_RX_SYM_ERRS,
- },
- [OCELOT_STAT_RX_64] = {
- .name = "rx_frames_below_65_octets",
- .reg = SYS_COUNT_RX_64,
- },
- [OCELOT_STAT_RX_65_127] = {
- .name = "rx_frames_65_to_127_octets",
- .reg = SYS_COUNT_RX_65_127,
- },
- [OCELOT_STAT_RX_128_255] = {
- .name = "rx_frames_128_to_255_octets",
- .reg = SYS_COUNT_RX_128_255,
- },
- [OCELOT_STAT_RX_256_511] = {
- .name = "rx_frames_256_to_511_octets",
- .reg = SYS_COUNT_RX_256_511,
- },
- [OCELOT_STAT_RX_512_1023] = {
- .name = "rx_frames_512_to_1023_octets",
- .reg = SYS_COUNT_RX_512_1023,
- },
- [OCELOT_STAT_RX_1024_1526] = {
- .name = "rx_frames_1024_to_1526_octets",
- .reg = SYS_COUNT_RX_1024_1526,
- },
- [OCELOT_STAT_RX_1527_MAX] = {
- .name = "rx_frames_over_1526_octets",
- .reg = SYS_COUNT_RX_1527_MAX,
- },
- [OCELOT_STAT_RX_PAUSE] = {
- .name = "rx_pause",
- .reg = SYS_COUNT_RX_PAUSE,
- },
- [OCELOT_STAT_RX_CONTROL] = {
- .name = "rx_control",
- .reg = SYS_COUNT_RX_CONTROL,
- },
- [OCELOT_STAT_RX_LONGS] = {
- .name = "rx_longs",
- .reg = SYS_COUNT_RX_LONGS,
- },
- [OCELOT_STAT_RX_CLASSIFIED_DROPS] = {
- .name = "rx_classified_drops",
- .reg = SYS_COUNT_RX_CLASSIFIED_DROPS,
- },
- [OCELOT_STAT_RX_RED_PRIO_0] = {
- .name = "rx_red_prio_0",
- .reg = SYS_COUNT_RX_RED_PRIO_0,
- },
- [OCELOT_STAT_RX_RED_PRIO_1] = {
- .name = "rx_red_prio_1",
- .reg = SYS_COUNT_RX_RED_PRIO_1,
- },
- [OCELOT_STAT_RX_RED_PRIO_2] = {
- .name = "rx_red_prio_2",
- .reg = SYS_COUNT_RX_RED_PRIO_2,
- },
- [OCELOT_STAT_RX_RED_PRIO_3] = {
- .name = "rx_red_prio_3",
- .reg = SYS_COUNT_RX_RED_PRIO_3,
- },
- [OCELOT_STAT_RX_RED_PRIO_4] = {
- .name = "rx_red_prio_4",
- .reg = SYS_COUNT_RX_RED_PRIO_4,
- },
- [OCELOT_STAT_RX_RED_PRIO_5] = {
- .name = "rx_red_prio_5",
- .reg = SYS_COUNT_RX_RED_PRIO_5,
- },
- [OCELOT_STAT_RX_RED_PRIO_6] = {
- .name = "rx_red_prio_6",
- .reg = SYS_COUNT_RX_RED_PRIO_6,
- },
- [OCELOT_STAT_RX_RED_PRIO_7] = {
- .name = "rx_red_prio_7",
- .reg = SYS_COUNT_RX_RED_PRIO_7,
- },
- [OCELOT_STAT_RX_YELLOW_PRIO_0] = {
- .name = "rx_yellow_prio_0",
- .reg = SYS_COUNT_RX_YELLOW_PRIO_0,
- },
- [OCELOT_STAT_RX_YELLOW_PRIO_1] = {
- .name = "rx_yellow_prio_1",
- .reg = SYS_COUNT_RX_YELLOW_PRIO_1,
- },
- [OCELOT_STAT_RX_YELLOW_PRIO_2] = {
- .name = "rx_yellow_prio_2",
- .reg = SYS_COUNT_RX_YELLOW_PRIO_2,
- },
- [OCELOT_STAT_RX_YELLOW_PRIO_3] = {
- .name = "rx_yellow_prio_3",
- .reg = SYS_COUNT_RX_YELLOW_PRIO_3,
- },
- [OCELOT_STAT_RX_YELLOW_PRIO_4] = {
- .name = "rx_yellow_prio_4",
- .reg = SYS_COUNT_RX_YELLOW_PRIO_4,
- },
- [OCELOT_STAT_RX_YELLOW_PRIO_5] = {
- .name = "rx_yellow_prio_5",
- .reg = SYS_COUNT_RX_YELLOW_PRIO_5,
- },
- [OCELOT_STAT_RX_YELLOW_PRIO_6] = {
- .name = "rx_yellow_prio_6",
- .reg = SYS_COUNT_RX_YELLOW_PRIO_6,
- },
- [OCELOT_STAT_RX_YELLOW_PRIO_7] = {
- .name = "rx_yellow_prio_7",
- .reg = SYS_COUNT_RX_YELLOW_PRIO_7,
- },
- [OCELOT_STAT_RX_GREEN_PRIO_0] = {
- .name = "rx_green_prio_0",
- .reg = SYS_COUNT_RX_GREEN_PRIO_0,
- },
- [OCELOT_STAT_RX_GREEN_PRIO_1] = {
- .name = "rx_green_prio_1",
- .reg = SYS_COUNT_RX_GREEN_PRIO_1,
- },
- [OCELOT_STAT_RX_GREEN_PRIO_2] = {
- .name = "rx_green_prio_2",
- .reg = SYS_COUNT_RX_GREEN_PRIO_2,
- },
- [OCELOT_STAT_RX_GREEN_PRIO_3] = {
- .name = "rx_green_prio_3",
- .reg = SYS_COUNT_RX_GREEN_PRIO_3,
- },
- [OCELOT_STAT_RX_GREEN_PRIO_4] = {
- .name = "rx_green_prio_4",
- .reg = SYS_COUNT_RX_GREEN_PRIO_4,
- },
- [OCELOT_STAT_RX_GREEN_PRIO_5] = {
- .name = "rx_green_prio_5",
- .reg = SYS_COUNT_RX_GREEN_PRIO_5,
- },
- [OCELOT_STAT_RX_GREEN_PRIO_6] = {
- .name = "rx_green_prio_6",
- .reg = SYS_COUNT_RX_GREEN_PRIO_6,
- },
- [OCELOT_STAT_RX_GREEN_PRIO_7] = {
- .name = "rx_green_prio_7",
- .reg = SYS_COUNT_RX_GREEN_PRIO_7,
- },
- [OCELOT_STAT_TX_OCTETS] = {
- .name = "tx_octets",
- .reg = SYS_COUNT_TX_OCTETS,
- },
- [OCELOT_STAT_TX_UNICAST] = {
- .name = "tx_unicast",
- .reg = SYS_COUNT_TX_UNICAST,
- },
- [OCELOT_STAT_TX_MULTICAST] = {
- .name = "tx_multicast",
- .reg = SYS_COUNT_TX_MULTICAST,
- },
- [OCELOT_STAT_TX_BROADCAST] = {
- .name = "tx_broadcast",
- .reg = SYS_COUNT_TX_BROADCAST,
- },
- [OCELOT_STAT_TX_COLLISION] = {
- .name = "tx_collision",
- .reg = SYS_COUNT_TX_COLLISION,
- },
- [OCELOT_STAT_TX_DROPS] = {
- .name = "tx_drops",
- .reg = SYS_COUNT_TX_DROPS,
- },
- [OCELOT_STAT_TX_PAUSE] = {
- .name = "tx_pause",
- .reg = SYS_COUNT_TX_PAUSE,
- },
- [OCELOT_STAT_TX_64] = {
- .name = "tx_frames_below_65_octets",
- .reg = SYS_COUNT_TX_64,
- },
- [OCELOT_STAT_TX_65_127] = {
- .name = "tx_frames_65_to_127_octets",
- .reg = SYS_COUNT_TX_65_127,
- },
- [OCELOT_STAT_TX_128_255] = {
- .name = "tx_frames_128_255_octets",
- .reg = SYS_COUNT_TX_128_255,
- },
- [OCELOT_STAT_TX_256_511] = {
- .name = "tx_frames_256_511_octets",
- .reg = SYS_COUNT_TX_256_511,
- },
- [OCELOT_STAT_TX_512_1023] = {
- .name = "tx_frames_512_1023_octets",
- .reg = SYS_COUNT_TX_512_1023,
- },
- [OCELOT_STAT_TX_1024_1526] = {
- .name = "tx_frames_1024_1526_octets",
- .reg = SYS_COUNT_TX_1024_1526,
- },
- [OCELOT_STAT_TX_1527_MAX] = {
- .name = "tx_frames_over_1526_octets",
- .reg = SYS_COUNT_TX_1527_MAX,
- },
- [OCELOT_STAT_TX_YELLOW_PRIO_0] = {
- .name = "tx_yellow_prio_0",
- .reg = SYS_COUNT_TX_YELLOW_PRIO_0,
- },
- [OCELOT_STAT_TX_YELLOW_PRIO_1] = {
- .name = "tx_yellow_prio_1",
- .reg = SYS_COUNT_TX_YELLOW_PRIO_1,
- },
- [OCELOT_STAT_TX_YELLOW_PRIO_2] = {
- .name = "tx_yellow_prio_2",
- .reg = SYS_COUNT_TX_YELLOW_PRIO_2,
- },
- [OCELOT_STAT_TX_YELLOW_PRIO_3] = {
- .name = "tx_yellow_prio_3",
- .reg = SYS_COUNT_TX_YELLOW_PRIO_3,
- },
- [OCELOT_STAT_TX_YELLOW_PRIO_4] = {
- .name = "tx_yellow_prio_4",
- .reg = SYS_COUNT_TX_YELLOW_PRIO_4,
- },
- [OCELOT_STAT_TX_YELLOW_PRIO_5] = {
- .name = "tx_yellow_prio_5",
- .reg = SYS_COUNT_TX_YELLOW_PRIO_5,
- },
- [OCELOT_STAT_TX_YELLOW_PRIO_6] = {
- .name = "tx_yellow_prio_6",
- .reg = SYS_COUNT_TX_YELLOW_PRIO_6,
- },
- [OCELOT_STAT_TX_YELLOW_PRIO_7] = {
- .name = "tx_yellow_prio_7",
- .reg = SYS_COUNT_TX_YELLOW_PRIO_7,
- },
- [OCELOT_STAT_TX_GREEN_PRIO_0] = {
- .name = "tx_green_prio_0",
- .reg = SYS_COUNT_TX_GREEN_PRIO_0,
- },
- [OCELOT_STAT_TX_GREEN_PRIO_1] = {
- .name = "tx_green_prio_1",
- .reg = SYS_COUNT_TX_GREEN_PRIO_1,
- },
- [OCELOT_STAT_TX_GREEN_PRIO_2] = {
- .name = "tx_green_prio_2",
- .reg = SYS_COUNT_TX_GREEN_PRIO_2,
- },
- [OCELOT_STAT_TX_GREEN_PRIO_3] = {
- .name = "tx_green_prio_3",
- .reg = SYS_COUNT_TX_GREEN_PRIO_3,
- },
- [OCELOT_STAT_TX_GREEN_PRIO_4] = {
- .name = "tx_green_prio_4",
- .reg = SYS_COUNT_TX_GREEN_PRIO_4,
- },
- [OCELOT_STAT_TX_GREEN_PRIO_5] = {
- .name = "tx_green_prio_5",
- .reg = SYS_COUNT_TX_GREEN_PRIO_5,
- },
- [OCELOT_STAT_TX_GREEN_PRIO_6] = {
- .name = "tx_green_prio_6",
- .reg = SYS_COUNT_TX_GREEN_PRIO_6,
- },
- [OCELOT_STAT_TX_GREEN_PRIO_7] = {
- .name = "tx_green_prio_7",
- .reg = SYS_COUNT_TX_GREEN_PRIO_7,
- },
- [OCELOT_STAT_TX_AGED] = {
- .name = "tx_aged",
- .reg = SYS_COUNT_TX_AGING,
- },
- [OCELOT_STAT_DROP_LOCAL] = {
- .name = "drop_local",
- .reg = SYS_COUNT_DROP_LOCAL,
- },
- [OCELOT_STAT_DROP_TAIL] = {
- .name = "drop_tail",
- .reg = SYS_COUNT_DROP_TAIL,
- },
- [OCELOT_STAT_DROP_YELLOW_PRIO_0] = {
- .name = "drop_yellow_prio_0",
- .reg = SYS_COUNT_DROP_YELLOW_PRIO_0,
- },
- [OCELOT_STAT_DROP_YELLOW_PRIO_1] = {
- .name = "drop_yellow_prio_1",
- .reg = SYS_COUNT_DROP_YELLOW_PRIO_1,
- },
- [OCELOT_STAT_DROP_YELLOW_PRIO_2] = {
- .name = "drop_yellow_prio_2",
- .reg = SYS_COUNT_DROP_YELLOW_PRIO_2,
- },
- [OCELOT_STAT_DROP_YELLOW_PRIO_3] = {
- .name = "drop_yellow_prio_3",
- .reg = SYS_COUNT_DROP_YELLOW_PRIO_3,
- },
- [OCELOT_STAT_DROP_YELLOW_PRIO_4] = {
- .name = "drop_yellow_prio_4",
- .reg = SYS_COUNT_DROP_YELLOW_PRIO_4,
- },
- [OCELOT_STAT_DROP_YELLOW_PRIO_5] = {
- .name = "drop_yellow_prio_5",
- .reg = SYS_COUNT_DROP_YELLOW_PRIO_5,
- },
- [OCELOT_STAT_DROP_YELLOW_PRIO_6] = {
- .name = "drop_yellow_prio_6",
- .reg = SYS_COUNT_DROP_YELLOW_PRIO_6,
- },
- [OCELOT_STAT_DROP_YELLOW_PRIO_7] = {
- .name = "drop_yellow_prio_7",
- .reg = SYS_COUNT_DROP_YELLOW_PRIO_7,
- },
- [OCELOT_STAT_DROP_GREEN_PRIO_0] = {
- .name = "drop_green_prio_0",
- .reg = SYS_COUNT_DROP_GREEN_PRIO_0,
- },
- [OCELOT_STAT_DROP_GREEN_PRIO_1] = {
- .name = "drop_green_prio_1",
- .reg = SYS_COUNT_DROP_GREEN_PRIO_1,
- },
- [OCELOT_STAT_DROP_GREEN_PRIO_2] = {
- .name = "drop_green_prio_2",
- .reg = SYS_COUNT_DROP_GREEN_PRIO_2,
- },
- [OCELOT_STAT_DROP_GREEN_PRIO_3] = {
- .name = "drop_green_prio_3",
- .reg = SYS_COUNT_DROP_GREEN_PRIO_3,
- },
- [OCELOT_STAT_DROP_GREEN_PRIO_4] = {
- .name = "drop_green_prio_4",
- .reg = SYS_COUNT_DROP_GREEN_PRIO_4,
- },
- [OCELOT_STAT_DROP_GREEN_PRIO_5] = {
- .name = "drop_green_prio_5",
- .reg = SYS_COUNT_DROP_GREEN_PRIO_5,
- },
- [OCELOT_STAT_DROP_GREEN_PRIO_6] = {
- .name = "drop_green_prio_6",
- .reg = SYS_COUNT_DROP_GREEN_PRIO_6,
- },
- [OCELOT_STAT_DROP_GREEN_PRIO_7] = {
- .name = "drop_green_prio_7",
- .reg = SYS_COUNT_DROP_GREEN_PRIO_7,
- },
+ OCELOT_COMMON_STATS,
};
static const struct vcap_field vsc9953_vcap_es0_keys[] = {
@@ -1432,8 +990,9 @@ static void vsc9953_mdio_bus_free(struct ocelot *ocelot)
}
static const struct felix_info seville_info_vsc9953 = {
- .target_io_res = vsc9953_target_io_res,
- .port_io_res = vsc9953_port_io_res,
+ .resources = vsc9953_resources,
+ .num_resources = ARRAY_SIZE(vsc9953_resources),
+ .resource_names = vsc9953_resource_names,
.regfields = vsc9953_regfields,
.map = vsc9953_regmap,
.ops = &vsc9953_ops,
@@ -1450,7 +1009,6 @@ static const struct felix_info seville_info_vsc9953 = {
.mdio_bus_free = vsc9953_mdio_bus_free,
.phylink_validate = vsc9953_phylink_validate,
.port_modes = vsc9953_port_modes,
- .init_regmap = ocelot_regmap_init,
};
static int seville_probe(struct platform_device *pdev)
@@ -1525,8 +1083,6 @@ static int seville_remove(struct platform_device *pdev)
kfree(felix->ds);
kfree(felix);
- platform_set_drvdata(pdev, NULL);
-
return 0;
}
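Note: the open-coded vsc9953_target_io_res/vsc9953_port_io_res arrays above
are collapsed into a single vsc9953_resources[] table built with
DEFINE_RES_MEM_NAMED(), plus a name lookup table. The macro takes a start
offset and a size rather than an inclusive end address; a minimal sketch of
the equivalence for the "sys" window::

    /* The two initializers below describe the same memory window. */
    static const struct resource sys_res_open_coded = {
            .start = 0x0010000,
            .end   = 0x001ffff,     /* start + size - 1 */
            .flags = IORESOURCE_MEM,
            .name  = "sys",
    };

    static const struct resource sys_res =
            DEFINE_RES_MEM_NAMED(0x0010000, 0x0010000, "sys");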
diff --git a/drivers/net/dsa/qca/ar9331.c b/drivers/net/dsa/qca/ar9331.c
index 0796b7cf8cae..e7b98b864fa1 100644
--- a/drivers/net/dsa/qca/ar9331.c
+++ b/drivers/net/dsa/qca/ar9331.c
@@ -1099,8 +1099,6 @@ static void ar9331_sw_remove(struct mdio_device *mdiodev)
dsa_unregister_switch(&priv->ds);
reset_control_assert(priv->sw_reset);
-
- dev_set_drvdata(&mdiodev->dev, NULL);
}
static void ar9331_sw_shutdown(struct mdio_device *mdiodev)
diff --git a/drivers/net/dsa/qca/qca8k-8xxx.c b/drivers/net/dsa/qca/qca8k-8xxx.c
index c181346388a4..5669c92c93f7 100644
--- a/drivers/net/dsa/qca/qca8k-8xxx.c
+++ b/drivers/net/dsa/qca/qca8k-8xxx.c
@@ -1957,8 +1957,6 @@ qca8k_sw_remove(struct mdio_device *mdiodev)
qca8k_port_set_status(priv, i, 0);
dsa_unregister_switch(priv->ds);
-
- dev_set_drvdata(&mdiodev->dev, NULL);
}
static void qca8k_sw_shutdown(struct mdio_device *mdiodev)
diff --git a/drivers/net/dsa/qca/qca8k-common.c b/drivers/net/dsa/qca/qca8k-common.c
index bba95613e218..fb45b598847b 100644
--- a/drivers/net/dsa/qca/qca8k-common.c
+++ b/drivers/net/dsa/qca/qca8k-common.c
@@ -1017,7 +1017,8 @@ int qca8k_port_vlan_del(struct dsa_switch *ds, int port,
static bool qca8k_lag_can_offload(struct dsa_switch *ds,
struct dsa_lag lag,
- struct netdev_lag_upper_info *info)
+ struct netdev_lag_upper_info *info,
+ struct netlink_ext_ack *extack)
{
struct dsa_port *dp;
int members = 0;
@@ -1029,15 +1030,24 @@ static bool qca8k_lag_can_offload(struct dsa_switch *ds,
/* Includes the port joining the LAG */
members++;
- if (members > QCA8K_NUM_PORTS_FOR_LAG)
+ if (members > QCA8K_NUM_PORTS_FOR_LAG) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot offload more than 4 LAG ports");
return false;
+ }
- if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
+ if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can only offload LAG using hash TX type");
return false;
+ }
if (info->hash_type != NETDEV_LAG_HASH_L2 &&
- info->hash_type != NETDEV_LAG_HASH_L23)
+ info->hash_type != NETDEV_LAG_HASH_L23) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can only offload L2 or L2+L3 TX hash");
return false;
+ }
return true;
}
@@ -1160,11 +1170,12 @@ static int qca8k_lag_refresh_portmap(struct dsa_switch *ds, int port,
}
int qca8k_port_lag_join(struct dsa_switch *ds, int port, struct dsa_lag lag,
- struct netdev_lag_upper_info *info)
+ struct netdev_lag_upper_info *info,
+ struct netlink_ext_ack *extack)
{
int ret;
- if (!qca8k_lag_can_offload(ds, lag, info))
+ if (!qca8k_lag_can_offload(ds, lag, info, extack))
return -EOPNOTSUPP;
ret = qca8k_lag_setup_hash(ds, lag, info);
diff --git a/drivers/net/dsa/qca/qca8k.h b/drivers/net/dsa/qca/qca8k.h
index e36ecc9777f4..0b7a5cb12321 100644
--- a/drivers/net/dsa/qca/qca8k.h
+++ b/drivers/net/dsa/qca/qca8k.h
@@ -512,7 +512,8 @@ int qca8k_port_vlan_del(struct dsa_switch *ds, int port,
/* Common port LAG function */
int qca8k_port_lag_join(struct dsa_switch *ds, int port, struct dsa_lag lag,
- struct netdev_lag_upper_info *info);
+ struct netdev_lag_upper_info *info,
+ struct netlink_ext_ack *extack);
int qca8k_port_lag_leave(struct dsa_switch *ds, int port,
struct dsa_lag lag);
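Note: the qca8k LAG hooks now receive the netlink extended ack, so a refused
offload carries a human-readable reason instead of a bare -EOPNOTSUPP. A
minimal sketch of the pattern (foo_can_offload is a made-up name)::

    static bool foo_can_offload(struct netdev_lag_upper_info *info,
                                struct netlink_ext_ack *extack)
    {
            if (info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
                    /* NL_SET_ERR_MSG_MOD() prefixes the module name; the
                     * message is reported back to user space via netlink.
                     */
                    NL_SET_ERR_MSG_MOD(extack,
                                       "Can only offload LAG using hash TX type");
                    return false;
            }

            return true;
    }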
diff --git a/drivers/net/dsa/realtek/realtek-mdio.c b/drivers/net/dsa/realtek/realtek-mdio.c
index c58f49d558d2..3e54fac5f902 100644
--- a/drivers/net/dsa/realtek/realtek-mdio.c
+++ b/drivers/net/dsa/realtek/realtek-mdio.c
@@ -245,8 +245,6 @@ static void realtek_mdio_remove(struct mdio_device *mdiodev)
/* leave the device reset asserted */
if (priv->reset)
gpiod_set_value(priv->reset, 1);
-
- dev_set_drvdata(&mdiodev->dev, NULL);
}
static void realtek_mdio_shutdown(struct mdio_device *mdiodev)
diff --git a/drivers/net/dsa/realtek/realtek-smi.c b/drivers/net/dsa/realtek/realtek-smi.c
index 45992f79ec8d..1b447d96b9c4 100644
--- a/drivers/net/dsa/realtek/realtek-smi.c
+++ b/drivers/net/dsa/realtek/realtek-smi.c
@@ -522,8 +522,6 @@ static int realtek_smi_remove(struct platform_device *pdev)
if (priv->reset)
gpiod_set_value(priv->reset, 1);
- platform_set_drvdata(pdev, NULL);
-
return 0;
}
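Note: the drvdata resets removed here (and in the surrounding remove()
callbacks) are redundant: the driver core clears the driver data itself once
a device is unbound or its probe fails, so remove() only needs to undo the
driver's own setup. A minimal sketch with made-up foo_* names::

    static int foo_platform_remove(struct platform_device *pdev)
    {
            struct foo_priv *priv = platform_get_drvdata(pdev);

            foo_teardown(priv);

            /* No platform_set_drvdata(pdev, NULL) needed: the driver core
             * resets drvdata after unbinding the device.
             */
            return 0;
    }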
diff --git a/drivers/net/dsa/rzn1_a5psw.c b/drivers/net/dsa/rzn1_a5psw.c
index 0744e8162e1d..ed413d555bec 100644
--- a/drivers/net/dsa/rzn1_a5psw.c
+++ b/drivers/net/dsa/rzn1_a5psw.c
@@ -1025,8 +1025,6 @@ static int a5psw_remove(struct platform_device *pdev)
clk_disable_unprepare(a5psw->hclk);
clk_disable_unprepare(a5psw->clk);
- platform_set_drvdata(pdev, NULL);
-
return 0;
}
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index b03d0d0c3dbf..412666111b0c 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -3351,8 +3351,6 @@ static void sja1105_remove(struct spi_device *spi)
return;
dsa_unregister_switch(priv->ds);
-
- spi_set_drvdata(spi, NULL);
}
static void sja1105_shutdown(struct spi_device *spi)
diff --git a/drivers/net/dsa/vitesse-vsc73xx-platform.c b/drivers/net/dsa/vitesse-vsc73xx-platform.c
index fe4b154a0a57..bd4206e8f9af 100644
--- a/drivers/net/dsa/vitesse-vsc73xx-platform.c
+++ b/drivers/net/dsa/vitesse-vsc73xx-platform.c
@@ -121,8 +121,6 @@ static int vsc73xx_platform_remove(struct platform_device *pdev)
vsc73xx_remove(&vsc_platform->vsc);
- platform_set_drvdata(pdev, NULL);
-
return 0;
}
diff --git a/drivers/net/dsa/vitesse-vsc73xx-spi.c b/drivers/net/dsa/vitesse-vsc73xx-spi.c
index 97a92e6da60d..85b9a0f51dd8 100644
--- a/drivers/net/dsa/vitesse-vsc73xx-spi.c
+++ b/drivers/net/dsa/vitesse-vsc73xx-spi.c
@@ -167,8 +167,6 @@ static void vsc73xx_spi_remove(struct spi_device *spi)
return;
vsc73xx_remove(&vsc_spi->vsc);
-
- spi_set_drvdata(spi, NULL);
}
static void vsc73xx_spi_shutdown(struct spi_device *spi)
diff --git a/drivers/net/dsa/xrs700x/xrs700x_i2c.c b/drivers/net/dsa/xrs700x/xrs700x_i2c.c
index 6deae388a0d6..cd533b9e17ec 100644
--- a/drivers/net/dsa/xrs700x/xrs700x_i2c.c
+++ b/drivers/net/dsa/xrs700x/xrs700x_i2c.c
@@ -114,8 +114,6 @@ static int xrs700x_i2c_remove(struct i2c_client *i2c)
xrs700x_switch_remove(priv);
- i2c_set_clientdata(i2c, NULL);
-
return 0;
}
diff --git a/drivers/net/dsa/xrs700x/xrs700x_mdio.c b/drivers/net/dsa/xrs700x/xrs700x_mdio.c
index 127a677d1f39..5f7d344b5d73 100644
--- a/drivers/net/dsa/xrs700x/xrs700x_mdio.c
+++ b/drivers/net/dsa/xrs700x/xrs700x_mdio.c
@@ -140,8 +140,6 @@ static void xrs700x_mdio_remove(struct mdio_device *mdiodev)
return;
xrs700x_switch_remove(priv);
-
- dev_set_drvdata(&mdiodev->dev, NULL);
}
static void xrs700x_mdio_shutdown(struct mdio_device *mdiodev)
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index f82ad7419508..aa0fc00faecb 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -102,7 +102,7 @@ static const struct net_device_ops dummy_netdev_ops = {
static void dummy_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
}
static const struct ethtool_ops dummy_ethtool_ops = {
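Note: the strlcpy() -> strscpy() conversions in this and the following
ethtool get_drvinfo() callbacks are behaviour-preserving for these fixed
strings; the two helpers differ in their return value and in how much of the
source they read. A short sketch::

    char drv[8];
    ssize_t n;

    /* strlcpy() always walks the whole source and returns strlen(src),
     * so truncation is only detectable by comparing against the size.
     */
    strlcpy(drv, "verylongname", sizeof(drv));      /* returns 12 */

    /* strscpy() stops at the destination size and reports truncation. */
    n = strscpy(drv, "verylongname", sizeof(drv));  /* n == -E2BIG */
    if (n < 0)
            ;                                       /* truncated */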
diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c
index 846fa3af4504..fb68339e1511 100644
--- a/drivers/net/ethernet/3com/3c509.c
+++ b/drivers/net/ethernet/3com/3c509.c
@@ -1135,7 +1135,7 @@ el3_netdev_set_ecmd(struct net_device *dev,
static void el3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
}
static int el3_get_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/3com/3c515.c b/drivers/net/ethernet/3com/3c515.c
index 1d124b0f65e7..d2f4358cc550 100644
--- a/drivers/net/ethernet/3com/3c515.c
+++ b/drivers/net/ethernet/3com/3c515.c
@@ -1527,7 +1527,7 @@ static void set_rx_mode(struct net_device *dev)
static void netdev_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
snprintf(info->bus_info, sizeof(info->bus_info), "ISA 0x%lx",
dev->base_addr);
}
diff --git a/drivers/net/ethernet/3com/3c589_cs.c b/drivers/net/ethernet/3com/3c589_cs.c
index 4673bc1604e7..82f94b1635bf 100644
--- a/drivers/net/ethernet/3com/3c589_cs.c
+++ b/drivers/net/ethernet/3com/3c589_cs.c
@@ -480,7 +480,7 @@ static void tc589_reset(struct net_device *dev)
static void netdev_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
snprintf(info->bus_info, sizeof(info->bus_info),
"PCMCIA 0x%lx", dev->base_addr);
}
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index ccf07667aa5e..082388bb6169 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -2959,13 +2959,13 @@ static void vortex_get_drvinfo(struct net_device *dev,
{
struct vortex_private *vp = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
if (VORTEX_PCI(vp)) {
- strlcpy(info->bus_info, pci_name(VORTEX_PCI(vp)),
+ strscpy(info->bus_info, pci_name(VORTEX_PCI(vp)),
sizeof(info->bus_info));
} else {
if (VORTEX_EISA(vp))
- strlcpy(info->bus_info, dev_name(vp->gendev),
+ strscpy(info->bus_info, dev_name(vp->gendev),
sizeof(info->bus_info));
else
snprintf(info->bus_info, sizeof(info->bus_info),
diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c
index cad4f354cc76..aaaff3ba43ef 100644
--- a/drivers/net/ethernet/3com/typhoon.c
+++ b/drivers/net/ethernet/3com/typhoon.c
@@ -969,12 +969,12 @@ typhoon_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
smp_rmb();
if (tp->card_state == Sleeping) {
- strlcpy(info->fw_version, "Sleep image",
+ strscpy(info->fw_version, "Sleep image",
sizeof(info->fw_version));
} else {
INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
if (typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
- strlcpy(info->fw_version, "Unknown runtime",
+ strscpy(info->fw_version, "Unknown runtime",
sizeof(info->fw_version));
} else {
u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
@@ -984,8 +984,8 @@ typhoon_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
}
}
- strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info));
+ strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info));
}
static int
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index 1f8acbba5b6b..af603256b724 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -579,9 +579,9 @@ static void ax_get_drvinfo(struct net_device *dev,
{
struct platform_device *pdev = to_platform_device(dev->dev.parent);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pdev->name, sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, pdev->name, sizeof(info->bus_info));
}
static u32 ax_get_msglevel(struct net_device *dev)
diff --git a/drivers/net/ethernet/8390/etherh.c b/drivers/net/ethernet/8390/etherh.c
index e7b879123bb1..05d39ecb97ff 100644
--- a/drivers/net/ethernet/8390/etherh.c
+++ b/drivers/net/ethernet/8390/etherh.c
@@ -555,9 +555,9 @@ static int __init etherm_addr(char *addr)
static void etherh_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, dev_name(dev->dev.parent),
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, dev_name(dev->dev.parent),
sizeof(info->bus_info));
}
diff --git a/drivers/net/ethernet/8390/mcf8390.c b/drivers/net/ethernet/8390/mcf8390.c
index 21047ae1bc3d..8a7918d33419 100644
--- a/drivers/net/ethernet/8390/mcf8390.c
+++ b/drivers/net/ethernet/8390/mcf8390.c
@@ -450,8 +450,7 @@ static int mcf8390_remove(struct platform_device *pdev)
unregister_netdev(dev);
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (mem)
- release_mem_region(mem->start, resource_size(mem));
+ release_mem_region(mem->start, resource_size(mem));
free_netdev(dev);
return 0;
}
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 9a55c1d5a0a1..1917da784191 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -121,6 +121,7 @@ config LANTIQ_XRX200
Support for the PMAC of the Gigabit switch (GSWIP) inside the
Lantiq / Intel VRX200 VDSL SoC
+source "drivers/net/ethernet/adi/Kconfig"
source "drivers/net/ethernet/litex/Kconfig"
source "drivers/net/ethernet/marvell/Kconfig"
source "drivers/net/ethernet/mediatek/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index c06e75ed4231..0d872d4efcd1 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_NET_VENDOR_8390) += 8390/
obj-$(CONFIG_NET_VENDOR_ACTIONS) += actions/
obj-$(CONFIG_NET_VENDOR_ADAPTEC) += adaptec/
obj-$(CONFIG_GRETH) += aeroflex/
+obj-$(CONFIG_NET_VENDOR_ADI) += adi/
obj-$(CONFIG_NET_VENDOR_AGERE) += agere/
obj-$(CONFIG_NET_VENDOR_ALACRITECH) += alacritech/
obj-$(CONFIG_NET_VENDOR_ALLWINNER) += allwinner/
diff --git a/drivers/net/ethernet/actions/owl-emac.c b/drivers/net/ethernet/actions/owl-emac.c
index 1cfdd01b4c2e..cd4d71b83c33 100644
--- a/drivers/net/ethernet/actions/owl-emac.c
+++ b/drivers/net/ethernet/actions/owl-emac.c
@@ -1576,7 +1576,7 @@ static int owl_emac_probe(struct platform_device *pdev)
netdev->watchdog_timeo = OWL_EMAC_TX_TIMEOUT;
netdev->netdev_ops = &owl_emac_netdev_ops;
netdev->ethtool_ops = &owl_emac_ethtool_ops;
- netif_napi_add(netdev, &priv->napi, owl_emac_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(netdev, &priv->napi, owl_emac_poll);
ret = devm_register_netdev(dev, netdev);
if (ret) {
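Note: the owl-emac hunk above drops the weight argument because
netif_napi_add() now applies the default NAPI weight internally; drivers
that need a different value use netif_napi_add_weight(). A minimal sketch::

    /* Default polling budget (NAPI_POLL_WEIGHT, i.e. 64 packets per poll): */
    netif_napi_add(netdev, &priv->napi, owl_emac_poll);

    /* Explicit, non-default weight: */
    netif_napi_add_weight(netdev, &priv->napi, owl_emac_poll, 16);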
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index 8f0a6b9c518e..857361c74f5d 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -1844,8 +1844,8 @@ static int check_if_running(struct net_device *dev)
static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct netdev_private *np = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static int get_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/adi/Kconfig b/drivers/net/ethernet/adi/Kconfig
new file mode 100644
index 000000000000..da3bdd302502
--- /dev/null
+++ b/drivers/net/ethernet/adi/Kconfig
@@ -0,0 +1,28 @@
+# SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+#
+# Analog Devices device configuration
+#
+
+config NET_VENDOR_ADI
+ bool "Analog Devices devices"
+ default y
+ depends on SPI
+ help
+ If you have a network (Ethernet) card belonging to this class, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about ADI devices. If you say Y, you will be asked
+ for your specific card in the following questions.
+
+if NET_VENDOR_ADI
+
+config ADIN1110
+ tristate "Analog Devices ADIN1110 MAC-PHY"
+ depends on SPI && NET_SWITCHDEV
+ select CRC8
+ help
+ Say yes here to build support for Analog Devices ADIN1110
+ Low Power 10BASE-T1L Ethernet MAC-PHY.
+
+endif # NET_VENDOR_ADI
diff --git a/drivers/net/ethernet/adi/Makefile b/drivers/net/ethernet/adi/Makefile
new file mode 100644
index 000000000000..d0383d94303c
--- /dev/null
+++ b/drivers/net/ethernet/adi/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+#
+# Makefile for the Analog Devices network device drivers.
+#
+
+obj-$(CONFIG_ADIN1110) += adin1110.o
diff --git a/drivers/net/ethernet/adi/adin1110.c b/drivers/net/ethernet/adi/adin1110.c
new file mode 100644
index 000000000000..aaee7c4248e6
--- /dev/null
+++ b/drivers/net/ethernet/adi/adin1110.c
@@ -0,0 +1,1697 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
+/* ADIN1110 Low Power 10BASE-T1L Ethernet MAC-PHY
+ * ADIN2111 2-Port Ethernet Switch with Integrated 10BASE-T1L PHY
+ *
+ * Copyright 2021 Analog Devices Inc.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include <net/switchdev.h>
+
+#include <asm/unaligned.h>
+
+#define ADIN1110_PHY_ID 0x1
+
+#define ADIN1110_RESET 0x03
+#define ADIN1110_SWRESET BIT(0)
+
+#define ADIN1110_CONFIG1 0x04
+#define ADIN1110_CONFIG1_SYNC BIT(15)
+
+#define ADIN1110_CONFIG2 0x06
+#define ADIN2111_P2_FWD_UNK2HOST BIT(12)
+#define ADIN2111_PORT_CUT_THRU_EN BIT(11)
+#define ADIN1110_CRC_APPEND BIT(5)
+#define ADIN1110_FWD_UNK2HOST BIT(2)
+
+#define ADIN1110_STATUS0 0x08
+
+#define ADIN1110_STATUS1 0x09
+#define ADIN2111_P2_RX_RDY BIT(17)
+#define ADIN1110_SPI_ERR BIT(10)
+#define ADIN1110_RX_RDY BIT(4)
+
+#define ADIN1110_IMASK1 0x0D
+#define ADIN2111_RX_RDY_IRQ BIT(17)
+#define ADIN1110_SPI_ERR_IRQ BIT(10)
+#define ADIN1110_RX_RDY_IRQ BIT(4)
+#define ADIN1110_TX_RDY_IRQ BIT(3)
+
+#define ADIN1110_MDIOACC 0x20
+#define ADIN1110_MDIO_TRDONE BIT(31)
+#define ADIN1110_MDIO_ST GENMASK(29, 28)
+#define ADIN1110_MDIO_OP GENMASK(27, 26)
+#define ADIN1110_MDIO_PRTAD GENMASK(25, 21)
+#define ADIN1110_MDIO_DEVAD GENMASK(20, 16)
+#define ADIN1110_MDIO_DATA GENMASK(15, 0)
+
+#define ADIN1110_TX_FSIZE 0x30
+#define ADIN1110_TX 0x31
+#define ADIN1110_TX_SPACE 0x32
+
+#define ADIN1110_MAC_ADDR_FILTER_UPR 0x50
+#define ADIN2111_MAC_ADDR_APPLY2PORT2 BIT(31)
+#define ADIN1110_MAC_ADDR_APPLY2PORT BIT(30)
+#define ADIN2111_MAC_ADDR_TO_OTHER_PORT BIT(17)
+#define ADIN1110_MAC_ADDR_TO_HOST BIT(16)
+
+#define ADIN1110_MAC_ADDR_FILTER_LWR 0x51
+
+#define ADIN1110_MAC_ADDR_MASK_UPR 0x70
+#define ADIN1110_MAC_ADDR_MASK_LWR 0x71
+
+#define ADIN1110_RX_FSIZE 0x90
+#define ADIN1110_RX 0x91
+
+#define ADIN2111_RX_P2_FSIZE 0xC0
+#define ADIN2111_RX_P2 0xC1
+
+#define ADIN1110_CLEAR_STATUS0 0xFFF
+
+/* MDIO_OP codes */
+#define ADIN1110_MDIO_OP_WR 0x1
+#define ADIN1110_MDIO_OP_RD 0x3
+
+#define ADIN1110_CD BIT(7)
+#define ADIN1110_WRITE BIT(5)
+
+#define ADIN1110_MAX_BUFF 2048
+#define ADIN1110_MAX_FRAMES_READ 64
+#define ADIN1110_WR_HEADER_LEN 2
+#define ADIN1110_FRAME_HEADER_LEN 2
+#define ADIN1110_INTERNAL_SIZE_HEADER_LEN 2
+#define ADIN1110_RD_HEADER_LEN 3
+#define ADIN1110_REG_LEN 4
+#define ADIN1110_FEC_LEN 4
+
+#define ADIN1110_PHY_ID_VAL 0x0283BC91
+#define ADIN2111_PHY_ID_VAL 0x0283BCA1
+
+#define ADIN_MAC_MAX_PORTS 2
+#define ADIN_MAC_MAX_ADDR_SLOTS 16
+
+#define ADIN_MAC_MULTICAST_ADDR_SLOT 0
+#define ADIN_MAC_BROADCAST_ADDR_SLOT 1
+#define ADIN_MAC_P1_ADDR_SLOT 2
+#define ADIN_MAC_P2_ADDR_SLOT 3
+#define ADIN_MAC_FDB_ADDR_SLOT 4
+
+DECLARE_CRC8_TABLE(adin1110_crc_table);
+
+enum adin1110_chips_id {
+ ADIN1110_MAC = 0,
+ ADIN2111_MAC,
+};
+
+struct adin1110_cfg {
+ enum adin1110_chips_id id;
+ char name[MDIO_NAME_SIZE];
+ u32 phy_ids[PHY_MAX_ADDR];
+ u32 ports_nr;
+ u32 phy_id_val;
+};
+
+struct adin1110_port_priv {
+ struct adin1110_priv *priv;
+ struct net_device *netdev;
+ struct net_device *bridge;
+ struct phy_device *phydev;
+ struct work_struct tx_work;
+ u64 rx_packets;
+ u64 tx_packets;
+ u64 rx_bytes;
+ u64 tx_bytes;
+ struct work_struct rx_mode_work;
+ u32 flags;
+ struct sk_buff_head txq;
+ u32 nr;
+ u32 state;
+ struct adin1110_cfg *cfg;
+};
+
+struct adin1110_priv {
+ struct mutex lock; /* protect spi */
+ spinlock_t state_lock; /* protect RX mode */
+ struct mii_bus *mii_bus;
+ struct spi_device *spidev;
+ bool append_crc;
+ struct adin1110_cfg *cfg;
+ u32 tx_space;
+ u32 irq_mask;
+ bool forwarding;
+ int irq;
+ struct adin1110_port_priv *ports[ADIN_MAC_MAX_PORTS];
+ char mii_bus_name[MII_BUS_ID_SIZE];
+ u8 data[ADIN1110_MAX_BUFF] ____cacheline_aligned;
+};
+
+struct adin1110_switchdev_event_work {
+ struct work_struct work;
+ struct switchdev_notifier_fdb_info fdb_info;
+ struct adin1110_port_priv *port_priv;
+ unsigned long event;
+};
+
+static struct adin1110_cfg adin1110_cfgs[] = {
+ {
+ .id = ADIN1110_MAC,
+ .name = "adin1110",
+ .phy_ids = {1},
+ .ports_nr = 1,
+ .phy_id_val = ADIN1110_PHY_ID_VAL,
+ },
+ {
+ .id = ADIN2111_MAC,
+ .name = "adin2111",
+ .phy_ids = {1, 2},
+ .ports_nr = 2,
+ .phy_id_val = ADIN2111_PHY_ID_VAL,
+ },
+};
+
+static u8 adin1110_crc_data(u8 *data, u32 len)
+{
+ return crc8(adin1110_crc_table, data, len, 0);
+}
+
+static int adin1110_read_reg(struct adin1110_priv *priv, u16 reg, u32 *val)
+{
+ u32 header_len = ADIN1110_RD_HEADER_LEN;
+ u32 read_len = ADIN1110_REG_LEN;
+ struct spi_transfer t[2] = {0};
+ int ret;
+
+ priv->data[0] = ADIN1110_CD | FIELD_GET(GENMASK(12, 8), reg);
+ priv->data[1] = FIELD_GET(GENMASK(7, 0), reg);
+ priv->data[2] = 0x00;
+
+ if (priv->append_crc) {
+ priv->data[2] = adin1110_crc_data(&priv->data[0], 2);
+ priv->data[3] = 0x00;
+ header_len++;
+ }
+
+ t[0].tx_buf = &priv->data[0];
+ t[0].len = header_len;
+
+ if (priv->append_crc)
+ read_len++;
+
+ memset(&priv->data[header_len], 0, read_len);
+ t[1].rx_buf = &priv->data[header_len];
+ t[1].len = read_len;
+
+ ret = spi_sync_transfer(priv->spidev, t, 2);
+ if (ret)
+ return ret;
+
+ if (priv->append_crc) {
+ u8 recv_crc;
+ u8 crc;
+
+ crc = adin1110_crc_data(&priv->data[header_len],
+ ADIN1110_REG_LEN);
+ recv_crc = priv->data[header_len + ADIN1110_REG_LEN];
+
+ if (crc != recv_crc) {
+ dev_err_ratelimited(&priv->spidev->dev, "CRC error.");
+ return -EBADMSG;
+ }
+ }
+
+ *val = get_unaligned_be32(&priv->data[header_len]);
+
+ return ret;
+}
+
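+/* Write one 32-bit MAC register over SPI: a 2-byte command/address
+ * header (plus an optional header CRC byte), followed by the
+ * big-endian register value (plus an optional payload CRC byte).
+ */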
+static int adin1110_write_reg(struct adin1110_priv *priv, u16 reg, u32 val)
+{
+ u32 header_len = ADIN1110_WR_HEADER_LEN;
+ u32 write_len = ADIN1110_REG_LEN;
+
+ priv->data[0] = ADIN1110_CD | ADIN1110_WRITE | FIELD_GET(GENMASK(12, 8), reg);
+ priv->data[1] = FIELD_GET(GENMASK(7, 0), reg);
+
+ if (priv->append_crc) {
+ priv->data[2] = adin1110_crc_data(&priv->data[0], header_len);
+ header_len++;
+ }
+
+ put_unaligned_be32(val, &priv->data[header_len]);
+ if (priv->append_crc) {
+ priv->data[header_len + write_len] = adin1110_crc_data(&priv->data[header_len],
+ write_len);
+ write_len++;
+ }
+
+ return spi_write(priv->spidev, &priv->data[0], header_len + write_len);
+}
+
+static int adin1110_set_bits(struct adin1110_priv *priv, u16 reg,
+ unsigned long mask, unsigned long val)
+{
+ u32 write_val;
+ int ret;
+
+ ret = adin1110_read_reg(priv, reg, &write_val);
+ if (ret < 0)
+ return ret;
+
+ set_mask_bits(&write_val, mask, val);
+
+ return adin1110_write_reg(priv, reg, write_val);
+}
+
+static int adin1110_round_len(int len)
+{
+ /* can read/write only multiples of 4 bytes of payload */
+ len = ALIGN(len, 4);
+
+ /* NOTE: ADIN1110_WR_HEADER_LEN should be used for write ops. */
+ if (len + ADIN1110_RD_HEADER_LEN > ADIN1110_MAX_BUFF)
+ return -EINVAL;
+
+ return len;
+}
+
+static int adin1110_read_fifo(struct adin1110_port_priv *port_priv)
+{
+ struct adin1110_priv *priv = port_priv->priv;
+ u32 header_len = ADIN1110_RD_HEADER_LEN;
+ struct spi_transfer t[2] = {0};
+ u32 frame_size_no_fcs;
+ struct sk_buff *rxb;
+ u32 frame_size;
+ int round_len;
+ u16 reg;
+ int ret;
+
+ if (!port_priv->nr) {
+ reg = ADIN1110_RX;
+ ret = adin1110_read_reg(priv, ADIN1110_RX_FSIZE, &frame_size);
+ } else {
+ reg = ADIN2111_RX_P2;
+ ret = adin1110_read_reg(priv, ADIN2111_RX_P2_FSIZE,
+ &frame_size);
+ }
+
+ if (ret < 0)
+ return ret;
+
+ /* The read frame size includes the extra 2 bytes
+ * from the ADIN1110 frame header.
+ */
+ if (frame_size < ADIN1110_FRAME_HEADER_LEN + ADIN1110_FEC_LEN)
+ return ret;
+
+ round_len = adin1110_round_len(frame_size);
+ if (round_len < 0)
+ return ret;
+
+ frame_size_no_fcs = frame_size - ADIN1110_FRAME_HEADER_LEN - ADIN1110_FEC_LEN;
+
+ rxb = netdev_alloc_skb(port_priv->netdev, round_len);
+ if (!rxb)
+ return -ENOMEM;
+
+ memset(priv->data, 0, round_len + ADIN1110_RD_HEADER_LEN);
+
+ priv->data[0] = ADIN1110_CD | FIELD_GET(GENMASK(12, 8), reg);
+ priv->data[1] = FIELD_GET(GENMASK(7, 0), reg);
+
+ if (priv->append_crc) {
+ priv->data[2] = adin1110_crc_data(&priv->data[0], 2);
+ header_len++;
+ }
+
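+ /* Reserve room for the 2-byte ADIN1110 frame header that precedes
+ * the Ethernet frame in the RX FIFO; it is stripped with skb_pull()
+ * after the SPI transfer completes.
+ */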
+ skb_put(rxb, frame_size_no_fcs + ADIN1110_FRAME_HEADER_LEN);
+
+ t[0].tx_buf = &priv->data[0];
+ t[0].len = header_len;
+
+ t[1].rx_buf = &rxb->data[0];
+ t[1].len = round_len;
+
+ ret = spi_sync_transfer(priv->spidev, t, 2);
+ if (ret) {
+ kfree_skb(rxb);
+ return ret;
+ }
+
+ skb_pull(rxb, ADIN1110_FRAME_HEADER_LEN);
+ rxb->protocol = eth_type_trans(rxb, port_priv->netdev);
+
+ if ((port_priv->flags & IFF_ALLMULTI && rxb->pkt_type == PACKET_MULTICAST) ||
+ (port_priv->flags & IFF_BROADCAST && rxb->pkt_type == PACKET_BROADCAST))
+ rxb->offload_fwd_mark = 1;
+
+ netif_rx(rxb);
+
+ port_priv->rx_bytes += frame_size - ADIN1110_FRAME_HEADER_LEN;
+ port_priv->rx_packets++;
+
+ return 0;
+}
+
+static int adin1110_write_fifo(struct adin1110_port_priv *port_priv,
+ struct sk_buff *txb)
+{
+ struct adin1110_priv *priv = port_priv->priv;
+ u32 header_len = ADIN1110_WR_HEADER_LEN;
+ __be16 frame_header;
+ int padding = 0;
+ int padded_len;
+ int round_len;
+ int ret;
+
+ /* Pad the frame to the 64-byte minimum length; neither the MAC
+ * nor the PHY will otherwise add the required padding.
+ * The 4-byte frame check sequence (ADIN1110_FEC_LEN) is appended
+ * by the MAC internally.
+ */
+ if (txb->len + ADIN1110_FEC_LEN < 64)
+ padding = 64 - (txb->len + ADIN1110_FEC_LEN);
+
+ padded_len = txb->len + padding + ADIN1110_FRAME_HEADER_LEN;
+
+ round_len = adin1110_round_len(padded_len);
+ if (round_len < 0)
+ return round_len;
+
+ ret = adin1110_write_reg(priv, ADIN1110_TX_FSIZE, padded_len);
+ if (ret < 0)
+ return ret;
+
+ memset(priv->data, 0, round_len + ADIN1110_WR_HEADER_LEN);
+
+ priv->data[0] = ADIN1110_CD | ADIN1110_WRITE;
+ priv->data[0] |= FIELD_GET(GENMASK(12, 8), ADIN1110_TX);
+ priv->data[1] = FIELD_GET(GENMASK(7, 0), ADIN1110_TX);
+ if (priv->append_crc) {
+ priv->data[2] = adin1110_crc_data(&priv->data[0], 2);
+ header_len++;
+ }
+
+ /* record the egress port in the 2-byte frame header */
+ frame_header = cpu_to_be16(port_priv->nr);
+ memcpy(&priv->data[header_len], &frame_header,
+ ADIN1110_FRAME_HEADER_LEN);
+
+ memcpy(&priv->data[header_len + ADIN1110_FRAME_HEADER_LEN],
+ txb->data, txb->len);
+
+ ret = spi_write(priv->spidev, &priv->data[0], round_len + header_len);
+ if (ret < 0)
+ return ret;
+
+ port_priv->tx_bytes += txb->len;
+ port_priv->tx_packets++;
+
+ return 0;
+}
+
+static int adin1110_read_mdio_acc(struct adin1110_priv *priv)
+{
+ u32 val;
+ int ret;
+
+ mutex_lock(&priv->lock);
+ ret = adin1110_read_reg(priv, ADIN1110_MDIOACC, &val);
+ mutex_unlock(&priv->lock);
+ if (ret < 0)
+ return 0;
+
+ return val;
+}
+
+static int adin1110_mdio_read(struct mii_bus *bus, int phy_id, int reg)
+{
+ struct adin1110_priv *priv = bus->priv;
+ u32 val = 0;
+ int ret;
+
+ if (mdio_phy_id_is_c45(phy_id))
+ return -EOPNOTSUPP;
+
+ val |= FIELD_PREP(ADIN1110_MDIO_OP, ADIN1110_MDIO_OP_RD);
+ val |= FIELD_PREP(ADIN1110_MDIO_ST, 0x1);
+ val |= FIELD_PREP(ADIN1110_MDIO_PRTAD, phy_id);
+ val |= FIELD_PREP(ADIN1110_MDIO_DEVAD, reg);
+
+ /* write the clause 22 read command to the chip */
+ mutex_lock(&priv->lock);
+ ret = adin1110_write_reg(priv, ADIN1110_MDIOACC, val);
+ mutex_unlock(&priv->lock);
+ if (ret < 0)
+ return ret;
+
+ /* The ADIN1110_MDIO_TRDONE bit of the ADIN1110_MDIOACC
+ * register is set when the read is done.
+ * Once the transaction completes, the ADIN1110_MDIO_DATA
+ * bitfield of the ADIN1110_MDIOACC register contains
+ * the requested register value.
+ */
+ ret = readx_poll_timeout(adin1110_read_mdio_acc, priv, val,
+ (val & ADIN1110_MDIO_TRDONE), 10000, 30000);
+ if (ret < 0)
+ return ret;
+
+ return (val & ADIN1110_MDIO_DATA);
+}
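
The readx_poll_timeout() call above repeatedly invokes adin1110_read_mdio_acc() until TRDONE is set, sleeping 10 ms between polls and giving up after 30 ms. A rough open-coded equivalent, purely for illustration (example_poll_trdone is not part of the driver):

	static int example_poll_trdone(struct adin1110_priv *priv, u32 *val)
	{
		unsigned long timeout = jiffies + usecs_to_jiffies(30000);

		for (;;) {
			*val = adin1110_read_mdio_acc(priv);
			if (*val & ADIN1110_MDIO_TRDONE)
				return 0;
			if (time_after(jiffies, timeout))
				return -ETIMEDOUT;
			usleep_range(10000, 15000);	/* sleep between polls */
		}
	}
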
+
+static int adin1110_mdio_write(struct mii_bus *bus, int phy_id,
+ int reg, u16 reg_val)
+{
+ struct adin1110_priv *priv = bus->priv;
+ u32 val = 0;
+ int ret;
+
+ if (mdio_phy_id_is_c45(phy_id))
+ return -EOPNOTSUPP;
+
+ val |= FIELD_PREP(ADIN1110_MDIO_OP, ADIN1110_MDIO_OP_WR);
+ val |= FIELD_PREP(ADIN1110_MDIO_ST, 0x1);
+ val |= FIELD_PREP(ADIN1110_MDIO_PRTAD, phy_id);
+ val |= FIELD_PREP(ADIN1110_MDIO_DEVAD, reg);
+ val |= FIELD_PREP(ADIN1110_MDIO_DATA, reg_val);
+
+ /* write the clause 22 write command to the chip */
+ mutex_lock(&priv->lock);
+ ret = adin1110_write_reg(priv, ADIN1110_MDIOACC, val);
+ mutex_unlock(&priv->lock);
+ if (ret < 0)
+ return ret;
+
+ return readx_poll_timeout(adin1110_read_mdio_acc, priv, val,
+ (val & ADIN1110_MDIO_TRDONE), 10000, 30000);
+}
+
+/* The ADIN1110 MAC-PHY contains an ADIN1100 PHY.
+ * The ADIN2111 MAC-PHY contains two ADIN1100 PHYs.
+ * By registering a new MDIO bus we allow the PHY abstraction
+ * layer (PAL) to discover the encapsulated PHY and probe the
+ * ADIN1100 driver.
+ */
+static int adin1110_register_mdiobus(struct adin1110_priv *priv,
+ struct device *dev)
+{
+ struct mii_bus *mii_bus;
+ int ret;
+
+ mii_bus = devm_mdiobus_alloc(dev);
+ if (!mii_bus)
+ return -ENOMEM;
+
+ snprintf(priv->mii_bus_name, MII_BUS_ID_SIZE, "%s-%u",
+ priv->cfg->name, priv->spidev->chip_select);
+
+ mii_bus->name = priv->mii_bus_name;
+ mii_bus->read = adin1110_mdio_read;
+ mii_bus->write = adin1110_mdio_write;
+ mii_bus->priv = priv;
+ mii_bus->parent = dev;
+ mii_bus->phy_mask = ~((u32)GENMASK(2, 0));
+ mii_bus->probe_capabilities = MDIOBUS_C22;
+ snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
+
+ ret = devm_mdiobus_register(dev, mii_bus);
+ if (ret)
+ return ret;
+
+ priv->mii_bus = mii_bus;
+
+ return 0;
+}
+
+static bool adin1110_port_rx_ready(struct adin1110_port_priv *port_priv,
+ u32 status)
+{
+ if (!netif_oper_up(port_priv->netdev))
+ return false;
+
+ if (!port_priv->nr)
+ return !!(status & ADIN1110_RX_RDY);
+ else
+ return !!(status & ADIN2111_P2_RX_RDY);
+}
+
+static void adin1110_read_frames(struct adin1110_port_priv *port_priv,
+ unsigned int budget)
+{
+ struct adin1110_priv *priv = port_priv->priv;
+ u32 status1;
+ int ret;
+
+ while (budget) {
+ ret = adin1110_read_reg(priv, ADIN1110_STATUS1, &status1);
+ if (ret < 0)
+ return;
+
+ if (!adin1110_port_rx_ready(port_priv, status1))
+ break;
+
+ ret = adin1110_read_fifo(port_priv);
+ if (ret < 0)
+ return;
+
+ budget--;
+ }
+}
+
+static void adin1110_wake_queues(struct adin1110_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->cfg->ports_nr; i++)
+ netif_wake_queue(priv->ports[i]->netdev);
+}
+
+static irqreturn_t adin1110_irq(int irq, void *p)
+{
+ struct adin1110_priv *priv = p;
+ u32 status1;
+ u32 val;
+ int ret;
+ int i;
+
+ mutex_lock(&priv->lock);
+
+ ret = adin1110_read_reg(priv, ADIN1110_STATUS1, &status1);
+ if (ret < 0)
+ goto out;
+
+ if (priv->append_crc && (status1 & ADIN1110_SPI_ERR))
+ dev_warn_ratelimited(&priv->spidev->dev,
+ "SPI CRC error on write.\n");
+
+ ret = adin1110_read_reg(priv, ADIN1110_TX_SPACE, &val);
+ if (ret < 0)
+ goto out;
+
+ /* TX FIFO space is expressed in half-words */
+ priv->tx_space = 2 * val;
+
+ for (i = 0; i < priv->cfg->ports_nr; i++) {
+ if (adin1110_port_rx_ready(priv->ports[i], status1))
+ adin1110_read_frames(priv->ports[i],
+ ADIN1110_MAX_FRAMES_READ);
+ }
+
+ /* clear IRQ sources */
+ adin1110_write_reg(priv, ADIN1110_STATUS0, ADIN1110_CLEAR_STATUS0);
+ adin1110_write_reg(priv, ADIN1110_STATUS1, priv->irq_mask);
+
+out:
+ mutex_unlock(&priv->lock);
+
+ if (priv->tx_space > 0 && ret >= 0)
+ adin1110_wake_queues(priv);
+
+ return IRQ_HANDLED;
+}
+
+/* The ADIN1110 can filter up to 16 MAC addresses; mac_nr is the filter slot used. */
+static int adin1110_write_mac_address(struct adin1110_port_priv *port_priv,
+ int mac_nr, const u8 *addr,
+ u8 *mask, u32 port_rules)
+{
+ struct adin1110_priv *priv = port_priv->priv;
+ u32 offset = mac_nr * 2;
+ u32 port_rules_mask;
+ int ret;
+ u32 val;
+
+ if (!port_priv->nr)
+ port_rules_mask = ADIN1110_MAC_ADDR_APPLY2PORT;
+ else
+ port_rules_mask = ADIN2111_MAC_ADDR_APPLY2PORT2;
+
+ if (port_rules & port_rules_mask)
+ port_rules_mask |= ADIN1110_MAC_ADDR_TO_HOST | ADIN2111_MAC_ADDR_TO_OTHER_PORT;
+
+ port_rules_mask |= GENMASK(15, 0);
+ val = port_rules | get_unaligned_be16(&addr[0]);
+ ret = adin1110_set_bits(priv, ADIN1110_MAC_ADDR_FILTER_UPR + offset,
+ port_rules_mask, val);
+ if (ret < 0)
+ return ret;
+
+ val = get_unaligned_be32(&addr[2]);
+ ret = adin1110_write_reg(priv,
+ ADIN1110_MAC_ADDR_FILTER_LWR + offset, val);
+ if (ret < 0)
+ return ret;
+
+ /* Only the first two MAC address slots support masking. */
+ if (mac_nr < ADIN_MAC_P1_ADDR_SLOT) {
+ val = get_unaligned_be16(&mask[0]);
+ ret = adin1110_write_reg(priv,
+ ADIN1110_MAC_ADDR_MASK_UPR + offset,
+ val);
+ if (ret < 0)
+ return ret;
+
+ val = get_unaligned_be32(&mask[2]);
+ return adin1110_write_reg(priv,
+ ADIN1110_MAC_ADDR_MASK_LWR + offset,
+ val);
+ }
+
+ return 0;
+}
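
Each of the 16 filter slots owns a pair of UPR/LWR registers, which is why the slot number is scaled by two above. A tiny sketch of that mapping (example_slot_regs is hypothetical, for illustration only):

	static void example_slot_regs(int mac_nr, u32 *upr, u32 *lwr)
	{
		u32 offset = mac_nr * 2;	/* two registers per slot */

		*upr = ADIN1110_MAC_ADDR_FILTER_UPR + offset;	/* rules + MAC bytes 0..1 */
		*lwr = ADIN1110_MAC_ADDR_FILTER_LWR + offset;	/* MAC bytes 2..5 */
	}
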
+
+static int adin1110_clear_mac_address(struct adin1110_priv *priv, int mac_nr)
+{
+ u32 offset = mac_nr * 2;
+ int ret;
+
+ ret = adin1110_write_reg(priv, ADIN1110_MAC_ADDR_FILTER_UPR + offset, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = adin1110_write_reg(priv, ADIN1110_MAC_ADDR_FILTER_LWR + offset, 0);
+ if (ret < 0)
+ return ret;
+
+ /* only the first two MAC address slots are maskable */
+ if (mac_nr <= 1) {
+ ret = adin1110_write_reg(priv, ADIN1110_MAC_ADDR_MASK_UPR + offset, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = adin1110_write_reg(priv, ADIN1110_MAC_ADDR_MASK_LWR + offset, 0);
+ }
+
+ return ret;
+}
+
+static u32 adin1110_port_rules(struct adin1110_port_priv *port_priv,
+ bool fw_to_host,
+ bool fw_to_other_port)
+{
+ u32 port_rules = 0;
+
+ if (!port_priv->nr)
+ port_rules |= ADIN1110_MAC_ADDR_APPLY2PORT;
+ else
+ port_rules |= ADIN2111_MAC_ADDR_APPLY2PORT2;
+
+ if (fw_to_host)
+ port_rules |= ADIN1110_MAC_ADDR_TO_HOST;
+
+ if (fw_to_other_port && port_priv->priv->forwarding)
+ port_rules |= ADIN2111_MAC_ADDR_TO_OTHER_PORT;
+
+ return port_rules;
+}
+
+static int adin1110_multicast_filter(struct adin1110_port_priv *port_priv,
+ int mac_nr, bool accept_multicast)
+{
+ u8 mask[ETH_ALEN] = {0};
+ u8 mac[ETH_ALEN] = {0};
+ u32 port_rules = 0;
+
+ mask[0] = BIT(0);
+ mac[0] = BIT(0);
+
+ if (accept_multicast && port_priv->state == BR_STATE_FORWARDING)
+ port_rules = adin1110_port_rules(port_priv, true, true);
+
+ return adin1110_write_mac_address(port_priv, mac_nr, mac,
+ mask, port_rules);
+}
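
The single-bit address/mask pair above turns the slot into a catch-all multicast rule: only the I/G bit (bit 0 of the first destination octet) is compared. A sketch of the resulting match, illustrative only and not driver code:

	static bool example_matches_multicast_rule(const u8 *da)
	{
		/* mac[0] == BIT(0), mask[0] == BIT(0): match any multicast DA */
		return (da[0] & BIT(0)) == BIT(0);
	}
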
+
+static int adin1110_broadcasts_filter(struct adin1110_port_priv *port_priv,
+ int mac_nr, bool accept_broadcast)
+{
+ u32 port_rules = 0;
+ u8 mask[ETH_ALEN];
+
+ memset(mask, 0xFF, ETH_ALEN);
+
+ if (accept_broadcast && port_priv->state == BR_STATE_FORWARDING)
+ port_rules = adin1110_port_rules(port_priv, true, true);
+
+ return adin1110_write_mac_address(port_priv, mac_nr, mask,
+ mask, port_rules);
+}
+
+static int adin1110_set_mac_address(struct net_device *netdev,
+ const unsigned char *dev_addr)
+{
+ struct adin1110_port_priv *port_priv = netdev_priv(netdev);
+ u8 mask[ETH_ALEN];
+ u32 port_rules;
+ u32 mac_slot;
+
+ if (!is_valid_ether_addr(dev_addr))
+ return -EADDRNOTAVAIL;
+
+ eth_hw_addr_set(netdev, dev_addr);
+ memset(mask, 0xFF, ETH_ALEN);
+
+ mac_slot = (!port_priv->nr) ? ADIN_MAC_P1_ADDR_SLOT : ADIN_MAC_P2_ADDR_SLOT;
+ port_rules = adin1110_port_rules(port_priv, true, false);
+
+ return adin1110_write_mac_address(port_priv, mac_slot, netdev->dev_addr,
+ mask, port_rules);
+}
+
+static int adin1110_ndo_set_mac_address(struct net_device *netdev, void *addr)
+{
+ struct sockaddr *sa = addr;
+ int ret;
+
+ ret = eth_prepare_mac_addr_change(netdev, addr);
+ if (ret < 0)
+ return ret;
+
+ return adin1110_set_mac_address(netdev, sa->sa_data);
+}
+
+static int adin1110_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
+{
+ if (!netif_running(netdev))
+ return -EINVAL;
+
+ return phy_do_ioctl(netdev, rq, cmd);
+}
+
+static int adin1110_set_promisc_mode(struct adin1110_port_priv *port_priv,
+ bool promisc)
+{
+ struct adin1110_priv *priv = port_priv->priv;
+ u32 mask;
+
+ if (port_priv->state != BR_STATE_FORWARDING)
+ promisc = false;
+
+ if (!port_priv->nr)
+ mask = ADIN1110_FWD_UNK2HOST;
+ else
+ mask = ADIN2111_P2_FWD_UNK2HOST;
+
+ return adin1110_set_bits(priv, ADIN1110_CONFIG2,
+ mask, promisc ? mask : 0);
+}
+
+static int adin1110_setup_rx_mode(struct adin1110_port_priv *port_priv)
+{
+ int ret;
+
+ ret = adin1110_set_promisc_mode(port_priv,
+ !!(port_priv->flags & IFF_PROMISC));
+ if (ret < 0)
+ return ret;
+
+ ret = adin1110_multicast_filter(port_priv, ADIN_MAC_MULTICAST_ADDR_SLOT,
+ !!(port_priv->flags & IFF_ALLMULTI));
+ if (ret < 0)
+ return ret;
+
+ ret = adin1110_broadcasts_filter(port_priv,
+ ADIN_MAC_BROADCAST_ADDR_SLOT,
+ !!(port_priv->flags & IFF_BROADCAST));
+ if (ret < 0)
+ return ret;
+
+ return adin1110_set_bits(port_priv->priv, ADIN1110_CONFIG1,
+ ADIN1110_CONFIG1_SYNC, ADIN1110_CONFIG1_SYNC);
+}
+
+static bool adin1110_can_offload_forwarding(struct adin1110_priv *priv)
+{
+ int i;
+
+ if (priv->cfg->id != ADIN2111_MAC)
+ return false;
+
+ /* Can't enable forwarding if ports do not belong to the same bridge */
+ if (priv->ports[0]->bridge != priv->ports[1]->bridge || !priv->ports[0]->bridge)
+ return false;
+
+ /* Can't enable forwarding if there is a port
+ * that has been blocked by STP.
+ */
+ for (i = 0; i < priv->cfg->ports_nr; i++) {
+ if (priv->ports[i]->state != BR_STATE_FORWARDING)
+ return false;
+ }
+
+ return true;
+}
+
+static void adin1110_rx_mode_work(struct work_struct *work)
+{
+ struct adin1110_port_priv *port_priv;
+ struct adin1110_priv *priv;
+
+ port_priv = container_of(work, struct adin1110_port_priv, rx_mode_work);
+ priv = port_priv->priv;
+
+ mutex_lock(&priv->lock);
+ adin1110_setup_rx_mode(port_priv);
+ mutex_unlock(&priv->lock);
+}
+
+static void adin1110_set_rx_mode(struct net_device *dev)
+{
+ struct adin1110_port_priv *port_priv = netdev_priv(dev);
+ struct adin1110_priv *priv = port_priv->priv;
+
+ spin_lock(&priv->state_lock);
+
+ port_priv->flags = dev->flags;
+ schedule_work(&port_priv->rx_mode_work);
+
+ spin_unlock(&priv->state_lock);
+}
+
+static int adin1110_net_open(struct net_device *net_dev)
+{
+ struct adin1110_port_priv *port_priv = netdev_priv(net_dev);
+ struct adin1110_priv *priv = port_priv->priv;
+ u32 val;
+ int ret;
+
+ mutex_lock(&priv->lock);
+
+ /* Configure MAC to compute and append the FCS itself. */
+ ret = adin1110_write_reg(priv, ADIN1110_CONFIG2, ADIN1110_CRC_APPEND);
+ if (ret < 0)
+ goto out;
+
+ val = ADIN1110_TX_RDY_IRQ | ADIN1110_RX_RDY_IRQ | ADIN1110_SPI_ERR_IRQ;
+ if (priv->cfg->id == ADIN2111_MAC)
+ val |= ADIN2111_RX_RDY_IRQ;
+
+ priv->irq_mask = val;
+ ret = adin1110_write_reg(priv, ADIN1110_IMASK1, ~val);
+ if (ret < 0) {
+ netdev_err(net_dev, "Failed to enable chip IRQs: %d\n", ret);
+ goto out;
+ }
+
+ ret = adin1110_read_reg(priv, ADIN1110_TX_SPACE, &val);
+ if (ret < 0) {
+ netdev_err(net_dev, "Failed to read TX FIFO space: %d\n", ret);
+ goto out;
+ }
+
+ priv->tx_space = 2 * val;
+
+ port_priv->state = BR_STATE_FORWARDING;
+ ret = adin1110_set_mac_address(net_dev, net_dev->dev_addr);
+ if (ret < 0) {
+ netdev_err(net_dev, "Could not set MAC address: %pM, %d\n",
+ net_dev->dev_addr, ret);
+ goto out;
+ }
+
+ ret = adin1110_set_bits(priv, ADIN1110_CONFIG1, ADIN1110_CONFIG1_SYNC,
+ ADIN1110_CONFIG1_SYNC);
+
+out:
+ mutex_unlock(&priv->lock);
+
+ if (ret < 0)
+ return ret;
+
+ phy_start(port_priv->phydev);
+
+ netif_start_queue(net_dev);
+
+ return 0;
+}
+
+static int adin1110_net_stop(struct net_device *net_dev)
+{
+ struct adin1110_port_priv *port_priv = netdev_priv(net_dev);
+ struct adin1110_priv *priv = port_priv->priv;
+ u32 mask;
+ int ret;
+
+ mask = !port_priv->nr ? ADIN2111_RX_RDY_IRQ : ADIN1110_RX_RDY_IRQ;
+
+ /* Disable RX RDY IRQs */
+ mutex_lock(&priv->lock);
+ ret = adin1110_set_bits(priv, ADIN1110_IMASK1, mask, mask);
+ mutex_unlock(&priv->lock);
+ if (ret < 0)
+ return ret;
+
+ netif_stop_queue(port_priv->netdev);
+ flush_work(&port_priv->tx_work);
+ phy_stop(port_priv->phydev);
+
+ return 0;
+}
+
+static void adin1110_tx_work(struct work_struct *work)
+{
+ struct adin1110_port_priv *port_priv;
+ struct adin1110_priv *priv;
+ struct sk_buff *txb;
+ int ret;
+
+ port_priv = container_of(work, struct adin1110_port_priv, tx_work);
+ priv = port_priv->priv;
+
+ mutex_lock(&priv->lock);
+
+ while ((txb = skb_dequeue(&port_priv->txq))) {
+ ret = adin1110_write_fifo(port_priv, txb);
+ if (ret < 0)
+ dev_err_ratelimited(&priv->spidev->dev,
+ "Frame write error: %d\n", ret);
+
+ dev_kfree_skb(txb);
+ }
+
+ mutex_unlock(&priv->lock);
+}
+
+static netdev_tx_t adin1110_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct adin1110_port_priv *port_priv = netdev_priv(dev);
+ struct adin1110_priv *priv = port_priv->priv;
+ netdev_tx_t netdev_ret = NETDEV_TX_OK;
+ u32 tx_space_needed;
+
+ tx_space_needed = skb->len + ADIN1110_FRAME_HEADER_LEN + ADIN1110_INTERNAL_SIZE_HEADER_LEN;
+ if (tx_space_needed > priv->tx_space) {
+ netif_stop_queue(dev);
+ netdev_ret = NETDEV_TX_BUSY;
+ } else {
+ priv->tx_space -= tx_space_needed;
+ skb_queue_tail(&port_priv->txq, skb);
+ }
+
+ schedule_work(&port_priv->tx_work);
+
+ return netdev_ret;
+}
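
TX flow control works on a byte budget: the chip reports free FIFO space in 16-bit half-words (doubled in the IRQ handler above), and every queued frame reserves its length plus the two driver headers. A small sketch of that check, assuming the helper name example_can_queue is illustrative:

	static bool example_can_queue(u32 tx_space, const struct sk_buff *skb)
	{
		u32 needed = skb->len + ADIN1110_FRAME_HEADER_LEN +
			     ADIN1110_INTERNAL_SIZE_HEADER_LEN;

		return needed <= tx_space;	/* otherwise stop the queue */
	}
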
+
+static void adin1110_ndo_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *storage)
+{
+ struct adin1110_port_priv *port_priv = netdev_priv(dev);
+
+ storage->rx_packets = port_priv->rx_packets;
+ storage->tx_packets = port_priv->tx_packets;
+
+ storage->rx_bytes = port_priv->rx_bytes;
+ storage->tx_bytes = port_priv->tx_bytes;
+}
+
+static int adin1110_port_get_port_parent_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid)
+{
+ struct adin1110_port_priv *port_priv = netdev_priv(dev);
+ struct adin1110_priv *priv = port_priv->priv;
+
+ ppid->id_len = strnlen(priv->mii_bus_name, MAX_PHYS_ITEM_ID_LEN);
+ memcpy(ppid->id, priv->mii_bus_name, ppid->id_len);
+
+ return 0;
+}
+
+static int adin1110_ndo_get_phys_port_name(struct net_device *dev,
+ char *name, size_t len)
+{
+ struct adin1110_port_priv *port_priv = netdev_priv(dev);
+ int err;
+
+ err = snprintf(name, len, "p%d", port_priv->nr);
+ if (err >= len)
+ return -EINVAL;
+
+ return 0;
+}
+
+static const struct net_device_ops adin1110_netdev_ops = {
+ .ndo_open = adin1110_net_open,
+ .ndo_stop = adin1110_net_stop,
+ .ndo_eth_ioctl = adin1110_ioctl,
+ .ndo_start_xmit = adin1110_start_xmit,
+ .ndo_set_mac_address = adin1110_ndo_set_mac_address,
+ .ndo_set_rx_mode = adin1110_set_rx_mode,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_get_stats64 = adin1110_ndo_get_stats64,
+ .ndo_get_port_parent_id = adin1110_port_get_port_parent_id,
+ .ndo_get_phys_port_name = adin1110_ndo_get_phys_port_name,
+};
+
+static void adin1110_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *di)
+{
+ strscpy(di->driver, "ADIN1110", sizeof(di->driver));
+ strscpy(di->bus_info, dev_name(dev->dev.parent), sizeof(di->bus_info));
+}
+
+static const struct ethtool_ops adin1110_ethtool_ops = {
+ .get_drvinfo = adin1110_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+};
+
+static void adin1110_adjust_link(struct net_device *dev)
+{
+ struct phy_device *phydev = dev->phydev;
+
+ if (!phydev->link)
+ phy_print_status(phydev);
+}
+
+/* The PHY ID is also stored in the MAC registers,
+ * so check the SPI connection by reading it.
+ */
+static int adin1110_check_spi(struct adin1110_priv *priv)
+{
+ int ret;
+ u32 val;
+
+ ret = adin1110_read_reg(priv, ADIN1110_PHY_ID, &val);
+ if (ret < 0)
+ return ret;
+
+ if (val != priv->cfg->phy_id_val) {
+ dev_err(&priv->spidev->dev, "PHY ID expected: %x, read: %x\n",
+ priv->cfg->phy_id_val, val);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int adin1110_hw_forwarding(struct adin1110_priv *priv, bool enable)
+{
+ int ret;
+ int i;
+
+ priv->forwarding = enable;
+
+ if (!priv->forwarding) {
+ for (i = ADIN_MAC_FDB_ADDR_SLOT; i < ADIN_MAC_MAX_ADDR_SLOTS; i++) {
+ ret = adin1110_clear_mac_address(priv, i);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+	/* Forwarding is optimised when the MAC runs in cut-through mode. */
+ ret = adin1110_set_bits(priv, ADIN1110_CONFIG2,
+ ADIN2111_PORT_CUT_THRU_EN,
+ priv->forwarding ? ADIN2111_PORT_CUT_THRU_EN : 0);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < priv->cfg->ports_nr; i++) {
+ ret = adin1110_setup_rx_mode(priv->ports[i]);
+ if (ret < 0)
+ return ret;
+ }
+
+ return ret;
+}
+
+static int adin1110_port_bridge_join(struct adin1110_port_priv *port_priv,
+ struct net_device *bridge)
+{
+ struct adin1110_priv *priv = port_priv->priv;
+ int ret;
+
+ port_priv->bridge = bridge;
+
+ if (adin1110_can_offload_forwarding(priv)) {
+ mutex_lock(&priv->lock);
+ ret = adin1110_hw_forwarding(priv, true);
+ mutex_unlock(&priv->lock);
+
+ if (ret < 0)
+ return ret;
+ }
+
+ return adin1110_set_mac_address(port_priv->netdev, bridge->dev_addr);
+}
+
+static int adin1110_port_bridge_leave(struct adin1110_port_priv *port_priv,
+ struct net_device *bridge)
+{
+ struct adin1110_priv *priv = port_priv->priv;
+ int ret;
+
+ port_priv->bridge = NULL;
+
+ mutex_lock(&priv->lock);
+ ret = adin1110_hw_forwarding(priv, false);
+ mutex_unlock(&priv->lock);
+
+ return ret;
+}
+
+static int adin1110_netdevice_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct adin1110_port_priv *port_priv = netdev_priv(dev);
+ struct netdev_notifier_changeupper_info *info = ptr;
+ int ret = 0;
+
+ switch (event) {
+ case NETDEV_CHANGEUPPER:
+ if (netif_is_bridge_master(info->upper_dev)) {
+ if (info->linking)
+ ret = adin1110_port_bridge_join(port_priv, info->upper_dev);
+ else
+ ret = adin1110_port_bridge_leave(port_priv, info->upper_dev);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return notifier_from_errno(ret);
+}
+
+static struct notifier_block adin1110_netdevice_nb = {
+ .notifier_call = adin1110_netdevice_event,
+};
+
+static void adin1110_disconnect_phy(void *data)
+{
+ phy_disconnect(data);
+}
+
+static bool adin1110_port_dev_check(const struct net_device *dev)
+{
+ return dev->netdev_ops == &adin1110_netdev_ops;
+}
+
+static int adin1110_port_set_forwarding_state(struct adin1110_port_priv *port_priv)
+{
+ struct adin1110_priv *priv = port_priv->priv;
+ int ret;
+
+ port_priv->state = BR_STATE_FORWARDING;
+
+ mutex_lock(&priv->lock);
+ ret = adin1110_set_mac_address(port_priv->netdev,
+ port_priv->netdev->dev_addr);
+ if (ret < 0)
+ goto out;
+
+ if (adin1110_can_offload_forwarding(priv))
+ ret = adin1110_hw_forwarding(priv, true);
+ else
+ ret = adin1110_setup_rx_mode(port_priv);
+out:
+ mutex_unlock(&priv->lock);
+
+ return ret;
+}
+
+static int adin1110_port_set_blocking_state(struct adin1110_port_priv *port_priv)
+{
+ u8 mac[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00, 0x00, 0x00};
+ struct adin1110_priv *priv = port_priv->priv;
+ u8 mask[ETH_ALEN];
+ u32 port_rules;
+ int mac_slot;
+ int ret;
+
+ port_priv->state = BR_STATE_BLOCKING;
+
+ mutex_lock(&priv->lock);
+
+ mac_slot = (!port_priv->nr) ? ADIN_MAC_P1_ADDR_SLOT : ADIN_MAC_P2_ADDR_SLOT;
+ ret = adin1110_clear_mac_address(priv, mac_slot);
+ if (ret < 0)
+ goto out;
+
+ ret = adin1110_hw_forwarding(priv, false);
+ if (ret < 0)
+ goto out;
+
+ /* Allow only BPDUs to be passed to the CPU */
+ memset(mask, 0xFF, ETH_ALEN);
+ port_rules = adin1110_port_rules(port_priv, true, false);
+ ret = adin1110_write_mac_address(port_priv, mac_slot, mac,
+ mask, port_rules);
+out:
+ mutex_unlock(&priv->lock);
+
+ return ret;
+}
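
In the blocking state only frames sent to the IEEE 802.1D reserved group address installed above keep reaching the host. A sketch of that destination check, for illustration only:

	static bool example_is_bpdu_da(const u8 *da)
	{
		static const u8 stp_da[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00, 0x00, 0x00};

		return ether_addr_equal(da, stp_da);
	}
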
+
+/* The ADIN1110/2111 has no native STP support.
+ * Listen for bridge core state changes and either
+ * allow all frames to pass or only the BPDUs.
+ */
+static int adin1110_port_attr_stp_state_set(struct adin1110_port_priv *port_priv,
+ u8 state)
+{
+ switch (state) {
+ case BR_STATE_FORWARDING:
+ return adin1110_port_set_forwarding_state(port_priv);
+ case BR_STATE_LEARNING:
+ case BR_STATE_LISTENING:
+ case BR_STATE_DISABLED:
+ case BR_STATE_BLOCKING:
+ return adin1110_port_set_blocking_state(port_priv);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int adin1110_port_attr_set(struct net_device *dev, const void *ctx,
+ const struct switchdev_attr *attr,
+ struct netlink_ext_ack *extack)
+{
+ struct adin1110_port_priv *port_priv = netdev_priv(dev);
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
+ return adin1110_port_attr_stp_state_set(port_priv,
+ attr->u.stp_state);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int adin1110_switchdev_blocking_event(struct notifier_block *unused,
+ unsigned long event,
+ void *ptr)
+{
+ struct net_device *netdev = switchdev_notifier_info_to_dev(ptr);
+ int ret;
+
+ if (event == SWITCHDEV_PORT_ATTR_SET) {
+ ret = switchdev_handle_port_attr_set(netdev, ptr,
+ adin1110_port_dev_check,
+ adin1110_port_attr_set);
+
+ return notifier_from_errno(ret);
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block adin1110_switchdev_blocking_notifier = {
+ .notifier_call = adin1110_switchdev_blocking_event,
+};
+
+static void adin1110_fdb_offload_notify(struct net_device *netdev,
+ struct switchdev_notifier_fdb_info *rcv)
+{
+ struct switchdev_notifier_fdb_info info = {};
+
+ info.addr = rcv->addr;
+ info.vid = rcv->vid;
+ info.offloaded = true;
+ call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED,
+ netdev, &info.info, NULL);
+}
+
+static int adin1110_fdb_add(struct adin1110_port_priv *port_priv,
+ struct switchdev_notifier_fdb_info *fdb)
+{
+ struct adin1110_priv *priv = port_priv->priv;
+ struct adin1110_port_priv *other_port;
+ u8 mask[ETH_ALEN];
+ u32 port_rules;
+ int mac_nr;
+ u32 val;
+ int ret;
+
+ netdev_dbg(port_priv->netdev,
+ "DEBUG: %s: MACID = %pM vid = %u flags = %u %u -- port %d\n",
+ __func__, fdb->addr, fdb->vid, fdb->added_by_user,
+ fdb->offloaded, port_priv->nr);
+
+ if (!priv->forwarding)
+ return 0;
+
+ if (fdb->is_local)
+ return -EINVAL;
+
+ /* Find free FDB slot on device. */
+ for (mac_nr = ADIN_MAC_FDB_ADDR_SLOT; mac_nr < ADIN_MAC_MAX_ADDR_SLOTS; mac_nr++) {
+ ret = adin1110_read_reg(priv, ADIN1110_MAC_ADDR_FILTER_UPR + (mac_nr * 2), &val);
+ if (ret < 0)
+ return ret;
+ if (!val)
+ break;
+ }
+
+ if (mac_nr == ADIN_MAC_MAX_ADDR_SLOTS)
+ return -ENOMEM;
+
+ other_port = priv->ports[!port_priv->nr];
+ port_rules = adin1110_port_rules(port_priv, false, true);
+ memset(mask, 0xFF, ETH_ALEN);
+
+ return adin1110_write_mac_address(other_port, mac_nr, (u8 *)fdb->addr,
+ mask, port_rules);
+}
+
+static int adin1110_read_mac(struct adin1110_priv *priv, int mac_nr, u8 *addr)
+{
+ u32 val;
+ int ret;
+
+ ret = adin1110_read_reg(priv, ADIN1110_MAC_ADDR_FILTER_UPR + (mac_nr * 2), &val);
+ if (ret < 0)
+ return ret;
+
+ put_unaligned_be16(val, addr);
+
+ ret = adin1110_read_reg(priv, ADIN1110_MAC_ADDR_FILTER_LWR + (mac_nr * 2), &val);
+ if (ret < 0)
+ return ret;
+
+ put_unaligned_be32(val, addr + 2);
+
+ return 0;
+}
+
+static int adin1110_fdb_del(struct adin1110_port_priv *port_priv,
+ struct switchdev_notifier_fdb_info *fdb)
+{
+ struct adin1110_priv *priv = port_priv->priv;
+ u8 addr[ETH_ALEN];
+ int mac_nr;
+ int ret;
+
+ netdev_dbg(port_priv->netdev,
+ "DEBUG: %s: MACID = %pM vid = %u flags = %u %u -- port %d\n",
+ __func__, fdb->addr, fdb->vid, fdb->added_by_user,
+ fdb->offloaded, port_priv->nr);
+
+ if (fdb->is_local)
+ return -EINVAL;
+
+ for (mac_nr = ADIN_MAC_FDB_ADDR_SLOT; mac_nr < ADIN_MAC_MAX_ADDR_SLOTS; mac_nr++) {
+ ret = adin1110_read_mac(priv, mac_nr, addr);
+ if (ret < 0)
+ return ret;
+
+ if (ether_addr_equal(addr, fdb->addr)) {
+ ret = adin1110_clear_mac_address(priv, mac_nr);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void adin1110_switchdev_event_work(struct work_struct *work)
+{
+ struct adin1110_switchdev_event_work *switchdev_work;
+ struct adin1110_port_priv *port_priv;
+ int ret;
+
+ switchdev_work = container_of(work, struct adin1110_switchdev_event_work, work);
+ port_priv = switchdev_work->port_priv;
+
+ mutex_lock(&port_priv->priv->lock);
+
+ switch (switchdev_work->event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ ret = adin1110_fdb_add(port_priv, &switchdev_work->fdb_info);
+ if (!ret)
+ adin1110_fdb_offload_notify(port_priv->netdev,
+ &switchdev_work->fdb_info);
+ break;
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ adin1110_fdb_del(port_priv, &switchdev_work->fdb_info);
+ break;
+ default:
+ break;
+ }
+
+ mutex_unlock(&port_priv->priv->lock);
+
+ kfree(switchdev_work->fdb_info.addr);
+ kfree(switchdev_work);
+ dev_put(port_priv->netdev);
+}
+
+/* called under rcu_read_lock() */
+static int adin1110_switchdev_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *netdev = switchdev_notifier_info_to_dev(ptr);
+ struct adin1110_port_priv *port_priv = netdev_priv(netdev);
+ struct adin1110_switchdev_event_work *switchdev_work;
+ struct switchdev_notifier_fdb_info *fdb_info = ptr;
+
+ if (!adin1110_port_dev_check(netdev))
+ return NOTIFY_DONE;
+
+ switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
+ if (WARN_ON(!switchdev_work))
+ return NOTIFY_BAD;
+
+ INIT_WORK(&switchdev_work->work, adin1110_switchdev_event_work);
+ switchdev_work->port_priv = port_priv;
+ switchdev_work->event = event;
+
+ switch (event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ memcpy(&switchdev_work->fdb_info, ptr,
+ sizeof(switchdev_work->fdb_info));
+ switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
+
+ if (!switchdev_work->fdb_info.addr)
+ goto err_addr_alloc;
+
+ ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
+ fdb_info->addr);
+ dev_hold(netdev);
+ break;
+ default:
+ kfree(switchdev_work);
+ return NOTIFY_DONE;
+ }
+
+ queue_work(system_long_wq, &switchdev_work->work);
+
+ return NOTIFY_DONE;
+
+err_addr_alloc:
+ kfree(switchdev_work);
+ return NOTIFY_BAD;
+}
+
+static struct notifier_block adin1110_switchdev_notifier = {
+ .notifier_call = adin1110_switchdev_event,
+};
+
+static void adin1110_unregister_notifiers(void *data)
+{
+ unregister_switchdev_blocking_notifier(&adin1110_switchdev_blocking_notifier);
+ unregister_switchdev_notifier(&adin1110_switchdev_notifier);
+ unregister_netdevice_notifier(&adin1110_netdevice_nb);
+}
+
+static int adin1110_setup_notifiers(struct adin1110_priv *priv)
+{
+ struct device *dev = &priv->spidev->dev;
+ int ret;
+
+ ret = register_netdevice_notifier(&adin1110_netdevice_nb);
+ if (ret < 0)
+ return ret;
+
+ ret = register_switchdev_notifier(&adin1110_switchdev_notifier);
+ if (ret < 0)
+ goto err_netdev;
+
+ ret = register_switchdev_blocking_notifier(&adin1110_switchdev_blocking_notifier);
+ if (ret < 0)
+ goto err_sdev;
+
+ return devm_add_action_or_reset(dev, adin1110_unregister_notifiers, NULL);
+
+err_sdev:
+ unregister_switchdev_notifier(&adin1110_switchdev_notifier);
+
+err_netdev:
+ unregister_netdevice_notifier(&adin1110_netdevice_nb);
+ return ret;
+}
+
+static int adin1110_probe_netdevs(struct adin1110_priv *priv)
+{
+ struct device *dev = &priv->spidev->dev;
+ struct adin1110_port_priv *port_priv;
+ struct net_device *netdev;
+ int ret;
+ int i;
+
+ for (i = 0; i < priv->cfg->ports_nr; i++) {
+ netdev = devm_alloc_etherdev(dev, sizeof(*port_priv));
+ if (!netdev)
+ return -ENOMEM;
+
+ port_priv = netdev_priv(netdev);
+ port_priv->netdev = netdev;
+ port_priv->priv = priv;
+ port_priv->cfg = priv->cfg;
+ port_priv->nr = i;
+ priv->ports[i] = port_priv;
+ SET_NETDEV_DEV(netdev, dev);
+
+ ret = device_get_ethdev_address(dev, netdev);
+ if (ret < 0)
+ return ret;
+
+ netdev->irq = priv->spidev->irq;
+ INIT_WORK(&port_priv->tx_work, adin1110_tx_work);
+ INIT_WORK(&port_priv->rx_mode_work, adin1110_rx_mode_work);
+ skb_queue_head_init(&port_priv->txq);
+
+ netif_carrier_off(netdev);
+
+ netdev->if_port = IF_PORT_10BASET;
+ netdev->netdev_ops = &adin1110_netdev_ops;
+ netdev->ethtool_ops = &adin1110_ethtool_ops;
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+ netdev->features |= NETIF_F_NETNS_LOCAL;
+
+ port_priv->phydev = get_phy_device(priv->mii_bus, i + 1, false);
+ if (IS_ERR(port_priv->phydev)) {
+ netdev_err(netdev, "Could not find PHY with device address: %d.\n", i);
+ return PTR_ERR(port_priv->phydev);
+ }
+
+ port_priv->phydev = phy_connect(netdev,
+ phydev_name(port_priv->phydev),
+ adin1110_adjust_link,
+ PHY_INTERFACE_MODE_INTERNAL);
+ if (IS_ERR(port_priv->phydev)) {
+ netdev_err(netdev, "Could not connect PHY with device address: %d.\n", i);
+ return PTR_ERR(port_priv->phydev);
+ }
+
+ ret = devm_add_action_or_reset(dev, adin1110_disconnect_phy,
+ port_priv->phydev);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* ADIN1110 INT_N pin will be used to signal the host */
+ ret = devm_request_threaded_irq(dev, priv->spidev->irq, NULL,
+ adin1110_irq,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ dev_name(dev), priv);
+ if (ret < 0)
+ return ret;
+
+ ret = adin1110_setup_notifiers(priv);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < priv->cfg->ports_nr; i++) {
+ ret = devm_register_netdev(dev, priv->ports[i]->netdev);
+ if (ret < 0) {
+ dev_err(dev, "Failed to register network device.\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int adin1110_probe(struct spi_device *spi)
+{
+ const struct spi_device_id *dev_id = spi_get_device_id(spi);
+ struct device *dev = &spi->dev;
+ struct adin1110_priv *priv;
+ int ret;
+
+ priv = devm_kzalloc(dev, sizeof(struct adin1110_priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->spidev = spi;
+ priv->cfg = &adin1110_cfgs[dev_id->driver_data];
+ spi->bits_per_word = 8;
+ spi->mode = SPI_MODE_0;
+
+ mutex_init(&priv->lock);
+ spin_lock_init(&priv->state_lock);
+
+ /* use of CRC on control and data transactions is pin dependent */
+ priv->append_crc = device_property_read_bool(dev, "adi,spi-crc");
+ if (priv->append_crc)
+ crc8_populate_msb(adin1110_crc_table, 0x7);
+
+ ret = adin1110_check_spi(priv);
+ if (ret < 0) {
+ dev_err(dev, "Probe SPI Read check failed: %d\n", ret);
+ return ret;
+ }
+
+ ret = adin1110_write_reg(priv, ADIN1110_RESET, ADIN1110_SWRESET);
+ if (ret < 0)
+ return ret;
+
+ ret = adin1110_register_mdiobus(priv, dev);
+ if (ret < 0) {
+ dev_err(dev, "Could not register MDIO bus %d\n", ret);
+ return ret;
+ }
+
+ return adin1110_probe_netdevs(priv);
+}
+
+static const struct of_device_id adin1110_match_table[] = {
+ { .compatible = "adi,adin1110" },
+ { .compatible = "adi,adin2111" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, adin1110_match_table);
+
+static const struct spi_device_id adin1110_spi_id[] = {
+ { .name = "adin1110", .driver_data = ADIN1110_MAC },
+ { .name = "adin2111", .driver_data = ADIN2111_MAC },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, adin1110_spi_id);
+
+static struct spi_driver adin1110_driver = {
+ .driver = {
+ .name = "adin1110",
+ .of_match_table = adin1110_match_table,
+ },
+ .probe = adin1110_probe,
+ .id_table = adin1110_spi_id,
+};
+module_spi_driver(adin1110_driver);
+
+MODULE_DESCRIPTION("ADIN1110 Network driver");
+MODULE_AUTHOR("Alexandru Tachici ");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index 447dc64a17e5..e104fb02817d 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -1112,9 +1112,9 @@ static void greth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *in
{
struct greth_private *greth = netdev_priv(dev);
- strlcpy(info->driver, dev_driver_string(greth->dev),
+ strscpy(info->driver, dev_driver_string(greth->dev),
sizeof(info->driver));
- strlcpy(info->bus_info, greth->dev->bus->name, sizeof(info->bus_info));
+ strscpy(info->bus_info, greth->dev->bus->name, sizeof(info->bus_info));
}
static void greth_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
@@ -1507,7 +1507,7 @@ static int greth_of_probe(struct platform_device *ofdev)
}
/* setup NAPI */
- netif_napi_add(dev, &greth->napi, greth_poll, 64);
+ netif_napi_add(dev, &greth->napi, greth_poll);
return 0;
diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c
index d19d1579c415..5fab589b3ddf 100644
--- a/drivers/net/ethernet/agere/et131x.c
+++ b/drivers/net/ethernet/agere/et131x.c
@@ -2952,8 +2952,8 @@ static void et131x_get_drvinfo(struct net_device *netdev,
{
struct et131x_adapter *adapter = netdev_priv(netdev);
- strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(adapter->pdev),
+ strscpy(info->driver, DRIVER_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(adapter->pdev),
sizeof(info->bus_info));
}
@@ -3969,7 +3969,7 @@ static int et131x_pci_setup(struct pci_dev *pdev,
et131x_init_send(adapter);
- netif_napi_add(netdev, &adapter->napi, et131x_poll, 64);
+ netif_napi_add(netdev, &adapter->napi, et131x_poll);
eth_hw_addr_set(netdev, adapter->addr);
diff --git a/drivers/net/ethernet/alacritech/slicoss.c b/drivers/net/ethernet/alacritech/slicoss.c
index ce353b0c02a3..a30d0f172986 100644
--- a/drivers/net/ethernet/alacritech/slicoss.c
+++ b/drivers/net/ethernet/alacritech/slicoss.c
@@ -1531,8 +1531,8 @@ static void slic_get_drvinfo(struct net_device *dev,
{
struct slic_device *sdev = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(sdev->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(sdev->pdev), sizeof(info->bus_info));
}
static const struct ethtool_ops slic_ethtool_ops = {
@@ -1803,7 +1803,7 @@ static int slic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto unmap;
}
- netif_napi_add(dev, &sdev->napi, slic_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(dev, &sdev->napi, slic_poll);
netif_carrier_off(dev);
err = register_netdev(dev);
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index 621ce742ad21..a94c62956eed 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -331,8 +331,8 @@ prepare_err:
static void emac_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, dev_name(&dev->dev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, dev_name(&dev->dev), sizeof(info->bus_info));
}
static u32 emac_get_msglevel(struct net_device *dev)
diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c
index 22fe98555b24..d7762da8b2c0 100644
--- a/drivers/net/ethernet/alteon/acenic.c
+++ b/drivers/net/ethernet/alteon/acenic.c
@@ -2691,12 +2691,12 @@ static void ace_get_drvinfo(struct net_device *dev,
{
struct ace_private *ap = netdev_priv(dev);
- strlcpy(info->driver, "acenic", sizeof(info->driver));
+ strscpy(info->driver, "acenic", sizeof(info->driver));
snprintf(info->fw_version, sizeof(info->version), "%i.%i.%i",
ap->firmware_major, ap->firmware_minor, ap->firmware_fix);
if (ap->pdev)
- strlcpy(info->bus_info, pci_name(ap->pdev),
+ strscpy(info->bus_info, pci_name(ap->pdev),
sizeof(info->bus_info));
}
diff --git a/drivers/net/ethernet/altera/Kconfig b/drivers/net/ethernet/altera/Kconfig
index 914e56b91467..dd7fd41ccde5 100644
--- a/drivers/net/ethernet/altera/Kconfig
+++ b/drivers/net/ethernet/altera/Kconfig
@@ -3,6 +3,8 @@ config ALTERA_TSE
tristate "Altera Triple-Speed Ethernet MAC support"
depends on HAS_DMA
select PHYLIB
+ select PHYLINK
+ select PCS_ALTERA_TSE
help
This driver supports the Altera Triple-Speed (TSE) Ethernet MAC.
diff --git a/drivers/net/ethernet/altera/altera_tse.h b/drivers/net/ethernet/altera/altera_tse.h
index f17acfb579a0..db5eed06e92d 100644
--- a/drivers/net/ethernet/altera/altera_tse.h
+++ b/drivers/net/ethernet/altera/altera_tse.h
@@ -27,6 +27,7 @@
#include
#include
#include
+#include
#define ALTERA_TSE_SW_RESET_WATCHDOG_CNTR 10000
#define ALTERA_TSE_MAC_FIFO_WIDTH 4 /* TX/RX FIFO width in
@@ -109,17 +110,6 @@
#define MAC_CMDCFG_DISABLE_READ_TIMEOUT_GET(v) GET_BIT_VALUE(v, 27)
#define MAC_CMDCFG_CNT_RESET_GET(v) GET_BIT_VALUE(v, 31)
-/* SGMII PCS register addresses
- */
-#define SGMII_PCS_SCRATCH 0x10
-#define SGMII_PCS_REV 0x11
-#define SGMII_PCS_LINK_TIMER_0 0x12
-#define SGMII_PCS_LINK_TIMER_1 0x13
-#define SGMII_PCS_IF_MODE 0x14
-#define SGMII_PCS_DIS_READ_TO 0x15
-#define SGMII_PCS_READ_TO 0x16
-#define SGMII_PCS_SW_RESET_TIMEOUT 100 /* usecs */
-
/* MDIO registers within MAC register Space
*/
struct altera_tse_mdio {
@@ -423,6 +413,9 @@ struct altera_tse_private {
void __iomem *tx_dma_csr;
void __iomem *tx_dma_desc;
+ /* SGMII PCS address space */
+ void __iomem *pcs_base;
+
/* Rx buffers queue */
struct tse_buffer *rx_ring;
u32 rx_cons;
@@ -480,6 +473,10 @@ struct altera_tse_private {
u32 msg_enable;
struct altera_dmaops *dmaops;
+
+ struct phylink *phylink;
+ struct phylink_config phylink_config;
+ struct phylink_pcs *pcs;
};
/* Function prototypes
diff --git a/drivers/net/ethernet/altera/altera_tse_ethtool.c b/drivers/net/ethernet/altera/altera_tse_ethtool.c
index 4299f1301149..81313c85833e 100644
--- a/drivers/net/ethernet/altera/altera_tse_ethtool.c
+++ b/drivers/net/ethernet/altera/altera_tse_ethtool.c
@@ -199,9 +199,9 @@ static int tse_reglen(struct net_device *dev)
static void tse_get_regs(struct net_device *dev, struct ethtool_regs *regs,
void *regbuf)
{
- int i;
struct altera_tse_private *priv = netdev_priv(dev);
u32 *buf = regbuf;
+ int i;
/* Set version to a known value, so ethtool knows
* how to do any special formatting of this data.
@@ -221,6 +221,22 @@ static void tse_get_regs(struct net_device *dev, struct ethtool_regs *regs,
buf[i] = csrrd32(priv->mac_dev, i * 4);
}
+static int tse_ethtool_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct altera_tse_private *priv = netdev_priv(dev);
+
+ return phylink_ethtool_ksettings_set(priv->phylink, cmd);
+}
+
+static int tse_ethtool_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct altera_tse_private *priv = netdev_priv(dev);
+
+ return phylink_ethtool_ksettings_get(priv->phylink, cmd);
+}
+
static const struct ethtool_ops tse_ethtool_ops = {
.get_drvinfo = tse_get_drvinfo,
.get_regs_len = tse_reglen,
@@ -231,8 +247,9 @@ static const struct ethtool_ops tse_ethtool_ops = {
.get_ethtool_stats = tse_fill_stats,
.get_msglevel = tse_get_msglevel,
.set_msglevel = tse_set_msglevel,
- .get_link_ksettings = phy_ethtool_get_link_ksettings,
- .set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .get_link_ksettings = tse_ethtool_get_link_ksettings,
+ .set_link_ksettings = tse_ethtool_set_link_ksettings,
+ .get_ts_info = ethtool_op_get_ts_info,
};
void altera_tse_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index 8c5828582c21..7633b227b2ca 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -32,6 +32,7 @@
#include
#include
#include
+#include
#include
#include
#include
@@ -86,27 +87,6 @@ static inline u32 tse_tx_avail(struct altera_tse_private *priv)
return priv->tx_cons + priv->tx_ring_size - priv->tx_prod - 1;
}
-/* PCS Register read/write functions
- */
-static u16 sgmii_pcs_read(struct altera_tse_private *priv, int regnum)
-{
- return csrrd32(priv->mac_dev,
- tse_csroffs(mdio_phy0) + regnum * 4) & 0xffff;
-}
-
-static void sgmii_pcs_write(struct altera_tse_private *priv, int regnum,
- u16 value)
-{
- csrwr32(value, priv->mac_dev, tse_csroffs(mdio_phy0) + regnum * 4);
-}
-
-/* Check PCS scratch memory */
-static int sgmii_pcs_scratch_test(struct altera_tse_private *priv, u16 value)
-{
- sgmii_pcs_write(priv, SGMII_PCS_SCRATCH, value);
- return (sgmii_pcs_read(priv, SGMII_PCS_SCRATCH) == value);
-}
-
/* MDIO specific functions
*/
static int altera_tse_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
@@ -141,10 +121,10 @@ static int altera_tse_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
static int altera_tse_mdio_create(struct net_device *dev, unsigned int id)
{
struct altera_tse_private *priv = netdev_priv(dev);
- int ret;
struct device_node *mdio_node = NULL;
- struct mii_bus *mdio = NULL;
struct device_node *child_node = NULL;
+ struct mii_bus *mdio = NULL;
+ int ret;
for_each_child_of_node(priv->device->of_node, child_node) {
if (of_device_is_compatible(child_node, "altr,tse-mdio")) {
@@ -236,8 +216,8 @@ static int tse_init_rx_buffer(struct altera_tse_private *priv,
static void tse_free_rx_buffer(struct altera_tse_private *priv,
struct tse_buffer *rxbuffer)
{
- struct sk_buff *skb = rxbuffer->skb;
dma_addr_t dma_addr = rxbuffer->dma_addr;
+ struct sk_buff *skb = rxbuffer->skb;
if (skb != NULL) {
if (dma_addr)
@@ -358,6 +338,7 @@ static inline void tse_rx_vlan(struct net_device *dev, struct sk_buff *skb)
{
struct ethhdr *eth_hdr;
u16 vid;
+
if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
!__vlan_get_tag(skb, &vid)) {
eth_hdr = (struct ethhdr *)skb->data;
@@ -371,10 +352,10 @@ static inline void tse_rx_vlan(struct net_device *dev, struct sk_buff *skb)
*/
static int tse_rx(struct altera_tse_private *priv, int limit)
{
- unsigned int count = 0;
- unsigned int next_entry;
- struct sk_buff *skb;
unsigned int entry = priv->rx_cons % priv->rx_ring_size;
+ unsigned int next_entry;
+ unsigned int count = 0;
+ struct sk_buff *skb;
u32 rxstatus;
u16 pktlength;
u16 pktstatus;
@@ -448,10 +429,10 @@ static int tse_rx(struct altera_tse_private *priv, int limit)
static int tse_tx_complete(struct altera_tse_private *priv)
{
unsigned int txsize = priv->tx_ring_size;
- u32 ready;
- unsigned int entry;
struct tse_buffer *tx_buff;
+ unsigned int entry;
int txcomplete = 0;
+ u32 ready;
spin_lock(&priv->tx_lock);
@@ -497,8 +478,8 @@ static int tse_poll(struct napi_struct *napi, int budget)
{
struct altera_tse_private *priv =
container_of(napi, struct altera_tse_private, napi);
- int rxcomplete = 0;
unsigned long int flags;
+ int rxcomplete = 0;
tse_tx_complete(priv);
@@ -561,13 +542,13 @@ static irqreturn_t altera_isr(int irq, void *dev_id)
static netdev_tx_t tse_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct altera_tse_private *priv = netdev_priv(dev);
- unsigned int txsize = priv->tx_ring_size;
- unsigned int entry;
- struct tse_buffer *buffer = NULL;
- int nfrags = skb_shinfo(skb)->nr_frags;
unsigned int nopaged_len = skb_headlen(skb);
+ unsigned int txsize = priv->tx_ring_size;
+ int nfrags = skb_shinfo(skb)->nr_frags;
+ struct tse_buffer *buffer = NULL;
netdev_tx_t ret = NETDEV_TX_OK;
dma_addr_t dma_addr;
+ unsigned int entry;
spin_lock_bh(&priv->tx_lock);
@@ -619,117 +600,6 @@ out:
return ret;
}
-/* Called every time the controller might need to be made
- * aware of new link state. The PHY code conveys this
- * information through variables in the phydev structure, and this
- * function converts those variables into the appropriate
- * register values, and can bring down the device if needed.
- */
-static void altera_tse_adjust_link(struct net_device *dev)
-{
- struct altera_tse_private *priv = netdev_priv(dev);
- struct phy_device *phydev = dev->phydev;
- int new_state = 0;
-
- /* only change config if there is a link */
- spin_lock(&priv->mac_cfg_lock);
- if (phydev->link) {
- /* Read old config */
- u32 cfg_reg = ioread32(&priv->mac_dev->command_config);
-
- /* Check duplex */
- if (phydev->duplex != priv->oldduplex) {
- new_state = 1;
- if (!(phydev->duplex))
- cfg_reg |= MAC_CMDCFG_HD_ENA;
- else
- cfg_reg &= ~MAC_CMDCFG_HD_ENA;
-
- netdev_dbg(priv->dev, "%s: Link duplex = 0x%x\n",
- dev->name, phydev->duplex);
-
- priv->oldduplex = phydev->duplex;
- }
-
- /* Check speed */
- if (phydev->speed != priv->oldspeed) {
- new_state = 1;
- switch (phydev->speed) {
- case 1000:
- cfg_reg |= MAC_CMDCFG_ETH_SPEED;
- cfg_reg &= ~MAC_CMDCFG_ENA_10;
- break;
- case 100:
- cfg_reg &= ~MAC_CMDCFG_ETH_SPEED;
- cfg_reg &= ~MAC_CMDCFG_ENA_10;
- break;
- case 10:
- cfg_reg &= ~MAC_CMDCFG_ETH_SPEED;
- cfg_reg |= MAC_CMDCFG_ENA_10;
- break;
- default:
- if (netif_msg_link(priv))
- netdev_warn(dev, "Speed (%d) is not 10/100/1000!\n",
- phydev->speed);
- break;
- }
- priv->oldspeed = phydev->speed;
- }
- iowrite32(cfg_reg, &priv->mac_dev->command_config);
-
- if (!priv->oldlink) {
- new_state = 1;
- priv->oldlink = 1;
- }
- } else if (priv->oldlink) {
- new_state = 1;
- priv->oldlink = 0;
- priv->oldspeed = 0;
- priv->oldduplex = -1;
- }
-
- if (new_state && netif_msg_link(priv))
- phy_print_status(phydev);
-
- spin_unlock(&priv->mac_cfg_lock);
-}
-static struct phy_device *connect_local_phy(struct net_device *dev)
-{
- struct altera_tse_private *priv = netdev_priv(dev);
- struct phy_device *phydev = NULL;
- char phy_id_fmt[MII_BUS_ID_SIZE + 3];
-
- if (priv->phy_addr != POLL_PHY) {
- snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
- priv->mdio->id, priv->phy_addr);
-
- netdev_dbg(dev, "trying to attach to %s\n", phy_id_fmt);
-
- phydev = phy_connect(dev, phy_id_fmt, &altera_tse_adjust_link,
- priv->phy_iface);
- if (IS_ERR(phydev)) {
- netdev_err(dev, "Could not attach to PHY\n");
- phydev = NULL;
- }
-
- } else {
- int ret;
- phydev = phy_find_first(priv->mdio);
- if (phydev == NULL) {
- netdev_err(dev, "No PHY found\n");
- return phydev;
- }
-
- ret = phy_connect_direct(dev, phydev, &altera_tse_adjust_link,
- priv->phy_iface);
- if (ret != 0) {
- netdev_err(dev, "Could not attach to PHY\n");
- phydev = NULL;
- }
- }
- return phydev;
-}
-
static int altera_tse_phy_get_addr_mdio_create(struct net_device *dev)
{
struct altera_tse_private *priv = netdev_priv(dev);
@@ -768,91 +638,6 @@ static int altera_tse_phy_get_addr_mdio_create(struct net_device *dev)
return 0;
}
-/* Initialize driver's PHY state, and attach to the PHY
- */
-static int init_phy(struct net_device *dev)
-{
- struct altera_tse_private *priv = netdev_priv(dev);
- struct phy_device *phydev;
- struct device_node *phynode;
- bool fixed_link = false;
- int rc = 0;
-
- /* Avoid init phy in case of no phy present */
- if (!priv->phy_iface)
- return 0;
-
- priv->oldlink = 0;
- priv->oldspeed = 0;
- priv->oldduplex = -1;
-
- phynode = of_parse_phandle(priv->device->of_node, "phy-handle", 0);
-
- if (!phynode) {
- /* check if a fixed-link is defined in device-tree */
- if (of_phy_is_fixed_link(priv->device->of_node)) {
- rc = of_phy_register_fixed_link(priv->device->of_node);
- if (rc < 0) {
- netdev_err(dev, "cannot register fixed PHY\n");
- return rc;
- }
-
- /* In the case of a fixed PHY, the DT node associated
- * to the PHY is the Ethernet MAC DT node.
- */
- phynode = of_node_get(priv->device->of_node);
- fixed_link = true;
-
- netdev_dbg(dev, "fixed-link detected\n");
- phydev = of_phy_connect(dev, phynode,
- &altera_tse_adjust_link,
- 0, priv->phy_iface);
- } else {
- netdev_dbg(dev, "no phy-handle found\n");
- if (!priv->mdio) {
- netdev_err(dev, "No phy-handle nor local mdio specified\n");
- return -ENODEV;
- }
- phydev = connect_local_phy(dev);
- }
- } else {
- netdev_dbg(dev, "phy-handle found\n");
- phydev = of_phy_connect(dev, phynode,
- &altera_tse_adjust_link, 0, priv->phy_iface);
- }
- of_node_put(phynode);
-
- if (!phydev) {
- netdev_err(dev, "Could not find the PHY\n");
- if (fixed_link)
- of_phy_deregister_fixed_link(priv->device->of_node);
- return -ENODEV;
- }
-
- /* Stop Advertising 1000BASE Capability if interface is not GMII
- */
- if ((priv->phy_iface == PHY_INTERFACE_MODE_MII) ||
- (priv->phy_iface == PHY_INTERFACE_MODE_RMII))
- phy_set_max_speed(phydev, SPEED_100);
-
- /* Broken HW is sometimes missing the pull-up resistor on the
- * MDIO line, which results in reads to non-existent devices returning
- * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
- * device as well. If a fixed-link is used the phy_id is always 0.
- * Note: phydev->phy_id is the result of reading the UID PHY registers.
- */
- if ((phydev->phy_id == 0) && !fixed_link) {
- netdev_err(dev, "Bad PHY UID 0x%08x\n", phydev->phy_id);
- phy_disconnect(phydev);
- return -ENODEV;
- }
-
- netdev_dbg(dev, "attached to PHY %d UID 0x%08x Link = %d\n",
- phydev->mdio.addr, phydev->phy_id, phydev->link);
-
- return 0;
-}
-
static void tse_update_mac_addr(struct altera_tse_private *priv, const u8 *addr)
{
u32 msb;
@@ -1012,8 +797,8 @@ static int tse_change_mtu(struct net_device *dev, int new_mtu)
static void altera_tse_set_mcfilter(struct net_device *dev)
{
struct altera_tse_private *priv = netdev_priv(dev);
- int i;
struct netdev_hw_addr *ha;
+ int i;
/* clear the hash filter */
for (i = 0; i < 64; i++)
@@ -1087,74 +872,14 @@ static void tse_set_rx_mode(struct net_device *dev)
spin_unlock(&priv->mac_cfg_lock);
}
-/* Initialise (if necessary) the SGMII PCS component
- */
-static int init_sgmii_pcs(struct net_device *dev)
-{
- struct altera_tse_private *priv = netdev_priv(dev);
- int n;
- unsigned int tmp_reg = 0;
-
- if (priv->phy_iface != PHY_INTERFACE_MODE_SGMII)
- return 0; /* Nothing to do, not in SGMII mode */
-
- /* The TSE SGMII PCS block looks a little like a PHY, it is
- * mapped into the zeroth MDIO space of the MAC and it has
- * ID registers like a PHY would. Sadly this is often
- * configured to zeroes, so don't be surprised if it does
- * show 0x00000000.
- */
-
- if (sgmii_pcs_scratch_test(priv, 0x0000) &&
- sgmii_pcs_scratch_test(priv, 0xffff) &&
- sgmii_pcs_scratch_test(priv, 0xa5a5) &&
- sgmii_pcs_scratch_test(priv, 0x5a5a)) {
- netdev_info(dev, "PCS PHY ID: 0x%04x%04x\n",
- sgmii_pcs_read(priv, MII_PHYSID1),
- sgmii_pcs_read(priv, MII_PHYSID2));
- } else {
- netdev_err(dev, "SGMII PCS Scratch memory test failed.\n");
- return -ENOMEM;
- }
-
- /* Starting on page 5-29 of the MegaCore Function User Guide
- * Set SGMII Link timer to 1.6ms
- */
- sgmii_pcs_write(priv, SGMII_PCS_LINK_TIMER_0, 0x0D40);
- sgmii_pcs_write(priv, SGMII_PCS_LINK_TIMER_1, 0x03);
-
- /* Enable SGMII Interface and Enable SGMII Auto Negotiation */
- sgmii_pcs_write(priv, SGMII_PCS_IF_MODE, 0x3);
-
- /* Enable Autonegotiation */
- tmp_reg = sgmii_pcs_read(priv, MII_BMCR);
- tmp_reg |= (BMCR_SPEED1000 | BMCR_FULLDPLX | BMCR_ANENABLE);
- sgmii_pcs_write(priv, MII_BMCR, tmp_reg);
-
- /* Reset PCS block */
- tmp_reg |= BMCR_RESET;
- sgmii_pcs_write(priv, MII_BMCR, tmp_reg);
- for (n = 0; n < SGMII_PCS_SW_RESET_TIMEOUT; n++) {
- if (!(sgmii_pcs_read(priv, MII_BMCR) & BMCR_RESET)) {
- netdev_info(dev, "SGMII PCS block initialised OK\n");
- return 0;
- }
- udelay(1);
- }
-
- /* We failed to reset the block, return a timeout */
- netdev_err(dev, "SGMII PCS block reset failed.\n");
- return -ETIMEDOUT;
-}
-
/* Open and initialize the interface
*/
static int tse_open(struct net_device *dev)
{
struct altera_tse_private *priv = netdev_priv(dev);
+ unsigned long flags;
int ret = 0;
int i;
- unsigned long int flags;
/* Reset and configure TSE MAC and probe associated PHY */
ret = priv->dmaops->init_dma(priv);
@@ -1171,14 +896,6 @@ static int tse_open(struct net_device *dev)
netdev_warn(dev, "TSE revision %x\n", priv->revision);
spin_lock(&priv->mac_cfg_lock);
- /* no-op if MAC not operating in SGMII mode*/
- ret = init_sgmii_pcs(dev);
- if (ret) {
- netdev_err(dev,
- "Cannot init the SGMII PCS (error: %d)\n", ret);
- spin_unlock(&priv->mac_cfg_lock);
- goto phy_error;
- }
ret = reset_mac(priv);
/* Note that reset_mac will fail if the clocks are gated by the PHY
@@ -1236,8 +953,12 @@ static int tse_open(struct net_device *dev)
spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags);
- if (dev->phydev)
- phy_start(dev->phydev);
+ ret = phylink_of_phy_connect(priv->phylink, priv->device->of_node, 0);
+ if (ret) {
+ netdev_err(dev, "could not connect phylink (%d)\n", ret);
+ goto tx_request_irq_error;
+ }
+ phylink_start(priv->phylink);
napi_enable(&priv->napi);
netif_start_queue(dev);
@@ -1265,13 +986,10 @@ phy_error:
static int tse_shutdown(struct net_device *dev)
{
struct altera_tse_private *priv = netdev_priv(dev);
- int ret;
unsigned long int flags;
+ int ret;
- /* Stop the PHY */
- if (dev->phydev)
- phy_stop(dev->phydev);
-
+ phylink_stop(priv->phylink);
netif_stop_queue(dev);
napi_disable(&priv->napi);
@@ -1317,11 +1035,79 @@ static struct net_device_ops altera_tse_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
};
+static void alt_tse_mac_an_restart(struct phylink_config *config)
+{
+}
+
+static void alt_tse_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct altera_tse_private *priv = netdev_priv(ndev);
+
+ spin_lock(&priv->mac_cfg_lock);
+ reset_mac(priv);
+ tse_set_mac(priv, true);
+ spin_unlock(&priv->mac_cfg_lock);
+}
+
+static void alt_tse_mac_link_down(struct phylink_config *config,
+ unsigned int mode, phy_interface_t interface)
+{
+}
+
+static void alt_tse_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy, unsigned int mode,
+ phy_interface_t interface, int speed,
+ int duplex, bool tx_pause, bool rx_pause)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct altera_tse_private *priv = netdev_priv(ndev);
+ u32 ctrl;
+
+ ctrl = csrrd32(priv->mac_dev, tse_csroffs(command_config));
+ ctrl &= ~(MAC_CMDCFG_ENA_10 | MAC_CMDCFG_ETH_SPEED | MAC_CMDCFG_HD_ENA);
+
+ if (duplex == DUPLEX_HALF)
+ ctrl |= MAC_CMDCFG_HD_ENA;
+
+ if (speed == SPEED_1000)
+ ctrl |= MAC_CMDCFG_ETH_SPEED;
+ else if (speed == SPEED_10)
+ ctrl |= MAC_CMDCFG_ENA_10;
+
+ spin_lock(&priv->mac_cfg_lock);
+ csrwr32(ctrl, priv->mac_dev, tse_csroffs(command_config));
+ spin_unlock(&priv->mac_cfg_lock);
+}
+
+static struct phylink_pcs *alt_tse_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
+{
+ struct net_device *ndev = to_net_dev(config->dev);
+ struct altera_tse_private *priv = netdev_priv(ndev);
+
+ if (interface == PHY_INTERFACE_MODE_SGMII ||
+ interface == PHY_INTERFACE_MODE_1000BASEX)
+ return priv->pcs;
+ else
+ return NULL;
+}
+
+static const struct phylink_mac_ops alt_tse_phylink_ops = {
+ .validate = phylink_generic_validate,
+ .mac_an_restart = alt_tse_mac_an_restart,
+ .mac_config = alt_tse_mac_config,
+ .mac_link_down = alt_tse_mac_link_down,
+ .mac_link_up = alt_tse_mac_link_up,
+ .mac_select_pcs = alt_tse_select_pcs,
+};
+
static int request_and_map(struct platform_device *pdev, const char *name,
struct resource **res, void __iomem **ptr)
{
- struct resource *region;
struct device *device = &pdev->dev;
+ struct resource *region;
*res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
if (*res == NULL) {
@@ -1350,13 +1136,15 @@ static int request_and_map(struct platform_device *pdev, const char *name,
*/
static int altera_tse_probe(struct platform_device *pdev)
{
- struct net_device *ndev;
- int ret = -ENODEV;
+ const struct of_device_id *of_id = NULL;
+ struct altera_tse_private *priv;
struct resource *control_port;
struct resource *dma_res;
- struct altera_tse_private *priv;
+ struct resource *pcs_res;
+ struct net_device *ndev;
void __iomem *descmap;
- const struct of_device_id *of_id = NULL;
+ int pcs_reg_width = 2;
+ int ret = -ENODEV;
ndev = alloc_etherdev(sizeof(struct altera_tse_private));
if (!ndev) {
@@ -1467,6 +1255,17 @@ static int altera_tse_probe(struct platform_device *pdev)
if (ret)
goto err_free_netdev;
+	/* SGMII PCS address space. The location can vary depending on how the
+	 * IP is integrated. There may be a resource dedicated to it at a
+	 * specific address, but if that is not the case, fall back to the
+	 * mdio_phy0 registers inside the MAC's address space.
+	 */
+ ret = request_and_map(pdev, "pcs", &pcs_res,
+ &priv->pcs_base);
+ if (ret) {
+ priv->pcs_base = priv->mac_dev + tse_csroffs(mdio_phy0);
+ pcs_reg_width = 4;
+ }
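
A condensed sketch of the register-width choice made above (example_pcs_reg_width is hypothetical; the values follow the code in this hunk):

	static int example_pcs_reg_width(bool has_dedicated_pcs_resource)
	{
		/* dedicated "pcs" resource: 2-byte register stride;
		 * fallback via mdio_phy0 in the MAC space: 4-byte stride
		 */
		return has_dedicated_pcs_resource ? 2 : 4;
	}
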
/* Rx IRQ */
priv->rx_irq = platform_get_irq_byname(pdev, "rx_irq");
@@ -1566,7 +1365,7 @@ static int altera_tse_probe(struct platform_device *pdev)
ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
/* setup NAPI interface */
- netif_napi_add(ndev, &priv->napi, tse_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(ndev, &priv->napi, tse_poll);
spin_lock_init(&priv->mac_cfg_lock);
spin_lock_init(&priv->tx_lock);
@@ -1590,11 +1389,32 @@ static int altera_tse_probe(struct platform_device *pdev)
(unsigned long) control_port->start, priv->rx_irq,
priv->tx_irq);
- ret = init_phy(ndev);
- if (ret != 0) {
- netdev_err(ndev, "Cannot attach to PHY (error: %d)\n", ret);
+ priv->pcs = alt_tse_pcs_create(ndev, priv->pcs_base, pcs_reg_width);
+
+ priv->phylink_config.dev = &ndev->dev;
+ priv->phylink_config.type = PHYLINK_NETDEV;
+ priv->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_10 |
+ MAC_100 | MAC_1000FD;
+
+ phy_interface_set_rgmii(priv->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_MII,
+ priv->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ priv->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ priv->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ priv->phylink_config.supported_interfaces);
+
+ priv->phylink = phylink_create(&priv->phylink_config,
+ of_fwnode_handle(priv->device->of_node),
+ priv->phy_iface, &alt_tse_phylink_ops);
+ if (IS_ERR(priv->phylink)) {
+ dev_err(&pdev->dev, "failed to create phylink\n");
+ ret = PTR_ERR(priv->phylink);
goto err_init_phy;
}
+
return 0;
err_init_phy:
@@ -1614,16 +1434,10 @@ static int altera_tse_remove(struct platform_device *pdev)
struct net_device *ndev = platform_get_drvdata(pdev);
struct altera_tse_private *priv = netdev_priv(ndev);
- if (ndev->phydev) {
- phy_disconnect(ndev->phydev);
-
- if (of_phy_is_fixed_link(priv->device->of_node))
- of_phy_deregister_fixed_link(priv->device->of_node);
- }
-
platform_set_drvdata(pdev, NULL);
altera_tse_mdio_destroy(ndev);
unregister_netdev(ndev);
+ phylink_destroy(priv->phylink);
free_netdev(ndev);
return 0;
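
The altera_tse hunks above replace the driver's private PHY handling with phylink: an instance is created at probe time with alt_tse_phylink_ops and destroyed in remove(). The open/stop paths (not visible in this excerpt) then drive the link through the phylink helpers. Below is a minimal sketch of that side; everything except the phylink_* calls (struct example_priv, example_open/example_stop) is illustrative and not taken from the driver.

    #include <linux/netdevice.h>
    #include <linux/of.h>
    #include <linux/phylink.h>

    struct example_priv {
            struct phylink *phylink;        /* created with phylink_create() at probe */
            struct device_node *of_node;
    };

    static int example_open(struct net_device *ndev)
    {
            struct example_priv *priv = netdev_priv(ndev);
            int ret;

            /* Attach the PHY described in the device tree to the phylink
             * instance; phylink selects the PCS via .mac_select_pcs as needed.
             */
            ret = phylink_of_phy_connect(priv->phylink, priv->of_node, 0);
            if (ret) {
                    netdev_err(ndev, "could not connect PHY (%d)\n", ret);
                    return ret;
            }

            phylink_start(priv->phylink);   /* begin link management */
            netif_start_queue(ndev);
            return 0;
    }

    static int example_stop(struct net_device *ndev)
    {
            struct example_priv *priv = netdev_priv(ndev);

            netif_stop_queue(ndev);
            phylink_stop(priv->phylink);
            phylink_disconnect_phy(priv->phylink);
            return 0;
    }
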
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index 39242c5a1729..98d6386b7f39 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -462,8 +462,8 @@ static void ena_get_drvinfo(struct net_device *dev,
{
struct ena_adapter *adapter = netdev_priv(dev);
- strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(adapter->pdev),
+ strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(adapter->pdev),
sizeof(info->bus_info));
}
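
Throughout this series strlcpy() is converted to strscpy(). Both NUL-terminate the destination, but strscpy() reports truncation through its return value and never has to walk the whole source string. A minimal sketch of the difference, with illustrative buffer names:

    #include <linux/errno.h>
    #include <linux/printk.h>
    #include <linux/string.h>
    #include <linux/types.h>

    static void copy_example(char *dst, size_t dst_size, const char *src)
    {
            ssize_t n;

            /* strscpy() returns the number of bytes copied, or -E2BIG when
             * src did not fit; strlcpy() returned strlen(src), so callers
             * could not detect truncation without re-measuring the source.
             */
            n = strscpy(dst, src, dst_size);
            if (n == -E2BIG)
                    pr_debug("truncated to %zu bytes\n", dst_size - 1);
    }
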
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 6a356a6cee15..d350eeec8bad 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -2265,10 +2265,8 @@ static void ena_init_napi_in_range(struct ena_adapter *adapter,
for (i = first_index; i < first_index + count; i++) {
struct ena_napi *napi = &adapter->ena_napi[i];
- netif_napi_add(adapter->netdev,
- &napi->napi,
- ENA_IS_XDP_INDEX(adapter, i) ? ena_xdp_io_poll : ena_io_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(adapter->netdev, &napi->napi,
+ ENA_IS_XDP_INDEX(adapter, i) ? ena_xdp_io_poll : ena_io_poll);
if (!ENA_IS_XDP_INDEX(adapter, i)) {
napi->rx_ring = &adapter->rx_ring[i];
@@ -3166,7 +3164,7 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev, struct pci_dev *pd
host_info->bdf = (pdev->bus->number << 8) | pdev->devfn;
host_info->os_type = ENA_ADMIN_OS_LINUX;
host_info->kernel_ver = LINUX_VERSION_CODE;
- strlcpy(host_info->kernel_ver_str, utsname()->version,
+ strscpy(host_info->kernel_ver_str, utsname()->version,
sizeof(host_info->kernel_ver_str) - 1);
host_info->os_dist = 0;
strncpy(host_info->os_dist_str, utsname()->release,
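
The netif_napi_add() calls in this and many later hunks drop the trailing NAPI_POLL_WEIGHT argument: the three-argument form applies the default weight (64) itself, and drivers that genuinely need a different weight use netif_napi_add_weight(). A hedged sketch of registration after the change; the poll handler body is illustrative:

    #include <linux/netdevice.h>

    static int example_poll(struct napi_struct *napi, int budget)
    {
            int work_done = 0;

            /* ... process up to 'budget' received packets ... */

            if (work_done < budget)
                    napi_complete_done(napi, work_done);
            return work_done;
    }

    static void example_register_napi(struct net_device *ndev,
                                      struct napi_struct *napi)
    {
            /* The default weight (NAPI_POLL_WEIGHT) is now implied. */
            netif_napi_add(ndev, napi, example_poll);

            /* A non-default weight would instead use:
             * netif_napi_add_weight(ndev, napi, example_poll, weight);
             */
    }
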
diff --git a/drivers/net/ethernet/amd/a2065.c b/drivers/net/ethernet/amd/a2065.c
index 3a351d3396bf..68983b717145 100644
--- a/drivers/net/ethernet/amd/a2065.c
+++ b/drivers/net/ethernet/amd/a2065.c
@@ -695,7 +695,7 @@ static int a2065_init_one(struct zorro_dev *z,
}
dev = alloc_etherdev(sizeof(struct lance_private));
- if (dev == NULL) {
+ if (!dev) {
release_mem_region(base_addr, sizeof(struct lance_regs));
release_mem_region(mem_start, A2065_RAM_SIZE);
return -ENOMEM;
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index 5d1baa01360f..ea6cfc2095e1 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -43,7 +43,7 @@ Revision History:
3.0.4 12/09/2003
1. Added set_mac_address routine for bonding driver support.
2. Tested the driver for bonding support
- 3. Bug fix: Fixed mismach in actual receive buffer lenth and lenth
+ 3. Bug fix: Fixed mismach in actual receive buffer length and length
indicated to the h/w.
4. Modified amd8111e_rx() routine to receive all the received packets
in the first interrupt.
@@ -185,24 +185,23 @@ static void amd8111e_set_ext_phy(struct net_device *dev)
advert = amd8111e_mdio_read(dev, lp->ext_phy_addr, MII_ADVERTISE);
tmp = advert & ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
switch (lp->ext_phy_option) {
-
- default:
- case SPEED_AUTONEG: /* advertise all values */
- tmp |= (ADVERTISE_10HALF | ADVERTISE_10FULL |
- ADVERTISE_100HALF | ADVERTISE_100FULL);
- break;
- case SPEED10_HALF:
- tmp |= ADVERTISE_10HALF;
- break;
- case SPEED10_FULL:
- tmp |= ADVERTISE_10FULL;
- break;
- case SPEED100_HALF:
- tmp |= ADVERTISE_100HALF;
- break;
- case SPEED100_FULL:
- tmp |= ADVERTISE_100FULL;
- break;
+ default:
+ case SPEED_AUTONEG: /* advertise all values */
+ tmp |= (ADVERTISE_10HALF | ADVERTISE_10FULL |
+ ADVERTISE_100HALF | ADVERTISE_100FULL);
+ break;
+ case SPEED10_HALF:
+ tmp |= ADVERTISE_10HALF;
+ break;
+ case SPEED10_FULL:
+ tmp |= ADVERTISE_10FULL;
+ break;
+ case SPEED100_HALF:
+ tmp |= ADVERTISE_100HALF;
+ break;
+ case SPEED100_FULL:
+ tmp |= ADVERTISE_100FULL;
+ break;
}
if(advert != tmp)
@@ -237,7 +236,7 @@ static int amd8111e_free_skbs(struct net_device *dev)
/* Freeing previously allocated receive buffers */
for (i = 0; i < NUM_RX_BUFFERS; i++) {
rx_skbuff = lp->rx_skbuff[i];
- if (rx_skbuff != NULL) {
+ if (rx_skbuff) {
dma_unmap_single(&lp->pci_dev->dev,
lp->rx_dma_addr[i],
lp->rx_buff_len - 2, DMA_FROM_DEVICE);
@@ -1084,7 +1083,7 @@ static irqreturn_t amd8111e_interrupt(int irq, void *dev_id)
unsigned int intr0, intren0;
unsigned int handled = 1;
- if (unlikely(dev == NULL))
+ if (unlikely(!dev))
return IRQ_NONE;
spin_lock(&lp->lock);
@@ -1109,7 +1108,7 @@ static irqreturn_t amd8111e_interrupt(int irq, void *dev_id)
/* Check if Receive Interrupt has occurred. */
if (intr0 & RINT0) {
if (napi_schedule_prep(&lp->napi)) {
- /* Disable receive interupts */
+ /* Disable receive interrupts */
writel(RINTEN0, mmio + INTEN0);
/* Schedule a polling routine */
__napi_schedule(&lp->napi);
@@ -1364,10 +1363,10 @@ static void amd8111e_get_drvinfo(struct net_device *dev,
{
struct amd8111e_priv *lp = netdev_priv(dev);
struct pci_dev *pci_dev = lp->pci_dev;
- strlcpy(info->driver, MODULE_NAME, sizeof(info->driver));
+ strscpy(info->driver, MODULE_NAME, sizeof(info->driver));
snprintf(info->fw_version, sizeof(info->fw_version),
"%u", chip_version);
- strlcpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info));
+ strscpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info));
}
static int amd8111e_get_regs_len(struct net_device *dev)
@@ -1554,7 +1553,7 @@ static int amd8111e_enable_magicpkt(struct amd8111e_priv *lp)
static int amd8111e_enable_link_change(struct amd8111e_priv *lp)
{
- /* Adapter is already stoped/suspended/interrupt-disabled */
+ /* Adapter is already stopped/suspended/interrupt-disabled */
writel(VAL0 | LCMODE_SW, lp->mmio + CMD7);
/* To eliminate PCI posting bug */
diff --git a/drivers/net/ethernet/amd/amd8111e.h b/drivers/net/ethernet/amd/amd8111e.h
index 37da79da5f5e..9d570adb295b 100644
--- a/drivers/net/ethernet/amd/amd8111e.h
+++ b/drivers/net/ethernet/amd/amd8111e.h
@@ -600,7 +600,7 @@ typedef enum {
#define CSTATE 1
#define SSTATE 2
-/* Assume contoller gets data 10 times the maximum processing time */
+/* Assume controller gets data 10 times the maximum processing time */
#define REPEAT_CNT 10
/* amd8111e descriptor flag definitions */
diff --git a/drivers/net/ethernet/amd/ariadne.c b/drivers/net/ethernet/amd/ariadne.c
index 4ea7b9f3c424..38153e633231 100644
--- a/drivers/net/ethernet/amd/ariadne.c
+++ b/drivers/net/ethernet/amd/ariadne.c
@@ -193,7 +193,7 @@ static int ariadne_rx(struct net_device *dev)
struct sk_buff *skb;
skb = netdev_alloc_skb(dev, pkt_len + 2);
- if (skb == NULL) {
+ if (!skb) {
for (i = 0; i < RX_RING_SIZE; i++)
if (lowb(priv->rx_ring[(entry + i) % RX_RING_SIZE]->RMD1) & RF_OWN)
break;
@@ -731,7 +731,7 @@ static int ariadne_init_one(struct zorro_dev *z,
}
dev = alloc_etherdev(sizeof(struct ariadne_private));
- if (dev == NULL) {
+ if (!dev) {
release_mem_region(base_addr, sizeof(struct Am79C960));
release_mem_region(mem_start, ARIADNE_RAM_SIZE);
return -ENOMEM;
diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c
index 27869164c6e6..3222c48ce6ae 100644
--- a/drivers/net/ethernet/amd/atarilance.c
+++ b/drivers/net/ethernet/amd/atarilance.c
@@ -581,15 +581,15 @@ static unsigned long __init lance_probe1( struct net_device *dev,
/* Get the ethernet address */
switch( lp->cardtype ) {
- case OLD_RIEBL:
+ case OLD_RIEBL:
/* No ethernet address! (Set some default address) */
eth_hw_addr_set(dev, OldRieblDefHwaddr);
break;
- case NEW_RIEBL:
+ case NEW_RIEBL:
lp->memcpy_f(addr, RIEBL_HWADDR_ADDR, ETH_ALEN);
eth_hw_addr_set(dev, addr);
break;
- case PAM_CARD:
+ case PAM_CARD:
i = IO->eeprom;
for( i = 0; i < 6; ++i )
addr[i] =
@@ -854,7 +854,7 @@ static irqreturn_t lance_interrupt( int irq, void *dev_id )
int csr0, boguscnt = 10;
int handled = 0;
- if (dev == NULL) {
+ if (!dev) {
DPRINTK( 1, ( "lance_interrupt(): interrupt for unknown device.\n" ));
return IRQ_NONE;
}
@@ -995,7 +995,7 @@ static int lance_rx( struct net_device *dev )
}
else {
skb = netdev_alloc_skb(dev, pkt_len + 2);
- if (skb == NULL) {
+ if (!skb) {
for( i = 0; i < RX_RING_SIZE; i++ )
if (MEM->rx_head[(entry+i) & RX_RING_MOD_MASK].flag &
RMD1_OWN_CHIP)
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index d5f2c6989221..c5cec4e79489 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -650,7 +650,7 @@ au1000_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct au1000_private *aup = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
snprintf(info->bus_info, sizeof(info->bus_info), "%s %d", DRV_NAME,
aup->mac_id);
}
@@ -786,7 +786,7 @@ static int au1000_rx(struct net_device *dev)
frmlen = (status & RX_FRAME_LEN_MASK);
frmlen -= 4; /* Remove FCS */
skb = netdev_alloc_skb(dev, frmlen + 2);
- if (skb == NULL) {
+ if (!skb) {
dev->stats.rx_dropped++;
continue;
}
@@ -1199,7 +1199,7 @@ static int au1000_probe(struct platform_device *pdev)
}
aup->mii_bus = mdiobus_alloc();
- if (aup->mii_bus == NULL) {
+ if (!aup->mii_bus) {
dev_err(&pdev->dev, "failed to allocate mdiobus structure\n");
err = -ENOMEM;
goto err_mdiobus_alloc;
@@ -1284,7 +1284,7 @@ static int au1000_probe(struct platform_device *pdev)
return 0;
err_out:
- if (aup->mii_bus != NULL)
+ if (aup->mii_bus)
mdiobus_unregister(aup->mii_bus);
/* here we should have a valid dev plus aup-> register addresses
diff --git a/drivers/net/ethernet/amd/lance.c b/drivers/net/ethernet/amd/lance.c
index 462016666752..fb8686214a32 100644
--- a/drivers/net/ethernet/amd/lance.c
+++ b/drivers/net/ethernet/amd/lance.c
@@ -880,7 +880,7 @@ lance_init_ring(struct net_device *dev, gfp_t gfp)
rx_buff = skb->data;
else
rx_buff = kmalloc(PKT_BUF_SZ, GFP_DMA | gfp);
- if (rx_buff == NULL)
+ if (!rx_buff)
lp->rx_ring[i].base = 0;
else
lp->rx_ring[i].base = (u32)isa_virt_to_bus(rx_buff) | 0x80000000;
@@ -1186,7 +1186,7 @@ lance_rx(struct net_device *dev)
else
{
skb = dev_alloc_skb(pkt_len+2);
- if (skb == NULL)
+ if (!skb)
{
printk("%s: Memory squeeze, deferring packet.\n", dev->name);
for (i=0; i < RX_RING_SIZE; i++)
diff --git a/drivers/net/ethernet/amd/nmclan_cs.c b/drivers/net/ethernet/amd/nmclan_cs.c
index 30ee5329bd7c..823a329a921f 100644
--- a/drivers/net/ethernet/amd/nmclan_cs.c
+++ b/drivers/net/ethernet/amd/nmclan_cs.c
@@ -485,10 +485,10 @@ static int mace_read(mace_private *lp, unsigned int ioaddr, int reg)
unsigned long flags;
switch (reg >> 4) {
- case 0: /* register 0-15 */
+ case 0: /* register 0-15 */
data = inb(ioaddr + AM2150_MACE_BASE + reg);
break;
- case 1: /* register 16-31 */
+ case 1: /* register 16-31 */
spin_lock_irqsave(&lp->bank_lock, flags);
MACEBANK(1);
data = inb(ioaddr + AM2150_MACE_BASE + (reg & 0x0F));
@@ -512,10 +512,10 @@ static void mace_write(mace_private *lp, unsigned int ioaddr, int reg,
unsigned long flags;
switch (reg >> 4) {
- case 0: /* register 0-15 */
+ case 0: /* register 0-15 */
outb(data & 0xFF, ioaddr + AM2150_MACE_BASE + reg);
break;
- case 1: /* register 16-31 */
+ case 1: /* register 16-31 */
spin_lock_irqsave(&lp->bank_lock, flags);
MACEBANK(1);
outb(data & 0xFF, ioaddr + AM2150_MACE_BASE + (reg & 0x0F));
@@ -567,13 +567,13 @@ static int mace_init(mace_private *lp, unsigned int ioaddr,
* Or just set ASEL in PHYCC below!
*/
switch (if_port) {
- case 1:
+ case 1:
mace_write(lp, ioaddr, MACE_PLSCC, 0x02);
break;
- case 2:
+ case 2:
mace_write(lp, ioaddr, MACE_PLSCC, 0x00);
break;
- default:
+ default:
mace_write(lp, ioaddr, MACE_PHYCC, /* ASEL */ 4);
/* ASEL Auto Select. When set, the PORTSEL[1-0] bits are overridden,
and the MACE device will automatically select the operating media
@@ -815,7 +815,7 @@ static int mace_close(struct net_device *dev)
static void netdev_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
snprintf(info->bus_info, sizeof(info->bus_info),
"PCMCIA 0x%lx", dev->base_addr);
}
@@ -918,7 +918,7 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id)
int status;
int IntrCnt = MACE_MAX_IR_ITERATIONS;
- if (dev == NULL) {
+ if (!dev) {
pr_debug("mace_interrupt(): irq 0x%X for unknown device.\n",
irq);
return IRQ_NONE;
@@ -1102,7 +1102,7 @@ static int mace_rx(struct net_device *dev, unsigned char RxCnt)
skb = netdev_alloc_skb(dev, pkt_len + 2);
- if (skb != NULL) {
+ if (skb) {
skb_reserve(skb, 2);
insw(ioaddr + AM2150_RCV, skb_put(skb, pkt_len), pkt_len>>1);
if (pkt_len & 1)
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index b5ff47283cfe..72db9f9e7bee 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -488,7 +488,7 @@ static void pcnet32_realloc_tx_ring(struct net_device *dev,
dma_alloc_coherent(&lp->pci_dev->dev,
sizeof(struct pcnet32_tx_head) * entries,
&new_ring_dma_addr, GFP_ATOMIC);
- if (new_tx_ring == NULL)
+ if (!new_tx_ring)
return;
new_dma_addr_list = kcalloc(entries, sizeof(dma_addr_t), GFP_ATOMIC);
@@ -547,7 +547,7 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
dma_alloc_coherent(&lp->pci_dev->dev,
sizeof(struct pcnet32_rx_head) * entries,
&new_ring_dma_addr, GFP_ATOMIC);
- if (new_rx_ring == NULL)
+ if (!new_rx_ring)
return;
new_dma_addr_list = kcalloc(entries, sizeof(dma_addr_t), GFP_ATOMIC);
@@ -797,9 +797,9 @@ static void pcnet32_get_drvinfo(struct net_device *dev,
{
struct pcnet32_private *lp = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
if (lp->pci_dev)
- strlcpy(info->bus_info, pci_name(lp->pci_dev),
+ strscpy(info->bus_info, pci_name(lp->pci_dev),
sizeof(info->bus_info));
else
snprintf(info->bus_info, sizeof(info->bus_info),
@@ -1249,7 +1249,7 @@ static void pcnet32_rx_entry(struct net_device *dev,
} else
skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN);
- if (skb == NULL) {
+ if (!skb) {
dev->stats.rx_dropped++;
return;
}
@@ -2018,7 +2018,7 @@ static int pcnet32_alloc_ring(struct net_device *dev, const char *name)
lp->tx_ring = dma_alloc_coherent(&lp->pci_dev->dev,
sizeof(struct pcnet32_tx_head) * lp->tx_ring_size,
&lp->tx_ring_dma_addr, GFP_KERNEL);
- if (lp->tx_ring == NULL) {
+ if (!lp->tx_ring) {
netif_err(lp, drv, dev, "Coherent memory allocation failed\n");
return -ENOMEM;
}
@@ -2026,7 +2026,7 @@ static int pcnet32_alloc_ring(struct net_device *dev, const char *name)
lp->rx_ring = dma_alloc_coherent(&lp->pci_dev->dev,
sizeof(struct pcnet32_rx_head) * lp->rx_ring_size,
&lp->rx_ring_dma_addr, GFP_KERNEL);
- if (lp->rx_ring == NULL) {
+ if (!lp->rx_ring) {
netif_err(lp, drv, dev, "Coherent memory allocation failed\n");
return -ENOMEM;
}
@@ -2365,7 +2365,7 @@ static int pcnet32_init_ring(struct net_device *dev)
for (i = 0; i < lp->rx_ring_size; i++) {
struct sk_buff *rx_skbuff = lp->rx_skbuff[i];
- if (rx_skbuff == NULL) {
+ if (!rx_skbuff) {
lp->rx_skbuff[i] = netdev_alloc_skb(dev, PKT_BUF_SKB);
rx_skbuff = lp->rx_skbuff[i];
if (!rx_skbuff) {
diff --git a/drivers/net/ethernet/amd/sun3lance.c b/drivers/net/ethernet/amd/sun3lance.c
index 007bd7787291..246f34c43765 100644
--- a/drivers/net/ethernet/amd/sun3lance.c
+++ b/drivers/net/ethernet/amd/sun3lance.c
@@ -341,7 +341,7 @@ static int __init lance_probe( struct net_device *dev)
/* XXX - leak? */
MEM = dvma_malloc_align(sizeof(struct lance_memory), 0x10000);
- if (MEM == NULL) {
+ if (!MEM) {
#ifdef CONFIG_SUN3
iounmap((void __iomem *)ioaddr);
#endif
@@ -796,7 +796,7 @@ static int lance_rx( struct net_device *dev )
}
else {
skb = netdev_alloc_skb(dev, pkt_len + 2);
- if (skb == NULL) {
+ if (!skb) {
dev->stats.rx_dropped++;
head->msg_length = 0;
head->flag |= RMD1_OWN_CHIP;
diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c
index 22d609563af8..68ca1225eedc 100644
--- a/drivers/net/ethernet/amd/sunlance.c
+++ b/drivers/net/ethernet/amd/sunlance.c
@@ -530,7 +530,7 @@ static void lance_rx_dvma(struct net_device *dev)
len = (rd->mblength & 0xfff) - 4;
skb = netdev_alloc_skb(dev, len + 2);
- if (skb == NULL) {
+ if (!skb) {
dev->stats.rx_dropped++;
rd->mblength = 0;
rd->rmd1_bits = LE_R1_OWN;
@@ -700,7 +700,7 @@ static void lance_rx_pio(struct net_device *dev)
len = (sbus_readw(&rd->mblength) & 0xfff) - 4;
skb = netdev_alloc_skb(dev, len + 2);
- if (skb == NULL) {
+ if (!skb) {
dev->stats.rx_dropped++;
sbus_writew(0, &rd->mblength);
sbus_writeb(LE_R1_OWN, &rd->rmd1_bits);
@@ -1276,7 +1276,7 @@ static void lance_free_hwresources(struct lance_private *lp)
/* Ethtool support... */
static void sparc_lance_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, "sunlance", sizeof(info->driver));
+ strscpy(info->driver, "sunlance", sizeof(info->driver));
}
static const struct ethtool_ops sparc_lance_ethtool_ops = {
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index f342bb853189..7b666106feee 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -952,14 +952,14 @@ static void xgbe_napi_enable(struct xgbe_prv_data *pdata, unsigned int add)
channel = pdata->channel[i];
if (add)
netif_napi_add(pdata->netdev, &channel->napi,
- xgbe_one_poll, NAPI_POLL_WEIGHT);
+ xgbe_one_poll);
napi_enable(&channel->napi);
}
} else {
if (add)
netif_napi_add(pdata->netdev, &pdata->napi,
- xgbe_all_poll, NAPI_POLL_WEIGHT);
+ xgbe_all_poll);
napi_enable(&pdata->napi);
}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
index 6ceb1cdf6eba..6e83ff59172a 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
@@ -402,8 +402,8 @@ static void xgbe_get_drvinfo(struct net_device *netdev,
struct xgbe_prv_data *pdata = netdev_priv(netdev);
struct xgbe_hw_features *hw_feat = &pdata->hw_feat;
- strlcpy(drvinfo->driver, XGBE_DRV_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->bus_info, dev_name(pdata->dev),
+ strscpy(drvinfo->driver, XGBE_DRV_NAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->bus_info, dev_name(pdata->dev),
sizeof(drvinfo->bus_info));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d.%d.%d",
XGMAC_GET_BITS(hw_feat->version, MAC_VR, USERVER),
diff --git a/drivers/net/ethernet/apm/xgene-v2/main.c b/drivers/net/ethernet/apm/xgene-v2/main.c
index d022b6db9e06..379d19d18dbe 100644
--- a/drivers/net/ethernet/apm/xgene-v2/main.c
+++ b/drivers/net/ethernet/apm/xgene-v2/main.c
@@ -672,7 +672,7 @@ static int xge_probe(struct platform_device *pdev)
if (ret)
goto err;
- netif_napi_add(ndev, &pdata->napi, xge_napi, NAPI_POLL_WEIGHT);
+ netif_napi_add(ndev, &pdata->napi, xge_napi);
ret = register_netdev(ndev);
if (ret) {
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 53dc8d5fede8..d6cfea65a714 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -1977,14 +1977,12 @@ static void xgene_enet_napi_add(struct xgene_enet_pdata *pdata)
for (i = 0; i < pdata->rxq_cnt; i++) {
napi = &pdata->rx_ring[i]->napi;
- netif_napi_add(pdata->ndev, napi, xgene_enet_napi,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(pdata->ndev, napi, xgene_enet_napi);
}
for (i = 0; i < pdata->cq_cnt; i++) {
napi = &pdata->tx_ring[i]->cp_ring->napi;
- netif_napi_add(pdata->ndev, napi, xgene_enet_napi,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(pdata->ndev, napi, xgene_enet_napi);
}
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
index 1daecd483b8d..a08f221e30d4 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -238,7 +238,7 @@ static void aq_ethtool_get_drvinfo(struct net_device *ndev,
"%u.%u.%u", firmware_version >> 24,
(firmware_version >> 16) & 0xFFU, firmware_version & 0xFFFFU);
- strlcpy(drvinfo->bus_info, pdev ? pci_name(pdev) : "",
+ strscpy(drvinfo->bus_info, pdev ? pci_name(pdev) : "",
sizeof(drvinfo->bus_info));
drvinfo->n_stats = aq_ethtool_n_stats(ndev);
drvinfo->testinfo_len = 0;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c b/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c
index 02058fe79f52..3d0e16791e1c 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c
@@ -292,9 +292,6 @@ static int aq_mdo_dev_open(struct macsec_context *ctx)
struct aq_nic_s *nic = netdev_priv(ctx->netdev);
int ret = 0;
- if (ctx->prepare)
- return 0;
-
if (netif_carrier_ok(nic->ndev))
ret = aq_apply_secy_cfg(nic, ctx->secy);
@@ -306,9 +303,6 @@ static int aq_mdo_dev_stop(struct macsec_context *ctx)
struct aq_nic_s *nic = netdev_priv(ctx->netdev);
int i;
- if (ctx->prepare)
- return 0;
-
for (i = 0; i < AQ_MACSEC_MAX_SC; i++) {
if (nic->macsec_cfg->txsc_idx_busy & BIT(i))
aq_clear_secy(nic, nic->macsec_cfg->aq_txsc[i].sw_secy,
@@ -466,9 +460,6 @@ static int aq_mdo_add_secy(struct macsec_context *ctx)
if (txsc_idx == AQ_MACSEC_MAX_SC)
return -ENOSPC;
- if (ctx->prepare)
- return 0;
-
cfg->sc_sa = sc_sa;
cfg->aq_txsc[txsc_idx].hw_sc_idx = aq_to_hw_sc_idx(txsc_idx, sc_sa);
cfg->aq_txsc[txsc_idx].sw_secy = secy;
@@ -492,9 +483,6 @@ static int aq_mdo_upd_secy(struct macsec_context *ctx)
if (txsc_idx < 0)
return -ENOENT;
- if (ctx->prepare)
- return 0;
-
if (netif_carrier_ok(nic->ndev) && netif_running(secy->netdev))
ret = aq_set_txsc(nic, txsc_idx);
@@ -543,9 +531,6 @@ static int aq_mdo_del_secy(struct macsec_context *ctx)
struct aq_nic_s *nic = netdev_priv(ctx->netdev);
int ret = 0;
- if (ctx->prepare)
- return 0;
-
if (!nic->macsec_cfg)
return 0;
@@ -601,9 +586,6 @@ static int aq_mdo_add_txsa(struct macsec_context *ctx)
if (txsc_idx < 0)
return -EINVAL;
- if (ctx->prepare)
- return 0;
-
aq_txsc = &cfg->aq_txsc[txsc_idx];
set_bit(ctx->sa.assoc_num, &aq_txsc->tx_sa_idx_busy);
@@ -631,9 +613,6 @@ static int aq_mdo_upd_txsa(struct macsec_context *ctx)
if (txsc_idx < 0)
return -EINVAL;
- if (ctx->prepare)
- return 0;
-
aq_txsc = &cfg->aq_txsc[txsc_idx];
if (netif_carrier_ok(nic->ndev) && netif_running(secy->netdev))
ret = aq_update_txsa(nic, aq_txsc->hw_sc_idx, secy,
@@ -681,9 +660,6 @@ static int aq_mdo_del_txsa(struct macsec_context *ctx)
if (txsc_idx < 0)
return -EINVAL;
- if (ctx->prepare)
- return 0;
-
ret = aq_clear_txsa(nic, &cfg->aq_txsc[txsc_idx], ctx->sa.assoc_num,
AQ_CLEAR_ALL);
@@ -780,9 +756,6 @@ static int aq_mdo_add_rxsc(struct macsec_context *ctx)
if (rxsc_idx >= rxsc_idx_max)
return -ENOSPC;
- if (ctx->prepare)
- return 0;
-
cfg->aq_rxsc[rxsc_idx].hw_sc_idx = aq_to_hw_sc_idx(rxsc_idx,
cfg->sc_sa);
cfg->aq_rxsc[rxsc_idx].sw_secy = ctx->secy;
@@ -809,9 +782,6 @@ static int aq_mdo_upd_rxsc(struct macsec_context *ctx)
if (rxsc_idx < 0)
return -ENOENT;
- if (ctx->prepare)
- return 0;
-
if (netif_carrier_ok(nic->ndev) && netif_running(ctx->secy->netdev))
ret = aq_set_rxsc(nic, rxsc_idx);
@@ -876,9 +846,6 @@ static int aq_mdo_del_rxsc(struct macsec_context *ctx)
if (rxsc_idx < 0)
return -ENOENT;
- if (ctx->prepare)
- return 0;
-
if (netif_carrier_ok(nic->ndev))
clear_type = AQ_CLEAR_ALL;
@@ -948,9 +915,6 @@ static int aq_mdo_add_rxsa(struct macsec_context *ctx)
if (rxsc_idx < 0)
return -EINVAL;
- if (ctx->prepare)
- return 0;
-
aq_rxsc = &nic->macsec_cfg->aq_rxsc[rxsc_idx];
set_bit(ctx->sa.assoc_num, &aq_rxsc->rx_sa_idx_busy);
@@ -978,9 +942,6 @@ static int aq_mdo_upd_rxsa(struct macsec_context *ctx)
if (rxsc_idx < 0)
return -EINVAL;
- if (ctx->prepare)
- return 0;
-
if (netif_carrier_ok(nic->ndev) && netif_running(secy->netdev))
ret = aq_update_rxsa(nic, cfg->aq_rxsc[rxsc_idx].hw_sc_idx,
secy, ctx->sa.rx_sa, NULL,
@@ -1029,9 +990,6 @@ static int aq_mdo_del_rxsa(struct macsec_context *ctx)
if (rxsc_idx < 0)
return -EINVAL;
- if (ctx->prepare)
- return 0;
-
ret = aq_clear_rxsa(nic, &cfg->aq_rxsc[rxsc_idx], ctx->sa.assoc_num,
AQ_CLEAR_ALL);
@@ -1044,9 +1002,6 @@ static int aq_mdo_get_dev_stats(struct macsec_context *ctx)
struct aq_macsec_common_stats *stats = &nic->macsec_cfg->stats;
struct aq_hw_s *hw = nic->aq_hw;
- if (ctx->prepare)
- return 0;
-
aq_get_macsec_common_stats(hw, stats);
ctx->stats.dev_stats->OutPktsUntagged = stats->out.untagged_pkts;
@@ -1073,9 +1028,6 @@ static int aq_mdo_get_tx_sc_stats(struct macsec_context *ctx)
if (txsc_idx < 0)
return -ENOENT;
- if (ctx->prepare)
- return 0;
-
aq_txsc = &nic->macsec_cfg->aq_txsc[txsc_idx];
stats = &aq_txsc->stats;
aq_get_txsc_stats(hw, aq_txsc->hw_sc_idx, stats);
@@ -1106,9 +1058,6 @@ static int aq_mdo_get_tx_sa_stats(struct macsec_context *ctx)
if (txsc_idx < 0)
return -EINVAL;
- if (ctx->prepare)
- return 0;
-
aq_txsc = &cfg->aq_txsc[txsc_idx];
sa_idx = aq_txsc->hw_sc_idx | ctx->sa.assoc_num;
stats = &aq_txsc->tx_sa_stats[ctx->sa.assoc_num];
@@ -1147,9 +1096,6 @@ static int aq_mdo_get_rx_sc_stats(struct macsec_context *ctx)
if (rxsc_idx < 0)
return -ENOENT;
- if (ctx->prepare)
- return 0;
-
aq_rxsc = &cfg->aq_rxsc[rxsc_idx];
for (i = 0; i < MACSEC_NUM_AN; i++) {
if (!test_bit(i, &aq_rxsc->rx_sa_idx_busy))
@@ -1196,9 +1142,6 @@ static int aq_mdo_get_rx_sa_stats(struct macsec_context *ctx)
if (rxsc_idx < 0)
return -EINVAL;
- if (ctx->prepare)
- return 0;
-
aq_rxsc = &cfg->aq_rxsc[rxsc_idx];
stats = &aq_rxsc->rx_sa_stats[ctx->sa.assoc_num];
sa_idx = aq_rxsc->hw_sc_idx | ctx->sa.assoc_num;
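
The aq_macsec hunks delete the repeated "if (ctx->prepare) return 0;" guards: the MACsec offload core no longer runs a separate prepare pass, so each macsec_ops callback is invoked exactly once and can apply its configuration directly. A minimal sketch of a callback written against that contract; the hardware-programming helper is an illustrative placeholder, not a real driver function:

    #include <net/macsec.h>

    static int example_hw_program_txsa(const struct macsec_secy *secy,
                                       const struct macsec_tx_sa *tx_sa,
                                       unsigned char an)
    {
            /* Illustrative stand-in for device-specific register writes. */
            return 0;
    }

    static int example_mdo_add_txsa(struct macsec_context *ctx)
    {
            /* No early return on a prepare phase any more: this is the only
             * call the core makes, so program the hardware right away.
             */
            return example_hw_program_txsa(ctx->secy, ctx->sa.tx_sa,
                                           ctx->sa.assoc_num);
    }
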
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
index 275324c9e51e..80b44043e6c5 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ptp.c
@@ -1217,8 +1217,7 @@ int aq_ptp_init(struct aq_nic_s *aq_nic, unsigned int idx_vec)
atomic_set(&aq_ptp->offset_egress, 0);
atomic_set(&aq_ptp->offset_ingress, 0);
- netif_napi_add(aq_nic_get_ndev(aq_nic), &aq_ptp->napi,
- aq_ptp_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(aq_nic_get_ndev(aq_nic), &aq_ptp->napi, aq_ptp_poll);
aq_ptp->idx_vector = idx_vec;
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
index f0fdf20f01c1..f5db1c44e9b9 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
@@ -119,8 +119,7 @@ struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx,
self->tx_rings = 0;
self->rx_rings = 0;
- netif_napi_add(aq_nic_get_ndev(aq_nic), &self->napi,
- aq_vec_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(aq_nic_get_ndev(aq_nic), &self->napi, aq_vec_poll);
err_exit:
return self;
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index 288e2961823e..ba0646b3b122 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -91,7 +91,7 @@ static void arc_emac_get_drvinfo(struct net_device *ndev,
{
struct arc_emac_priv *priv = netdev_priv(ndev);
- strlcpy(info->driver, priv->drv_name, sizeof(info->driver));
+ strscpy(info->driver, priv->drv_name, sizeof(info->driver));
}
static const struct ethtool_ops arc_emac_ethtool_ops = {
diff --git a/drivers/net/ethernet/asix/ax88796c_main.c b/drivers/net/ethernet/asix/ax88796c_main.c
index 6ba5b024a7be..8b7cdf015a16 100644
--- a/drivers/net/ethernet/asix/ax88796c_main.c
+++ b/drivers/net/ethernet/asix/ax88796c_main.c
@@ -293,7 +293,7 @@ ax88796c_tx_fixup(struct net_device *ndev, struct sk_buff_head *q)
skb_put(skb, padlen);
/* EOP header */
- memcpy(skb_put(skb, TX_EOP_SIZE), &info.eop, TX_EOP_SIZE);
+ skb_put_data(skb, &info.eop, TX_EOP_SIZE);
skb_unlink(skb, q);
@@ -381,7 +381,7 @@ static int ax88796c_hard_xmit(struct ax88796c_device *ax_local)
return 1;
}
-static int
+static netdev_tx_t
ax88796c_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
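
The ax88796c hunk (like the bcm4908_enet one further down) corrects the .ndo_start_xmit prototype to return netdev_tx_t rather than int. A hedged skeleton of the expected shape; everything except the netdev_tx_t return contract (the ring-full test and DMA queueing helpers) is illustrative:

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    static bool example_tx_ring_full(struct net_device *ndev)
    {
            return false;   /* illustrative: a real driver checks ring state */
    }

    static void example_queue_for_dma(struct net_device *ndev, struct sk_buff *skb)
    {
            dev_kfree_skb_any(skb); /* placeholder for mapping and queueing */
    }

    static netdev_tx_t example_start_xmit(struct sk_buff *skb,
                                          struct net_device *ndev)
    {
            if (example_tx_ring_full(ndev)) {
                    /* Ask the stack to requeue the skb later. */
                    netif_stop_queue(ndev);
                    return NETDEV_TX_BUSY;
            }

            example_queue_for_dma(ndev, skb);
            return NETDEV_TX_OK;    /* skb now owned by the driver */
    }
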
diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
index e461f4764066..cc932b3cf873 100644
--- a/drivers/net/ethernet/atheros/ag71xx.c
+++ b/drivers/net/ethernet/atheros/ag71xx.c
@@ -451,8 +451,8 @@ static void ag71xx_get_drvinfo(struct net_device *ndev,
{
struct ag71xx *ag = netdev_priv(ndev);
- strlcpy(info->driver, "ag71xx", sizeof(info->driver));
- strlcpy(info->bus_info, of_node_full_name(ag->pdev->dev.of_node),
+ strscpy(info->driver, "ag71xx", sizeof(info->driver));
+ strscpy(info->bus_info, of_node_full_name(ag->pdev->dev.of_node),
sizeof(info->bus_info));
}
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index a89b93cb4e26..d30d11872719 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -752,7 +752,7 @@ static int alx_alloc_napis(struct alx_priv *alx)
goto err_out;
np->alx = alx;
- netif_napi_add(alx->dev, &np->napi, alx_poll, 64);
+ netif_napi_add(alx->dev, &np->napi, alx_poll);
alx->qnapi[i] = np;
}
@@ -1912,11 +1912,14 @@ static int alx_suspend(struct device *dev)
if (!netif_running(alx->dev))
return 0;
+
+ rtnl_lock();
netif_device_detach(alx->dev);
mutex_lock(&alx->mtx);
__alx_stop(alx);
mutex_unlock(&alx->mtx);
+ rtnl_unlock();
return 0;
}
@@ -1927,6 +1930,7 @@ static int alx_resume(struct device *dev)
struct alx_hw *hw = &alx->hw;
int err;
+ rtnl_lock();
mutex_lock(&alx->mtx);
alx_reset_phy(hw);
@@ -1943,6 +1947,7 @@ static int alx_resume(struct device *dev)
unlock:
mutex_unlock(&alx->mtx);
+ rtnl_unlock();
return err;
}
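
The alx suspend/resume hunks take the RTNL around netif_device_detach()/__alx_stop() and the matching resume path so they cannot race with concurrent ndo_open/ndo_stop or ethtool operations. A minimal sketch of the suspend side under that assumption; the driver-specific stop helper and drvdata layout are illustrative:

    #include <linux/device.h>
    #include <linux/netdevice.h>
    #include <linux/rtnetlink.h>

    static void example_hw_stop(struct net_device *ndev)
    {
            /* Illustrative placeholder for the driver's teardown path. */
    }

    static int example_suspend(struct device *dev)
    {
            struct net_device *ndev = dev_get_drvdata(dev);

            if (!netif_running(ndev))
                    return 0;

            rtnl_lock();            /* serialise against ndo_open/ndo_stop */
            netif_device_detach(ndev);
            example_hw_stop(ndev);
            rtnl_unlock();

            return 0;
    }
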
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c b/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
index e2eb7b8c63a0..0bce122c68f1 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
@@ -220,8 +220,8 @@ static void atl1c_get_drvinfo(struct net_device *netdev,
{
struct atl1c_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, atl1c_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strscpy(drvinfo->driver, atl1c_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index be4b1f8eef29..40c781695d58 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -2732,7 +2732,7 @@ static int atl1c_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_set_threaded(netdev, true);
for (i = 0; i < adapter->rx_queue_count; ++i)
netif_napi_add(netdev, &adapter->rrd_ring[i].napi,
- atl1c_clean_rx, 64);
+ atl1c_clean_rx);
for (i = 0; i < adapter->tx_queue_count; ++i)
netif_napi_add_tx(netdev, &adapter->tpd_ring[i].napi,
atl1c_clean_tx);
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c b/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
index 0cbde352d1ba..68f1832a198d 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
@@ -306,9 +306,9 @@ static void atl1e_get_drvinfo(struct net_device *netdev,
{
struct atl1e_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, atl1e_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->fw_version, "L1e", sizeof(drvinfo->fw_version));
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strscpy(drvinfo->driver, atl1e_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->fw_version, "L1e", sizeof(drvinfo->fw_version));
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 57a51fb7746c..5db0f3495a32 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -2354,7 +2354,7 @@ static int atl1e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->mii.phy_id_mask = 0x1f;
adapter->mii.reg_num_mask = MDIO_REG_ADDR_MASK;
- netif_napi_add(netdev, &adapter->napi, atl1e_clean, 64);
+ netif_napi_add(netdev, &adapter->napi, atl1e_clean);
timer_setup(&adapter->phy_config_timer, atl1e_phy_config, 0);
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index ff1fe09abf9f..c8444bcdf527 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -2977,7 +2977,7 @@ static int atl1_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->netdev_ops = &atl1_netdev_ops;
netdev->watchdog_timeo = 5 * HZ;
- netif_napi_add(netdev, &adapter->napi, atl1_rings_clean, 64);
+ netif_napi_add(netdev, &adapter->napi, atl1_rings_clean);
netdev->ethtool_ops = &atl1_ethtool_ops;
adapter->bd_number = cards_found;
@@ -3340,8 +3340,8 @@ static void atl1_get_drvinfo(struct net_device *netdev,
{
struct atl1_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, ATLX_DRIVER_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strscpy(drvinfo->driver, ATLX_DRIVER_NAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index bbc4d7b08a49..1b487c071cb6 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -1980,9 +1980,9 @@ static void atl2_get_drvinfo(struct net_device *netdev,
{
struct atl2_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, atl2_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->fw_version, "L2", sizeof(drvinfo->fw_version));
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strscpy(drvinfo->driver, atl2_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->fw_version, "L2", sizeof(drvinfo->fw_version));
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index e5857e88c207..7f876721596c 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -1790,13 +1790,13 @@ static void b44_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *inf
struct b44 *bp = netdev_priv(dev);
struct ssb_bus *bus = bp->sdev->bus;
- strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
switch (bus->bustype) {
case SSB_BUSTYPE_PCI:
- strlcpy(info->bus_info, pci_name(bus->host_pci), sizeof(info->bus_info));
+ strscpy(info->bus_info, pci_name(bus->host_pci), sizeof(info->bus_info));
break;
case SSB_BUSTYPE_SSB:
- strlcpy(info->bus_info, "SSB", sizeof(info->bus_info));
+ strscpy(info->bus_info, "SSB", sizeof(info->bus_info));
break;
case SSB_BUSTYPE_PCMCIA:
case SSB_BUSTYPE_SDIO:
@@ -2375,7 +2375,7 @@ static int b44_init_one(struct ssb_device *sdev,
bp->tx_pending = B44_DEF_TX_RING_PENDING;
dev->netdev_ops = &b44_netdev_ops;
- netif_napi_add(dev, &bp->napi, b44_poll, 64);
+ netif_napi_add(dev, &bp->napi, b44_poll);
dev->watchdog_timeo = B44_TX_TIMEOUT;
dev->min_mtu = B44_MIN_MTU;
dev->max_mtu = B44_MAX_MTU;
diff --git a/drivers/net/ethernet/broadcom/bcm4908_enet.c b/drivers/net/ethernet/broadcom/bcm4908_enet.c
index c131d8118489..93ccf549e2ed 100644
--- a/drivers/net/ethernet/broadcom/bcm4908_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm4908_enet.c
@@ -507,7 +507,7 @@ static int bcm4908_enet_stop(struct net_device *netdev)
return 0;
}
-static int bcm4908_enet_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+static netdev_tx_t bcm4908_enet_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct bcm4908_enet *enet = netdev_priv(netdev);
struct bcm4908_enet_dma_ring *ring = &enet->tx_ring;
@@ -716,6 +716,8 @@ static int bcm4908_enet_probe(struct platform_device *pdev)
SET_NETDEV_DEV(netdev, &pdev->dev);
err = of_get_ethdev_address(dev->of_node, netdev);
+ if (err == -EPROBE_DEFER)
+ goto err_dma_free;
if (err)
eth_hw_addr_random(netdev);
netdev->netdev_ops = &bcm4908_enet_netdev_ops;
@@ -723,17 +725,20 @@ static int bcm4908_enet_probe(struct platform_device *pdev)
netdev->mtu = ETH_DATA_LEN;
netdev->max_mtu = ENET_MTU_MAX;
netif_napi_add_tx(netdev, &enet->tx_ring.napi, bcm4908_enet_poll_tx);
- netif_napi_add(netdev, &enet->rx_ring.napi, bcm4908_enet_poll_rx, NAPI_POLL_WEIGHT);
+ netif_napi_add(netdev, &enet->rx_ring.napi, bcm4908_enet_poll_rx);
err = register_netdev(netdev);
- if (err) {
- bcm4908_enet_dma_free(enet);
- return err;
- }
+ if (err)
+ goto err_dma_free;
platform_set_drvdata(pdev, enet);
return 0;
+
+err_dma_free:
+ bcm4908_enet_dma_free(enet);
+
+ return err;
}
static int bcm4908_enet_remove(struct platform_device *pdev)
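
The bcm4908_enet probe hunks funnel both the -EPROBE_DEFER case from of_get_ethdev_address() and a register_netdev() failure through one err_dma_free label, so the DMA resources are released in a single place. A hedged sketch of that single-exit unwinding pattern with illustrative helper names:

    #include <linux/errno.h>
    #include <linux/platform_device.h>

    static int example_dma_alloc(struct platform_device *pdev) { return 0; }
    static void example_dma_free(struct platform_device *pdev) { }
    static int example_get_mac_address(struct platform_device *pdev) { return 0; }
    static int example_register_netdev(struct platform_device *pdev) { return 0; }

    static int example_probe(struct platform_device *pdev)
    {
            int err;

            err = example_dma_alloc(pdev);
            if (err)
                    return err;

            err = example_get_mac_address(pdev);
            if (err == -EPROBE_DEFER)
                    goto err_dma_free;      /* retry later; undo the allocation */

            err = example_register_netdev(pdev);
            if (err)
                    goto err_dma_free;

            return 0;

    err_dma_free:
            example_dma_free(pdev);         /* single unwind point */
            return err;
    }
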
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 1c6aea12db72..d91fdb0c2649 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -1321,8 +1321,8 @@ static const u32 unused_mib_regs[] = {
static void bcm_enet_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
- strlcpy(drvinfo->driver, bcm_enet_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info));
+ strscpy(drvinfo->driver, bcm_enet_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info));
}
static int bcm_enet_get_sset_count(struct net_device *netdev,
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 47fc8e6963d5..867f14c30e09 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -308,8 +308,8 @@ static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
static void bcm_sysport_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
- strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
+ strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strscpy(info->bus_info, "platform", sizeof(info->bus_info));
}
static u32 bcm_sysport_get_msglvl(struct net_device *dev)
@@ -2564,7 +2564,7 @@ static int bcm_sysport_probe(struct platform_device *pdev)
dev_set_drvdata(&pdev->dev, dev);
dev->ethtool_ops = &bcm_sysport_ethtool_ops;
dev->netdev_ops = &bcm_sysport_netdev_ops;
- netif_napi_add(dev, &priv->napi, bcm_sysport_poll, 64);
+ netif_napi_add(dev, &priv->napi, bcm_sysport_poll);
dev->features |= NETIF_F_RXCSUM | NETIF_F_HIGHDMA |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 93580484a3f4..5fb3af5670ec 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -1367,7 +1367,7 @@ static void bgmac_get_strings(struct net_device *dev, u32 stringset,
return;
for (i = 0; i < BGMAC_STATS_LEN; i++)
- strlcpy(data + i * ETH_GSTRING_LEN,
+ strscpy(data + i * ETH_GSTRING_LEN,
bgmac_get_strings_stats[i].name, ETH_GSTRING_LEN);
}
@@ -1395,8 +1395,8 @@ static void bgmac_get_ethtool_stats(struct net_device *dev,
static void bgmac_get_drvinfo(struct net_device *net_dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
- strlcpy(info->bus_info, "AXI", sizeof(info->bus_info));
+ strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strscpy(info->bus_info, "AXI", sizeof(info->bus_info));
}
static const struct ethtool_ops bgmac_ethtool_ops = {
@@ -1527,7 +1527,7 @@ int bgmac_enet_probe(struct bgmac *bgmac)
if (bcm47xx_nvram_getenv("et0_no_txint", NULL, 0) == 0)
bgmac->int_mask &= ~BGMAC_IS_TX_MASK;
- netif_napi_add(net_dev, &bgmac->napi, bgmac_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(net_dev, &bgmac->napi, bgmac_poll);
err = bgmac_phy_connect(bgmac);
if (err) {
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index b97ed9b5f685..fec57f1982c8 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -176,12 +176,12 @@ static const struct flash_spec flash_table[] =
{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
- "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
+ "Entry 0101: ST M45PE10 (128kB non-buffered)"},
/* Entry 0110: ST M45PE20 (non-buffered flash)*/
{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
- "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
+ "Entry 0110: ST M45PE20 (256kB non-buffered)"},
/* Saifun SA25F005 (non-buffered flash) */
/* strap, cfg1, & write1 need updates */
{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
@@ -7042,9 +7042,9 @@ bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct bnx2 *bp = netdev_priv(dev);
- strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
- strlcpy(info->fw_version, bp->fw_version, sizeof(info->fw_version));
+ strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
+ strscpy(info->fw_version, bp->fw_version, sizeof(info->fw_version));
}
#define BNX2_REGDUMP_LEN (32 * 1024)
@@ -8522,7 +8522,7 @@ bnx2_init_napi(struct bnx2 *bp)
else
poll = bnx2_poll_msix;
- netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
+ netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll);
bnapi->bp = bp;
}
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 712b5595bc39..16c490692f42 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -44,8 +44,7 @@ static void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
/* Add NAPI objects */
for_each_rx_queue_cnic(bp, i) {
- netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
- bnx2x_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), bnx2x_poll);
}
}
@@ -55,8 +54,7 @@ static void bnx2x_add_all_napi(struct bnx2x *bp)
/* Add NAPI objects */
for_each_eth_queue(bp, i) {
- netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
- bnx2x_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), bnx2x_poll);
}
}
@@ -150,7 +148,7 @@ void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
phy_fw_ver[0] = '\0';
bnx2x_get_ext_phy_fw_version(&bp->link_params,
phy_fw_ver, PHY_FW_VER_LEN);
- strlcpy(buf, bp->fw_ver, buf_len);
+ strscpy(buf, bp->fw_ver, buf_len);
snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
"bc %d.%d.%d%s%s",
(bp->common.bc_ver & 0xff0000) >> 16,
@@ -789,6 +787,7 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
pad, len, fp->rx_buf_size);
bnx2x_panic();
+ bnx2x_frag_free(fp, new_data);
return;
}
#endif
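
The bnx2x_tpa_stop() hunk adds a bnx2x_frag_free() call on the error path so the replacement buffer allocated just before the length sanity check is not leaked when that check fails and the function bails out. A self-contained sketch of the pattern, with illustrative names and plain kmalloc/kfree standing in for the frag helpers:

    #include <linux/slab.h>

    struct example_fp {
            unsigned int rx_buf_size;
            void *rx_buf;
    };

    static void example_tpa_stop(struct example_fp *fp, unsigned int pad,
                                 unsigned int len)
    {
            void *new_data = kmalloc(fp->rx_buf_size, GFP_ATOMIC);

            if (!new_data)
                    return;

            if (pad + len > fp->rx_buf_size) {
                    /* The sanity check failed: release the buffer allocated
                     * above, otherwise this early return leaks it.
                     */
                    kfree(new_data);
                    return;
            }

            kfree(fp->rx_buf);      /* hand back the old buffer... */
            fp->rx_buf = new_data;  /* ...and keep the fresh one */
    }
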
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 0e319ac7799f..bda3ccc28eca 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -1112,7 +1112,7 @@ static void bnx2x_get_drvinfo(struct net_device *dev,
int ext_dev_info_offset;
u32 mbi;
- strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
if (SHMEM2_HAS(bp, extended_dev_info_shared_addr)) {
ext_dev_info_offset = SHMEM2_RD(bp,
@@ -1126,7 +1126,7 @@ static void bnx2x_get_drvinfo(struct net_device *dev,
(mbi & 0xff000000) >> 24,
(mbi & 0x00ff0000) >> 16,
(mbi & 0x0000ff00) >> 8);
- strlcpy(info->fw_version, version,
+ strscpy(info->fw_version, version,
sizeof(info->fw_version));
}
}
@@ -1135,7 +1135,7 @@ static void bnx2x_get_drvinfo(struct net_device *dev,
bnx2x_fill_fw_str(bp, version, ETHTOOL_FWVERS_LEN);
strlcat(info->fw_version, version, sizeof(info->fw_version));
- strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
+ strscpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
}
static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 962253db25b8..51b1690fd045 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -3385,7 +3385,7 @@ static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
&bp->sp_objs->mac_obj;
int i;
- strlcpy(ether_stat->version, DRV_MODULE_VERSION,
+ strscpy(ether_stat->version, DRV_MODULE_VERSION,
ETH_STAT_INFO_VERSION_LEN);
/* get DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED macs, placing them in the
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index 2dac704dc346..02a4e557e176 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -518,7 +518,7 @@ int bnx2x_vfpf_storm_rx_mode(struct bnx2x *bp);
static inline void bnx2x_vf_fill_fw_str(struct bnx2x *bp, char *buf,
size_t buf_len)
{
- strlcpy(buf, bp->acquire_resp.pfdev_info.fw_ver, buf_len);
+ strscpy(buf, bp->acquire_resp.pfdev_info.fw_ver, buf_len);
}
static inline int bnx2x_vf_ustorm_prods_offset(struct bnx2x *bp,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index c9129b9ba446..0657a0f5170f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -380,7 +380,7 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
bp->igu_base_sb = bp->acquire_resp.resc.hw_sbs[0].hw_sb_id;
bp->vlan_credit = bp->acquire_resp.resc.num_vlan_filters;
- strlcpy(bp->fw_ver, bp->acquire_resp.pfdev_info.fw_ver,
+ strscpy(bp->fw_ver, bp->acquire_resp.pfdev_info.fw_ver,
sizeof(bp->fw_ver));
if (is_valid_ether_addr(bp->acquire_resp.resc.current_mac_addr))
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 96da0ba3d507..eed98c10ca9d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -9366,16 +9366,16 @@ static void bnxt_init_napi(struct bnxt *bp)
cp_nr_rings--;
for (i = 0; i < cp_nr_rings; i++) {
bnapi = bp->bnapi[i];
- netif_napi_add(bp->dev, &bnapi->napi, poll_fn, 64);
+ netif_napi_add(bp->dev, &bnapi->napi, poll_fn);
}
if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
bnapi = bp->bnapi[cp_nr_rings];
netif_napi_add(bp->dev, &bnapi->napi,
- bnxt_poll_nitroa0, 64);
+ bnxt_poll_nitroa0);
}
} else {
bnapi = bp->bnapi[0];
- netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
+ netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll);
}
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 87eb5362ad70..f57e524c7e30 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -1371,9 +1371,9 @@ static void bnxt_get_drvinfo(struct net_device *dev,
{
struct bnxt *bp = netdev_priv(dev);
- strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
- strlcpy(info->fw_version, bp->fw_ver_str, sizeof(info->fw_version));
- strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strscpy(info->fw_version, bp->fw_ver_str, sizeof(info->fw_version));
+ strscpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
info->n_stats = bnxt_get_num_stats(bp);
info->testinfo_len = bp->num_tests;
/* TODO CHIMP_FW: eeprom dump details */
@@ -3876,7 +3876,7 @@ void bnxt_ethtool_init(struct bnxt *bp)
} else if (i == BNXT_IRQ_TEST_IDX) {
strcpy(str, "Interrupt_test (offline)");
} else {
- strlcpy(str, fw_str, ETH_GSTRING_LEN);
+ strscpy(str, fw_str, ETH_GSTRING_LEN);
strncat(str, " test", ETH_GSTRING_LEN - strlen(str));
if (test_info->offline_mask & (1 << i))
strncat(str, " (offline)",
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
index 8e316367f6ce..2132ce63193c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
@@ -505,9 +505,13 @@ static int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
ptp->tstamp_filters = flags;
if (netif_running(bp->dev)) {
- rc = bnxt_close_nic(bp, false, false);
- if (!rc)
- rc = bnxt_open_nic(bp, false, false);
+ if (ptp->rx_filter == HWTSTAMP_FILTER_ALL) {
+ rc = bnxt_close_nic(bp, false, false);
+ if (!rc)
+ rc = bnxt_open_nic(bp, false, false);
+ } else {
+ bnxt_ptp_cfg_tstamp_filters(bp);
+ }
if (!rc && !ptp->tstamp_filters)
rc = -EIO;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
index eb4803b11c0e..fcc65890820a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
@@ -222,7 +222,7 @@ static int bnxt_vf_rep_get_phys_port_name(struct net_device *dev, char *buf,
static void bnxt_vf_rep_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
}
static int bnxt_vf_rep_get_port_parent_id(struct net_device *dev,
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 8309fb993cdb..25c450606985 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1146,7 +1146,7 @@ static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
static void bcmgenet_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, "bcmgenet", sizeof(info->driver));
+ strscpy(info->driver, "bcmgenet", sizeof(info->driver));
}
static int bcmgenet_get_sset_count(struct net_device *dev, int string_set)
@@ -2707,8 +2707,7 @@ static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv,
bcmgenet_init_rx_coalesce(ring);
/* Initialize Rx NAPI */
- netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll);
bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX);
bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX);
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 89889d8150da..4179a12fc881 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -7380,9 +7380,9 @@ static void tg3_napi_init(struct tg3 *tp)
{
int i;
- netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
+ netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll);
for (i = 1; i < tp->irq_cnt; i++)
- netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
+ netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix);
}
static void tg3_napi_fini(struct tg3 *tp)
@@ -12302,9 +12302,9 @@ static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
{
struct tg3 *tp = netdev_priv(dev);
- strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
- strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
- strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strscpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
+ strscpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
}
static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 29dd0f93d6c0..d6d90f9722a7 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -1891,7 +1891,7 @@ bnad_napi_add(struct bnad *bnad, u32 rx_id)
for (i = 0; i < bnad->num_rxp_per_rx; i++) {
rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
netif_napi_add(bnad->netdev, &rx_ctrl->napi,
- bnad_napi_poll_rx, NAPI_POLL_WEIGHT);
+ bnad_napi_poll_rx);
}
}
diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
index 8aca768571b2..df10edff5603 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
@@ -114,7 +114,7 @@ static const char *bnad_net_stats_strings[] = {
"mac_tx_deferral",
"mac_tx_excessive_deferral",
"mac_tx_single_collision",
- "mac_tx_muliple_collision",
+ "mac_tx_multiple_collision",
"mac_tx_late_collision",
"mac_tx_excessive_collision",
"mac_tx_total_collision",
@@ -283,7 +283,7 @@ bnad_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
struct bfa_ioc_attr *ioc_attr;
unsigned long flags;
- strlcpy(drvinfo->driver, BNAD_NAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->driver, BNAD_NAME, sizeof(drvinfo->driver));
ioc_attr = kzalloc(sizeof(*ioc_attr), GFP_KERNEL);
if (ioc_attr) {
@@ -291,12 +291,12 @@ bnad_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
bfa_nw_ioc_get_attr(&bnad->bna.ioceth.ioc, ioc_attr);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
- strlcpy(drvinfo->fw_version, ioc_attr->adapter_attr.fw_ver,
+ strscpy(drvinfo->fw_version, ioc_attr->adapter_attr.fw_ver,
sizeof(drvinfo->fw_version));
kfree(ioc_attr);
}
- strlcpy(drvinfo->bus_info, pci_name(bnad->pcidev),
+ strscpy(drvinfo->bus_info, pci_name(bnad->pcidev),
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index a2897549f9c4..51c9fd6f68a4 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -38,6 +38,7 @@
#include
#include
#include
+#include <linux/firmware/xlnx-zynqmp.h>
#include "macb.h"
/* This structure is only used for MACB on SiFive FU540 devices */
@@ -3977,8 +3978,8 @@ static int macb_init(struct platform_device *pdev)
queue = &bp->queues[q];
queue->bp = bp;
spin_lock_init(&queue->tx_ptr_lock);
- netif_napi_add(dev, &queue->napi_rx, macb_rx_poll, NAPI_POLL_WEIGHT);
- netif_napi_add(dev, &queue->napi_tx, macb_tx_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(dev, &queue->napi_rx, macb_rx_poll);
+ netif_napi_add(dev, &queue->napi_tx, macb_tx_poll);
if (hw_q) {
queue->ISR = GEM_ISR(hw_q - 1);
queue->IER = GEM_IER(hw_q - 1);
@@ -4621,6 +4622,25 @@ static int init_reset_optional(struct platform_device *pdev)
"failed to init SGMII PHY\n");
}
+ ret = zynqmp_pm_is_function_supported(PM_IOCTL, IOCTL_SET_GEM_CONFIG);
+ if (!ret) {
+ u32 pm_info[2];
+
+ ret = of_property_read_u32_array(pdev->dev.of_node, "power-domains",
+ pm_info, ARRAY_SIZE(pm_info));
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to read power management information\n");
+ goto err_out_phy_exit;
+ }
+ ret = zynqmp_pm_set_gem_config(pm_info[1], GEM_CONFIG_FIXED, 0);
+ if (ret)
+ goto err_out_phy_exit;
+
+ ret = zynqmp_pm_set_gem_config(pm_info[1], GEM_CONFIG_SGMII_MODE, 1);
+ if (ret)
+ goto err_out_phy_exit;
+ }
+
/* Fully reset controller at hardware level if mapped in device tree */
ret = device_reset_optional(&pdev->dev);
if (ret) {
@@ -4629,6 +4649,8 @@ static int init_reset_optional(struct platform_device *pdev)
}
ret = macb_init(pdev);
+
+err_out_phy_exit:
if (ret)
phy_exit(bp->sgmii_phy);
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index 1281d1565ef8..f4f87dfa9687 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -1792,7 +1792,7 @@ static int xgmac_probe(struct platform_device *pdev)
netdev_warn(ndev, "MAC address %pM not valid",
ndev->dev_addr);
- netif_napi_add(ndev, &priv->napi, xgmac_poll, 64);
+ netif_napi_add(ndev, &priv->napi, xgmac_poll);
ret = register_netdev(ndev);
if (ret)
goto err_reg;
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h
index 3f1c189646f4..a0fd32476225 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_regs.h
@@ -87,8 +87,8 @@
*/
#define CN23XX_SLI_PKT_IN_JABBER 0x29170
/* The input jabber is used to determine the TSO max size.
- * Due to H/W limitation, this need to be reduced to 60000
- * in order to to H/W TSO and avoid the WQE malfarmation
+ * Due to H/W limitation, this needs to be reduced to 60000
+ * in order to use H/W TSO and avoid the WQE malformation
* PKO_BUG_24989_WQE_LEN
*/
#define CN23XX_DEFAULT_INPUT_JABBER 0xEA60 /*60000*/
diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_regs.h b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_regs.h
index d33dd8f4226f..e956109415cd 100644
--- a/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_regs.h
+++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_vf_regs.h
@@ -36,8 +36,8 @@
#define CN23XX_CONFIG_PCIE_FLTMSK 0x720
/* The input jabber is used to determine the TSO max size.
- * Due to H/W limitation, this need to be reduced to 60000
- * in order to to H/W TSO and avoid the WQE malfarmation
+ * Due to H/W limitation, this needs to be reduced to 60000
+ * in order to use H/W TSO and avoid the WQE malformation
* PKO_BUG_24989_WQE_LEN
*/
#define CN23XX_DEFAULT_INPUT_JABBER 0xEA60 /*60000*/
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c
index 73cb03266549..882b2be06ea0 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_core.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c
@@ -851,7 +851,7 @@ int liquidio_setup_io_queues(struct octeon_device *octeon_dev, int ifidx,
napi = &droq->napi;
dev_dbg(&octeon_dev->pci_dev->dev, "netif_napi_add netdev:%llx oct:%llx\n",
(u64)netdev, (u64)octeon_dev);
- netif_napi_add(netdev, napi, liquidio_napi_poll, 64);
+ netif_napi_add(netdev, napi, liquidio_napi_poll);
/* designate a CPU for this droq */
droq->cpu_id = cpu_id;
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index bee35ce60171..d312bd594935 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -92,11 +92,6 @@ static int octeon_console_debug_enabled(u32 console)
/* time to wait for possible in-flight requests in milliseconds */
#define WAIT_INFLIGHT_REQUEST msecs_to_jiffies(1000)
-struct lio_trusted_vf_ctx {
- struct completion complete;
- int status;
-};
-
struct oct_link_status_resp {
u64 rh;
struct oct_link_info link_info;
diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
index 103591dcea1c..edde0b8fa49c 100644
--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
@@ -1342,7 +1342,7 @@ static void octeon_mgmt_poll_controller(struct net_device *netdev)
static void octeon_mgmt_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
}
static int octeon_mgmt_nway_reset(struct net_device *dev)
@@ -1396,8 +1396,8 @@ static int octeon_mgmt_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, netdev);
p = netdev_priv(netdev);
- netif_napi_add(netdev, &p->napi, octeon_mgmt_napi_poll,
- OCTEON_MGMT_NAPI_WEIGHT);
+ netif_napi_add_weight(netdev, &p->napi, octeon_mgmt_napi_poll,
+ OCTEON_MGMT_NAPI_WEIGHT);
p->netdev = netdev;
p->dev = &pdev->dev;
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
index 5a9fad61e9ea..e5c71f907852 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -191,8 +191,8 @@ static void nicvf_get_drvinfo(struct net_device *netdev,
{
struct nicvf *nic = netdev_priv(netdev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(nic->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(nic->pdev), sizeof(info->bus_info));
}
static u32 nicvf_get_msglevel(struct net_device *netdev)
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 768ea426d49f..98f3dc460ca7 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -1472,8 +1472,7 @@ int nicvf_open(struct net_device *netdev)
}
cq_poll->cq_idx = qidx;
cq_poll->nicvf = nic;
- netif_napi_add(netdev, &cq_poll->napi, nicvf_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(netdev, &cq_poll->napi, nicvf_poll);
napi_enable(&cq_poll->napi);
nic->napi[qidx] = cq_poll;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index f4054d2553ea..d2286adf09fe 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -429,8 +429,8 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct adapter *adapter = dev->ml_priv;
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(adapter->pdev),
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(adapter->pdev),
sizeof(info->bus_info));
}
@@ -1053,7 +1053,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->hard_header_len += (netdev->hw_features & NETIF_F_TSO) ?
sizeof(struct cpl_tx_pkt_lso) : sizeof(struct cpl_tx_pkt);
- netif_napi_add(netdev, &adapter->napi, t1_poll, 64);
+ netif_napi_add(netdev, &adapter->napi, t1_poll);
netdev->ethtool_ops = &t1_ethtool_ops;
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 174b1e156669..a52e6b6e2876 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -609,8 +609,7 @@ static void init_napi(struct adapter *adap)
struct sge_qset *qs = &adap->sge.qs[i];
if (qs->adap)
- netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll,
- 64);
+ netif_napi_add(qs->netdev, &qs->napi, qs->napi.poll);
}
/*
@@ -1627,8 +1626,8 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
t3_get_tp_version(adapter, &tp_vers);
spin_unlock(&adapter->stats_lock);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(adapter->pdev),
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(adapter->pdev),
sizeof(info->bus_info));
if (fw_vers)
snprintf(info->fw_version, sizeof(info->fw_version),
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index 77897edd2bc0..8477a93cee6b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -199,8 +199,8 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
struct adapter *adapter = netdev2adap(dev);
u32 exprom_vers;
- strlcpy(info->driver, cxgb4_driver_name, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(adapter->pdev),
+ strscpy(info->driver, cxgb4_driver_name, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(adapter->pdev),
sizeof(info->bus_info));
info->regdump_len = get_regs_len(dev);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index d0061921529f..9cbce1faab26 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -3903,8 +3903,8 @@ static void cxgb4_mgmt_get_drvinfo(struct net_device *dev,
{
struct adapter *adapter = netdev2adap(dev);
- strlcpy(info->driver, cxgb4_driver_name, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(adapter->pdev),
+ strscpy(info->driver, cxgb4_driver_name, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(adapter->pdev),
sizeof(info->bus_info));
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index ee52e3b1d74f..46809e2d94ee 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -4467,7 +4467,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
if (ret)
goto err;
- netif_napi_add(dev, &iq->napi, napi_rx_handler, 64);
+ netif_napi_add(dev, &iq->napi, napi_rx_handler);
iq->cur_desc = iq->desc;
iq->cidx = 0;
iq->gen = 1;
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index c2822e635f89..54db79f4dcfe 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -1553,8 +1553,8 @@ static void cxgb4vf_get_drvinfo(struct net_device *dev,
{
struct adapter *adapter = netdev2adap(dev);
- strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->bus_info, pci_name(to_pci_dev(dev->dev.parent)),
+ strscpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->bus_info, pci_name(to_pci_dev(dev->dev.parent)),
sizeof(drvinfo->bus_info));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%u.%u.%u.%u, TP %u.%u.%u.%u",
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 43b2ceb6aa32..2d0cf76fb3c5 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -2336,7 +2336,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
if (ret)
goto err;
- netif_napi_add(dev, &rspq->napi, napi_rx_handler, 64);
+ netif_napi_add(dev, &rspq->napi, napi_rx_handler);
rspq->cur_desc = rspq->desc;
rspq->cidx = 0;
rspq->gen = 1;
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
index ddfe9208529a..f90bfba4b303 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
@@ -1069,8 +1069,7 @@ static void chtls_pass_accept_rpl(struct sk_buff *skb,
cxgb4_l2t_send(csk->egress_dev, skb, csk->l2t_entry);
}
-static void inet_inherit_port(struct inet_hashinfo *hash_info,
- struct sock *lsk, struct sock *newsk)
+static void inet_inherit_port(struct sock *lsk, struct sock *newsk)
{
local_bh_disable();
__inet_inherit_port(lsk, newsk);
@@ -1240,7 +1239,7 @@ static struct sock *chtls_recv_sock(struct sock *lsk,
ipv4.sysctl_tcp_window_scaling),
tp->window_clamp);
neigh_release(n);
- inet_inherit_port(&tcp_hashinfo, lsk, newsk);
+ inet_inherit_port(lsk, newsk);
csk_set_flag(csk, CSK_CONN_INLINE);
bh_unlock_sock(newsk); /* tcp_create_openreq_child ->sk_clone_lock */
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c
index 9098b3eed4da..1e55b12fee51 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_main.c
@@ -193,7 +193,7 @@ static void chtls_register_dev(struct chtls_dev *cdev)
{
struct tls_toe_device *tlsdev = &cdev->tlsdev;
- strlcpy(tlsdev->name, "chtls", TLS_TOE_DEVICE_NAME_MAX);
+ strscpy(tlsdev->name, "chtls", TLS_TOE_DEVICE_NAME_MAX);
strlcat(tlsdev->name, cdev->lldi->ports[0]->name,
TLS_TOE_DEVICE_NAME_MAX);
tlsdev->feature = chtls_inline_feature;
diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c
index 21ba6e893072..8627ab19d470 100644
--- a/drivers/net/ethernet/cirrus/ep93xx_eth.c
+++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c
@@ -689,7 +689,7 @@ static int ep93xx_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
static void ep93xx_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
}
static int ep93xx_get_link_ksettings(struct net_device *dev,
@@ -812,7 +812,7 @@ static int ep93xx_eth_probe(struct platform_device *pdev)
ep = netdev_priv(dev);
ep->dev = dev;
SET_NETDEV_DEV(dev, &pdev->dev);
- netif_napi_add(dev, &ep->napi, ep93xx_poll, 64);
+ netif_napi_add(dev, &ep->napi, ep93xx_poll);
platform_set_drvdata(pdev, dev);
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index 60d8c0fbc037..08b7cc0a1809 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -131,10 +131,10 @@ static void enic_get_drvinfo(struct net_device *netdev,
if (err == -ENOMEM)
return;
- strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->fw_version, fw_info->fw_version,
+ strscpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->fw_version, fw_info->fw_version,
sizeof(drvinfo->fw_version));
- strlcpy(drvinfo->bus_info, pci_name(enic->pdev),
+ strscpy(drvinfo->bus_info, pci_name(enic->pdev),
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 372fb7b3a282..29500d32e362 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -2633,16 +2633,17 @@ static int enic_dev_init(struct enic *enic)
switch (vnic_dev_get_intr_mode(enic->vdev)) {
default:
- netif_napi_add(netdev, &enic->napi[0], enic_poll, 64);
+ netif_napi_add(netdev, &enic->napi[0], enic_poll);
break;
case VNIC_DEV_INTR_MODE_MSIX:
for (i = 0; i < enic->rq_count; i++) {
netif_napi_add(netdev, &enic->napi[i],
- enic_poll_msix_rq, NAPI_POLL_WEIGHT);
+ enic_poll_msix_rq);
}
for (i = 0; i < enic->wq_count; i++)
- netif_napi_add(netdev, &enic->napi[enic_cq_wq(enic, i)],
- enic_poll_msix_wq, NAPI_POLL_WEIGHT);
+ netif_napi_add(netdev,
+ &enic->napi[enic_cq_wq(enic, i)],
+ enic_poll_msix_wq);
break;
}
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index 6dae768671e3..fdf10318758b 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -2471,7 +2471,7 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
netdev->max_mtu = 10236 - VLAN_ETH_HLEN;
port->freeq_refill = 0;
- netif_napi_add(netdev, &port->napi, gmac_napi_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(netdev, &port->napi, gmac_napi_poll);
ret = of_get_mac_address(np, mac);
if (!ret) {
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 0985ab216566..b21e56de6167 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -28,8 +28,7 @@
#include
#include
#include
-#include <linux/gpio.h>
-#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
#include
#include
@@ -540,8 +539,8 @@ static void dm9000_get_drvinfo(struct net_device *dev,
{
struct board_info *dm = to_dm9000_board(dev);
- strlcpy(info->driver, CARDNAME, sizeof(info->driver));
- strlcpy(info->bus_info, to_platform_device(dm->dev)->name,
+ strscpy(info->driver, CARDNAME, sizeof(info->driver));
+ strscpy(info->bus_info, to_platform_device(dm->dev)->name,
sizeof(info->bus_info));
}
@@ -1012,7 +1011,7 @@ static void dm9000_send_packet(struct net_device *dev,
* Hardware start transmission.
* Send a packet to media from the upper layer.
*/
-static int
+static netdev_tx_t
dm9000_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
unsigned long flags;
@@ -1421,8 +1420,7 @@ dm9000_probe(struct platform_device *pdev)
int iosize;
int i;
u32 id_val;
- int reset_gpios;
- enum of_gpio_flags flags;
+ struct gpio_desc *reset_gpio;
struct regulator *power;
bool inv_mac_addr = false;
u8 addr[ETH_ALEN];
@@ -1442,20 +1440,24 @@ dm9000_probe(struct platform_device *pdev)
dev_dbg(dev, "regulator enabled\n");
}
- reset_gpios = of_get_named_gpio_flags(dev->of_node, "reset-gpios", 0,
- &flags);
- if (gpio_is_valid(reset_gpios)) {
- ret = devm_gpio_request_one(dev, reset_gpios, flags,
- "dm9000_reset");
+ reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ ret = PTR_ERR_OR_ZERO(reset_gpio);
+ if (ret) {
+ dev_err(dev, "failed to request reset gpio: %d\n", ret);
+ goto out_regulator_disable;
+ }
+
+ if (reset_gpio) {
+ ret = gpiod_set_consumer_name(reset_gpio, "dm9000_reset");
if (ret) {
- dev_err(dev, "failed to request reset gpio %d: %d\n",
- reset_gpios, ret);
+ dev_err(dev, "failed to set reset gpio name: %d\n",
+ ret);
goto out_regulator_disable;
}
/* According to manual PWRST# Low Period Min 1ms */
msleep(2);
- gpio_set_value(reset_gpios, 1);
+ gpiod_set_value_cansleep(reset_gpio, 0);
/* Needs 3ms to read eeprom when PWRST is deasserted */
msleep(4);
}
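The dm9000 conversion above also changes the reset polarity handling: gpio_set_value() wrote a raw line level, whereas the gpiod consumer API is logical, so requesting the descriptor with GPIOD_OUT_HIGH asserts reset (honouring any active-low flag from the device tree) and gpiod_set_value_cansleep(..., 0) deasserts it. A hedged sketch of that pattern, with a hypothetical "reset" consumer name and delays:

#include <linux/gpio/consumer.h>
#include <linux/delay.h>
#include <linux/err.h>

static int my_hw_reset(struct device *dev)
{
	struct gpio_desc *reset;

	/* Logical "asserted" at request time; polarity comes from DT flags. */
	reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
	if (IS_ERR(reset))
		return PTR_ERR(reset);
	if (!reset)
		return 0;			/* no reset line wired up */

	msleep(2);				/* keep reset asserted briefly */
	gpiod_set_value_cansleep(reset, 0);	/* deassert (logical value) */
	msleep(4);				/* give the chip time to come up */

	return 0;
}
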
diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c
index d51b3d24a0c8..cd3dc4b89518 100644
--- a/drivers/net/ethernet/dec/tulip/de2104x.c
+++ b/drivers/net/ethernet/dec/tulip/de2104x.c
@@ -1606,8 +1606,8 @@ static void de_get_drvinfo (struct net_device *dev,struct ethtool_drvinfo *info)
{
struct de_private *de = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(de->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(de->pdev), sizeof(info->bus_info));
}
static int de_get_regs_len(struct net_device *dev)
diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c
index 83f1727d1423..3188ba7b450f 100644
--- a/drivers/net/ethernet/dec/tulip/dmfe.c
+++ b/drivers/net/ethernet/dec/tulip/dmfe.c
@@ -1074,8 +1074,8 @@ static void dmfe_ethtool_get_drvinfo(struct net_device *dev,
{
struct dmfe_board_info *np = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
}
static int dmfe_ethtool_set_wol(struct net_device *dev,
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index b8e46c4849ef..ecfad43df45a 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -858,8 +858,8 @@ static struct net_device_stats *tulip_get_stats(struct net_device *dev)
static void tulip_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct tulip_private *np = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
}
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
index 77d9058431e3..ff080ab0f116 100644
--- a/drivers/net/ethernet/dec/tulip/uli526x.c
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -971,8 +971,8 @@ static void netdev_get_drvinfo(struct net_device *dev,
{
struct uli526x_board_info *np = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
}
static int netdev_get_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
index 1db19463fd46..37fba39c0056 100644
--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
@@ -1374,8 +1374,8 @@ static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *
{
struct netdev_private *np = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static int netdev_get_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index a301f7e6a440..2c67a857a42f 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -1235,8 +1235,8 @@ static void rio_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
{
struct netdev_private *np = netdev_priv(dev);
- strlcpy(info->driver, "dl2k", sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, "dl2k", sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
}
static int rio_get_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index 8dd7bf9014ec..43def191f26f 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -1644,8 +1644,8 @@ static int check_if_running(struct net_device *dev)
static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct netdev_private *np = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static int get_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
index 92462ed87bc4..08184f20f510 100644
--- a/drivers/net/ethernet/dnet.c
+++ b/drivers/net/ethernet/dnet.c
@@ -725,8 +725,8 @@ static struct net_device_stats *dnet_get_stats(struct net_device *dev)
static void dnet_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, "0", sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, "0", sizeof(info->bus_info));
}
static const struct ethtool_ops dnet_ethtool_ops = {
@@ -788,7 +788,7 @@ static int dnet_probe(struct platform_device *pdev)
}
dev->netdev_ops = &dnet_netdev_ops;
- netif_napi_add(dev, &bp->napi, dnet_poll, 64);
+ netif_napi_add(dev, &bp->napi, dnet_poll);
dev->ethtool_ops = &dnet_ethtool_ops;
dev->base_addr = (unsigned long)bp->regs;
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index b4f5e57d0285..08ec84cd21c0 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -1878,9 +1878,9 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter)
if (!status) {
struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
- strlcpy(adapter->fw_ver, resp->firmware_version_string,
+ strscpy(adapter->fw_ver, resp->firmware_version_string,
sizeof(adapter->fw_ver));
- strlcpy(adapter->fw_on_flash, resp->fw_on_flash_version_string,
+ strscpy(adapter->fw_on_flash, resp->fw_on_flash_version_string,
sizeof(adapter->fw_on_flash));
}
err:
@@ -2373,7 +2373,7 @@ static int lancer_cmd_write_object(struct be_adapter *adapter,
be_dws_cpu_to_le(ctxt, sizeof(req->context));
req->write_offset = cpu_to_le32(data_offset);
- strlcpy(req->object_name, obj_name, sizeof(req->object_name));
+ strscpy(req->object_name, obj_name, sizeof(req->object_name));
req->descriptor_count = cpu_to_le32(1);
req->buf_len = cpu_to_le32(data_size);
req->addr_low = cpu_to_le32((cmd->dma +
@@ -2442,9 +2442,9 @@ int be_cmd_query_sfp_info(struct be_adapter *adapter)
status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
0, PAGE_DATA_LEN, page_data);
if (!status) {
- strlcpy(adapter->phy.vendor_name, page_data +
+ strscpy(adapter->phy.vendor_name, page_data +
SFP_VENDOR_NAME_OFFSET, SFP_VENDOR_NAME_LEN - 1);
- strlcpy(adapter->phy.vendor_pn,
+ strscpy(adapter->phy.vendor_pn,
page_data + SFP_VENDOR_PN_OFFSET,
SFP_VENDOR_NAME_LEN - 1);
}
@@ -2473,7 +2473,7 @@ static int lancer_cmd_delete_object(struct be_adapter *adapter,
OPCODE_COMMON_DELETE_OBJECT,
sizeof(*req), wrb, NULL);
- strlcpy(req->object_name, obj_name, sizeof(req->object_name));
+ strscpy(req->object_name, obj_name, sizeof(req->object_name));
status = be_mcc_notify_wait(adapter);
err:
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index bd0df189d871..77edc3d9b505 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -220,15 +220,15 @@ static void be_get_drvinfo(struct net_device *netdev,
{
struct be_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
if (!memcmp(adapter->fw_ver, adapter->fw_on_flash, FW_VER_LEN))
- strlcpy(drvinfo->fw_version, adapter->fw_ver,
+ strscpy(drvinfo->fw_version, adapter->fw_ver,
sizeof(drvinfo->fw_version));
else
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%s [%s]", adapter->fw_ver, adapter->fw_on_flash);
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 414362febbb9..a92a74761546 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -2982,8 +2982,7 @@ static int be_evt_queues_create(struct be_adapter *adapter)
return -ENOMEM;
cpumask_set_cpu(cpumask_local_spread(i, numa_node),
eqo->affinity_mask);
- netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(adapter->netdev, &eqo->napi, be_poll);
}
return 0;
}
diff --git a/drivers/net/ethernet/engleder/Kconfig b/drivers/net/ethernet/engleder/Kconfig
index f4e2b1102d8f..3df6bf476ae7 100644
--- a/drivers/net/ethernet/engleder/Kconfig
+++ b/drivers/net/ethernet/engleder/Kconfig
@@ -21,6 +21,7 @@ config TSNEP
depends on HAS_IOMEM && HAS_DMA
depends on PTP_1588_CLOCK_OPTIONAL
select PHYLIB
+ select PAGE_POOL
help
Support for the Engleder TSN endpoint Ethernet MAC IP Core.
diff --git a/drivers/net/ethernet/engleder/Makefile b/drivers/net/ethernet/engleder/Makefile
index cce2191cb889..b6e3b16623de 100644
--- a/drivers/net/ethernet/engleder/Makefile
+++ b/drivers/net/ethernet/engleder/Makefile
@@ -6,5 +6,5 @@
obj-$(CONFIG_TSNEP) += tsnep.o
tsnep-objs := tsnep_main.o tsnep_ethtool.o tsnep_ptp.o tsnep_tc.o \
- $(tsnep-y)
+ tsnep_rxnfc.o $(tsnep-y)
tsnep-$(CONFIG_TSNEP_SELFTESTS) += tsnep_selftests.o
diff --git a/drivers/net/ethernet/engleder/tsnep.h b/drivers/net/ethernet/engleder/tsnep.h
index 23bbece6b7de..09a723b827c7 100644
--- a/drivers/net/ethernet/engleder/tsnep.h
+++ b/drivers/net/ethernet/engleder/tsnep.h
@@ -21,8 +21,6 @@
#define TSNEP_RING_ENTRIES_PER_PAGE (PAGE_SIZE / TSNEP_DESC_SIZE)
#define TSNEP_RING_PAGE_COUNT (TSNEP_RING_SIZE / TSNEP_RING_ENTRIES_PER_PAGE)
-#define TSNEP_QUEUES 1
-
struct tsnep_gcl {
void __iomem *addr;
@@ -39,6 +37,24 @@ struct tsnep_gcl {
bool change;
};
+enum tsnep_rxnfc_filter_type {
+ TSNEP_RXNFC_ETHER_TYPE,
+};
+
+struct tsnep_rxnfc_filter {
+ enum tsnep_rxnfc_filter_type type;
+ union {
+ u16 ether_type;
+ };
+};
+
+struct tsnep_rxnfc_rule {
+ struct list_head list;
+ struct tsnep_rxnfc_filter filter;
+ int queue_index;
+ int location;
+};
+
struct tsnep_tx_entry {
struct tsnep_tx_desc *desc;
struct tsnep_tx_desc_wb *desc_wb;
@@ -55,6 +71,7 @@ struct tsnep_tx_entry {
struct tsnep_tx {
struct tsnep_adapter *adapter;
void __iomem *addr;
+ int queue_index;
void *page[TSNEP_RING_PAGE_COUNT];
dma_addr_t page_dma[TSNEP_RING_PAGE_COUNT];
@@ -79,14 +96,15 @@ struct tsnep_rx_entry {
u32 properties;
- struct sk_buff *skb;
+ struct page *page;
size_t len;
- DEFINE_DMA_UNMAP_ADDR(dma);
+ dma_addr_t dma;
};
struct tsnep_rx {
struct tsnep_adapter *adapter;
void __iomem *addr;
+ int queue_index;
void *page[TSNEP_RING_PAGE_COUNT];
dma_addr_t page_dma[TSNEP_RING_PAGE_COUNT];
@@ -95,6 +113,7 @@ struct tsnep_rx {
int read;
u32 owner_counter;
int increment_owner_counter;
+ struct page_pool *page_pool;
u32 packets;
u32 bytes;
@@ -104,12 +123,14 @@ struct tsnep_rx {
struct tsnep_queue {
struct tsnep_adapter *adapter;
+ char name[IFNAMSIZ + 9];
struct tsnep_tx *tx;
struct tsnep_rx *rx;
struct napi_struct napi;
+ int irq;
u32 irq_mask;
};
@@ -125,7 +146,6 @@ struct tsnep_adapter {
struct platform_device *pdev;
struct device *dmadev;
void __iomem *addr;
- int irq;
bool gate_control;
/* gate control lock */
@@ -140,6 +160,12 @@ struct tsnep_adapter {
/* ptp clock lock */
spinlock_t ptp_lock;
+ /* RX flow classification rules lock */
+ struct mutex rxnfc_lock;
+ struct list_head rxnfc_rules;
+ int rxnfc_count;
+ int rxnfc_max;
+
int num_tx_queues;
struct tsnep_tx tx[TSNEP_MAX_QUEUES];
int num_rx_queues;
@@ -160,6 +186,18 @@ void tsnep_tc_cleanup(struct tsnep_adapter *adapter);
int tsnep_tc_setup(struct net_device *netdev, enum tc_setup_type type,
void *type_data);
+int tsnep_rxnfc_init(struct tsnep_adapter *adapter);
+void tsnep_rxnfc_cleanup(struct tsnep_adapter *adapter);
+int tsnep_rxnfc_get_rule(struct tsnep_adapter *adapter,
+ struct ethtool_rxnfc *cmd);
+int tsnep_rxnfc_get_all(struct tsnep_adapter *adapter,
+ struct ethtool_rxnfc *cmd,
+ u32 *rule_locs);
+int tsnep_rxnfc_add_rule(struct tsnep_adapter *adapter,
+ struct ethtool_rxnfc *cmd);
+int tsnep_rxnfc_del_rule(struct tsnep_adapter *adapter,
+ struct ethtool_rxnfc *cmd);
+
#if IS_ENABLED(CONFIG_TSNEP_SELFTESTS)
int tsnep_ethtool_get_test_count(void);
void tsnep_ethtool_get_test_strings(u8 *data);
diff --git a/drivers/net/ethernet/engleder/tsnep_ethtool.c b/drivers/net/ethernet/engleder/tsnep_ethtool.c
index e6760dc68ddd..a713a126b227 100644
--- a/drivers/net/ethernet/engleder/tsnep_ethtool.c
+++ b/drivers/net/ethernet/engleder/tsnep_ethtool.c
@@ -250,6 +250,44 @@ static int tsnep_ethtool_get_sset_count(struct net_device *netdev, int sset)
}
}
+static int tsnep_ethtool_get_rxnfc(struct net_device *dev,
+ struct ethtool_rxnfc *cmd, u32 *rule_locs)
+{
+ struct tsnep_adapter *adapter = netdev_priv(dev);
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = adapter->num_rx_queues;
+ return 0;
+ case ETHTOOL_GRXCLSRLCNT:
+ cmd->rule_cnt = adapter->rxnfc_count;
+ cmd->data = adapter->rxnfc_max;
+ cmd->data |= RX_CLS_LOC_SPECIAL;
+ return 0;
+ case ETHTOOL_GRXCLSRULE:
+ return tsnep_rxnfc_get_rule(adapter, cmd);
+ case ETHTOOL_GRXCLSRLALL:
+ return tsnep_rxnfc_get_all(adapter, cmd, rule_locs);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int tsnep_ethtool_set_rxnfc(struct net_device *dev,
+ struct ethtool_rxnfc *cmd)
+{
+ struct tsnep_adapter *adapter = netdev_priv(dev);
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXCLSRLINS:
+ return tsnep_rxnfc_add_rule(adapter, cmd);
+ case ETHTOOL_SRXCLSRLDEL:
+ return tsnep_rxnfc_del_rule(adapter, cmd);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static int tsnep_ethtool_get_ts_info(struct net_device *dev,
struct ethtool_ts_info *info)
{
@@ -287,6 +325,8 @@ const struct ethtool_ops tsnep_ethtool_ops = {
.get_strings = tsnep_ethtool_get_strings,
.get_ethtool_stats = tsnep_ethtool_get_ethtool_stats,
.get_sset_count = tsnep_ethtool_get_sset_count,
+ .get_rxnfc = tsnep_ethtool_get_rxnfc,
+ .set_rxnfc = tsnep_ethtool_set_rxnfc,
.get_ts_info = tsnep_ethtool_get_ts_info,
.get_link_ksettings = phy_ethtool_get_link_ksettings,
.set_link_ksettings = phy_ethtool_set_link_ksettings,
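The new get_rxnfc/set_rxnfc hooks are driven through the generic ethtool ntuple interface, so nothing tsnep-specific is needed in user space. A hedged user-space sketch of inserting an EtherType steering rule via the ethtool ioctl follows; the interface name, EtherType and queue index are made-up examples, and the standard UAPI structure layout is assumed:

#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <arpa/inet.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

/* Steer EtherType 0x88F7 (PTP over Ethernet) frames to RX queue 1 on "eth0". */
static int add_ether_type_rule(int fd)
{
	struct ethtool_rxnfc nfc = { .cmd = ETHTOOL_SRXCLSRLINS };
	struct ifreq ifr = { 0 };

	nfc.fs.flow_type = ETHER_FLOW;
	nfc.fs.h_u.ether_spec.h_proto = htons(0x88F7);
	nfc.fs.m_u.ether_spec.h_proto = htons(0xFFFF);	/* match the full EtherType */
	nfc.fs.ring_cookie = 1;				/* target RX queue index */
	nfc.fs.location = RX_CLS_LOC_ANY;		/* let the driver pick a slot */

	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&nfc;
	return ioctl(fd, SIOCETHTOOL, &ifr);
}

The fd is any ordinary socket (for example socket(AF_INET, SOCK_DGRAM, 0)); the roughly equivalent command line is "ethtool -N eth0 flow-type ether proto 0x88f7 action 1".
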
diff --git a/drivers/net/ethernet/engleder/tsnep_hw.h b/drivers/net/ethernet/engleder/tsnep_hw.h
index 916ceac3ada2..315dada75323 100644
--- a/drivers/net/ethernet/engleder/tsnep_hw.h
+++ b/drivers/net/ethernet/engleder/tsnep_hw.h
@@ -34,6 +34,7 @@
#define ECM_INT_LINK 0x00000020
#define ECM_INT_TX_0 0x00000100
#define ECM_INT_RX_0 0x00000200
+#define ECM_INT_TXRX_SHIFT 2
#define ECM_INT_ALL 0x7FFFFFFF
#define ECM_INT_DISABLE 0x80000000
@@ -92,8 +93,7 @@
/* tsnep register */
#define TSNEP_INFO 0x0100
-#define TSNEP_INFO_RX_ASSIGN 0x00010000
-#define TSNEP_INFO_TX_TIME 0x00020000
+#define TSNEP_INFO_TX_TIME 0x00010000
#define TSNEP_CONTROL 0x0108
#define TSNEP_CONTROL_TX_RESET 0x00000001
#define TSNEP_CONTROL_TX_ENABLE 0x00000002
@@ -122,10 +122,6 @@
#define TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL 0x0191
#define TSNEP_RX_STATISTIC_FIFO_OVERFLOW 0x0192
#define TSNEP_RX_STATISTIC_INVALID_FRAME 0x0193
-#define TSNEP_RX_ASSIGN 0x01A0
-#define TSNEP_RX_ASSIGN_ETHER_TYPE_ACTIVE 0x00000001
-#define TSNEP_RX_ASSIGN_ETHER_TYPE_MASK 0xFFFF0000
-#define TSNEP_RX_ASSIGN_ETHER_TYPE_SHIFT 16
#define TSNEP_MAC_ADDRESS_LOW 0x0800
#define TSNEP_MAC_ADDRESS_HIGH 0x0804
#define TSNEP_RX_FILTER 0x0806
@@ -152,6 +148,14 @@
#define TSNEP_GCL_A 0x2000
#define TSNEP_GCL_B 0x2800
#define TSNEP_GCL_SIZE SZ_2K
+#define TSNEP_RX_ASSIGN 0x0840
+#define TSNEP_RX_ASSIGN_ACTIVE 0x00000001
+#define TSNEP_RX_ASSIGN_QUEUE_MASK 0x00000006
+#define TSNEP_RX_ASSIGN_QUEUE_SHIFT 1
+#define TSNEP_RX_ASSIGN_OFFSET 1
+#define TSNEP_RX_ASSIGN_ETHER_TYPE 0x0880
+#define TSNEP_RX_ASSIGN_ETHER_TYPE_OFFSET 2
+#define TSNEP_RX_ASSIGN_ETHER_TYPE_COUNT 2
/* tsnep gate control list operation */
struct tsnep_gcl_operation {
diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
index a5f7152a1716..48fb391951dd 100644
--- a/drivers/net/ethernet/engleder/tsnep_main.c
+++ b/drivers/net/ethernet/engleder/tsnep_main.c
@@ -27,10 +27,10 @@
#include
#include
-#define RX_SKB_LENGTH (round_up(TSNEP_RX_INLINE_METADATA_SIZE + ETH_HLEN + \
- TSNEP_MAX_FRAME_SIZE + ETH_FCS_LEN, 4))
-#define RX_SKB_RESERVE ((16 - TSNEP_RX_INLINE_METADATA_SIZE) + NET_IP_ALIGN)
-#define RX_SKB_ALLOC_LENGTH (RX_SKB_RESERVE + RX_SKB_LENGTH)
+#define TSNEP_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
+#define TSNEP_HEADROOM ALIGN(TSNEP_SKB_PAD, 4)
+#define TSNEP_MAX_RX_BUF_SIZE (PAGE_SIZE - TSNEP_HEADROOM - \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
#define DMA_ADDR_HIGH(dma_addr) ((u32)(((dma_addr) >> 32) & 0xFFFFFFFF))
@@ -60,22 +60,29 @@ static irqreturn_t tsnep_irq(int irq, void *arg)
iowrite32(active, adapter->addr + ECM_INT_ACKNOWLEDGE);
/* handle link interrupt */
- if ((active & ECM_INT_LINK) != 0) {
- if (adapter->netdev->phydev)
- phy_mac_interrupt(adapter->netdev->phydev);
- }
+ if ((active & ECM_INT_LINK) != 0)
+ phy_mac_interrupt(adapter->netdev->phydev);
/* handle TX/RX queue 0 interrupt */
if ((active & adapter->queue[0].irq_mask) != 0) {
- if (adapter->netdev) {
- tsnep_disable_irq(adapter, adapter->queue[0].irq_mask);
- napi_schedule(&adapter->queue[0].napi);
- }
+ tsnep_disable_irq(adapter, adapter->queue[0].irq_mask);
+ napi_schedule(&adapter->queue[0].napi);
}
return IRQ_HANDLED;
}
+static irqreturn_t tsnep_irq_txrx(int irq, void *arg)
+{
+ struct tsnep_queue *queue = arg;
+
+ /* handle TX/RX queue interrupt */
+ tsnep_disable_irq(queue->adapter, queue->irq_mask);
+ napi_schedule(&queue->napi);
+
+ return IRQ_HANDLED;
+}
+
static int tsnep_mdiobus_read(struct mii_bus *bus, int addr, int regnum)
{
struct tsnep_adapter *adapter = bus->priv;
@@ -124,30 +131,51 @@ static int tsnep_mdiobus_write(struct mii_bus *bus, int addr, int regnum,
return 0;
}
+static void tsnep_set_link_mode(struct tsnep_adapter *adapter)
+{
+ u32 mode;
+
+ switch (adapter->phydev->speed) {
+ case SPEED_100:
+ mode = ECM_LINK_MODE_100;
+ break;
+ case SPEED_1000:
+ mode = ECM_LINK_MODE_1000;
+ break;
+ default:
+ mode = ECM_LINK_MODE_OFF;
+ break;
+ }
+ iowrite32(mode, adapter->addr + ECM_STATUS);
+}
+
static void tsnep_phy_link_status_change(struct net_device *netdev)
{
struct tsnep_adapter *adapter = netdev_priv(netdev);
struct phy_device *phydev = netdev->phydev;
- u32 mode;
- if (phydev->link) {
- switch (phydev->speed) {
- case SPEED_100:
- mode = ECM_LINK_MODE_100;
- break;
- case SPEED_1000:
- mode = ECM_LINK_MODE_1000;
- break;
- default:
- mode = ECM_LINK_MODE_OFF;
- break;
- }
- iowrite32(mode, adapter->addr + ECM_STATUS);
- }
+ if (phydev->link)
+ tsnep_set_link_mode(adapter);
phy_print_status(netdev->phydev);
}
+static int tsnep_phy_loopback(struct tsnep_adapter *adapter, bool enable)
+{
+ int retval;
+
+ retval = phy_loopback(adapter->phydev, enable);
+
+ /* PHY link state changes are not signaled while loopback is enabled,
+ * and waiting for one would only delay a working loopback anyway, so
+ * ensure loopback works immediately by setting the link mode directly.
+ */
+ if (!retval && enable)
+ tsnep_set_link_mode(adapter);
+
+ return retval;
+}
+
static int tsnep_phy_open(struct tsnep_adapter *adapter)
{
struct phy_device *phydev;
@@ -241,14 +269,14 @@ alloc_failed:
return retval;
}
-static void tsnep_tx_activate(struct tsnep_tx *tx, int index, bool last)
+static void tsnep_tx_activate(struct tsnep_tx *tx, int index, int length,
+ bool last)
{
struct tsnep_tx_entry *entry = &tx->entry[index];
entry->properties = 0;
if (entry->skb) {
- entry->properties =
- skb_pagelen(entry->skb) & TSNEP_DESC_LENGTH_MASK;
+ entry->properties = length & TSNEP_DESC_LENGTH_MASK;
entry->properties |= TSNEP_DESC_INTERRUPT_FLAG;
if (skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS)
entry->properties |= TSNEP_DESC_EXTENDED_WRITEBACK_FLAG;
@@ -313,6 +341,7 @@ static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count)
struct tsnep_tx_entry *entry;
unsigned int len;
dma_addr_t dma;
+ int map_len = 0;
int i;
for (i = 0; i < count; i++) {
@@ -335,15 +364,18 @@ static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count)
dma_unmap_addr_set(entry, dma, dma);
entry->desc->tx = __cpu_to_le64(dma);
+
+ map_len += len;
}
- return 0;
+ return map_len;
}
-static void tsnep_tx_unmap(struct tsnep_tx *tx, int index, int count)
+static int tsnep_tx_unmap(struct tsnep_tx *tx, int index, int count)
{
struct device *dmadev = tx->adapter->dmadev;
struct tsnep_tx_entry *entry;
+ int map_len = 0;
int i;
for (i = 0; i < count; i++) {
@@ -360,9 +392,12 @@ static void tsnep_tx_unmap(struct tsnep_tx *tx, int index, int count)
dma_unmap_addr(entry, dma),
dma_unmap_len(entry, len),
DMA_TO_DEVICE);
+ map_len += entry->len;
entry->len = 0;
}
}
+
+ return map_len;
}
static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
@@ -371,6 +406,7 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
unsigned long flags;
int count = 1;
struct tsnep_tx_entry *entry;
+ int length;
int i;
int retval;
@@ -394,7 +430,7 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
entry->skb = skb;
retval = tsnep_tx_map(skb, tx, count);
- if (retval != 0) {
+ if (retval < 0) {
tsnep_tx_unmap(tx, tx->write, count);
dev_kfree_skb_any(entry->skb);
entry->skb = NULL;
@@ -407,12 +443,13 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
return NETDEV_TX_OK;
}
+ length = retval;
if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
for (i = 0; i < count; i++)
- tsnep_tx_activate(tx, (tx->write + i) % TSNEP_RING_SIZE,
+ tsnep_tx_activate(tx, (tx->write + i) % TSNEP_RING_SIZE, length,
i == (count - 1));
tx->write = (tx->write + count) % TSNEP_RING_SIZE;
@@ -428,9 +465,6 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
netif_stop_queue(tx->adapter->netdev);
}
- tx->packets++;
- tx->bytes += skb_pagelen(entry->skb) + ETH_FCS_LEN;
-
spin_unlock_irqrestore(&tx->lock, flags);
return NETDEV_TX_OK;
@@ -442,6 +476,7 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
int budget = 128;
struct tsnep_tx_entry *entry;
int count;
+ int length;
spin_lock_irqsave(&tx->lock, flags);
@@ -464,7 +499,7 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
if (skb_shinfo(entry->skb)->nr_frags > 0)
count += skb_shinfo(entry->skb)->nr_frags;
- tsnep_tx_unmap(tx, tx->read, count);
+ length = tsnep_tx_unmap(tx, tx->read, count);
if ((skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS) &&
(__le32_to_cpu(entry->desc_wb->properties) &
@@ -491,6 +526,9 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
tx->read = (tx->read + count) % TSNEP_RING_SIZE;
+ tx->packets++;
+ tx->bytes += length + ETH_FCS_LEN;
+
budget--;
} while (likely(budget));
@@ -505,7 +543,7 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
}
static int tsnep_tx_open(struct tsnep_adapter *adapter, void __iomem *addr,
- struct tsnep_tx *tx)
+ int queue_index, struct tsnep_tx *tx)
{
dma_addr_t dma;
int retval;
@@ -513,6 +551,7 @@ static int tsnep_tx_open(struct tsnep_adapter *adapter, void __iomem *addr,
memset(tx, 0, sizeof(*tx));
tx->adapter = adapter;
tx->addr = addr;
+ tx->queue_index = queue_index;
retval = tsnep_tx_ring_init(tx);
if (retval)
@@ -548,14 +587,15 @@ static void tsnep_rx_ring_cleanup(struct tsnep_rx *rx)
for (i = 0; i < TSNEP_RING_SIZE; i++) {
entry = &rx->entry[i];
- if (dma_unmap_addr(entry, dma))
- dma_unmap_single(dmadev, dma_unmap_addr(entry, dma),
- dma_unmap_len(entry, len),
- DMA_FROM_DEVICE);
- if (entry->skb)
- dev_kfree_skb(entry->skb);
+ if (entry->page)
+ page_pool_put_full_page(rx->page_pool, entry->page,
+ false);
+ entry->page = NULL;
}
+ if (rx->page_pool)
+ page_pool_destroy(rx->page_pool);
+
memset(rx->entry, 0, sizeof(rx->entry));
for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
@@ -568,31 +608,19 @@ static void tsnep_rx_ring_cleanup(struct tsnep_rx *rx)
}
}
-static int tsnep_rx_alloc_and_map_skb(struct tsnep_rx *rx,
- struct tsnep_rx_entry *entry)
+static int tsnep_rx_alloc_buffer(struct tsnep_rx *rx,
+ struct tsnep_rx_entry *entry)
{
- struct device *dmadev = rx->adapter->dmadev;
- struct sk_buff *skb;
- dma_addr_t dma;
+ struct page *page;
- skb = __netdev_alloc_skb(rx->adapter->netdev, RX_SKB_ALLOC_LENGTH,
- GFP_ATOMIC | GFP_DMA);
- if (!skb)
+ page = page_pool_dev_alloc_pages(rx->page_pool);
+ if (unlikely(!page))
return -ENOMEM;
- skb_reserve(skb, RX_SKB_RESERVE);
-
- dma = dma_map_single(dmadev, skb->data, RX_SKB_LENGTH,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(dmadev, dma)) {
- dev_kfree_skb(skb);
- return -ENOMEM;
- }
-
- entry->skb = skb;
- entry->len = RX_SKB_LENGTH;
- dma_unmap_addr_set(entry, dma, dma);
- entry->desc->rx = __cpu_to_le64(dma);
+ entry->page = page;
+ entry->len = TSNEP_MAX_RX_BUF_SIZE;
+ entry->dma = page_pool_get_dma_addr(entry->page);
+ entry->desc->rx = __cpu_to_le64(entry->dma + TSNEP_SKB_PAD);
return 0;
}
@@ -601,6 +629,7 @@ static int tsnep_rx_ring_init(struct tsnep_rx *rx)
{
struct device *dmadev = rx->adapter->dmadev;
struct tsnep_rx_entry *entry;
+ struct page_pool_params pp_params = { 0 };
struct tsnep_rx_entry *next_entry;
int i, j;
int retval;
@@ -622,12 +651,28 @@ static int tsnep_rx_ring_init(struct tsnep_rx *rx)
entry->desc_dma = rx->page_dma[i] + TSNEP_DESC_SIZE * j;
}
}
+
+ pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
+ pp_params.order = 0;
+ pp_params.pool_size = TSNEP_RING_SIZE;
+ pp_params.nid = dev_to_node(dmadev);
+ pp_params.dev = dmadev;
+ pp_params.dma_dir = DMA_FROM_DEVICE;
+ pp_params.max_len = TSNEP_MAX_RX_BUF_SIZE;
+ pp_params.offset = TSNEP_SKB_PAD;
+ rx->page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(rx->page_pool)) {
+ retval = PTR_ERR(rx->page_pool);
+ rx->page_pool = NULL;
+ goto failed;
+ }
+
for (i = 0; i < TSNEP_RING_SIZE; i++) {
entry = &rx->entry[i];
next_entry = &rx->entry[(i + 1) % TSNEP_RING_SIZE];
entry->desc->next = __cpu_to_le64(next_entry->desc_dma);
- retval = tsnep_rx_alloc_and_map_skb(rx, entry);
+ retval = tsnep_rx_alloc_buffer(rx, entry);
if (retval)
goto failed;
}
@@ -643,7 +688,7 @@ static void tsnep_rx_activate(struct tsnep_rx *rx, int index)
{
struct tsnep_rx_entry *entry = &rx->entry[index];
- /* RX_SKB_LENGTH is a multiple of 4 */
+ /* TSNEP_MAX_RX_BUF_SIZE is a multiple of 4 */
entry->properties = entry->len & TSNEP_DESC_LENGTH_MASK;
entry->properties |= TSNEP_DESC_INTERRUPT_FLAG;
if (index == rx->increment_owner_counter) {
@@ -666,19 +711,52 @@ static void tsnep_rx_activate(struct tsnep_rx *rx, int index)
entry->desc->properties = __cpu_to_le32(entry->properties);
}
+static struct sk_buff *tsnep_build_skb(struct tsnep_rx *rx, struct page *page,
+ int length)
+{
+ struct sk_buff *skb;
+
+ skb = napi_build_skb(page_address(page), PAGE_SIZE);
+ if (unlikely(!skb))
+ return NULL;
+
+ /* update pointers within the skb to store the data */
+ skb_reserve(skb, TSNEP_SKB_PAD + TSNEP_RX_INLINE_METADATA_SIZE);
+ __skb_put(skb, length - TSNEP_RX_INLINE_METADATA_SIZE - ETH_FCS_LEN);
+
+ if (rx->adapter->hwtstamp_config.rx_filter == HWTSTAMP_FILTER_ALL) {
+ struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
+ struct tsnep_rx_inline *rx_inline =
+ (struct tsnep_rx_inline *)(page_address(page) +
+ TSNEP_SKB_PAD);
+
+ skb_shinfo(skb)->tx_flags |=
+ SKBTX_HW_TSTAMP_NETDEV;
+ memset(hwtstamps, 0, sizeof(*hwtstamps));
+ hwtstamps->netdev_data = rx_inline;
+ }
+
+ skb_record_rx_queue(skb, rx->queue_index);
+ skb->protocol = eth_type_trans(skb, rx->adapter->netdev);
+
+ return skb;
+}
+
static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi,
int budget)
{
struct device *dmadev = rx->adapter->dmadev;
int done = 0;
+ enum dma_data_direction dma_dir;
struct tsnep_rx_entry *entry;
+ struct page *page;
struct sk_buff *skb;
- size_t len;
- dma_addr_t dma;
int length;
bool enable = false;
int retval;
+ dma_dir = page_pool_get_dma_dir(rx->page_pool);
+
while (likely(done < budget)) {
entry = &rx->entry[rx->read];
if ((__le32_to_cpu(entry->desc_wb->properties) &
@@ -691,42 +769,34 @@ static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi,
*/
dma_rmb();
- skb = entry->skb;
- len = dma_unmap_len(entry, len);
- dma = dma_unmap_addr(entry, dma);
+ prefetch(page_address(entry->page) + TSNEP_SKB_PAD);
+ length = __le32_to_cpu(entry->desc_wb->properties) &
+ TSNEP_DESC_LENGTH_MASK;
+ dma_sync_single_range_for_cpu(dmadev, entry->dma, TSNEP_SKB_PAD,
+ length, dma_dir);
+ page = entry->page;
/* forward skb only if allocation is successful, otherwise
- * skb is reused and frame dropped
+ * page is reused and frame dropped
*/
- retval = tsnep_rx_alloc_and_map_skb(rx, entry);
+ retval = tsnep_rx_alloc_buffer(rx, entry);
if (!retval) {
- dma_unmap_single(dmadev, dma, len, DMA_FROM_DEVICE);
+ skb = tsnep_build_skb(rx, page, length);
+ if (skb) {
+ page_pool_release_page(rx->page_pool, page);
- length = __le32_to_cpu(entry->desc_wb->properties) &
- TSNEP_DESC_LENGTH_MASK;
- skb_put(skb, length - ETH_FCS_LEN);
- if (rx->adapter->hwtstamp_config.rx_filter ==
- HWTSTAMP_FILTER_ALL) {
- struct skb_shared_hwtstamps *hwtstamps =
- skb_hwtstamps(skb);
- struct tsnep_rx_inline *rx_inline =
- (struct tsnep_rx_inline *)skb->data;
+ rx->packets++;
+ rx->bytes += length -
+ TSNEP_RX_INLINE_METADATA_SIZE;
+ if (skb->pkt_type == PACKET_MULTICAST)
+ rx->multicast++;
- skb_shinfo(skb)->tx_flags |=
- SKBTX_HW_TSTAMP_NETDEV;
- memset(hwtstamps, 0, sizeof(*hwtstamps));
- hwtstamps->netdev_data = rx_inline;
+ napi_gro_receive(napi, skb);
+ } else {
+ page_pool_recycle_direct(rx->page_pool, page);
+
+ rx->dropped++;
}
- skb_pull(skb, TSNEP_RX_INLINE_METADATA_SIZE);
- skb->protocol = eth_type_trans(skb,
- rx->adapter->netdev);
-
- rx->packets++;
- rx->bytes += length - TSNEP_RX_INLINE_METADATA_SIZE;
- if (skb->pkt_type == PACKET_MULTICAST)
- rx->multicast++;
-
- napi_gro_receive(napi, skb);
done++;
} else {
rx->dropped++;
@@ -752,7 +822,7 @@ static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi,
}
static int tsnep_rx_open(struct tsnep_adapter *adapter, void __iomem *addr,
- struct tsnep_rx *rx)
+ int queue_index, struct tsnep_rx *rx)
{
dma_addr_t dma;
int i;
@@ -761,6 +831,7 @@ static int tsnep_rx_open(struct tsnep_adapter *adapter, void __iomem *addr,
memset(rx, 0, sizeof(*rx));
rx->adapter = adapter;
rx->addr = addr;
+ rx->queue_index = queue_index;
retval = tsnep_rx_ring_init(rx);
if (retval)
@@ -821,6 +892,56 @@ static int tsnep_poll(struct napi_struct *napi, int budget)
return min(done, budget - 1);
}
+static int tsnep_request_irq(struct tsnep_queue *queue, bool first)
+{
+ const char *name = netdev_name(queue->adapter->netdev);
+ irq_handler_t handler;
+ void *dev;
+ int retval;
+
+ if (first) {
+ sprintf(queue->name, "%s-mac", name);
+ handler = tsnep_irq;
+ dev = queue->adapter;
+ } else {
+ if (queue->tx && queue->rx)
+ sprintf(queue->name, "%s-txrx-%d", name,
+ queue->rx->queue_index);
+ else if (queue->tx)
+ sprintf(queue->name, "%s-tx-%d", name,
+ queue->tx->queue_index);
+ else
+ sprintf(queue->name, "%s-rx-%d", name,
+ queue->rx->queue_index);
+ handler = tsnep_irq_txrx;
+ dev = queue;
+ }
+
+ retval = request_irq(queue->irq, handler, 0, queue->name, dev);
+ if (retval) {
+ /* if name is empty, then interrupt won't be freed */
+ memset(queue->name, 0, sizeof(queue->name));
+ }
+
+ return retval;
+}
+
+static void tsnep_free_irq(struct tsnep_queue *queue, bool first)
+{
+ void *dev;
+
+ if (!strlen(queue->name))
+ return;
+
+ if (first)
+ dev = queue->adapter;
+ else
+ dev = queue;
+
+ free_irq(queue->irq, dev);
+ memset(queue->name, 0, sizeof(queue->name));
+}
+
static int tsnep_netdev_open(struct net_device *netdev)
{
struct tsnep_adapter *adapter = netdev_priv(netdev);
@@ -830,15 +951,11 @@ static int tsnep_netdev_open(struct net_device *netdev)
int rx_queue_index = 0;
int retval;
- retval = tsnep_phy_open(adapter);
- if (retval)
- return retval;
-
for (i = 0; i < adapter->num_queues; i++) {
adapter->queue[i].adapter = adapter;
if (adapter->queue[i].tx) {
addr = adapter->addr + TSNEP_QUEUE(tx_queue_index);
- retval = tsnep_tx_open(adapter, addr,
+ retval = tsnep_tx_open(adapter, addr, tx_queue_index,
adapter->queue[i].tx);
if (retval)
goto failed;
@@ -847,11 +964,20 @@ static int tsnep_netdev_open(struct net_device *netdev)
if (adapter->queue[i].rx) {
addr = adapter->addr + TSNEP_QUEUE(rx_queue_index);
retval = tsnep_rx_open(adapter, addr,
+ rx_queue_index,
adapter->queue[i].rx);
if (retval)
goto failed;
rx_queue_index++;
}
+
+ retval = tsnep_request_irq(&adapter->queue[i], i == 0);
+ if (retval) {
+ netif_err(adapter, drv, adapter->netdev,
+ "can't get assigned irq %d.\n",
+ adapter->queue[i].irq);
+ goto failed;
+ }
}
retval = netif_set_real_num_tx_queues(adapter->netdev,
@@ -863,9 +989,14 @@ static int tsnep_netdev_open(struct net_device *netdev)
if (retval)
goto failed;
+ tsnep_enable_irq(adapter, ECM_INT_LINK);
+ retval = tsnep_phy_open(adapter);
+ if (retval)
+ goto phy_failed;
+
for (i = 0; i < adapter->num_queues; i++) {
netif_napi_add(adapter->netdev, &adapter->queue[i].napi,
- tsnep_poll, 64);
+ tsnep_poll);
napi_enable(&adapter->queue[i].napi);
tsnep_enable_irq(adapter, adapter->queue[i].irq_mask);
@@ -873,14 +1004,18 @@ static int tsnep_netdev_open(struct net_device *netdev)
return 0;
+phy_failed:
+ tsnep_disable_irq(adapter, ECM_INT_LINK);
+ tsnep_phy_close(adapter);
failed:
for (i = 0; i < adapter->num_queues; i++) {
+ tsnep_free_irq(&adapter->queue[i], i == 0);
+
if (adapter->queue[i].rx)
tsnep_rx_close(adapter->queue[i].rx);
if (adapter->queue[i].tx)
tsnep_tx_close(adapter->queue[i].tx);
}
- tsnep_phy_close(adapter);
return retval;
}
@@ -889,20 +1024,23 @@ static int tsnep_netdev_close(struct net_device *netdev)
struct tsnep_adapter *adapter = netdev_priv(netdev);
int i;
+ tsnep_disable_irq(adapter, ECM_INT_LINK);
+ tsnep_phy_close(adapter);
+
for (i = 0; i < adapter->num_queues; i++) {
tsnep_disable_irq(adapter, adapter->queue[i].irq_mask);
napi_disable(&adapter->queue[i].napi);
netif_napi_del(&adapter->queue[i].napi);
+ tsnep_free_irq(&adapter->queue[i], i == 0);
+
if (adapter->queue[i].rx)
tsnep_rx_close(adapter->queue[i].rx);
if (adapter->queue[i].tx)
tsnep_tx_close(adapter->queue[i].tx);
}
- tsnep_phy_close(adapter);
-
return 0;
}
@@ -1017,6 +1155,22 @@ static int tsnep_netdev_set_mac_address(struct net_device *netdev, void *addr)
return 0;
}
+static int tsnep_netdev_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct tsnep_adapter *adapter = netdev_priv(netdev);
+ netdev_features_t changed = netdev->features ^ features;
+ bool enable;
+ int retval = 0;
+
+ if (changed & NETIF_F_LOOPBACK) {
+ enable = !!(features & NETIF_F_LOOPBACK);
+ retval = tsnep_phy_loopback(adapter, enable);
+ }
+
+ return retval;
+}
+
static ktime_t tsnep_netdev_get_tstamp(struct net_device *netdev,
const struct skb_shared_hwtstamps *hwtstamps,
bool cycles)
@@ -1038,9 +1192,9 @@ static const struct net_device_ops tsnep_netdev_ops = {
.ndo_start_xmit = tsnep_netdev_xmit_frame,
.ndo_eth_ioctl = tsnep_netdev_ioctl,
.ndo_set_rx_mode = tsnep_netdev_set_multicast,
-
.ndo_get_stats64 = tsnep_netdev_get_stats64,
.ndo_set_mac_address = tsnep_netdev_set_mac_address,
+ .ndo_set_features = tsnep_netdev_set_features,
.ndo_get_tstamp = tsnep_netdev_get_tstamp,
.ndo_setup_tc = tsnep_tc_setup,
};
@@ -1141,6 +1295,52 @@ static int tsnep_phy_init(struct tsnep_adapter *adapter)
return 0;
}
+static int tsnep_queue_init(struct tsnep_adapter *adapter, int queue_count)
+{
+ u32 irq_mask = ECM_INT_TX_0 | ECM_INT_RX_0;
+ char name[8];
+ int i;
+ int retval;
+
+ /* one TX/RX queue pair for netdev is mandatory */
+ if (platform_irq_count(adapter->pdev) == 1)
+ retval = platform_get_irq(adapter->pdev, 0);
+ else
+ retval = platform_get_irq_byname(adapter->pdev, "mac");
+ if (retval < 0)
+ return retval;
+ adapter->num_tx_queues = 1;
+ adapter->num_rx_queues = 1;
+ adapter->num_queues = 1;
+ adapter->queue[0].irq = retval;
+ adapter->queue[0].tx = &adapter->tx[0];
+ adapter->queue[0].rx = &adapter->rx[0];
+ adapter->queue[0].irq_mask = irq_mask;
+
+ adapter->netdev->irq = adapter->queue[0].irq;
+
+ /* add additional TX/RX queue pairs only if dedicated interrupt is
+ * available
+ */
+ for (i = 1; i < queue_count; i++) {
+ sprintf(name, "txrx-%d", i);
+ retval = platform_get_irq_byname_optional(adapter->pdev, name);
+ if (retval < 0)
+ break;
+
+ adapter->num_tx_queues++;
+ adapter->num_rx_queues++;
+ adapter->num_queues++;
+ adapter->queue[i].irq = retval;
+ adapter->queue[i].tx = &adapter->tx[i];
+ adapter->queue[i].rx = &adapter->rx[i];
+ adapter->queue[i].irq_mask =
+ irq_mask << (ECM_INT_TXRX_SHIFT * i);
+ }
+
+ return 0;
+}
+
static int tsnep_probe(struct platform_device *pdev)
{
struct tsnep_adapter *adapter;
@@ -1149,6 +1349,7 @@ static int tsnep_probe(struct platform_device *pdev)
u32 type;
int revision;
int version;
+ int queue_count;
int retval;
netdev = devm_alloc_etherdev_mqs(&pdev->dev,
@@ -1170,41 +1371,39 @@ static int tsnep_probe(struct platform_device *pdev)
netdev->max_mtu = TSNEP_MAX_FRAME_SIZE;
mutex_init(&adapter->gate_control_lock);
+ mutex_init(&adapter->rxnfc_lock);
+ INIT_LIST_HEAD(&adapter->rxnfc_rules);
io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
adapter->addr = devm_ioremap_resource(&pdev->dev, io);
if (IS_ERR(adapter->addr))
return PTR_ERR(adapter->addr);
- adapter->irq = platform_get_irq(pdev, 0);
netdev->mem_start = io->start;
netdev->mem_end = io->end;
- netdev->irq = adapter->irq;
type = ioread32(adapter->addr + ECM_TYPE);
revision = (type & ECM_REVISION_MASK) >> ECM_REVISION_SHIFT;
version = (type & ECM_VERSION_MASK) >> ECM_VERSION_SHIFT;
+ queue_count = (type & ECM_QUEUE_COUNT_MASK) >> ECM_QUEUE_COUNT_SHIFT;
adapter->gate_control = type & ECM_GATE_CONTROL;
-
- adapter->num_tx_queues = TSNEP_QUEUES;
- adapter->num_rx_queues = TSNEP_QUEUES;
- adapter->num_queues = TSNEP_QUEUES;
- adapter->queue[0].tx = &adapter->tx[0];
- adapter->queue[0].rx = &adapter->rx[0];
- adapter->queue[0].irq_mask = ECM_INT_TX_0 | ECM_INT_RX_0;
+ adapter->rxnfc_max = TSNEP_RX_ASSIGN_ETHER_TYPE_COUNT;
tsnep_disable_irq(adapter, ECM_INT_ALL);
- retval = devm_request_irq(&adapter->pdev->dev, adapter->irq, tsnep_irq,
- 0, TSNEP, adapter);
- if (retval != 0) {
- dev_err(&adapter->pdev->dev, "can't get assigned irq %d.\n",
- adapter->irq);
+
+ retval = tsnep_queue_init(adapter, queue_count);
+ if (retval)
+ return retval;
+
+ retval = dma_set_mask_and_coherent(&adapter->pdev->dev,
+ DMA_BIT_MASK(64));
+ if (retval) {
+ dev_err(&adapter->pdev->dev, "no usable DMA configuration.\n");
return retval;
}
- tsnep_enable_irq(adapter, ECM_INT_LINK);
retval = tsnep_mac_init(adapter);
if (retval)
- goto mac_init_failed;
+ return retval;
retval = tsnep_mdio_init(adapter);
if (retval)
@@ -1222,10 +1421,14 @@ static int tsnep_probe(struct platform_device *pdev)
if (retval)
goto tc_init_failed;
+ retval = tsnep_rxnfc_init(adapter);
+ if (retval)
+ goto rxnfc_init_failed;
+
netdev->netdev_ops = &tsnep_netdev_ops;
netdev->ethtool_ops = &tsnep_ethtool_ops;
netdev->features = NETIF_F_SG;
- netdev->hw_features = netdev->features;
+ netdev->hw_features = netdev->features | NETIF_F_LOOPBACK;
/* carrier off reporting is important to ethtool even BEFORE open */
netif_carrier_off(netdev);
@@ -1242,6 +1445,8 @@ static int tsnep_probe(struct platform_device *pdev)
return 0;
register_failed:
+ tsnep_rxnfc_cleanup(adapter);
+rxnfc_init_failed:
tsnep_tc_cleanup(adapter);
tc_init_failed:
tsnep_ptp_cleanup(adapter);
@@ -1250,8 +1455,6 @@ phy_init_failed:
if (adapter->mdiobus)
mdiobus_unregister(adapter->mdiobus);
mdio_init_failed:
-mac_init_failed:
- tsnep_disable_irq(adapter, ECM_INT_ALL);
return retval;
}
@@ -1261,6 +1464,8 @@ static int tsnep_remove(struct platform_device *pdev)
unregister_netdev(adapter->netdev);
+ tsnep_rxnfc_cleanup(adapter);
+
tsnep_tc_cleanup(adapter);
tsnep_ptp_cleanup(adapter);
diff --git a/drivers/net/ethernet/engleder/tsnep_rxnfc.c b/drivers/net/ethernet/engleder/tsnep_rxnfc.c
new file mode 100644
index 000000000000..9ac2a0cf3833
--- /dev/null
+++ b/drivers/net/ethernet/engleder/tsnep_rxnfc.c
@@ -0,0 +1,307 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2022 Gerhard Engleder */
+
+#include "tsnep.h"
+
+#define ETHER_TYPE_FULL_MASK ((__force __be16)~0)
+
+static void tsnep_enable_rule(struct tsnep_adapter *adapter,
+ struct tsnep_rxnfc_rule *rule)
+{
+ u8 rx_assign;
+ void __iomem *addr;
+
+ rx_assign = TSNEP_RX_ASSIGN_ACTIVE;
+ rx_assign |= (rule->queue_index << TSNEP_RX_ASSIGN_QUEUE_SHIFT) &
+ TSNEP_RX_ASSIGN_QUEUE_MASK;
+
+ addr = adapter->addr + TSNEP_RX_ASSIGN_ETHER_TYPE +
+ TSNEP_RX_ASSIGN_ETHER_TYPE_OFFSET * rule->location;
+ iowrite16(rule->filter.ether_type, addr);
+
+ /* enable rule after all settings are done */
+ addr = adapter->addr + TSNEP_RX_ASSIGN +
+ TSNEP_RX_ASSIGN_OFFSET * rule->location;
+ iowrite8(rx_assign, addr);
+}
+
+static void tsnep_disable_rule(struct tsnep_adapter *adapter,
+ struct tsnep_rxnfc_rule *rule)
+{
+ void __iomem *addr;
+
+ addr = adapter->addr + TSNEP_RX_ASSIGN +
+ TSNEP_RX_ASSIGN_OFFSET * rule->location;
+ iowrite8(0, addr);
+}
+
+static struct tsnep_rxnfc_rule *tsnep_get_rule(struct tsnep_adapter *adapter,
+ int location)
+{
+ struct tsnep_rxnfc_rule *rule;
+
+ list_for_each_entry(rule, &adapter->rxnfc_rules, list) {
+ if (rule->location == location)
+ return rule;
+ if (rule->location > location)
+ break;
+ }
+
+ return NULL;
+}
+
+static void tsnep_add_rule(struct tsnep_adapter *adapter,
+ struct tsnep_rxnfc_rule *rule)
+{
+ struct tsnep_rxnfc_rule *pred, *cur;
+
+ tsnep_enable_rule(adapter, rule);
+
+ pred = NULL;
+ list_for_each_entry(cur, &adapter->rxnfc_rules, list) {
+ if (cur->location >= rule->location)
+ break;
+ pred = cur;
+ }
+
+ list_add(&rule->list, pred ? &pred->list : &adapter->rxnfc_rules);
+ adapter->rxnfc_count++;
+}
+
+static void tsnep_delete_rule(struct tsnep_adapter *adapter,
+ struct tsnep_rxnfc_rule *rule)
+{
+ tsnep_disable_rule(adapter, rule);
+
+ list_del(&rule->list);
+ adapter->rxnfc_count--;
+
+ kfree(rule);
+}
+
+static void tsnep_flush_rules(struct tsnep_adapter *adapter)
+{
+ struct tsnep_rxnfc_rule *rule, *tmp;
+
+ mutex_lock(&adapter->rxnfc_lock);
+
+ list_for_each_entry_safe(rule, tmp, &adapter->rxnfc_rules, list)
+ tsnep_delete_rule(adapter, rule);
+
+ mutex_unlock(&adapter->rxnfc_lock);
+}
+
+int tsnep_rxnfc_get_rule(struct tsnep_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp = &cmd->fs;
+ struct tsnep_rxnfc_rule *rule = NULL;
+
+ cmd->data = adapter->rxnfc_max;
+
+ mutex_lock(&adapter->rxnfc_lock);
+
+ rule = tsnep_get_rule(adapter, fsp->location);
+ if (!rule) {
+ mutex_unlock(&adapter->rxnfc_lock);
+
+ return -ENOENT;
+ }
+
+ fsp->flow_type = ETHER_FLOW;
+ fsp->ring_cookie = rule->queue_index;
+
+ if (rule->filter.type == TSNEP_RXNFC_ETHER_TYPE) {
+ fsp->h_u.ether_spec.h_proto = htons(rule->filter.ether_type);
+ fsp->m_u.ether_spec.h_proto = ETHER_TYPE_FULL_MASK;
+ }
+
+ mutex_unlock(&adapter->rxnfc_lock);
+
+ return 0;
+}
+
+int tsnep_rxnfc_get_all(struct tsnep_adapter *adapter,
+ struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct tsnep_rxnfc_rule *rule;
+ int count = 0;
+
+ cmd->data = adapter->rxnfc_max;
+
+ mutex_lock(&adapter->rxnfc_lock);
+
+ list_for_each_entry(rule, &adapter->rxnfc_rules, list) {
+ if (count == cmd->rule_cnt) {
+ mutex_unlock(&adapter->rxnfc_lock);
+
+ return -EMSGSIZE;
+ }
+
+ rule_locs[count] = rule->location;
+ count++;
+ }
+
+ mutex_unlock(&adapter->rxnfc_lock);
+
+ cmd->rule_cnt = count;
+
+ return 0;
+}
+
+static int tsnep_rxnfc_find_location(struct tsnep_adapter *adapter)
+{
+ struct tsnep_rxnfc_rule *tmp;
+ int location = 0;
+
+ list_for_each_entry(tmp, &adapter->rxnfc_rules, list) {
+ if (tmp->location == location)
+ location++;
+ else
+ return location;
+ }
+
+ if (location >= adapter->rxnfc_max)
+ return -ENOSPC;
+
+ return location;
+}
+
+static void tsnep_rxnfc_init_rule(struct tsnep_rxnfc_rule *rule,
+ const struct ethtool_rx_flow_spec *fsp)
+{
+ INIT_LIST_HEAD(&rule->list);
+
+ rule->queue_index = fsp->ring_cookie;
+ rule->location = fsp->location;
+
+ rule->filter.type = TSNEP_RXNFC_ETHER_TYPE;
+ rule->filter.ether_type = ntohs(fsp->h_u.ether_spec.h_proto);
+}
+
+static int tsnep_rxnfc_check_rule(struct tsnep_adapter *adapter,
+ struct tsnep_rxnfc_rule *rule)
+{
+ struct net_device *dev = adapter->netdev;
+ struct tsnep_rxnfc_rule *tmp;
+
+ list_for_each_entry(tmp, &adapter->rxnfc_rules, list) {
+ if (!memcmp(&rule->filter, &tmp->filter, sizeof(rule->filter)) &&
+ tmp->location != rule->location) {
+ netdev_dbg(dev, "rule already exists\n");
+
+ return -EEXIST;
+ }
+ }
+
+ return 0;
+}
+
+int tsnep_rxnfc_add_rule(struct tsnep_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ struct tsnep_rxnfc_rule *rule, *old_rule;
+ int retval;
+
+ /* only EtherType is supported */
+ if (fsp->flow_type != ETHER_FLOW ||
+ !is_zero_ether_addr(fsp->m_u.ether_spec.h_dest) ||
+ !is_zero_ether_addr(fsp->m_u.ether_spec.h_source) ||
+ fsp->m_u.ether_spec.h_proto != ETHER_TYPE_FULL_MASK) {
+ netdev_dbg(netdev, "only ethernet protocol is supported\n");
+
+ return -EOPNOTSUPP;
+ }
+
+ if (fsp->ring_cookie >
+ (TSNEP_RX_ASSIGN_QUEUE_MASK >> TSNEP_RX_ASSIGN_QUEUE_SHIFT)) {
+ netdev_dbg(netdev, "invalid action\n");
+
+ return -EINVAL;
+ }
+
+ if (fsp->location != RX_CLS_LOC_ANY &&
+ fsp->location >= adapter->rxnfc_max) {
+ netdev_dbg(netdev, "invalid location\n");
+
+ return -EINVAL;
+ }
+
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule)
+ return -ENOMEM;
+
+ mutex_lock(&adapter->rxnfc_lock);
+
+ if (fsp->location == RX_CLS_LOC_ANY) {
+ retval = tsnep_rxnfc_find_location(adapter);
+ if (retval < 0)
+ goto failed;
+ fsp->location = retval;
+ }
+
+ tsnep_rxnfc_init_rule(rule, fsp);
+
+ retval = tsnep_rxnfc_check_rule(adapter, rule);
+ if (retval)
+ goto failed;
+
+ old_rule = tsnep_get_rule(adapter, fsp->location);
+ if (old_rule)
+ tsnep_delete_rule(adapter, old_rule);
+
+ tsnep_add_rule(adapter, rule);
+
+ mutex_unlock(&adapter->rxnfc_lock);
+
+ return 0;
+
+failed:
+ mutex_unlock(&adapter->rxnfc_lock);
+ kfree(rule);
+ return retval;
+}
+
+int tsnep_rxnfc_del_rule(struct tsnep_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ struct tsnep_rxnfc_rule *rule;
+
+ mutex_lock(&adapter->rxnfc_lock);
+
+ rule = tsnep_get_rule(adapter, fsp->location);
+ if (!rule) {
+ mutex_unlock(&adapter->rxnfc_lock);
+
+ return -ENOENT;
+ }
+
+ tsnep_delete_rule(adapter, rule);
+
+ mutex_unlock(&adapter->rxnfc_lock);
+
+ return 0;
+}
+
+int tsnep_rxnfc_init(struct tsnep_adapter *adapter)
+{
+ int i;
+
+ /* disable all rules */
+ for (i = 0; i < adapter->rxnfc_max;
+ i += sizeof(u32) / TSNEP_RX_ASSIGN_OFFSET)
+ iowrite32(0, adapter->addr + TSNEP_RX_ASSIGN + i);
+
+ return 0;
+}
+
+void tsnep_rxnfc_cleanup(struct tsnep_adapter *adapter)
+{
+ tsnep_flush_rules(adapter);
+}
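
The new tsnep_rxnfc_add_rule() path is exercised through the standard ethtool RX
classification ioctl rather than a driver-private interface. As a rough userspace
illustration (not part of the patch; the interface name, EtherType and queue number
are arbitrary), installing an EtherType steering rule that the driver then programs
into one of its TSNEP_RX_ASSIGN slots could look like this:

/* Hedged userspace sketch: insert an EtherType -> RX queue rule via
 * ETHTOOL_SRXCLSRLINS. add_ethertype_rule() is illustrative; the ioctl and
 * structures are the regular UAPI from <linux/ethtool.h>.
 */
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <arpa/inet.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int add_ethertype_rule(const char *ifname, unsigned short ether_type,
			      int queue)
{
	struct ethtool_rxnfc nfc;
	struct ifreq ifr;
	int fd, ret;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return -1;

	memset(&nfc, 0, sizeof(nfc));
	nfc.cmd = ETHTOOL_SRXCLSRLINS;
	nfc.fs.flow_type = ETHER_FLOW;
	nfc.fs.h_u.ether_spec.h_proto = htons(ether_type);
	nfc.fs.m_u.ether_spec.h_proto = 0xffff;	/* driver accepts only a full mask */
	nfc.fs.ring_cookie = queue;		/* target RX queue */
	nfc.fs.location = RX_CLS_LOC_ANY;	/* tsnep_rxnfc_find_location() picks a slot */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&nfc;

	ret = ioctl(fd, SIOCETHTOOL, &ifr);
	close(fd);
	return ret;
}

The ethtool utility reaches the same handler through its -N/--config-ntuple option.
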
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index 437c5acfe222..95cbad198b4b 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -1224,7 +1224,7 @@ static int ethoc_probe(struct platform_device *pdev)
netdev->ethtool_ops = &ethoc_ethtool_ops;
/* setup NAPI */
- netif_napi_add(netdev, &priv->napi, ethoc_poll, 64);
+ netif_napi_add(netdev, &priv->napi, ethoc_poll);
spin_lock_init(&priv->lock);
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index c03663785a8d..a03879a27b04 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -1063,8 +1063,8 @@ static int ftgmac100_mdiobus_write(struct mii_bus *bus, int phy_addr,
static void ftgmac100_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, dev_name(&netdev->dev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, dev_name(&netdev->dev), sizeof(info->bus_info));
}
static void
@@ -1506,7 +1506,7 @@ static int ftgmac100_open(struct net_device *netdev)
goto err_hw;
/* Initialize NAPI */
- netif_napi_add(netdev, &priv->napi, ftgmac100_poll, 64);
+ netif_napi_add(netdev, &priv->napi, ftgmac100_poll);
/* Grab our interrupt */
err = request_irq(netdev->irq, ftgmac100_interrupt, 0, netdev->name, netdev);
@@ -1701,10 +1701,14 @@ err_register_mdiobus:
static void ftgmac100_phy_disconnect(struct net_device *netdev)
{
+ struct ftgmac100 *priv = netdev_priv(netdev);
+
if (!netdev->phydev)
return;
phy_disconnect(netdev->phydev);
+ if (of_phy_is_fixed_link(priv->dev->of_node))
+ of_phy_deregister_fixed_link(priv->dev->of_node);
}
static void ftgmac100_destroy_mdio(struct net_device *netdev)
@@ -1867,6 +1871,26 @@ static int ftgmac100_probe(struct platform_device *pdev)
err = -EINVAL;
goto err_phy_connect;
}
+ } else if (np && of_phy_is_fixed_link(np)) {
+ struct phy_device *phy;
+
+ err = of_phy_register_fixed_link(np);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to register fixed PHY\n");
+ goto err_phy_connect;
+ }
+
+ phy = of_phy_get_and_connect(priv->netdev, np,
+ &ftgmac100_adjust_link);
+ if (!phy) {
+ dev_err(&pdev->dev, "Failed to connect to fixed PHY\n");
+ of_phy_deregister_fixed_link(np);
+ err = -EINVAL;
+ goto err_phy_connect;
+ }
+
+ /* Display what we found */
+ phy_attached_info(phy);
} else if (np && of_get_property(np, "phy-handle", NULL)) {
struct phy_device *phy;
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
index 8a341e2d5833..d95d78230828 100644
--- a/drivers/net/ethernet/faraday/ftmac100.c
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -807,8 +807,8 @@ static void ftmac100_mdio_write(struct net_device *netdev, int phy_id, int reg,
static void ftmac100_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, dev_name(&netdev->dev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, dev_name(&netdev->dev), sizeof(info->bus_info));
}
static int ftmac100_get_link_ksettings(struct net_device *netdev,
@@ -1075,6 +1075,11 @@ static int ftmac100_probe(struct platform_device *pdev)
SET_NETDEV_DEV(netdev, &pdev->dev);
netdev->ethtool_ops = &ftmac100_ethtool_ops;
netdev->netdev_ops = &ftmac100_netdev_ops;
+ netdev->max_mtu = MAX_PKT_SIZE;
+
+ err = platform_get_ethdev_address(&pdev->dev, netdev);
+ if (err == -EPROBE_DEFER)
+ goto defer_get_mac;
platform_set_drvdata(pdev, netdev);
@@ -1086,7 +1091,7 @@ static int ftmac100_probe(struct platform_device *pdev)
spin_lock_init(&priv->tx_lock);
/* initialize NAPI */
- netif_napi_add(netdev, &priv->napi, ftmac100_poll, 64);
+ netif_napi_add(netdev, &priv->napi, ftmac100_poll);
/* map io memory */
priv->res = request_mem_region(res->start, resource_size(res),
@@ -1137,6 +1142,7 @@ err_ioremap:
release_resource(priv->res);
err_req_mem:
netif_napi_del(&priv->napi);
+defer_get_mac:
free_netdev(netdev);
err_alloc_etherdev:
return err;
diff --git a/drivers/net/ethernet/faraday/ftmac100.h b/drivers/net/ethernet/faraday/ftmac100.h
index fe986f1673fc..8af32f9070f4 100644
--- a/drivers/net/ethernet/faraday/ftmac100.h
+++ b/drivers/net/ethernet/faraday/ftmac100.h
@@ -122,9 +122,9 @@
* Transmit descriptor, aligned to 16 bytes
*/
struct ftmac100_txdes {
- unsigned int txdes0;
- unsigned int txdes1;
- unsigned int txdes2; /* TXBUF_BADR */
+ __le32 txdes0;
+ __le32 txdes1;
+ __le32 txdes2; /* TXBUF_BADR */
unsigned int txdes3; /* not used by HW */
} __attribute__ ((aligned(16)));
@@ -143,9 +143,9 @@ struct ftmac100_txdes {
* Receive descriptor, aligned to 16 bytes
*/
struct ftmac100_rxdes {
- unsigned int rxdes0;
- unsigned int rxdes1;
- unsigned int rxdes2; /* RXBUF_BADR */
+ __le32 rxdes0;
+ __le32 rxdes1;
+ __le32 rxdes2; /* RXBUF_BADR */
unsigned int rxdes3; /* not used by HW */
} __attribute__ ((aligned(16)));
diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
index b3939a5f7b03..ed18450fd2cc 100644
--- a/drivers/net/ethernet/fealnx.c
+++ b/drivers/net/ethernet/fealnx.c
@@ -1809,8 +1809,8 @@ static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *i
{
struct netdev_private *np = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static int netdev_get_link_ksettings(struct net_device *dev,
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index e04e1c5cb013..ce866ae3df03 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -9,7 +9,7 @@ config NET_VENDOR_FREESCALE
depends on FSL_SOC || QUICC_ENGINE || CPM1 || CPM2 || PPC_MPC512x || \
M523x || M527x || M5272 || M528x || M520x || M532x || \
ARCH_MXC || ARCH_MXS || (PPC_MPC52xx && PPC_BESTCOMM) || \
- ARCH_LAYERSCAPE || COMPILE_TEST
+ ARCH_LAYERSCAPE || ARCH_S32 || COMPILE_TEST
help
If you have a network (Ethernet) card belonging to this class, say Y.
@@ -23,15 +23,16 @@ if NET_VENDOR_FREESCALE
config FEC
tristate "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
depends on (M523x || M527x || M5272 || M528x || M520x || M532x || \
- ARCH_MXC || SOC_IMX28 || COMPILE_TEST)
+ ARCH_MXC || ARCH_S32 || SOC_IMX28 || COMPILE_TEST)
default ARCH_MXC || SOC_IMX28 if ARM
depends on PTP_1588_CLOCK_OPTIONAL
select CRC32
select PHYLIB
+ select PAGE_POOL
imply NET_SELFTESTS
help
Say Y here if you want to use the built-in 10/100 Fast ethernet
- controller on some Motorola ColdFire and Freescale i.MX processors.
+ controller on some Motorola ColdFire and Freescale i.MX/S32 processors.
config FEC_MPC52xx
tristate "FEC MPC52xx driver"
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index a770bab4d1ed..31cfa121333d 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -197,12 +197,15 @@ static int dpaa_rx_extra_headroom;
#define dpaa_get_max_mtu() \
(dpaa_max_frm - (VLAN_ETH_HLEN + ETH_FCS_LEN))
+static void dpaa_eth_cgr_set_speed(struct mac_device *mac_dev, int speed);
+
static int dpaa_netdev_init(struct net_device *net_dev,
const struct net_device_ops *dpaa_ops,
u16 tx_timeout)
{
struct dpaa_priv *priv = netdev_priv(net_dev);
struct device *dev = net_dev->dev.parent;
+ struct mac_device *mac_dev = priv->mac_dev;
struct dpaa_percpu_priv *percpu_priv;
const u8 *mac_addr;
int i, err;
@@ -216,10 +219,10 @@ static int dpaa_netdev_init(struct net_device *net_dev,
}
net_dev->netdev_ops = dpaa_ops;
- mac_addr = priv->mac_dev->addr;
+ mac_addr = mac_dev->addr;
- net_dev->mem_start = priv->mac_dev->res->start;
- net_dev->mem_end = priv->mac_dev->res->end;
+ net_dev->mem_start = (unsigned long)mac_dev->vaddr;
+ net_dev->mem_end = (unsigned long)mac_dev->vaddr_end;
net_dev->min_mtu = ETH_MIN_MTU;
net_dev->max_mtu = dpaa_get_max_mtu();
@@ -246,7 +249,7 @@ static int dpaa_netdev_init(struct net_device *net_dev,
eth_hw_addr_set(net_dev, mac_addr);
} else {
eth_hw_addr_random(net_dev);
- err = priv->mac_dev->change_addr(priv->mac_dev->fman_mac,
+ err = mac_dev->change_addr(mac_dev->fman_mac,
(const enet_addr_t *)net_dev->dev_addr);
if (err) {
dev_err(dev, "Failed to set random MAC address\n");
@@ -261,6 +264,9 @@ static int dpaa_netdev_init(struct net_device *net_dev,
net_dev->needed_headroom = priv->tx_headroom;
net_dev->watchdog_timeo = msecs_to_jiffies(tx_timeout);
+ mac_dev->net_dev = net_dev;
+ mac_dev->update_speed = dpaa_eth_cgr_set_speed;
+
/* start without the RUNNING flag, phylib controls it later */
netif_carrier_off(net_dev);
@@ -288,10 +294,9 @@ static int dpaa_stop(struct net_device *net_dev)
*/
msleep(200);
- err = mac_dev->stop(mac_dev);
- if (err < 0)
- netif_err(priv, ifdown, net_dev, "mac_dev->stop() = %d\n",
- err);
+ if (mac_dev->phy_dev)
+ phy_stop(mac_dev->phy_dev);
+ mac_dev->disable(mac_dev->fman_mac);
for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
error = fman_port_disable(mac_dev->port[i]);
@@ -826,10 +831,10 @@ static int dpaa_eth_cgr_init(struct dpaa_priv *priv)
initcgr.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES);
initcgr.cgr.cscn_en = QM_CGR_EN;
- /* Set different thresholds based on the MAC speed.
- * This may turn suboptimal if the MAC is reconfigured at a speed
- * lower than its max, e.g. if a dTSEC later negotiates a 100Mbps link.
- * In such cases, we ought to reconfigure the threshold, too.
+ /* Set different thresholds based on the configured MAC speed.
+ * This may turn suboptimal if the MAC is reconfigured at another
+ * speed, so MACs must call dpaa_eth_cgr_set_speed in their adjust_link
+ * callback.
*/
if (priv->mac_dev->if_support & SUPPORTED_10000baseT_Full)
cs_th = DPAA_CS_THRESHOLD_10G;
@@ -858,6 +863,31 @@ out_error:
return err;
}
+static void dpaa_eth_cgr_set_speed(struct mac_device *mac_dev, int speed)
+{
+ struct net_device *net_dev = mac_dev->net_dev;
+ struct dpaa_priv *priv = netdev_priv(net_dev);
+ struct qm_mcc_initcgr opts = { };
+ u32 cs_th;
+ int err;
+
+ opts.we_mask = cpu_to_be16(QM_CGR_WE_CS_THRES);
+ switch (speed) {
+ case SPEED_10000:
+ cs_th = DPAA_CS_THRESHOLD_10G;
+ break;
+ case SPEED_1000:
+ default:
+ cs_th = DPAA_CS_THRESHOLD_1G;
+ break;
+ }
+ qm_cgr_cs_thres_set64(&opts.cgr.cs_thres, cs_th, 1);
+
+ err = qman_update_cgr_safe(&priv->cgr_data.cgr, &opts);
+ if (err)
+ netdev_err(net_dev, "could not update speed: %d\n", err);
+}
+
static inline void dpaa_setup_ingress(const struct dpaa_priv *priv,
struct dpaa_fq *fq,
const struct qman_fq *template)
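
The reworked comment above relies on the MAC layer calling back into the Ethernet
driver whenever the link speed changes: dpaa_netdev_init() installs
dpaa_eth_cgr_set_speed() as mac_dev->update_speed, and the FMan MAC's adjust_link
path is expected to invoke it. A hedged sketch of that call site (the handler and
lookup helper names are illustrative; only the update_speed hook comes from this
patch):

/* Illustrative only: forward the negotiated speed so the dpaa_eth layer can
 * rescale its congestion threshold. Not taken verbatim from the patch.
 */
static void example_mac_adjust_link(struct net_device *net_dev)
{
	struct phy_device *phy_dev = net_dev->phydev;
	struct mac_device *mac_dev = example_lookup_mac_device(net_dev);

	/* ... reconfigure the MAC for phy_dev->speed and phy_dev->duplex ... */

	if (mac_dev && mac_dev->update_speed)
		mac_dev->update_speed(mac_dev, phy_dev->speed);
}
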
@@ -2946,11 +2976,12 @@ static int dpaa_open(struct net_device *net_dev)
goto mac_start_failed;
}
- err = priv->mac_dev->start(mac_dev);
+ err = priv->mac_dev->enable(mac_dev->fman_mac);
if (err < 0) {
- netif_err(priv, ifup, net_dev, "mac_dev->start() = %d\n", err);
+ netif_err(priv, ifup, net_dev, "mac_dev->enable() = %d\n", err);
goto mac_start_failed;
}
+ phy_start(priv->mac_dev->phy_dev);
netif_tx_start_all_queues(net_dev);
@@ -3152,8 +3183,7 @@ static int dpaa_napi_add(struct net_device *net_dev)
for_each_possible_cpu(cpu) {
percpu_priv = per_cpu_ptr(priv->percpu_priv, cpu);
- netif_napi_add(net_dev, &percpu_priv->np.napi,
- dpaa_eth_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(net_dev, &percpu_priv->np.napi, dpaa_eth_poll);
}
return 0;
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c
index 4fee74c024bd..258eb6c8f4c0 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c
@@ -18,7 +18,7 @@ static ssize_t dpaa_eth_show_addr(struct device *dev,
if (mac_dev)
return sprintf(buf, "%llx",
- (unsigned long long)mac_dev->res->start);
+ (unsigned long long)mac_dev->vaddr);
else
return sprintf(buf, "none");
}
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
index 73f07881ce2d..769e936a263c 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
@@ -80,9 +80,9 @@ static int dpaa_set_link_ksettings(struct net_device *net_dev,
static void dpaa_get_drvinfo(struct net_device *net_dev,
struct ethtool_drvinfo *drvinfo)
{
- strlcpy(drvinfo->driver, KBUILD_MODNAME,
+ strscpy(drvinfo->driver, KBUILD_MODNAME,
sizeof(drvinfo->driver));
- strlcpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
+ strscpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 75d51572693d..8d029addddad 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -4565,8 +4565,7 @@ static void dpaa2_eth_add_ch_napi(struct dpaa2_eth_priv *priv)
for (i = 0; i < priv->num_channels; i++) {
ch = priv->channel[i];
/* NAPI weight *MUST* be a multiple of DPAA2_ETH_STORE_SIZE */
- netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll);
}
}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
index c9bee9a0c9b2..49ff85633783 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
@@ -549,7 +549,7 @@ void dpaa2_mac_get_strings(u8 *data)
int i;
for (i = 0; i < DPAA2_MAC_NUM_STATS; i++) {
- strlcpy(p, dpaa2_mac_ethtool_stats[i], ETH_GSTRING_LEN);
+ strscpy(p, dpaa2_mac_ethtool_stats[i], ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
}
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
index e507e9065214..2b5909fa93cf 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
@@ -3373,9 +3373,8 @@ static int dpaa2_switch_probe(struct fsl_mc_device *sw_dev)
* different queues for each switch ports.
*/
for (i = 0; i < DPAA2_SWITCH_RX_NUM_FQS; i++)
- netif_napi_add(ethsw->ports[0]->netdev,
- &ethsw->fq[i].napi, dpaa2_switch_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(ethsw->ports[0]->netdev, &ethsw->fq[i].napi,
+ dpaa2_switch_poll);
/* Setup IRQs */
err = dpaa2_switch_setup_irqs(sw_dev);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index 9f5b921039bd..54bc92fc6bf0 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -2116,13 +2116,14 @@ static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
static void enetc_setup_bdrs(struct enetc_ndev_priv *priv)
{
+ struct enetc_hw *hw = &priv->si->hw;
int i;
for (i = 0; i < priv->num_tx_rings; i++)
- enetc_setup_txbdr(&priv->si->hw, priv->tx_ring[i]);
+ enetc_setup_txbdr(hw, priv->tx_ring[i]);
for (i = 0; i < priv->num_rx_rings; i++)
- enetc_setup_rxbdr(&priv->si->hw, priv->rx_ring[i]);
+ enetc_setup_rxbdr(hw, priv->rx_ring[i]);
}
static void enetc_clear_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring)
@@ -2155,13 +2156,14 @@ static void enetc_clear_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring)
static void enetc_clear_bdrs(struct enetc_ndev_priv *priv)
{
+ struct enetc_hw *hw = &priv->si->hw;
int i;
for (i = 0; i < priv->num_tx_rings; i++)
- enetc_clear_txbdr(&priv->si->hw, priv->tx_ring[i]);
+ enetc_clear_txbdr(hw, priv->tx_ring[i]);
for (i = 0; i < priv->num_rx_rings; i++)
- enetc_clear_rxbdr(&priv->si->hw, priv->rx_ring[i]);
+ enetc_clear_rxbdr(hw, priv->rx_ring[i]);
udelay(1);
}
@@ -2169,13 +2171,13 @@ static void enetc_clear_bdrs(struct enetc_ndev_priv *priv)
static int enetc_setup_irqs(struct enetc_ndev_priv *priv)
{
struct pci_dev *pdev = priv->si->pdev;
+ struct enetc_hw *hw = &priv->si->hw;
int i, j, err;
for (i = 0; i < priv->bdr_int_num; i++) {
int irq = pci_irq_vector(pdev, ENETC_BDR_INT_BASE_IDX + i);
struct enetc_int_vector *v = priv->int_vector[i];
int entry = ENETC_BDR_INT_BASE_IDX + i;
- struct enetc_hw *hw = &priv->si->hw;
snprintf(v->name, sizeof(v->name), "%s-rxtx%d",
priv->ndev->name, i);
@@ -2263,13 +2265,14 @@ static void enetc_setup_interrupts(struct enetc_ndev_priv *priv)
static void enetc_clear_interrupts(struct enetc_ndev_priv *priv)
{
+ struct enetc_hw *hw = &priv->si->hw;
int i;
for (i = 0; i < priv->num_tx_rings; i++)
- enetc_txbdr_wr(&priv->si->hw, i, ENETC_TBIER, 0);
+ enetc_txbdr_wr(hw, i, ENETC_TBIER, 0);
for (i = 0; i < priv->num_rx_rings; i++)
- enetc_rxbdr_wr(&priv->si->hw, i, ENETC_RBIER, 0);
+ enetc_rxbdr_wr(hw, i, ENETC_RBIER, 0);
}
static int enetc_phylink_connect(struct net_device *ndev)
@@ -2436,6 +2439,7 @@ int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct tc_mqprio_qopt *mqprio = type_data;
+ struct enetc_hw *hw = &priv->si->hw;
struct enetc_bdr *tx_ring;
int num_stack_tx_queues;
u8 num_tc;
@@ -2452,7 +2456,7 @@ int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
/* Reset all ring priorities to 0 */
for (i = 0; i < priv->num_tx_rings; i++) {
tx_ring = priv->tx_ring[i];
- enetc_set_bdr_prio(&priv->si->hw, tx_ring->index, 0);
+ enetc_set_bdr_prio(hw, tx_ring->index, 0);
}
return 0;
@@ -2471,7 +2475,7 @@ int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data)
*/
for (i = 0; i < num_tc; i++) {
tx_ring = priv->tx_ring[i];
- enetc_set_bdr_prio(&priv->si->hw, tx_ring->index, i);
+ enetc_set_bdr_prio(hw, tx_ring->index, i);
}
/* Reset the number of netdev queues based on the TC count */
@@ -2584,19 +2588,21 @@ static int enetc_set_rss(struct net_device *ndev, int en)
static void enetc_enable_rxvlan(struct net_device *ndev, bool en)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
int i;
for (i = 0; i < priv->num_rx_rings; i++)
- enetc_bdr_enable_rxvlan(&priv->si->hw, i, en);
+ enetc_bdr_enable_rxvlan(hw, i, en);
}
static void enetc_enable_txvlan(struct net_device *ndev, bool en)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
int i;
for (i = 0; i < priv->num_tx_rings; i++)
- enetc_bdr_enable_txvlan(&priv->si->hw, i, en);
+ enetc_bdr_enable_txvlan(hw, i, en);
}
void enetc_set_features(struct net_device *ndev, netdev_features_t features)
@@ -2759,8 +2765,7 @@ int enetc_alloc_msix(struct enetc_ndev_priv *priv)
v->rx_dim_en = true;
}
INIT_WORK(&v->rx_dim.work, enetc_rx_dim_work);
- netif_napi_add(priv->ndev, &v->napi, enetc_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(priv->ndev, &v->napi, enetc_poll);
v->count_tx_rings = v_tx_rings;
for (j = 0; j < v_tx_rings; j++) {
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
index 2cfe6944ebd3..161930a65f61 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc.h
@@ -453,7 +453,11 @@ static inline void enetc_cbd_free_data_mem(struct enetc_si *si, int size,
data, *dma);
}
+void enetc_reset_ptcmsdur(struct enetc_hw *hw);
+void enetc_set_ptcmsdur(struct enetc_hw *hw, u32 *queue_max_sdu);
+
#ifdef CONFIG_FSL_ENETC_QOS
+int enetc_qos_query_caps(struct net_device *ndev, void *type_data);
int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data);
void enetc_sched_speed_set(struct enetc_ndev_priv *priv, int speed);
int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data);
@@ -467,19 +471,20 @@ int enetc_set_psfp(struct net_device *ndev, bool en);
static inline void enetc_get_max_cap(struct enetc_ndev_priv *priv)
{
+ struct enetc_hw *hw = &priv->si->hw;
u32 reg;
- reg = enetc_port_rd(&priv->si->hw, ENETC_PSIDCAPR);
+ reg = enetc_port_rd(hw, ENETC_PSIDCAPR);
priv->psfp_cap.max_streamid = reg & ENETC_PSIDCAPR_MSK;
/* Port stream filter capability */
- reg = enetc_port_rd(&priv->si->hw, ENETC_PSFCAPR);
+ reg = enetc_port_rd(hw, ENETC_PSFCAPR);
priv->psfp_cap.max_psfp_filter = reg & ENETC_PSFCAPR_MSK;
/* Port stream gate capability */
- reg = enetc_port_rd(&priv->si->hw, ENETC_PSGCAPR);
+ reg = enetc_port_rd(hw, ENETC_PSGCAPR);
priv->psfp_cap.max_psfp_gate = (reg & ENETC_PSGCAPR_SGIT_MSK);
priv->psfp_cap.max_psfp_gatelist = (reg & ENETC_PSGCAPR_GCL_MSK) >> 16;
/* Port flow meter capability */
- reg = enetc_port_rd(&priv->si->hw, ENETC_PFMCAPR);
+ reg = enetc_port_rd(hw, ENETC_PFMCAPR);
priv->psfp_cap.max_psfp_meter = reg & ENETC_PFMCAPR_MSK;
}
@@ -520,6 +525,7 @@ static inline int enetc_psfp_disable(struct enetc_ndev_priv *priv)
}
#else
+#define enetc_qos_query_caps(ndev, type_data) -EOPNOTSUPP
#define enetc_setup_tc_taprio(ndev, type_data) -EOPNOTSUPP
#define enetc_sched_speed_set(priv, speed) (void)0
#define enetc_setup_tc_cbs(ndev, type_data) -EOPNOTSUPP
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
index ff872e40ce85..c8369e3752b0 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
@@ -125,68 +125,68 @@ static const struct {
int reg;
char name[ETH_GSTRING_LEN];
} enetc_port_counters[] = {
- { ENETC_PM0_REOCT, "MAC rx ethernet octets" },
- { ENETC_PM0_RALN, "MAC rx alignment errors" },
- { ENETC_PM0_RXPF, "MAC rx valid pause frames" },
- { ENETC_PM0_RFRM, "MAC rx valid frames" },
- { ENETC_PM0_RFCS, "MAC rx fcs errors" },
- { ENETC_PM0_RVLAN, "MAC rx VLAN frames" },
- { ENETC_PM0_RERR, "MAC rx frame errors" },
- { ENETC_PM0_RUCA, "MAC rx unicast frames" },
- { ENETC_PM0_RMCA, "MAC rx multicast frames" },
- { ENETC_PM0_RBCA, "MAC rx broadcast frames" },
- { ENETC_PM0_RDRP, "MAC rx dropped packets" },
- { ENETC_PM0_RPKT, "MAC rx packets" },
- { ENETC_PM0_RUND, "MAC rx undersized packets" },
- { ENETC_PM0_R64, "MAC rx 64 byte packets" },
- { ENETC_PM0_R127, "MAC rx 65-127 byte packets" },
- { ENETC_PM0_R255, "MAC rx 128-255 byte packets" },
- { ENETC_PM0_R511, "MAC rx 256-511 byte packets" },
- { ENETC_PM0_R1023, "MAC rx 512-1023 byte packets" },
- { ENETC_PM0_R1522, "MAC rx 1024-1522 byte packets" },
- { ENETC_PM0_R1523X, "MAC rx 1523 to max-octet packets" },
- { ENETC_PM0_ROVR, "MAC rx oversized packets" },
- { ENETC_PM0_RJBR, "MAC rx jabber packets" },
- { ENETC_PM0_RFRG, "MAC rx fragment packets" },
- { ENETC_PM0_RCNP, "MAC rx control packets" },
- { ENETC_PM0_RDRNTP, "MAC rx fifo drop" },
- { ENETC_PM0_TEOCT, "MAC tx ethernet octets" },
- { ENETC_PM0_TOCT, "MAC tx octets" },
- { ENETC_PM0_TCRSE, "MAC tx carrier sense errors" },
- { ENETC_PM0_TXPF, "MAC tx valid pause frames" },
- { ENETC_PM0_TFRM, "MAC tx frames" },
- { ENETC_PM0_TFCS, "MAC tx fcs errors" },
- { ENETC_PM0_TVLAN, "MAC tx VLAN frames" },
- { ENETC_PM0_TERR, "MAC tx frame errors" },
- { ENETC_PM0_TUCA, "MAC tx unicast frames" },
- { ENETC_PM0_TMCA, "MAC tx multicast frames" },
- { ENETC_PM0_TBCA, "MAC tx broadcast frames" },
- { ENETC_PM0_TPKT, "MAC tx packets" },
- { ENETC_PM0_TUND, "MAC tx undersized packets" },
- { ENETC_PM0_T64, "MAC tx 64 byte packets" },
- { ENETC_PM0_T127, "MAC tx 65-127 byte packets" },
- { ENETC_PM0_T255, "MAC tx 128-255 byte packets" },
- { ENETC_PM0_T511, "MAC tx 256-511 byte packets" },
- { ENETC_PM0_T1023, "MAC tx 512-1023 byte packets" },
- { ENETC_PM0_T1522, "MAC tx 1024-1522 byte packets" },
- { ENETC_PM0_T1523X, "MAC tx 1523 to max-octet packets" },
- { ENETC_PM0_TCNP, "MAC tx control packets" },
- { ENETC_PM0_TDFR, "MAC tx deferred packets" },
- { ENETC_PM0_TMCOL, "MAC tx multiple collisions" },
- { ENETC_PM0_TSCOL, "MAC tx single collisions" },
- { ENETC_PM0_TLCOL, "MAC tx late collisions" },
- { ENETC_PM0_TECOL, "MAC tx excessive collisions" },
- { ENETC_UFDMF, "SI MAC nomatch u-cast discards" },
- { ENETC_MFDMF, "SI MAC nomatch m-cast discards" },
- { ENETC_PBFDSIR, "SI MAC nomatch b-cast discards" },
- { ENETC_PUFDVFR, "SI VLAN nomatch u-cast discards" },
- { ENETC_PMFDVFR, "SI VLAN nomatch m-cast discards" },
- { ENETC_PBFDVFR, "SI VLAN nomatch b-cast discards" },
- { ENETC_PFDMSAPR, "SI pruning discarded frames" },
- { ENETC_PICDR(0), "ICM DR0 discarded frames" },
- { ENETC_PICDR(1), "ICM DR1 discarded frames" },
- { ENETC_PICDR(2), "ICM DR2 discarded frames" },
- { ENETC_PICDR(3), "ICM DR3 discarded frames" },
+ { ENETC_PM_REOCT(0), "MAC rx ethernet octets" },
+ { ENETC_PM_RALN(0), "MAC rx alignment errors" },
+ { ENETC_PM_RXPF(0), "MAC rx valid pause frames" },
+ { ENETC_PM_RFRM(0), "MAC rx valid frames" },
+ { ENETC_PM_RFCS(0), "MAC rx fcs errors" },
+ { ENETC_PM_RVLAN(0), "MAC rx VLAN frames" },
+ { ENETC_PM_RERR(0), "MAC rx frame errors" },
+ { ENETC_PM_RUCA(0), "MAC rx unicast frames" },
+ { ENETC_PM_RMCA(0), "MAC rx multicast frames" },
+ { ENETC_PM_RBCA(0), "MAC rx broadcast frames" },
+ { ENETC_PM_RDRP(0), "MAC rx dropped packets" },
+ { ENETC_PM_RPKT(0), "MAC rx packets" },
+ { ENETC_PM_RUND(0), "MAC rx undersized packets" },
+ { ENETC_PM_R64(0), "MAC rx 64 byte packets" },
+ { ENETC_PM_R127(0), "MAC rx 65-127 byte packets" },
+ { ENETC_PM_R255(0), "MAC rx 128-255 byte packets" },
+ { ENETC_PM_R511(0), "MAC rx 256-511 byte packets" },
+ { ENETC_PM_R1023(0), "MAC rx 512-1023 byte packets" },
+ { ENETC_PM_R1522(0), "MAC rx 1024-1522 byte packets" },
+ { ENETC_PM_R1523X(0), "MAC rx 1523 to max-octet packets" },
+ { ENETC_PM_ROVR(0), "MAC rx oversized packets" },
+ { ENETC_PM_RJBR(0), "MAC rx jabber packets" },
+ { ENETC_PM_RFRG(0), "MAC rx fragment packets" },
+ { ENETC_PM_RCNP(0), "MAC rx control packets" },
+ { ENETC_PM_RDRNTP(0), "MAC rx fifo drop" },
+ { ENETC_PM_TEOCT(0), "MAC tx ethernet octets" },
+ { ENETC_PM_TOCT(0), "MAC tx octets" },
+ { ENETC_PM_TCRSE(0), "MAC tx carrier sense errors" },
+ { ENETC_PM_TXPF(0), "MAC tx valid pause frames" },
+ { ENETC_PM_TFRM(0), "MAC tx frames" },
+ { ENETC_PM_TFCS(0), "MAC tx fcs errors" },
+ { ENETC_PM_TVLAN(0), "MAC tx VLAN frames" },
+ { ENETC_PM_TERR(0), "MAC tx frame errors" },
+ { ENETC_PM_TUCA(0), "MAC tx unicast frames" },
+ { ENETC_PM_TMCA(0), "MAC tx multicast frames" },
+ { ENETC_PM_TBCA(0), "MAC tx broadcast frames" },
+ { ENETC_PM_TPKT(0), "MAC tx packets" },
+ { ENETC_PM_TUND(0), "MAC tx undersized packets" },
+ { ENETC_PM_T64(0), "MAC tx 64 byte packets" },
+ { ENETC_PM_T127(0), "MAC tx 65-127 byte packets" },
+ { ENETC_PM_T255(0), "MAC tx 128-255 byte packets" },
+ { ENETC_PM_T511(0), "MAC tx 256-511 byte packets" },
+ { ENETC_PM_T1023(0), "MAC tx 512-1023 byte packets" },
+ { ENETC_PM_T1522(0), "MAC tx 1024-1522 byte packets" },
+ { ENETC_PM_T1523X(0), "MAC tx 1523 to max-octet packets" },
+ { ENETC_PM_TCNP(0), "MAC tx control packets" },
+ { ENETC_PM_TDFR(0), "MAC tx deferred packets" },
+ { ENETC_PM_TMCOL(0), "MAC tx multiple collisions" },
+ { ENETC_PM_TSCOL(0), "MAC tx single collisions" },
+ { ENETC_PM_TLCOL(0), "MAC tx late collisions" },
+ { ENETC_PM_TECOL(0), "MAC tx excessive collisions" },
+ { ENETC_UFDMF, "SI MAC nomatch u-cast discards" },
+ { ENETC_MFDMF, "SI MAC nomatch m-cast discards" },
+ { ENETC_PBFDSIR, "SI MAC nomatch b-cast discards" },
+ { ENETC_PUFDVFR, "SI VLAN nomatch u-cast discards" },
+ { ENETC_PMFDVFR, "SI VLAN nomatch m-cast discards" },
+ { ENETC_PBFDVFR, "SI VLAN nomatch b-cast discards" },
+ { ENETC_PFDMSAPR, "SI pruning discarded frames" },
+ { ENETC_PICDR(0), "ICM DR0 discarded frames" },
+ { ENETC_PICDR(1), "ICM DR1 discarded frames" },
+ { ENETC_PICDR(2), "ICM DR2 discarded frames" },
+ { ENETC_PICDR(3), "ICM DR3 discarded frames" },
};
static const char rx_ring_stats[][ETH_GSTRING_LEN] = {
@@ -236,7 +236,7 @@ static void enetc_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < ARRAY_SIZE(enetc_si_counters); i++) {
- strlcpy(p, enetc_si_counters[i].name, ETH_GSTRING_LEN);
+ strscpy(p, enetc_si_counters[i].name, ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
for (i = 0; i < priv->num_tx_rings; i++) {
@@ -258,7 +258,7 @@ static void enetc_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
break;
for (i = 0; i < ARRAY_SIZE(enetc_port_counters); i++) {
- strlcpy(p, enetc_port_counters[i].name,
+ strscpy(p, enetc_port_counters[i].name,
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
@@ -301,6 +301,113 @@ static void enetc_get_ethtool_stats(struct net_device *ndev,
data[o++] = enetc_port_rd(hw, enetc_port_counters[i].reg);
}
+static void enetc_get_pause_stats(struct net_device *ndev,
+ struct ethtool_pause_stats *pause_stats)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+
+ pause_stats->tx_pause_frames = enetc_port_rd(hw, ENETC_PM_TXPF(0));
+ pause_stats->rx_pause_frames = enetc_port_rd(hw, ENETC_PM_RXPF(0));
+}
+
+static void enetc_mac_stats(struct enetc_hw *hw, int mac,
+ struct ethtool_eth_mac_stats *s)
+{
+ s->FramesTransmittedOK = enetc_port_rd(hw, ENETC_PM_TFRM(mac));
+ s->SingleCollisionFrames = enetc_port_rd(hw, ENETC_PM_TSCOL(mac));
+ s->MultipleCollisionFrames = enetc_port_rd(hw, ENETC_PM_TMCOL(mac));
+ s->FramesReceivedOK = enetc_port_rd(hw, ENETC_PM_RFRM(mac));
+ s->FrameCheckSequenceErrors = enetc_port_rd(hw, ENETC_PM_RFCS(mac));
+ s->AlignmentErrors = enetc_port_rd(hw, ENETC_PM_RALN(mac));
+ s->OctetsTransmittedOK = enetc_port_rd(hw, ENETC_PM_TEOCT(mac));
+ s->FramesWithDeferredXmissions = enetc_port_rd(hw, ENETC_PM_TDFR(mac));
+ s->LateCollisions = enetc_port_rd(hw, ENETC_PM_TLCOL(mac));
+ s->FramesAbortedDueToXSColls = enetc_port_rd(hw, ENETC_PM_TECOL(mac));
+ s->FramesLostDueToIntMACXmitError = enetc_port_rd(hw, ENETC_PM_TERR(mac));
+ s->CarrierSenseErrors = enetc_port_rd(hw, ENETC_PM_TCRSE(mac));
+ s->OctetsReceivedOK = enetc_port_rd(hw, ENETC_PM_REOCT(mac));
+ s->FramesLostDueToIntMACRcvError = enetc_port_rd(hw, ENETC_PM_RDRNTP(mac));
+ s->MulticastFramesXmittedOK = enetc_port_rd(hw, ENETC_PM_TMCA(mac));
+ s->BroadcastFramesXmittedOK = enetc_port_rd(hw, ENETC_PM_TBCA(mac));
+ s->MulticastFramesReceivedOK = enetc_port_rd(hw, ENETC_PM_RMCA(mac));
+ s->BroadcastFramesReceivedOK = enetc_port_rd(hw, ENETC_PM_RBCA(mac));
+}
+
+static void enetc_ctrl_stats(struct enetc_hw *hw, int mac,
+ struct ethtool_eth_ctrl_stats *s)
+{
+ s->MACControlFramesTransmitted = enetc_port_rd(hw, ENETC_PM_TCNP(mac));
+ s->MACControlFramesReceived = enetc_port_rd(hw, ENETC_PM_RCNP(mac));
+}
+
+static const struct ethtool_rmon_hist_range enetc_rmon_ranges[] = {
+ { 64, 64 },
+ { 65, 127 },
+ { 128, 255 },
+ { 256, 511 },
+ { 512, 1023 },
+ { 1024, 1522 },
+ { 1523, ENETC_MAC_MAXFRM_SIZE },
+ {},
+};
+
+static void enetc_rmon_stats(struct enetc_hw *hw, int mac,
+ struct ethtool_rmon_stats *s,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ s->undersize_pkts = enetc_port_rd(hw, ENETC_PM_RUND(mac));
+ s->oversize_pkts = enetc_port_rd(hw, ENETC_PM_ROVR(mac));
+ s->fragments = enetc_port_rd(hw, ENETC_PM_RFRG(mac));
+ s->jabbers = enetc_port_rd(hw, ENETC_PM_RJBR(mac));
+
+ s->hist[0] = enetc_port_rd(hw, ENETC_PM_R64(mac));
+ s->hist[1] = enetc_port_rd(hw, ENETC_PM_R127(mac));
+ s->hist[2] = enetc_port_rd(hw, ENETC_PM_R255(mac));
+ s->hist[3] = enetc_port_rd(hw, ENETC_PM_R511(mac));
+ s->hist[4] = enetc_port_rd(hw, ENETC_PM_R1023(mac));
+ s->hist[5] = enetc_port_rd(hw, ENETC_PM_R1522(mac));
+ s->hist[6] = enetc_port_rd(hw, ENETC_PM_R1523X(mac));
+
+ s->hist_tx[0] = enetc_port_rd(hw, ENETC_PM_T64(mac));
+ s->hist_tx[1] = enetc_port_rd(hw, ENETC_PM_T127(mac));
+ s->hist_tx[2] = enetc_port_rd(hw, ENETC_PM_T255(mac));
+ s->hist_tx[3] = enetc_port_rd(hw, ENETC_PM_T511(mac));
+ s->hist_tx[4] = enetc_port_rd(hw, ENETC_PM_T1023(mac));
+ s->hist_tx[5] = enetc_port_rd(hw, ENETC_PM_T1522(mac));
+ s->hist_tx[6] = enetc_port_rd(hw, ENETC_PM_T1523X(mac));
+
+ *ranges = enetc_rmon_ranges;
+}
+
+static void enetc_get_eth_mac_stats(struct net_device *ndev,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+
+ enetc_mac_stats(hw, 0, mac_stats);
+}
+
+static void enetc_get_eth_ctrl_stats(struct net_device *ndev,
+ struct ethtool_eth_ctrl_stats *ctrl_stats)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+
+ enetc_ctrl_stats(hw, 0, ctrl_stats);
+}
+
+static void enetc_get_rmon_stats(struct net_device *ndev,
+ struct ethtool_rmon_stats *rmon_stats,
+ const struct ethtool_rmon_hist_range **ranges)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
+
+ enetc_rmon_stats(hw, 0, rmon_stats, ranges);
+}
+
#define ENETC_RSSHASH_L3 (RXH_L2DA | RXH_VLAN | RXH_L3_PROTO | RXH_IP_SRC | \
RXH_IP_DST)
#define ENETC_RSSHASH_L4 (ENETC_RSSHASH_L3 | RXH_L4_B_0_1 | RXH_L4_B_2_3)
@@ -766,6 +873,10 @@ static const struct ethtool_ops enetc_pf_ethtool_ops = {
.get_sset_count = enetc_get_sset_count,
.get_strings = enetc_get_strings,
.get_ethtool_stats = enetc_get_ethtool_stats,
+ .get_pause_stats = enetc_get_pause_stats,
+ .get_rmon_stats = enetc_get_rmon_stats,
+ .get_eth_ctrl_stats = enetc_get_eth_ctrl_stats,
+ .get_eth_mac_stats = enetc_get_eth_mac_stats,
.get_rxnfc = enetc_get_rxnfc,
.set_rxnfc = enetc_set_rxnfc,
.get_rxfh_key_size = enetc_get_rxfh_key_size,
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
index 647c87f73bf7..18ca1f42b1f7 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
@@ -276,58 +276,60 @@ enum enetc_bdr_type {TX, RX};
#define ENETC_PFMCAPR 0x1b38
#define ENETC_PFMCAPR_MSK GENMASK(15, 0)
-/* MAC counters */
-#define ENETC_PM0_REOCT 0x8100
-#define ENETC_PM0_RALN 0x8110
-#define ENETC_PM0_RXPF 0x8118
-#define ENETC_PM0_RFRM 0x8120
-#define ENETC_PM0_RFCS 0x8128
-#define ENETC_PM0_RVLAN 0x8130
-#define ENETC_PM0_RERR 0x8138
-#define ENETC_PM0_RUCA 0x8140
-#define ENETC_PM0_RMCA 0x8148
-#define ENETC_PM0_RBCA 0x8150
-#define ENETC_PM0_RDRP 0x8158
-#define ENETC_PM0_RPKT 0x8160
-#define ENETC_PM0_RUND 0x8168
-#define ENETC_PM0_R64 0x8170
-#define ENETC_PM0_R127 0x8178
-#define ENETC_PM0_R255 0x8180
-#define ENETC_PM0_R511 0x8188
-#define ENETC_PM0_R1023 0x8190
-#define ENETC_PM0_R1522 0x8198
-#define ENETC_PM0_R1523X 0x81A0
-#define ENETC_PM0_ROVR 0x81A8
-#define ENETC_PM0_RJBR 0x81B0
-#define ENETC_PM0_RFRG 0x81B8
-#define ENETC_PM0_RCNP 0x81C0
-#define ENETC_PM0_RDRNTP 0x81C8
-#define ENETC_PM0_TEOCT 0x8200
-#define ENETC_PM0_TOCT 0x8208
-#define ENETC_PM0_TCRSE 0x8210
-#define ENETC_PM0_TXPF 0x8218
-#define ENETC_PM0_TFRM 0x8220
-#define ENETC_PM0_TFCS 0x8228
-#define ENETC_PM0_TVLAN 0x8230
-#define ENETC_PM0_TERR 0x8238
-#define ENETC_PM0_TUCA 0x8240
-#define ENETC_PM0_TMCA 0x8248
-#define ENETC_PM0_TBCA 0x8250
-#define ENETC_PM0_TPKT 0x8260
-#define ENETC_PM0_TUND 0x8268
-#define ENETC_PM0_T64 0x8270
-#define ENETC_PM0_T127 0x8278
-#define ENETC_PM0_T255 0x8280
-#define ENETC_PM0_T511 0x8288
-#define ENETC_PM0_T1023 0x8290
-#define ENETC_PM0_T1522 0x8298
-#define ENETC_PM0_T1523X 0x82A0
-#define ENETC_PM0_TCNP 0x82C0
-#define ENETC_PM0_TDFR 0x82D0
-#define ENETC_PM0_TMCOL 0x82D8
-#define ENETC_PM0_TSCOL 0x82E0
-#define ENETC_PM0_TLCOL 0x82E8
-#define ENETC_PM0_TECOL 0x82F0
+/* Port MAC counters: Port MAC 0 corresponds to the eMAC and
+ * Port MAC 1 to the pMAC.
+ */
+#define ENETC_PM_REOCT(mac) (0x8100 + 0x1000 * (mac))
+#define ENETC_PM_RALN(mac) (0x8110 + 0x1000 * (mac))
+#define ENETC_PM_RXPF(mac) (0x8118 + 0x1000 * (mac))
+#define ENETC_PM_RFRM(mac) (0x8120 + 0x1000 * (mac))
+#define ENETC_PM_RFCS(mac) (0x8128 + 0x1000 * (mac))
+#define ENETC_PM_RVLAN(mac) (0x8130 + 0x1000 * (mac))
+#define ENETC_PM_RERR(mac) (0x8138 + 0x1000 * (mac))
+#define ENETC_PM_RUCA(mac) (0x8140 + 0x1000 * (mac))
+#define ENETC_PM_RMCA(mac) (0x8148 + 0x1000 * (mac))
+#define ENETC_PM_RBCA(mac) (0x8150 + 0x1000 * (mac))
+#define ENETC_PM_RDRP(mac) (0x8158 + 0x1000 * (mac))
+#define ENETC_PM_RPKT(mac) (0x8160 + 0x1000 * (mac))
+#define ENETC_PM_RUND(mac) (0x8168 + 0x1000 * (mac))
+#define ENETC_PM_R64(mac) (0x8170 + 0x1000 * (mac))
+#define ENETC_PM_R127(mac) (0x8178 + 0x1000 * (mac))
+#define ENETC_PM_R255(mac) (0x8180 + 0x1000 * (mac))
+#define ENETC_PM_R511(mac) (0x8188 + 0x1000 * (mac))
+#define ENETC_PM_R1023(mac) (0x8190 + 0x1000 * (mac))
+#define ENETC_PM_R1522(mac) (0x8198 + 0x1000 * (mac))
+#define ENETC_PM_R1523X(mac) (0x81A0 + 0x1000 * (mac))
+#define ENETC_PM_ROVR(mac) (0x81A8 + 0x1000 * (mac))
+#define ENETC_PM_RJBR(mac) (0x81B0 + 0x1000 * (mac))
+#define ENETC_PM_RFRG(mac) (0x81B8 + 0x1000 * (mac))
+#define ENETC_PM_RCNP(mac) (0x81C0 + 0x1000 * (mac))
+#define ENETC_PM_RDRNTP(mac) (0x81C8 + 0x1000 * (mac))
+#define ENETC_PM_TEOCT(mac) (0x8200 + 0x1000 * (mac))
+#define ENETC_PM_TOCT(mac) (0x8208 + 0x1000 * (mac))
+#define ENETC_PM_TCRSE(mac) (0x8210 + 0x1000 * (mac))
+#define ENETC_PM_TXPF(mac) (0x8218 + 0x1000 * (mac))
+#define ENETC_PM_TFRM(mac) (0x8220 + 0x1000 * (mac))
+#define ENETC_PM_TFCS(mac) (0x8228 + 0x1000 * (mac))
+#define ENETC_PM_TVLAN(mac) (0x8230 + 0x1000 * (mac))
+#define ENETC_PM_TERR(mac) (0x8238 + 0x1000 * (mac))
+#define ENETC_PM_TUCA(mac) (0x8240 + 0x1000 * (mac))
+#define ENETC_PM_TMCA(mac) (0x8248 + 0x1000 * (mac))
+#define ENETC_PM_TBCA(mac) (0x8250 + 0x1000 * (mac))
+#define ENETC_PM_TPKT(mac) (0x8260 + 0x1000 * (mac))
+#define ENETC_PM_TUND(mac) (0x8268 + 0x1000 * (mac))
+#define ENETC_PM_T64(mac) (0x8270 + 0x1000 * (mac))
+#define ENETC_PM_T127(mac) (0x8278 + 0x1000 * (mac))
+#define ENETC_PM_T255(mac) (0x8280 + 0x1000 * (mac))
+#define ENETC_PM_T511(mac) (0x8288 + 0x1000 * (mac))
+#define ENETC_PM_T1023(mac) (0x8290 + 0x1000 * (mac))
+#define ENETC_PM_T1522(mac) (0x8298 + 0x1000 * (mac))
+#define ENETC_PM_T1523X(mac) (0x82A0 + 0x1000 * (mac))
+#define ENETC_PM_TCNP(mac) (0x82C0 + 0x1000 * (mac))
+#define ENETC_PM_TDFR(mac) (0x82D0 + 0x1000 * (mac))
+#define ENETC_PM_TMCOL(mac) (0x82D8 + 0x1000 * (mac))
+#define ENETC_PM_TSCOL(mac) (0x82E0 + 0x1000 * (mac))
+#define ENETC_PM_TLCOL(mac) (0x82E8 + 0x1000 * (mac))
+#define ENETC_PM_TECOL(mac) (0x82F0 + 0x1000 * (mac))
/* Port counters */
#define ENETC_PICDR(n) (0x0700 + (n) * 8) /* n = [0..3] */
@@ -943,13 +945,13 @@ static inline u32 enetc_usecs_to_cycles(u32 usecs)
}
/* port time gating control register */
-#define ENETC_QBV_PTGCR_OFFSET 0x11a00
-#define ENETC_QBV_TGE BIT(31)
-#define ENETC_QBV_TGPE BIT(30)
+#define ENETC_PTGCR 0x11a00
+#define ENETC_PTGCR_TGE BIT(31)
+#define ENETC_PTGCR_TGPE BIT(30)
/* Port time gating capability register */
-#define ENETC_QBV_PTGCAPR_OFFSET 0x11a08
-#define ENETC_QBV_MAX_GCL_LEN_MASK GENMASK(15, 0)
+#define ENETC_PTGCAPR 0x11a08
+#define ENETC_PTGCAPR_MAX_GCL_LEN_MASK GENMASK(15, 0)
/* Port time specific departure */
#define ENETC_PTCTSDR(n) (0x1210 + 4 * (n))
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index bb7750222691..bdf94335ee99 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -516,15 +516,34 @@ static void enetc_port_si_configure(struct enetc_si *si)
enetc_port_wr(hw, ENETC_PSIVLANFMR, ENETC_PSIVLANFMR_VS);
}
-static void enetc_configure_port_mac(struct enetc_hw *hw)
+void enetc_set_ptcmsdur(struct enetc_hw *hw, u32 *max_sdu)
{
int tc;
- enetc_port_wr(hw, ENETC_PM0_MAXFRM,
- ENETC_SET_MAXFRM(ENETC_RX_MAXFRM_SIZE));
+ for (tc = 0; tc < 8; tc++) {
+ u32 val = ENETC_MAC_MAXFRM_SIZE;
+
+ if (max_sdu[tc])
+ val = max_sdu[tc] + VLAN_ETH_HLEN;
+
+ enetc_port_wr(hw, ENETC_PTCMSDUR(tc), val);
+ }
+}
+
+void enetc_reset_ptcmsdur(struct enetc_hw *hw)
+{
+ int tc;
for (tc = 0; tc < 8; tc++)
enetc_port_wr(hw, ENETC_PTCMSDUR(tc), ENETC_MAC_MAXFRM_SIZE);
+}
+
+static void enetc_configure_port_mac(struct enetc_hw *hw)
+{
+ enetc_port_wr(hw, ENETC_PM0_MAXFRM,
+ ENETC_SET_MAXFRM(ENETC_RX_MAXFRM_SIZE));
+
+ enetc_reset_ptcmsdur(hw);
enetc_port_wr(hw, ENETC_PM0_CMD_CFG, ENETC_PM0_CMD_PHY_TX_EN |
ENETC_PM0_CMD_TXP | ENETC_PM0_PROMISC);
@@ -738,6 +757,8 @@ static int enetc_pf_setup_tc(struct net_device *ndev, enum tc_setup_type type,
void *type_data)
{
switch (type) {
+ case TC_QUERY_CAPS:
+ return enetc_qos_query_caps(ndev, type_data);
case TC_SETUP_QDISC_MQPRIO:
return enetc_setup_tc_mqprio(ndev, type_data);
case TC_SETUP_QDISC_TAPRIO:
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
index f8a2f02ce22d..e6416332ec79 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
@@ -7,18 +7,19 @@
#include
#include
#include
+#include
#include
static u16 enetc_get_max_gcl_len(struct enetc_hw *hw)
{
- return enetc_rd(hw, ENETC_QBV_PTGCAPR_OFFSET)
- & ENETC_QBV_MAX_GCL_LEN_MASK;
+ return enetc_rd(hw, ENETC_PTGCAPR) & ENETC_PTGCAPR_MAX_GCL_LEN_MASK;
}
void enetc_sched_speed_set(struct enetc_ndev_priv *priv, int speed)
{
+ struct enetc_hw *hw = &priv->si->hw;
u32 old_speed = priv->speed;
- u32 pspeed;
+ u32 pspeed, tmp;
if (speed == old_speed)
return;
@@ -39,16 +40,15 @@ void enetc_sched_speed_set(struct enetc_ndev_priv *priv, int speed)
}
priv->speed = speed;
- enetc_port_wr(&priv->si->hw, ENETC_PMR,
- (enetc_port_rd(&priv->si->hw, ENETC_PMR)
- & (~ENETC_PMR_PSPEED_MASK))
- | pspeed);
+ tmp = enetc_port_rd(hw, ENETC_PMR);
+ enetc_port_wr(hw, ENETC_PMR, (tmp & ~ENETC_PMR_PSPEED_MASK) | pspeed);
}
static int enetc_setup_taprio(struct net_device *ndev,
struct tc_taprio_qopt_offload *admin_conf)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
struct enetc_cbd cbd = {.cmd = 0};
struct tgs_gcl_conf *gcl_config;
struct tgs_gcl_data *gcl_data;
@@ -61,15 +61,14 @@ static int enetc_setup_taprio(struct net_device *ndev,
int err;
int i;
- if (admin_conf->num_entries > enetc_get_max_gcl_len(&priv->si->hw))
+ if (admin_conf->num_entries > enetc_get_max_gcl_len(hw))
return -EINVAL;
gcl_len = admin_conf->num_entries;
- tge = enetc_rd(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET);
+ tge = enetc_rd(hw, ENETC_PTGCR);
if (!admin_conf->enable) {
- enetc_wr(&priv->si->hw,
- ENETC_QBV_PTGCR_OFFSET,
- tge & (~ENETC_QBV_TGE));
+ enetc_wr(hw, ENETC_PTGCR, tge & ~ENETC_PTGCR_TGE);
+ enetc_reset_ptcmsdur(hw);
priv->active_offloads &= ~ENETC_F_QBV;
@@ -117,27 +116,28 @@ static int enetc_setup_taprio(struct net_device *ndev,
cbd.cls = BDCR_CMD_PORT_GCL;
cbd.status_flags = 0;
- enetc_wr(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET,
- tge | ENETC_QBV_TGE);
+ enetc_wr(hw, ENETC_PTGCR, tge | ENETC_PTGCR_TGE);
err = enetc_send_cmd(priv->si, &cbd);
if (err)
- enetc_wr(&priv->si->hw,
- ENETC_QBV_PTGCR_OFFSET,
- tge & (~ENETC_QBV_TGE));
+ enetc_wr(hw, ENETC_PTGCR, tge & ~ENETC_PTGCR_TGE);
enetc_cbd_free_data_mem(priv->si, data_size, tmp, &dma);
- if (!err)
- priv->active_offloads |= ENETC_F_QBV;
+ if (err)
+ return err;
- return err;
+ enetc_set_ptcmsdur(hw, admin_conf->max_sdu);
+ priv->active_offloads |= ENETC_F_QBV;
+
+ return 0;
}
int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data)
{
struct tc_taprio_qopt_offload *taprio = type_data;
struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
int err;
int i;
@@ -147,16 +147,14 @@ int enetc_setup_tc_taprio(struct net_device *ndev, void *type_data)
return -EBUSY;
for (i = 0; i < priv->num_tx_rings; i++)
- enetc_set_bdr_prio(&priv->si->hw,
- priv->tx_ring[i]->index,
+ enetc_set_bdr_prio(hw, priv->tx_ring[i]->index,
taprio->enable ? i : 0);
err = enetc_setup_taprio(ndev, taprio);
if (err)
for (i = 0; i < priv->num_tx_rings; i++)
- enetc_set_bdr_prio(&priv->si->hw,
- priv->tx_ring[i]->index,
+ enetc_set_bdr_prio(hw, priv->tx_ring[i]->index,
taprio->enable ? 0 : i);
return err;
@@ -178,7 +176,7 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data)
struct tc_cbs_qopt_offload *cbs = type_data;
u32 port_transmit_rate = priv->speed;
u8 tc_nums = netdev_get_num_tc(ndev);
- struct enetc_si *si = priv->si;
+ struct enetc_hw *hw = &priv->si->hw;
u32 hi_credit_bit, hi_credit_reg;
u32 max_interference_size;
u32 port_frame_max_size;
@@ -199,15 +197,15 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data)
* lower than this TC have been disabled.
*/
if (tc == prio_top &&
- enetc_get_cbs_enable(&si->hw, prio_next)) {
+ enetc_get_cbs_enable(hw, prio_next)) {
dev_err(&ndev->dev,
"Disable TC%d before disable TC%d\n",
prio_next, tc);
return -EINVAL;
}
- enetc_port_wr(&si->hw, ENETC_PTCCBSR1(tc), 0);
- enetc_port_wr(&si->hw, ENETC_PTCCBSR0(tc), 0);
+ enetc_port_wr(hw, ENETC_PTCCBSR1(tc), 0);
+ enetc_port_wr(hw, ENETC_PTCCBSR0(tc), 0);
return 0;
}
@@ -224,13 +222,13 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data)
* higher than this TC have been enabled.
*/
if (tc == prio_next) {
- if (!enetc_get_cbs_enable(&si->hw, prio_top)) {
+ if (!enetc_get_cbs_enable(hw, prio_top)) {
dev_err(&ndev->dev,
"Enable TC%d first before enable TC%d\n",
prio_top, prio_next);
return -EINVAL;
}
- bw_sum += enetc_get_cbs_bw(&si->hw, prio_top);
+ bw_sum += enetc_get_cbs_bw(hw, prio_top);
}
if (bw_sum + bw >= 100) {
@@ -239,7 +237,7 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data)
return -EINVAL;
}
- enetc_port_rd(&si->hw, ENETC_PTCMSDUR(tc));
+ enetc_port_rd(hw, ENETC_PTCMSDUR(tc));
/* For top prio TC, the max_interfrence_size is maxSizedFrame.
*
@@ -259,8 +257,8 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data)
u32 m0, ma, r0, ra;
m0 = port_frame_max_size * 8;
- ma = enetc_port_rd(&si->hw, ENETC_PTCMSDUR(prio_top)) * 8;
- ra = enetc_get_cbs_bw(&si->hw, prio_top) *
+ ma = enetc_port_rd(hw, ENETC_PTCMSDUR(prio_top)) * 8;
+ ra = enetc_get_cbs_bw(hw, prio_top) *
port_transmit_rate * 10000ULL;
r0 = port_transmit_rate * 1000000ULL;
max_interference_size = m0 + ma +
@@ -280,10 +278,10 @@ int enetc_setup_tc_cbs(struct net_device *ndev, void *type_data)
hi_credit_reg = (u32)div_u64((ENETC_CLK * 100ULL) * hi_credit_bit,
port_transmit_rate * 1000000ULL);
- enetc_port_wr(&si->hw, ENETC_PTCCBSR1(tc), hi_credit_reg);
+ enetc_port_wr(hw, ENETC_PTCCBSR1(tc), hi_credit_reg);
/* Set bw register and enable this traffic class */
- enetc_port_wr(&si->hw, ENETC_PTCCBSR0(tc), bw | ENETC_CBSE);
+ enetc_port_wr(hw, ENETC_PTCCBSR0(tc), bw | ENETC_CBSE);
return 0;
}
@@ -293,6 +291,7 @@ int enetc_setup_tc_txtime(struct net_device *ndev, void *type_data)
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct tc_etf_qopt_offload *qopt = type_data;
u8 tc_nums = netdev_get_num_tc(ndev);
+ struct enetc_hw *hw = &priv->si->hw;
int tc;
if (!tc_nums)
@@ -304,12 +303,11 @@ int enetc_setup_tc_txtime(struct net_device *ndev, void *type_data)
return -EINVAL;
/* TSD and Qbv are mutually exclusive in hardware */
- if (enetc_rd(&priv->si->hw, ENETC_QBV_PTGCR_OFFSET) & ENETC_QBV_TGE)
+ if (enetc_rd(hw, ENETC_PTGCR) & ENETC_PTGCR_TGE)
return -EBUSY;
priv->tx_ring[tc]->tsd_enable = qopt->enable;
- enetc_port_wr(&priv->si->hw, ENETC_PTCTSDR(tc),
- qopt->enable ? ENETC_TSDE : 0);
+ enetc_port_wr(hw, ENETC_PTCTSDR(tc), qopt->enable ? ENETC_TSDE : 0);
return 0;
}
@@ -1601,3 +1599,23 @@ int enetc_setup_tc_psfp(struct net_device *ndev, void *type_data)
return 0;
}
+
+int enetc_qos_query_caps(struct net_device *ndev, void *type_data)
+{
+ struct enetc_ndev_priv *priv = netdev_priv(ndev);
+ struct tc_query_caps_base *base = type_data;
+ struct enetc_si *si = priv->si;
+
+ switch (base->type) {
+ case TC_SETUP_QDISC_TAPRIO: {
+ struct tc_taprio_caps *caps = base->caps;
+
+ if (si->hw_features & ENETC_SI_F_QBV)
+ caps->supports_queue_max_sdu = true;
+
+ return 0;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
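
On the stack side, a qdisc discovers this capability by issuing ndo_setup_tc()
with TC_QUERY_CAPS before attempting the offload. A rough sketch of the calling
pattern (the wrapper function is illustrative and not part of the patch;
tc_query_caps_base and tc_taprio_caps are the existing structures from the
pkt_cls/pkt_sched headers):

/* Illustrative caller: ask a netdev whether its taprio offload honours
 * per-queue max SDU, as advertised by enetc_qos_query_caps() above.
 */
static int example_query_taprio_caps(struct net_device *dev,
				     struct tc_taprio_caps *caps)
{
	struct tc_query_caps_base base = {
		.type = TC_SETUP_QDISC_TAPRIO,
		.caps = caps,
	};

	if (!dev->netdev_ops->ndo_setup_tc)
		return -EOPNOTSUPP;

	return dev->netdev_ops->ndo_setup_tc(dev, TC_QUERY_CAPS, &base);
}
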
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index a5fed00cb971..33f84a30e167 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -17,8 +17,11 @@
#include
#include
#include
+#include
#include
#include
+#include
+#include
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
@@ -344,8 +347,11 @@ struct bufdesc_ex {
* the skbuffer directly.
*/
+#define FEC_ENET_XDP_HEADROOM (XDP_PACKET_HEADROOM)
+
#define FEC_ENET_RX_PAGES 256
-#define FEC_ENET_RX_FRSIZE 2048
+#define FEC_ENET_RX_FRSIZE (PAGE_SIZE - FEC_ENET_XDP_HEADROOM \
+ - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define FEC_ENET_RX_FRPPG (PAGE_SIZE / FEC_ENET_RX_FRSIZE)
#define RX_RING_SIZE (FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES)
#define FEC_ENET_TX_FRSIZE 2048
@@ -515,6 +521,12 @@ struct bufdesc_prop {
unsigned char dsize_log2;
};
+struct fec_enet_priv_txrx_info {
+ int offset;
+ struct page *page;
+ struct sk_buff *skb;
+};
+
struct fec_enet_priv_tx_q {
struct bufdesc_prop bd;
unsigned char *tx_bounce[TX_RING_SIZE];
@@ -530,7 +542,14 @@ struct fec_enet_priv_tx_q {
struct fec_enet_priv_rx_q {
struct bufdesc_prop bd;
- struct sk_buff *rx_skbuff[RX_RING_SIZE];
+ struct fec_enet_priv_txrx_info rx_skb_info[RX_RING_SIZE];
+
+ /* page_pool */
+ struct page_pool *page_pool;
+ struct xdp_rxq_info xdp_rxq;
+
+ /* rx queue number, in the range 0-7 */
+ u8 id;
};
struct fec_stop_mode_gpr {
@@ -583,6 +602,7 @@ struct fec_enet_private {
struct device_node *phy_node;
bool rgmii_txc_dly;
bool rgmii_rxc_dly;
+ bool rpm_active;
int link;
int full_duplex;
int speed;
@@ -639,6 +659,8 @@ struct fec_enet_private {
int pps_enable;
unsigned int next_counter;
+ struct imx_sc_ipc *ipc_handle;
+
u64 ethtool_stats[];
};
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 92c55e1a5507..98d5cd313fdd 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -66,6 +66,8 @@
#include
#include
#include
+#include
+#include
#include
@@ -156,6 +158,13 @@ static const struct fec_devinfo fec_imx8qm_info = {
FEC_QUIRK_DELAYED_CLKS_SUPPORT,
};
+static const struct fec_devinfo fec_s32v234_info = {
+ .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
+ FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
+ FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
+ FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE,
+};
+
static struct platform_device_id fec_devtype[] = {
{
/* keep it for coldfire */
@@ -188,6 +197,9 @@ static struct platform_device_id fec_devtype[] = {
}, {
.name = "imx8qm-fec",
.driver_data = (kernel_ulong_t)&fec_imx8qm_info,
+ }, {
+ .name = "s32v234-fec",
+ .driver_data = (kernel_ulong_t)&fec_s32v234_info,
}, {
/* sentinel */
}
@@ -204,6 +216,7 @@ enum imx_fec_type {
IMX6UL_FEC,
IMX8MQ_FEC,
IMX8QM_FEC,
+ S32V234_FEC,
};
static const struct of_device_id fec_dt_ids[] = {
@@ -216,6 +229,7 @@ static const struct of_device_id fec_dt_ids[] = {
{ .compatible = "fsl,imx6ul-fec", .data = &fec_devtype[IMX6UL_FEC], },
{ .compatible = "fsl,imx8mq-fec", .data = &fec_devtype[IMX8MQ_FEC], },
{ .compatible = "fsl,imx8qm-fec", .data = &fec_devtype[IMX8QM_FEC], },
+ { .compatible = "fsl,s32v234-fec", .data = &fec_devtype[S32V234_FEC], },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fec_dt_ids);
@@ -410,6 +424,48 @@ fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
return 0;
}
+static int
+fec_enet_create_page_pool(struct fec_enet_private *fep,
+ struct fec_enet_priv_rx_q *rxq, int size)
+{
+ struct page_pool_params pp_params = {
+ .order = 0,
+ .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+ .pool_size = size,
+ .nid = dev_to_node(&fep->pdev->dev),
+ .dev = &fep->pdev->dev,
+ .dma_dir = DMA_FROM_DEVICE,
+ .offset = FEC_ENET_XDP_HEADROOM,
+ .max_len = FEC_ENET_RX_FRSIZE,
+ };
+ int err;
+
+ rxq->page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(rxq->page_pool)) {
+ err = PTR_ERR(rxq->page_pool);
+ rxq->page_pool = NULL;
+ return err;
+ }
+
+ err = xdp_rxq_info_reg(&rxq->xdp_rxq, fep->netdev, rxq->id, 0);
+ if (err < 0)
+ goto err_free_pp;
+
+ err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
+ rxq->page_pool);
+ if (err)
+ goto err_unregister_rxq;
+
+ return 0;
+
+err_unregister_rxq:
+ xdp_rxq_info_unreg(&rxq->xdp_rxq);
+err_free_pp:
+ page_pool_destroy(rxq->page_pool);
+ rxq->page_pool = NULL;
+ return err;
+}
+
static struct bufdesc *
fec_enet_txq_submit_frag_skb(struct fec_enet_priv_tx_q *txq,
struct sk_buff *skb,
@@ -1168,6 +1224,34 @@ fec_restart(struct net_device *ndev)
}
+static int fec_enet_ipc_handle_init(struct fec_enet_private *fep)
+{
+ if (!(of_machine_is_compatible("fsl,imx8qm") ||
+ of_machine_is_compatible("fsl,imx8qxp") ||
+ of_machine_is_compatible("fsl,imx8dxl")))
+ return 0;
+
+ return imx_scu_get_handle(&fep->ipc_handle);
+}
+
+static void fec_enet_ipg_stop_set(struct fec_enet_private *fep, bool enabled)
+{
+ struct device_node *np = fep->pdev->dev.of_node;
+ u32 rsrc_id, val;
+ int idx;
+
+ if (!np || !fep->ipc_handle)
+ return;
+
+ idx = of_alias_get_id(np, "ethernet");
+ if (idx < 0)
+ idx = 0;
+ rsrc_id = idx ? IMX_SC_R_ENET_1 : IMX_SC_R_ENET_0;
+
+ val = enabled ? 1 : 0;
+ imx_sc_misc_set_control(fep->ipc_handle, rsrc_id, IMX_SC_C_IPG_STOP, val);
+}
+
static void fec_enet_stop_mode(struct fec_enet_private *fep, bool enabled)
{
struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
@@ -1183,6 +1267,8 @@ static void fec_enet_stop_mode(struct fec_enet_private *fep, bool enabled)
BIT(stop_gpr->bit), 0);
} else if (pdata && pdata->sleep_mode_enable) {
pdata->sleep_mode_enable(enabled);
+ } else {
+ fec_enet_ipg_stop_set(fep, enabled);
}
}
@@ -1408,7 +1494,7 @@ static void fec_enet_tx(struct net_device *ndev)
fec_enet_tx_queue(ndev, i);
}
-static int
+static int __maybe_unused
fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff *skb)
{
struct fec_enet_private *fep = netdev_priv(ndev);
@@ -1428,8 +1514,9 @@ fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff
return 0;
}
-static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
- struct bufdesc *bdp, u32 length, bool swap)
+static bool __maybe_unused
+fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
+ struct bufdesc *bdp, u32 length, bool swap)
{
struct fec_enet_private *fep = netdev_priv(ndev);
struct sk_buff *new_skb;
@@ -1454,6 +1541,21 @@ static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
return true;
}
+static void fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq,
+ struct bufdesc *bdp, int index)
+{
+ struct page *new_page;
+ dma_addr_t phys_addr;
+
+ new_page = page_pool_dev_alloc_pages(rxq->page_pool);
+ WARN_ON(!new_page);
+ rxq->rx_skb_info[index].page = new_page;
+
+ rxq->rx_skb_info[index].offset = FEC_ENET_XDP_HEADROOM;
+ phys_addr = page_pool_get_dma_addr(new_page) + FEC_ENET_XDP_HEADROOM;
+ bdp->cbd_bufaddr = cpu_to_fec32(phys_addr);
+}
+
/* During a receive, the bd_rx.cur points to the current incoming buffer.
* When we update through the ring, if the next incoming buffer has
* not been given to the system, we just set the empty indicator,
@@ -1466,7 +1568,6 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
struct fec_enet_priv_rx_q *rxq;
struct bufdesc *bdp;
unsigned short status;
- struct sk_buff *skb_new = NULL;
struct sk_buff *skb;
ushort pkt_len;
__u8 *data;
@@ -1475,8 +1576,8 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
bool vlan_packet_rcvd = false;
u16 vlan_tag;
int index = 0;
- bool is_copybreak;
bool need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME;
+ struct page *page;
#ifdef CONFIG_M532x
flush_cache_all();
@@ -1528,31 +1629,25 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
ndev->stats.rx_bytes += pkt_len;
index = fec_enet_get_bd_index(bdp, &rxq->bd);
- skb = rxq->rx_skbuff[index];
+ page = rxq->rx_skb_info[index].page;
+ dma_sync_single_for_cpu(&fep->pdev->dev,
+ fec32_to_cpu(bdp->cbd_bufaddr),
+ pkt_len,
+ DMA_FROM_DEVICE);
+ prefetch(page_address(page));
+ fec_enet_update_cbd(rxq, bdp, index);
/* The packet length includes FCS, but we don't want to
* include that when passing upstream as it messes up
* bridging applications.
*/
- is_copybreak = fec_enet_copybreak(ndev, &skb, bdp, pkt_len - 4,
- need_swap);
- if (!is_copybreak) {
- skb_new = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
- if (unlikely(!skb_new)) {
- ndev->stats.rx_dropped++;
- goto rx_processing_done;
- }
- dma_unmap_single(&fep->pdev->dev,
- fec32_to_cpu(bdp->cbd_bufaddr),
- FEC_ENET_RX_FRSIZE - fep->rx_align,
- DMA_FROM_DEVICE);
- }
-
- prefetch(skb->data - NET_IP_ALIGN);
+ skb = build_skb(page_address(page), PAGE_SIZE);
+ skb_reserve(skb, FEC_ENET_XDP_HEADROOM);
skb_put(skb, pkt_len - 4);
+ skb_mark_for_recycle(skb);
data = skb->data;
- if (!is_copybreak && need_swap)
+ if (need_swap)
swap_buffer(data, pkt_len);
#if !defined(CONFIG_M5272)
@@ -1607,16 +1702,6 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
skb_record_rx_queue(skb, queue_id);
napi_gro_receive(&fep->napi, skb);
- if (is_copybreak) {
- dma_sync_single_for_device(&fep->pdev->dev,
- fec32_to_cpu(bdp->cbd_bufaddr),
- FEC_ENET_RX_FRSIZE - fep->rx_align,
- DMA_FROM_DEVICE);
- } else {
- rxq->rx_skbuff[index] = skb_new;
- fec_enet_new_rxbdp(ndev, bdp, skb_new);
- }
-
rx_processing_done:
/* Clear the status flags for this buffer */
status &= ~BD_ENET_RX_STATS;
@@ -2105,13 +2190,13 @@ static int fec_enet_mii_probe(struct net_device *ndev)
continue;
if (dev_id--)
continue;
- strlcpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE);
+ strscpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE);
break;
}
if (phy_id >= PHY_MAX_ADDR) {
netdev_info(ndev, "no PHY, assuming direct connection to switch\n");
- strlcpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE);
+ strscpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE);
phy_id = 0;
}
@@ -2295,9 +2380,9 @@ static void fec_enet_get_drvinfo(struct net_device *ndev,
{
struct fec_enet_private *fep = netdev_priv(ndev);
- strlcpy(info->driver, fep->pdev->dev.driver->name,
+ strscpy(info->driver, fep->pdev->dev.driver->name,
sizeof(info->driver));
- strlcpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info));
+ strscpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info));
}
static int fec_enet_get_regs_len(struct net_device *ndev)
@@ -2960,26 +3045,19 @@ static void fec_enet_free_buffers(struct net_device *ndev)
struct fec_enet_private *fep = netdev_priv(ndev);
unsigned int i;
struct sk_buff *skb;
- struct bufdesc *bdp;
struct fec_enet_priv_tx_q *txq;
struct fec_enet_priv_rx_q *rxq;
unsigned int q;
for (q = 0; q < fep->num_rx_queues; q++) {
rxq = fep->rx_queue[q];
- bdp = rxq->bd.base;
- for (i = 0; i < rxq->bd.ring_size; i++) {
- skb = rxq->rx_skbuff[i];
- rxq->rx_skbuff[i] = NULL;
- if (skb) {
- dma_unmap_single(&fep->pdev->dev,
- fec32_to_cpu(bdp->cbd_bufaddr),
- FEC_ENET_RX_FRSIZE - fep->rx_align,
- DMA_FROM_DEVICE);
- dev_kfree_skb(skb);
- }
- bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
- }
+ for (i = 0; i < rxq->bd.ring_size; i++)
+ page_pool_release_page(rxq->page_pool, rxq->rx_skb_info[i].page);
+
+ if (xdp_rxq_info_is_reg(&rxq->xdp_rxq))
+ xdp_rxq_info_unreg(&rxq->xdp_rxq);
+ page_pool_destroy(rxq->page_pool);
+ rxq->page_pool = NULL;
}
for (q = 0; q < fep->num_tx_queues; q++) {
@@ -3069,24 +3147,31 @@ static int
fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
{
struct fec_enet_private *fep = netdev_priv(ndev);
- unsigned int i;
- struct sk_buff *skb;
- struct bufdesc *bdp;
struct fec_enet_priv_rx_q *rxq;
+ dma_addr_t phys_addr;
+ struct bufdesc *bdp;
+ struct page *page;
+ int i, err;
rxq = fep->rx_queue[queue];
bdp = rxq->bd.base;
+
+ err = fec_enet_create_page_pool(fep, rxq, rxq->bd.ring_size);
+ if (err < 0) {
+ netdev_err(ndev, "%s failed queue %d (%d)\n", __func__, queue, err);
+ return err;
+ }
+
for (i = 0; i < rxq->bd.ring_size; i++) {
- skb = __netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE, GFP_KERNEL);
- if (!skb)
+ page = page_pool_dev_alloc_pages(rxq->page_pool);
+ if (!page)
goto err_alloc;
- if (fec_enet_new_rxbdp(ndev, bdp, skb)) {
- dev_kfree_skb(skb);
- goto err_alloc;
- }
+ phys_addr = page_pool_get_dma_addr(page) + FEC_ENET_XDP_HEADROOM;
+ bdp->cbd_bufaddr = cpu_to_fec32(phys_addr);
- rxq->rx_skbuff[i] = skb;
+ rxq->rx_skb_info[i].page = page;
+ rxq->rx_skb_info[i].offset = FEC_ENET_XDP_HEADROOM;
bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
if (fep->bufdesc_ex) {
@@ -3566,7 +3651,7 @@ static int fec_enet_init(struct net_device *ndev)
ndev->ethtool_ops = &fec_enet_ethtool_ops;
writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
- netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, NAPI_POLL_WEIGHT);
+ netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi);
if (fep->quirks & FEC_QUIRK_HAS_VLAN)
/* enable hw VLAN support */
@@ -3824,6 +3909,10 @@ fec_probe(struct platform_device *pdev)
!of_property_read_bool(np, "fsl,err006687-workaround-present"))
fep->quirks |= FEC_QUIRK_ERR006687;
+ ret = fec_enet_ipc_handle_init(fep);
+ if (ret)
+ goto failed_ipc_init;
+
if (of_get_property(np, "fsl,magic-packet", NULL))
fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET;
@@ -4021,6 +4110,7 @@ failed_rgmii_delay:
of_phy_deregister_fixed_link(np);
of_node_put(phy_node);
failed_stop_mode:
+failed_ipc_init:
failed_phy:
dev_id--;
failed_ioremap:
@@ -4065,6 +4155,7 @@ static int __maybe_unused fec_suspend(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct fec_enet_private *fep = netdev_priv(ndev);
+ int ret;
rtnl_lock();
if (netif_running(ndev)) {
@@ -4089,6 +4180,15 @@ static int __maybe_unused fec_suspend(struct device *dev)
}
/* It's safe to disable clocks since interrupts are masked */
fec_enet_clk_enable(ndev, false);
+
+ fep->rpm_active = !pm_runtime_status_suspended(dev);
+ if (fep->rpm_active) {
+ ret = pm_runtime_force_suspend(dev);
+ if (ret < 0) {
+ rtnl_unlock();
+ return ret;
+ }
+ }
}
rtnl_unlock();
@@ -4119,6 +4219,9 @@ static int __maybe_unused fec_resume(struct device *dev)
rtnl_lock();
if (netif_running(ndev)) {
+ if (fep->rpm_active)
+ pm_runtime_force_resume(dev);
+
ret = fec_enet_clk_enable(ndev, true);
if (ret) {
rtnl_unlock();
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index 3dc3c0b626c2..cffd9ad499dd 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -578,7 +578,7 @@ void fec_ptp_init(struct platform_device *pdev, int irq_idx)
int ret;
fep->ptp_caps.owner = THIS_MODULE;
- strlcpy(fep->ptp_caps.name, "fec ptp", sizeof(fep->ptp_caps.name));
+ strscpy(fep->ptp_caps.name, "fec ptp", sizeof(fep->ptp_caps.name));
fep->ptp_caps.max_adj = 250000000;
fep->ptp_caps.n_alarm = 0;
diff --git a/drivers/net/ethernet/freescale/fman/fman.c b/drivers/net/ethernet/freescale/fman/fman.c
index 8f0db61cb1f6..9d85fb136e34 100644
--- a/drivers/net/ethernet/freescale/fman/fman.c
+++ b/drivers/net/ethernet/freescale/fman/fman.c
@@ -1,34 +1,7 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
/*
- * Copyright 2008-2015 Freescale Semiconductor Inc.
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
* Copyright 2020 NXP
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/net/ethernet/freescale/fman/fman.h b/drivers/net/ethernet/freescale/fman/fman.h
index f2ede1360f03..2ea575a46675 100644
--- a/drivers/net/ethernet/freescale/fman/fman.h
+++ b/drivers/net/ethernet/freescale/fman/fman.h
@@ -1,34 +1,7 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
/*
- * Copyright 2008-2015 Freescale Semiconductor Inc.
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
* Copyright 2020 NXP
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __FM_H
diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
index 1950a8936bc0..6617932fd3fd 100644
--- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c
+++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
@@ -1,39 +1,13 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
/*
- * Copyright 2008-2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include "fman_dtsec.h"
#include "fman.h"
+#include "mac.h"
#include
#include
@@ -327,7 +301,7 @@ struct fman_mac {
/* Ethernet physical interface */
phy_interface_t phy_if;
u16 max_speed;
- void *dev_id; /* device cookie used by the exception cbs */
+ struct mac_device *dev_id; /* device cookie used by the exception cbs */
fman_mac_exception_cb *exception_cb;
fman_mac_exception_cb *event_cb;
/* Number of individual addresses in registers for this station */
@@ -840,73 +814,45 @@ static void free_init_resources(struct fman_mac *dtsec)
dtsec->unicast_addr_hash = NULL;
}
-int dtsec_cfg_max_frame_len(struct fman_mac *dtsec, u16 new_val)
-{
- if (is_init_done(dtsec->dtsec_drv_param))
- return -EINVAL;
-
- dtsec->dtsec_drv_param->maximum_frame = new_val;
-
- return 0;
-}
-
-int dtsec_cfg_pad_and_crc(struct fman_mac *dtsec, bool new_val)
-{
- if (is_init_done(dtsec->dtsec_drv_param))
- return -EINVAL;
-
- dtsec->dtsec_drv_param->tx_pad_crc = new_val;
-
- return 0;
-}
-
-static void graceful_start(struct fman_mac *dtsec, enum comm_mode mode)
+static void graceful_start(struct fman_mac *dtsec)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
- if (mode & COMM_MODE_TX)
- iowrite32be(ioread32be(&regs->tctrl) &
- ~TCTRL_GTS, &regs->tctrl);
- if (mode & COMM_MODE_RX)
- iowrite32be(ioread32be(&regs->rctrl) &
- ~RCTRL_GRS, &regs->rctrl);
+ iowrite32be(ioread32be(&regs->tctrl) & ~TCTRL_GTS, &regs->tctrl);
+ iowrite32be(ioread32be(&regs->rctrl) & ~RCTRL_GRS, &regs->rctrl);
}
-static void graceful_stop(struct fman_mac *dtsec, enum comm_mode mode)
+static void graceful_stop(struct fman_mac *dtsec)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
u32 tmp;
/* Graceful stop - Assert the graceful Rx stop bit */
- if (mode & COMM_MODE_RX) {
- tmp = ioread32be(&regs->rctrl) | RCTRL_GRS;
- iowrite32be(tmp, &regs->rctrl);
+ tmp = ioread32be(&regs->rctrl) | RCTRL_GRS;
+ iowrite32be(tmp, &regs->rctrl);
- if (dtsec->fm_rev_info.major == 2) {
- /* Workaround for dTSEC Errata A002 */
- usleep_range(100, 200);
- } else {
- /* Workaround for dTSEC Errata A004839 */
- usleep_range(10, 50);
- }
+ if (dtsec->fm_rev_info.major == 2) {
+ /* Workaround for dTSEC Errata A002 */
+ usleep_range(100, 200);
+ } else {
+ /* Workaround for dTSEC Errata A004839 */
+ usleep_range(10, 50);
}
/* Graceful stop - Assert the graceful Tx stop bit */
- if (mode & COMM_MODE_TX) {
- if (dtsec->fm_rev_info.major == 2) {
- /* dTSEC Errata A004: Do not use TCTRL[GTS]=1 */
- pr_debug("GTS not supported due to DTSEC_A004 Errata.\n");
- } else {
- tmp = ioread32be(&regs->tctrl) | TCTRL_GTS;
- iowrite32be(tmp, &regs->tctrl);
+ if (dtsec->fm_rev_info.major == 2) {
+ /* dTSEC Errata A004: Do not use TCTRL[GTS]=1 */
+ pr_debug("GTS not supported due to DTSEC_A004 Errata.\n");
+ } else {
+ tmp = ioread32be(&regs->tctrl) | TCTRL_GTS;
+ iowrite32be(tmp, &regs->tctrl);
- /* Workaround for dTSEC Errata A0012, A0014 */
- usleep_range(10, 50);
- }
+ /* Workaround for dTSEC Errata A0012, A0014 */
+ usleep_range(10, 50);
}
}
-int dtsec_enable(struct fman_mac *dtsec, enum comm_mode mode)
+static int dtsec_enable(struct fman_mac *dtsec)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
u32 tmp;
@@ -916,58 +862,42 @@ int dtsec_enable(struct fman_mac *dtsec, enum comm_mode mode)
/* Enable */
tmp = ioread32be(&regs->maccfg1);
- if (mode & COMM_MODE_RX)
- tmp |= MACCFG1_RX_EN;
- if (mode & COMM_MODE_TX)
- tmp |= MACCFG1_TX_EN;
-
+ tmp |= MACCFG1_RX_EN | MACCFG1_TX_EN;
iowrite32be(tmp, &regs->maccfg1);
/* Graceful start - clear the graceful Rx/Tx stop bit */
- graceful_start(dtsec, mode);
+ graceful_start(dtsec);
return 0;
}
-int dtsec_disable(struct fman_mac *dtsec, enum comm_mode mode)
+static void dtsec_disable(struct fman_mac *dtsec)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
u32 tmp;
- if (!is_init_done(dtsec->dtsec_drv_param))
- return -EINVAL;
+ WARN_ON_ONCE(!is_init_done(dtsec->dtsec_drv_param));
/* Graceful stop - Assert the graceful Rx/Tx stop bit */
- graceful_stop(dtsec, mode);
+ graceful_stop(dtsec);
tmp = ioread32be(&regs->maccfg1);
- if (mode & COMM_MODE_RX)
- tmp &= ~MACCFG1_RX_EN;
- if (mode & COMM_MODE_TX)
- tmp &= ~MACCFG1_TX_EN;
-
+ tmp &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
iowrite32be(tmp, &regs->maccfg1);
-
- return 0;
}
-int dtsec_set_tx_pause_frames(struct fman_mac *dtsec,
- u8 __maybe_unused priority,
- u16 pause_time, u16 __maybe_unused thresh_time)
+static int dtsec_set_tx_pause_frames(struct fman_mac *dtsec,
+ u8 __maybe_unused priority,
+ u16 pause_time,
+ u16 __maybe_unused thresh_time)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
- enum comm_mode mode = COMM_MODE_NONE;
u32 ptv = 0;
if (!is_init_done(dtsec->dtsec_drv_param))
return -EINVAL;
- if ((ioread32be(&regs->rctrl) & RCTRL_GRS) == 0)
- mode |= COMM_MODE_RX;
- if ((ioread32be(&regs->tctrl) & TCTRL_GTS) == 0)
- mode |= COMM_MODE_TX;
-
- graceful_stop(dtsec, mode);
+ graceful_stop(dtsec);
if (pause_time) {
/* FM_BAD_TX_TS_IN_B_2_B_ERRATA_DTSEC_A003 Errata workaround */
@@ -989,26 +919,20 @@ int dtsec_set_tx_pause_frames(struct fman_mac *dtsec,
iowrite32be(ioread32be(&regs->maccfg1) & ~MACCFG1_TX_FLOW,
&regs->maccfg1);
- graceful_start(dtsec, mode);
+ graceful_start(dtsec);
return 0;
}
-int dtsec_accept_rx_pause_frames(struct fman_mac *dtsec, bool en)
+static int dtsec_accept_rx_pause_frames(struct fman_mac *dtsec, bool en)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
- enum comm_mode mode = COMM_MODE_NONE;
u32 tmp;
if (!is_init_done(dtsec->dtsec_drv_param))
return -EINVAL;
- if ((ioread32be(&regs->rctrl) & RCTRL_GRS) == 0)
- mode |= COMM_MODE_RX;
- if ((ioread32be(&regs->tctrl) & TCTRL_GTS) == 0)
- mode |= COMM_MODE_TX;
-
- graceful_stop(dtsec, mode);
+ graceful_stop(dtsec);
tmp = ioread32be(&regs->maccfg1);
if (en)
@@ -1017,25 +941,18 @@ int dtsec_accept_rx_pause_frames(struct fman_mac *dtsec, bool en)
tmp &= ~MACCFG1_RX_FLOW;
iowrite32be(tmp, &regs->maccfg1);
- graceful_start(dtsec, mode);
+ graceful_start(dtsec);
return 0;
}
-int dtsec_modify_mac_address(struct fman_mac *dtsec, const enet_addr_t *enet_addr)
+static int dtsec_modify_mac_address(struct fman_mac *dtsec,
+ const enet_addr_t *enet_addr)
{
- struct dtsec_regs __iomem *regs = dtsec->regs;
- enum comm_mode mode = COMM_MODE_NONE;
-
if (!is_init_done(dtsec->dtsec_drv_param))
return -EINVAL;
- if ((ioread32be(&regs->rctrl) & RCTRL_GRS) == 0)
- mode |= COMM_MODE_RX;
- if ((ioread32be(&regs->tctrl) & TCTRL_GTS) == 0)
- mode |= COMM_MODE_TX;
-
- graceful_stop(dtsec, mode);
+ graceful_stop(dtsec);
/* Initialize MAC Station Address registers (1 & 2)
* Station address have to be swapped (big endian to little endian
@@ -1043,12 +960,13 @@ int dtsec_modify_mac_address(struct fman_mac *dtsec, const enet_addr_t *enet_add
dtsec->addr = ENET_ADDR_TO_UINT64(*enet_addr);
set_mac_address(dtsec->regs, (const u8 *)(*enet_addr));
- graceful_start(dtsec, mode);
+ graceful_start(dtsec);
return 0;
}
-int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
+static int dtsec_add_hash_mac_address(struct fman_mac *dtsec,
+ enet_addr_t *eth_addr)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
struct eth_hash_entry *hash_entry;
@@ -1114,7 +1032,7 @@ int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
return 0;
}
-int dtsec_set_allmulti(struct fman_mac *dtsec, bool enable)
+static int dtsec_set_allmulti(struct fman_mac *dtsec, bool enable)
{
u32 tmp;
struct dtsec_regs __iomem *regs = dtsec->regs;
@@ -1133,7 +1051,7 @@ int dtsec_set_allmulti(struct fman_mac *dtsec, bool enable)
return 0;
}
-int dtsec_set_tstamp(struct fman_mac *dtsec, bool enable)
+static int dtsec_set_tstamp(struct fman_mac *dtsec, bool enable)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
u32 rctrl, tctrl;
@@ -1158,7 +1076,8 @@ int dtsec_set_tstamp(struct fman_mac *dtsec, bool enable)
return 0;
}
-int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
+static int dtsec_del_hash_mac_address(struct fman_mac *dtsec,
+ enet_addr_t *eth_addr)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
struct list_head *pos;
@@ -1229,7 +1148,7 @@ int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
return 0;
}
-int dtsec_set_promiscuous(struct fman_mac *dtsec, bool new_val)
+static int dtsec_set_promiscuous(struct fman_mac *dtsec, bool new_val)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
u32 tmp;
@@ -1258,21 +1177,15 @@ int dtsec_set_promiscuous(struct fman_mac *dtsec, bool new_val)
return 0;
}
-int dtsec_adjust_link(struct fman_mac *dtsec, u16 speed)
+static int dtsec_adjust_link(struct fman_mac *dtsec, u16 speed)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
- enum comm_mode mode = COMM_MODE_NONE;
u32 tmp;
if (!is_init_done(dtsec->dtsec_drv_param))
return -EINVAL;
- if ((ioread32be(&regs->rctrl) & RCTRL_GRS) == 0)
- mode |= COMM_MODE_RX;
- if ((ioread32be(&regs->tctrl) & TCTRL_GTS) == 0)
- mode |= COMM_MODE_TX;
-
- graceful_stop(dtsec, mode);
+ graceful_stop(dtsec);
tmp = ioread32be(&regs->maccfg2);
@@ -1293,12 +1206,12 @@ int dtsec_adjust_link(struct fman_mac *dtsec, u16 speed)
tmp &= ~DTSEC_ECNTRL_R100M;
iowrite32be(tmp, &regs->ecntrl);
- graceful_start(dtsec, mode);
+ graceful_start(dtsec);
return 0;
}
-int dtsec_restart_autoneg(struct fman_mac *dtsec)
+static int dtsec_restart_autoneg(struct fman_mac *dtsec)
{
u16 tmp_reg16;
@@ -1316,20 +1229,31 @@ int dtsec_restart_autoneg(struct fman_mac *dtsec)
return 0;
}
-int dtsec_get_version(struct fman_mac *dtsec, u32 *mac_version)
+static void adjust_link_dtsec(struct mac_device *mac_dev)
{
- struct dtsec_regs __iomem *regs = dtsec->regs;
+ struct phy_device *phy_dev = mac_dev->phy_dev;
+ struct fman_mac *fman_mac;
+ bool rx_pause, tx_pause;
+ int err;
- if (!is_init_done(dtsec->dtsec_drv_param))
- return -EINVAL;
+ fman_mac = mac_dev->fman_mac;
+ if (!phy_dev->link) {
+ dtsec_restart_autoneg(fman_mac);
- *mac_version = ioread32be(&regs->tsec_id);
+ return;
+ }
- return 0;
+ dtsec_adjust_link(fman_mac, phy_dev->speed);
+ mac_dev->update_speed(mac_dev, phy_dev->speed);
+ fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
+ err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause);
+ if (err < 0)
+ dev_err(mac_dev->dev, "fman_set_mac_active_pause() = %d\n",
+ err);
}
-int dtsec_set_exception(struct fman_mac *dtsec,
- enum fman_mac_exceptions exception, bool enable)
+static int dtsec_set_exception(struct fman_mac *dtsec,
+ enum fman_mac_exceptions exception, bool enable)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
u32 bit_mask = 0;
@@ -1382,7 +1306,7 @@ int dtsec_set_exception(struct fman_mac *dtsec,
return 0;
}
-int dtsec_init(struct fman_mac *dtsec)
+static int dtsec_init(struct fman_mac *dtsec)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
struct dtsec_cfg *dtsec_drv_param;
@@ -1476,7 +1400,7 @@ int dtsec_init(struct fman_mac *dtsec)
return 0;
}
-int dtsec_free(struct fman_mac *dtsec)
+static int dtsec_free(struct fman_mac *dtsec)
{
free_init_resources(dtsec);
@@ -1487,13 +1411,11 @@ int dtsec_free(struct fman_mac *dtsec)
return 0;
}
-struct fman_mac *dtsec_config(struct fman_mac_params *params)
+static struct fman_mac *dtsec_config(struct mac_device *mac_dev,
+ struct fman_mac_params *params)
{
struct fman_mac *dtsec;
struct dtsec_cfg *dtsec_drv_param;
- void __iomem *base_addr;
-
- base_addr = params->base_addr;
/* allocate memory for the UCC GETH data structure. */
dtsec = kzalloc(sizeof(*dtsec), GFP_KERNEL);
@@ -1510,10 +1432,10 @@ struct fman_mac *dtsec_config(struct fman_mac_params *params)
set_dflts(dtsec_drv_param);
- dtsec->regs = base_addr;
- dtsec->addr = ENET_ADDR_TO_UINT64(params->addr);
+ dtsec->regs = mac_dev->vaddr;
+ dtsec->addr = ENET_ADDR_TO_UINT64(mac_dev->addr);
dtsec->max_speed = params->max_speed;
- dtsec->phy_if = params->phy_if;
+ dtsec->phy_if = mac_dev->phy_if;
dtsec->mac_id = params->mac_id;
dtsec->exceptions = (DTSEC_IMASK_BREN |
DTSEC_IMASK_RXCEN |
@@ -1530,34 +1452,87 @@ struct fman_mac *dtsec_config(struct fman_mac_params *params)
DTSEC_IMASK_RDPEEN);
dtsec->exception_cb = params->exception_cb;
dtsec->event_cb = params->event_cb;
- dtsec->dev_id = params->dev_id;
+ dtsec->dev_id = mac_dev;
dtsec->ptp_tsu_enabled = dtsec->dtsec_drv_param->ptp_tsu_en;
dtsec->en_tsu_err_exception = dtsec->dtsec_drv_param->ptp_exception_en;
dtsec->fm = params->fm;
dtsec->basex_if = params->basex_if;
- if (!params->internal_phy_node) {
- pr_err("TBI PHY node is not available\n");
- goto err_dtsec_drv_param;
- }
-
- dtsec->tbiphy = of_phy_find_device(params->internal_phy_node);
- if (!dtsec->tbiphy) {
- pr_err("of_phy_find_device (TBI PHY) failed\n");
- goto err_dtsec_drv_param;
- }
-
- put_device(&dtsec->tbiphy->mdio.dev);
-
/* Save FMan revision */
fman_get_revision(dtsec->fm, &dtsec->fm_rev_info);
return dtsec;
-err_dtsec_drv_param:
- kfree(dtsec_drv_param);
err_dtsec:
kfree(dtsec);
return NULL;
}
+
+int dtsec_initialization(struct mac_device *mac_dev,
+ struct device_node *mac_node,
+ struct fman_mac_params *params)
+{
+ int err;
+ struct fman_mac *dtsec;
+ struct device_node *phy_node;
+
+ mac_dev->set_promisc = dtsec_set_promiscuous;
+ mac_dev->change_addr = dtsec_modify_mac_address;
+ mac_dev->add_hash_mac_addr = dtsec_add_hash_mac_address;
+ mac_dev->remove_hash_mac_addr = dtsec_del_hash_mac_address;
+ mac_dev->set_tx_pause = dtsec_set_tx_pause_frames;
+ mac_dev->set_rx_pause = dtsec_accept_rx_pause_frames;
+ mac_dev->set_exception = dtsec_set_exception;
+ mac_dev->set_allmulti = dtsec_set_allmulti;
+ mac_dev->set_tstamp = dtsec_set_tstamp;
+ mac_dev->set_multi = fman_set_multi;
+ mac_dev->adjust_link = adjust_link_dtsec;
+ mac_dev->enable = dtsec_enable;
+ mac_dev->disable = dtsec_disable;
+
+ mac_dev->fman_mac = dtsec_config(mac_dev, params);
+ if (!mac_dev->fman_mac) {
+ err = -EINVAL;
+ goto _return;
+ }
+
+ dtsec = mac_dev->fman_mac;
+ dtsec->dtsec_drv_param->maximum_frame = fman_get_max_frm();
+ dtsec->dtsec_drv_param->tx_pad_crc = true;
+
+ phy_node = of_parse_phandle(mac_node, "tbi-handle", 0);
+ if (!phy_node) {
+ pr_err("TBI PHY node is not available\n");
+ err = -EINVAL;
+ goto _return_fm_mac_free;
+ }
+
+ dtsec->tbiphy = of_phy_find_device(phy_node);
+ if (!dtsec->tbiphy) {
+ pr_err("of_phy_find_device (TBI PHY) failed\n");
+ err = -EINVAL;
+ goto _return_fm_mac_free;
+ }
+ put_device(&dtsec->tbiphy->mdio.dev);
+
+ err = dtsec_init(dtsec);
+ if (err < 0)
+ goto _return_fm_mac_free;
+
+ /* For 1G MAC, disable by default the MIB counters overflow interrupt */
+ err = dtsec_set_exception(dtsec, FM_MAC_EX_1G_RX_MIB_CNT_OVFL, false);
+ if (err < 0)
+ goto _return_fm_mac_free;
+
+ dev_info(mac_dev->dev, "FMan dTSEC version: 0x%08x\n",
+ ioread32be(&dtsec->regs->tsec_id));
+
+ goto _return;
+
+_return_fm_mac_free:
+ dtsec_free(dtsec);
+
+_return:
+ return err;
+}
diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.h b/drivers/net/ethernet/freescale/fman/fman_dtsec.h
index 68512c3bd6e5..8c72d280c51a 100644
--- a/drivers/net/ethernet/freescale/fman/fman_dtsec.h
+++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.h
@@ -1,33 +1,6 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
/*
- * Copyright 2008-2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
*/
#ifndef __DTSEC_H
@@ -35,27 +8,10 @@
#include "fman_mac.h"
-struct fman_mac *dtsec_config(struct fman_mac_params *params);
-int dtsec_set_promiscuous(struct fman_mac *dtsec, bool new_val);
-int dtsec_modify_mac_address(struct fman_mac *dtsec, const enet_addr_t *enet_addr);
-int dtsec_adjust_link(struct fman_mac *dtsec,
- u16 speed);
-int dtsec_restart_autoneg(struct fman_mac *dtsec);
-int dtsec_cfg_max_frame_len(struct fman_mac *dtsec, u16 new_val);
-int dtsec_cfg_pad_and_crc(struct fman_mac *dtsec, bool new_val);
-int dtsec_enable(struct fman_mac *dtsec, enum comm_mode mode);
-int dtsec_disable(struct fman_mac *dtsec, enum comm_mode mode);
-int dtsec_init(struct fman_mac *dtsec);
-int dtsec_free(struct fman_mac *dtsec);
-int dtsec_accept_rx_pause_frames(struct fman_mac *dtsec, bool en);
-int dtsec_set_tx_pause_frames(struct fman_mac *dtsec, u8 priority,
- u16 pause_time, u16 thresh_time);
-int dtsec_set_exception(struct fman_mac *dtsec,
- enum fman_mac_exceptions exception, bool enable);
-int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr);
-int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr);
-int dtsec_get_version(struct fman_mac *dtsec, u32 *mac_version);
-int dtsec_set_allmulti(struct fman_mac *dtsec, bool enable);
-int dtsec_set_tstamp(struct fman_mac *dtsec, bool enable);
+struct mac_device;
+
+int dtsec_initialization(struct mac_device *mac_dev,
+ struct device_node *mac_node,
+ struct fman_mac_params *params);
#endif /* __DTSEC_H */
diff --git a/drivers/net/ethernet/freescale/fman/fman_keygen.c b/drivers/net/ethernet/freescale/fman/fman_keygen.c
index e1bdfed16134..e73f6ef3c6ee 100644
--- a/drivers/net/ethernet/freescale/fman/fman_keygen.c
+++ b/drivers/net/ethernet/freescale/fman/fman_keygen.c
@@ -1,33 +1,6 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
/*
* Copyright 2017 NXP
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of NXP nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY NXP ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL NXP BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/net/ethernet/freescale/fman/fman_keygen.h b/drivers/net/ethernet/freescale/fman/fman_keygen.h
index c4640de3f4cb..2cb0df453074 100644
--- a/drivers/net/ethernet/freescale/fman/fman_keygen.h
+++ b/drivers/net/ethernet/freescale/fman/fman_keygen.h
@@ -1,33 +1,6 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
/*
* Copyright 2017 NXP
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of NXP nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY NXP ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL NXP BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __KEYGEN_H
diff --git a/drivers/net/ethernet/freescale/fman/fman_mac.h b/drivers/net/ethernet/freescale/fman/fman_mac.h
index 19f327efdaff..65887a3160d7 100644
--- a/drivers/net/ethernet/freescale/fman/fman_mac.h
+++ b/drivers/net/ethernet/freescale/fman/fman_mac.h
@@ -41,6 +41,7 @@
#include
struct fman_mac;
+struct mac_device;
/* Ethernet Address */
typedef u8 enet_addr_t[ETH_ALEN];
@@ -75,16 +76,6 @@ typedef u8 enet_addr_t[ETH_ALEN];
#define ETH_HASH_ENTRY_OBJ(ptr) \
hlist_entry_safe(ptr, struct eth_hash_entry, node)
-/* Enumeration (bit flags) of communication modes (Transmit,
- * receive or both).
- */
-enum comm_mode {
- COMM_MODE_NONE = 0, /* No transmit/receive communication */
- COMM_MODE_RX = 1, /* Only receive communication */
- COMM_MODE_TX = 2, /* Only transmit communication */
- COMM_MODE_RX_AND_TX = 3 /* Both transmit and receive communication */
-};
-
/* FM MAC Exceptions */
enum fman_mac_exceptions {
FM_MAC_EX_10G_MDIO_SCAN_EVENT = 0
@@ -168,30 +159,23 @@ struct eth_hash_entry {
struct list_head node;
};
-typedef void (fman_mac_exception_cb)(void *dev_id,
- enum fman_mac_exceptions exceptions);
+typedef void (fman_mac_exception_cb)(struct mac_device *dev_id,
+ enum fman_mac_exceptions exceptions);
/* FMan MAC config input */
struct fman_mac_params {
- /* Base of memory mapped FM MAC registers */
- void __iomem *base_addr;
- /* MAC address of device; First octet is sent first */
- enet_addr_t addr;
/* MAC ID; numbering of dTSEC and 1G-mEMAC:
* 0 - FM_MAX_NUM_OF_1G_MACS;
* numbering of 10G-MAC (TGEC) and 10G-mEMAC:
* 0 - FM_MAX_NUM_OF_10G_MACS
*/
u8 mac_id;
- /* PHY interface */
- phy_interface_t phy_if;
/* Note that the speed should indicate the maximum rate that
* this MAC should support rather than the actual speed;
*/
u16 max_speed;
/* A handle to the FM object this port related to */
void *fm;
- void *dev_id; /* device cookie used by the exception cbs */
fman_mac_exception_cb *event_cb; /* MDIO Events Callback Routine */
fman_mac_exception_cb *exception_cb;/* Exception Callback Routine */
/* SGMII/QSGII interface with 1000BaseX auto-negotiation between MAC
@@ -200,8 +184,6 @@ struct fman_mac_params {
* synchronize with far-end phy at 10Mbps, 100Mbps or 1000Mbps
*/
bool basex_if;
- /* Pointer to TBI/PCS PHY node, used for TBI/PCS PHY access */
- struct device_node *internal_phy_node;
};
struct eth_hash_t {
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c
index 2216b7f51d26..32d26cf17843 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.c
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.c
@@ -1,39 +1,13 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
/*
- * Copyright 2008-2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include "fman_memac.h"
#include "fman.h"
+#include "mac.h"
#include
#include
@@ -337,7 +311,7 @@ struct fman_mac {
/* Ethernet physical interface */
phy_interface_t phy_if;
u16 max_speed;
- void *dev_id; /* device cookie used by the exception cbs */
+ struct mac_device *dev_id; /* device cookie used by the exception cbs */
fman_mac_exception_cb *exception_cb;
fman_mac_exception_cb *event_cb;
/* Pointer to driver's global address hash table */
@@ -712,7 +686,7 @@ static bool is_init_done(struct memac_cfg *memac_drv_params)
return false;
}
-int memac_enable(struct fman_mac *memac, enum comm_mode mode)
+static int memac_enable(struct fman_mac *memac)
{
struct memac_regs __iomem *regs = memac->regs;
u32 tmp;
@@ -721,36 +695,26 @@ int memac_enable(struct fman_mac *memac, enum comm_mode mode)
return -EINVAL;
tmp = ioread32be(&regs->command_config);
- if (mode & COMM_MODE_RX)
- tmp |= CMD_CFG_RX_EN;
- if (mode & COMM_MODE_TX)
- tmp |= CMD_CFG_TX_EN;
-
+ tmp |= CMD_CFG_RX_EN | CMD_CFG_TX_EN;
iowrite32be(tmp, &regs->command_config);
return 0;
}
-int memac_disable(struct fman_mac *memac, enum comm_mode mode)
+static void memac_disable(struct fman_mac *memac)
+
{
struct memac_regs __iomem *regs = memac->regs;
u32 tmp;
- if (!is_init_done(memac->memac_drv_param))
- return -EINVAL;
+ WARN_ON_ONCE(!is_init_done(memac->memac_drv_param));
tmp = ioread32be(&regs->command_config);
- if (mode & COMM_MODE_RX)
- tmp &= ~CMD_CFG_RX_EN;
- if (mode & COMM_MODE_TX)
- tmp &= ~CMD_CFG_TX_EN;
-
+ tmp &= ~(CMD_CFG_RX_EN | CMD_CFG_TX_EN);
iowrite32be(tmp, &regs->command_config);
-
- return 0;
}
-int memac_set_promiscuous(struct fman_mac *memac, bool new_val)
+static int memac_set_promiscuous(struct fman_mac *memac, bool new_val)
{
struct memac_regs __iomem *regs = memac->regs;
u32 tmp;
@@ -769,7 +733,7 @@ int memac_set_promiscuous(struct fman_mac *memac, bool new_val)
return 0;
}
-int memac_adjust_link(struct fman_mac *memac, u16 speed)
+static int memac_adjust_link(struct fman_mac *memac, u16 speed)
{
struct memac_regs __iomem *regs = memac->regs;
u32 tmp;
@@ -809,39 +773,26 @@ int memac_adjust_link(struct fman_mac *memac, u16 speed)
return 0;
}
-int memac_cfg_max_frame_len(struct fman_mac *memac, u16 new_val)
+static void adjust_link_memac(struct mac_device *mac_dev)
{
- if (is_init_done(memac->memac_drv_param))
- return -EINVAL;
+ struct phy_device *phy_dev = mac_dev->phy_dev;
+ struct fman_mac *fman_mac;
+ bool rx_pause, tx_pause;
+ int err;
- memac->memac_drv_param->max_frame_length = new_val;
+ fman_mac = mac_dev->fman_mac;
+ memac_adjust_link(fman_mac, phy_dev->speed);
+ mac_dev->update_speed(mac_dev, phy_dev->speed);
- return 0;
+ fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
+ err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause);
+ if (err < 0)
+ dev_err(mac_dev->dev, "fman_set_mac_active_pause() = %d\n",
+ err);
}
-int memac_cfg_reset_on_init(struct fman_mac *memac, bool enable)
-{
- if (is_init_done(memac->memac_drv_param))
- return -EINVAL;
-
- memac->memac_drv_param->reset_on_init = enable;
-
- return 0;
-}
-
-int memac_cfg_fixed_link(struct fman_mac *memac,
- struct fixed_phy_status *fixed_link)
-{
- if (is_init_done(memac->memac_drv_param))
- return -EINVAL;
-
- memac->memac_drv_param->fixed_link = fixed_link;
-
- return 0;
-}
-
-int memac_set_tx_pause_frames(struct fman_mac *memac, u8 priority,
- u16 pause_time, u16 thresh_time)
+static int memac_set_tx_pause_frames(struct fman_mac *memac, u8 priority,
+ u16 pause_time, u16 thresh_time)
{
struct memac_regs __iomem *regs = memac->regs;
u32 tmp;
@@ -878,7 +829,7 @@ int memac_set_tx_pause_frames(struct fman_mac *memac, u8 priority,
return 0;
}
-int memac_accept_rx_pause_frames(struct fman_mac *memac, bool en)
+static int memac_accept_rx_pause_frames(struct fman_mac *memac, bool en)
{
struct memac_regs __iomem *regs = memac->regs;
u32 tmp;
@@ -897,7 +848,8 @@ int memac_accept_rx_pause_frames(struct fman_mac *memac, bool en)
return 0;
}
-int memac_modify_mac_address(struct fman_mac *memac, const enet_addr_t *enet_addr)
+static int memac_modify_mac_address(struct fman_mac *memac,
+ const enet_addr_t *enet_addr)
{
if (!is_init_done(memac->memac_drv_param))
return -EINVAL;
@@ -907,7 +859,8 @@ int memac_modify_mac_address(struct fman_mac *memac, const enet_addr_t *enet_add
return 0;
}
-int memac_add_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr)
+static int memac_add_hash_mac_address(struct fman_mac *memac,
+ enet_addr_t *eth_addr)
{
struct memac_regs __iomem *regs = memac->regs;
struct eth_hash_entry *hash_entry;
@@ -940,7 +893,7 @@ int memac_add_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr)
return 0;
}
-int memac_set_allmulti(struct fman_mac *memac, bool enable)
+static int memac_set_allmulti(struct fman_mac *memac, bool enable)
{
u32 entry;
struct memac_regs __iomem *regs = memac->regs;
@@ -963,12 +916,13 @@ int memac_set_allmulti(struct fman_mac *memac, bool enable)
return 0;
}
-int memac_set_tstamp(struct fman_mac *memac, bool enable)
+static int memac_set_tstamp(struct fman_mac *memac, bool enable)
{
return 0; /* Always enabled. */
}
-int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr)
+static int memac_del_hash_mac_address(struct fman_mac *memac,
+ enet_addr_t *eth_addr)
{
struct memac_regs __iomem *regs = memac->regs;
struct eth_hash_entry *hash_entry = NULL;
@@ -1001,8 +955,8 @@ int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr)
return 0;
}
-int memac_set_exception(struct fman_mac *memac,
- enum fman_mac_exceptions exception, bool enable)
+static int memac_set_exception(struct fman_mac *memac,
+ enum fman_mac_exceptions exception, bool enable)
{
u32 bit_mask = 0;
@@ -1024,13 +978,13 @@ int memac_set_exception(struct fman_mac *memac,
return 0;
}
-int memac_init(struct fman_mac *memac)
+static int memac_init(struct fman_mac *memac)
{
struct memac_cfg *memac_drv_param;
u8 i;
enet_addr_t eth_addr;
bool slow_10g_if = false;
- struct fixed_phy_status *fixed_link;
+ struct fixed_phy_status *fixed_link = NULL;
int err;
u32 reg32 = 0;
@@ -1141,7 +1095,7 @@ int memac_init(struct fman_mac *memac)
return 0;
}
-int memac_free(struct fman_mac *memac)
+static int memac_free(struct fman_mac *memac)
{
free_init_resources(memac);
@@ -1154,13 +1108,12 @@ int memac_free(struct fman_mac *memac)
return 0;
}
-struct fman_mac *memac_config(struct fman_mac_params *params)
+static struct fman_mac *memac_config(struct mac_device *mac_dev,
+ struct fman_mac_params *params)
{
struct fman_mac *memac;
struct memac_cfg *memac_drv_param;
- void __iomem *base_addr;
- base_addr = params->base_addr;
/* allocate memory for the m_emac data structure */
memac = kzalloc(sizeof(*memac), GFP_KERNEL);
if (!memac)
@@ -1178,38 +1131,121 @@ struct fman_mac *memac_config(struct fman_mac_params *params)
set_dflts(memac_drv_param);
- memac->addr = ENET_ADDR_TO_UINT64(params->addr);
+ memac->addr = ENET_ADDR_TO_UINT64(mac_dev->addr);
- memac->regs = base_addr;
+ memac->regs = mac_dev->vaddr;
memac->max_speed = params->max_speed;
- memac->phy_if = params->phy_if;
+ memac->phy_if = mac_dev->phy_if;
memac->mac_id = params->mac_id;
memac->exceptions = (MEMAC_IMASK_TSECC_ER | MEMAC_IMASK_TECC_ER |
MEMAC_IMASK_RECC_ER | MEMAC_IMASK_MGI);
memac->exception_cb = params->exception_cb;
memac->event_cb = params->event_cb;
- memac->dev_id = params->dev_id;
+ memac->dev_id = mac_dev;
memac->fm = params->fm;
memac->basex_if = params->basex_if;
/* Save FMan revision */
fman_get_revision(memac->fm, &memac->fm_rev_info);
+ return memac;
+}
+
+int memac_initialization(struct mac_device *mac_dev,
+ struct device_node *mac_node,
+ struct fman_mac_params *params)
+{
+ int err;
+ struct device_node *phy_node;
+ struct fixed_phy_status *fixed_link;
+ struct fman_mac *memac;
+
+ mac_dev->set_promisc = memac_set_promiscuous;
+ mac_dev->change_addr = memac_modify_mac_address;
+ mac_dev->add_hash_mac_addr = memac_add_hash_mac_address;
+ mac_dev->remove_hash_mac_addr = memac_del_hash_mac_address;
+ mac_dev->set_tx_pause = memac_set_tx_pause_frames;
+ mac_dev->set_rx_pause = memac_accept_rx_pause_frames;
+ mac_dev->set_exception = memac_set_exception;
+ mac_dev->set_allmulti = memac_set_allmulti;
+ mac_dev->set_tstamp = memac_set_tstamp;
+ mac_dev->set_multi = fman_set_multi;
+ mac_dev->adjust_link = adjust_link_memac;
+ mac_dev->enable = memac_enable;
+ mac_dev->disable = memac_disable;
+
+ if (params->max_speed == SPEED_10000)
+ mac_dev->phy_if = PHY_INTERFACE_MODE_XGMII;
+
+ mac_dev->fman_mac = memac_config(mac_dev, params);
+ if (!mac_dev->fman_mac) {
+ err = -EINVAL;
+ goto _return;
+ }
+
+ memac = mac_dev->fman_mac;
+ memac->memac_drv_param->max_frame_length = fman_get_max_frm();
+ memac->memac_drv_param->reset_on_init = true;
if (memac->phy_if == PHY_INTERFACE_MODE_SGMII ||
memac->phy_if == PHY_INTERFACE_MODE_QSGMII) {
- if (!params->internal_phy_node) {
+ phy_node = of_parse_phandle(mac_node, "pcsphy-handle", 0);
+ if (!phy_node) {
pr_err("PCS PHY node is not available\n");
- memac_free(memac);
- return NULL;
+ err = -EINVAL;
+ goto _return_fm_mac_free;
}
- memac->pcsphy = of_phy_find_device(params->internal_phy_node);
+ memac->pcsphy = of_phy_find_device(phy_node);
if (!memac->pcsphy) {
pr_err("of_phy_find_device (PCS PHY) failed\n");
- memac_free(memac);
- return NULL;
+ err = -EINVAL;
+ goto _return_fm_mac_free;
}
}
- return memac;
+ if (!mac_dev->phy_node && of_phy_is_fixed_link(mac_node)) {
+ struct phy_device *phy;
+
+ err = of_phy_register_fixed_link(mac_node);
+ if (err)
+ goto _return_fm_mac_free;
+
+ fixed_link = kzalloc(sizeof(*fixed_link), GFP_KERNEL);
+ if (!fixed_link) {
+ err = -ENOMEM;
+ goto _return_fm_mac_free;
+ }
+
+ mac_dev->phy_node = of_node_get(mac_node);
+ phy = of_phy_find_device(mac_dev->phy_node);
+ if (!phy) {
+ err = -EINVAL;
+ of_node_put(mac_dev->phy_node);
+ goto _return_fixed_link_free;
+ }
+
+ fixed_link->link = phy->link;
+ fixed_link->speed = phy->speed;
+ fixed_link->duplex = phy->duplex;
+ fixed_link->pause = phy->pause;
+ fixed_link->asym_pause = phy->asym_pause;
+
+ put_device(&phy->mdio.dev);
+ memac->memac_drv_param->fixed_link = fixed_link;
+ }
+
+ err = memac_init(mac_dev->fman_mac);
+ if (err < 0)
+ goto _return_fixed_link_free;
+
+ dev_info(mac_dev->dev, "FMan MEMAC\n");
+
+ goto _return;
+
+_return_fixed_link_free:
+ kfree(fixed_link);
+_return_fm_mac_free:
+ memac_free(mac_dev->fman_mac);
+_return:
+ return err;
}
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.h b/drivers/net/ethernet/freescale/fman/fman_memac.h
index 3820f7a22983..5a3a14f9684f 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.h
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.h
@@ -1,33 +1,6 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
/*
- * Copyright 2008-2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
*/
#ifndef __MEMAC_H
@@ -38,26 +11,10 @@
#include
#include
-struct fman_mac *memac_config(struct fman_mac_params *params);
-int memac_set_promiscuous(struct fman_mac *memac, bool new_val);
-int memac_modify_mac_address(struct fman_mac *memac, const enet_addr_t *enet_addr);
-int memac_adjust_link(struct fman_mac *memac, u16 speed);
-int memac_cfg_max_frame_len(struct fman_mac *memac, u16 new_val);
-int memac_cfg_reset_on_init(struct fman_mac *memac, bool enable);
-int memac_cfg_fixed_link(struct fman_mac *memac,
- struct fixed_phy_status *fixed_link);
-int memac_enable(struct fman_mac *memac, enum comm_mode mode);
-int memac_disable(struct fman_mac *memac, enum comm_mode mode);
-int memac_init(struct fman_mac *memac);
-int memac_free(struct fman_mac *memac);
-int memac_accept_rx_pause_frames(struct fman_mac *memac, bool en);
-int memac_set_tx_pause_frames(struct fman_mac *memac, u8 priority,
- u16 pause_time, u16 thresh_time);
-int memac_set_exception(struct fman_mac *memac,
- enum fman_mac_exceptions exception, bool enable);
-int memac_add_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr);
-int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr);
-int memac_set_allmulti(struct fman_mac *memac, bool enable);
-int memac_set_tstamp(struct fman_mac *memac, bool enable);
+struct mac_device;
+
+int memac_initialization(struct mac_device *mac_dev,
+ struct device_node *mac_node,
+ struct fman_mac_params *params);
#endif /* __MEMAC_H */
diff --git a/drivers/net/ethernet/freescale/fman/fman_muram.c b/drivers/net/ethernet/freescale/fman/fman_muram.c
index 7ad317e622bc..f557d68e5b76 100644
--- a/drivers/net/ethernet/freescale/fman/fman_muram.c
+++ b/drivers/net/ethernet/freescale/fman/fman_muram.c
@@ -1,33 +1,6 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
/*
- * Copyright 2008-2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
*/
#include "fman_muram.h"
diff --git a/drivers/net/ethernet/freescale/fman/fman_muram.h b/drivers/net/ethernet/freescale/fman/fman_muram.h
index 453bf849eee1..3643af61bae2 100644
--- a/drivers/net/ethernet/freescale/fman/fman_muram.h
+++ b/drivers/net/ethernet/freescale/fman/fman_muram.h
@@ -1,34 +1,8 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
/*
- * Copyright 2008-2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
*/
+
#ifndef __FM_MURAM_EXT
#define __FM_MURAM_EXT
diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c
index 4c9d05c45c03..ab90fe2bee5e 100644
--- a/drivers/net/ethernet/freescale/fman/fman_port.c
+++ b/drivers/net/ethernet/freescale/fman/fman_port.c
@@ -1,33 +1,6 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
/*
* Copyright 2008 - 2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/net/ethernet/freescale/fman/fman_port.h b/drivers/net/ethernet/freescale/fman/fman_port.h
index 82f12661a46d..4917fe8f0617 100644
--- a/drivers/net/ethernet/freescale/fman/fman_port.h
+++ b/drivers/net/ethernet/freescale/fman/fman_port.h
@@ -1,33 +1,6 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
/*
* Copyright 2008 - 2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __FMAN_PORT_H
diff --git a/drivers/net/ethernet/freescale/fman/fman_sp.c b/drivers/net/ethernet/freescale/fman/fman_sp.c
index 248f5bcca468..0fac60aa5283 100644
--- a/drivers/net/ethernet/freescale/fman/fman_sp.c
+++ b/drivers/net/ethernet/freescale/fman/fman_sp.c
@@ -1,33 +1,6 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
/*
* Copyright 2008 - 2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "fman_sp.h"
diff --git a/drivers/net/ethernet/freescale/fman/fman_sp.h b/drivers/net/ethernet/freescale/fman/fman_sp.h
index 820b7f63088f..a62dd21c81f1 100644
--- a/drivers/net/ethernet/freescale/fman/fman_sp.h
+++ b/drivers/net/ethernet/freescale/fman/fman_sp.h
@@ -1,32 +1,6 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
/*
* Copyright 2008 - 2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef __FM_SP_H
diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.c b/drivers/net/ethernet/freescale/fman/fman_tgec.c
index 311c1906e044..5a4be54ad459 100644
--- a/drivers/net/ethernet/freescale/fman/fman_tgec.c
+++ b/drivers/net/ethernet/freescale/fman/fman_tgec.c
@@ -1,39 +1,13 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
/*
- * Copyright 2008-2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include "fman_tgec.h"
#include "fman.h"
+#include "mac.h"
#include
#include
@@ -206,7 +180,7 @@ struct fman_mac {
/* MAC address of device; */
u64 addr;
u16 max_speed;
- void *dev_id; /* device cookie used by the exception cbs */
+ struct mac_device *dev_id; /* device cookie used by the exception cbs */
fman_mac_exception_cb *exception_cb;
fman_mac_exception_cb *event_cb;
/* pointer to driver's global address hash table */
@@ -419,7 +393,7 @@ static bool is_init_done(struct tgec_cfg *cfg)
return false;
}
-int tgec_enable(struct fman_mac *tgec, enum comm_mode mode)
+static int tgec_enable(struct fman_mac *tgec)
{
struct tgec_regs __iomem *regs = tgec->regs;
u32 tmp;
@@ -428,34 +402,25 @@ int tgec_enable(struct fman_mac *tgec, enum comm_mode mode)
return -EINVAL;
tmp = ioread32be(&regs->command_config);
- if (mode & COMM_MODE_RX)
- tmp |= CMD_CFG_RX_EN;
- if (mode & COMM_MODE_TX)
- tmp |= CMD_CFG_TX_EN;
+ tmp |= CMD_CFG_RX_EN | CMD_CFG_TX_EN;
iowrite32be(tmp, &regs->command_config);
return 0;
}
-int tgec_disable(struct fman_mac *tgec, enum comm_mode mode)
+static void tgec_disable(struct fman_mac *tgec)
{
struct tgec_regs __iomem *regs = tgec->regs;
u32 tmp;
- if (!is_init_done(tgec->cfg))
- return -EINVAL;
+ WARN_ON_ONCE(!is_init_done(tgec->cfg));
tmp = ioread32be(&regs->command_config);
- if (mode & COMM_MODE_RX)
- tmp &= ~CMD_CFG_RX_EN;
- if (mode & COMM_MODE_TX)
- tmp &= ~CMD_CFG_TX_EN;
+ tmp &= ~(CMD_CFG_RX_EN | CMD_CFG_TX_EN);
iowrite32be(tmp, &regs->command_config);
-
- return 0;
}
-int tgec_set_promiscuous(struct fman_mac *tgec, bool new_val)
+static int tgec_set_promiscuous(struct fman_mac *tgec, bool new_val)
{
struct tgec_regs __iomem *regs = tgec->regs;
u32 tmp;
@@ -473,18 +438,9 @@ int tgec_set_promiscuous(struct fman_mac *tgec, bool new_val)
return 0;
}
-int tgec_cfg_max_frame_len(struct fman_mac *tgec, u16 new_val)
-{
- if (is_init_done(tgec->cfg))
- return -EINVAL;
-
- tgec->cfg->max_frame_length = new_val;
-
- return 0;
-}
-
-int tgec_set_tx_pause_frames(struct fman_mac *tgec, u8 __maybe_unused priority,
- u16 pause_time, u16 __maybe_unused thresh_time)
+static int tgec_set_tx_pause_frames(struct fman_mac *tgec,
+ u8 __maybe_unused priority, u16 pause_time,
+ u16 __maybe_unused thresh_time)
{
struct tgec_regs __iomem *regs = tgec->regs;
@@ -496,7 +452,7 @@ int tgec_set_tx_pause_frames(struct fman_mac *tgec, u8 __maybe_unused priority,
return 0;
}
-int tgec_accept_rx_pause_frames(struct fman_mac *tgec, bool en)
+static int tgec_accept_rx_pause_frames(struct fman_mac *tgec, bool en)
{
struct tgec_regs __iomem *regs = tgec->regs;
u32 tmp;
@@ -514,7 +470,8 @@ int tgec_accept_rx_pause_frames(struct fman_mac *tgec, bool en)
return 0;
}
-int tgec_modify_mac_address(struct fman_mac *tgec, const enet_addr_t *p_enet_addr)
+static int tgec_modify_mac_address(struct fman_mac *tgec,
+ const enet_addr_t *p_enet_addr)
{
if (!is_init_done(tgec->cfg))
return -EINVAL;
@@ -525,7 +482,8 @@ int tgec_modify_mac_address(struct fman_mac *tgec, const enet_addr_t *p_enet_add
return 0;
}
-int tgec_add_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr)
+static int tgec_add_hash_mac_address(struct fman_mac *tgec,
+ enet_addr_t *eth_addr)
{
struct tgec_regs __iomem *regs = tgec->regs;
struct eth_hash_entry *hash_entry;
@@ -562,7 +520,7 @@ int tgec_add_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr)
return 0;
}
-int tgec_set_allmulti(struct fman_mac *tgec, bool enable)
+static int tgec_set_allmulti(struct fman_mac *tgec, bool enable)
{
u32 entry;
struct tgec_regs __iomem *regs = tgec->regs;
@@ -585,7 +543,7 @@ int tgec_set_allmulti(struct fman_mac *tgec, bool enable)
return 0;
}
-int tgec_set_tstamp(struct fman_mac *tgec, bool enable)
+static int tgec_set_tstamp(struct fman_mac *tgec, bool enable)
{
struct tgec_regs __iomem *regs = tgec->regs;
u32 tmp;
@@ -605,7 +563,8 @@ int tgec_set_tstamp(struct fman_mac *tgec, bool enable)
return 0;
}
-int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr)
+static int tgec_del_hash_mac_address(struct fman_mac *tgec,
+ enet_addr_t *eth_addr)
{
struct tgec_regs __iomem *regs = tgec->regs;
struct eth_hash_entry *hash_entry = NULL;
@@ -642,20 +601,15 @@ int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr)
return 0;
}
-int tgec_get_version(struct fman_mac *tgec, u32 *mac_version)
+static void tgec_adjust_link(struct mac_device *mac_dev)
{
- struct tgec_regs __iomem *regs = tgec->regs;
+ struct phy_device *phy_dev = mac_dev->phy_dev;
- if (!is_init_done(tgec->cfg))
- return -EINVAL;
-
- *mac_version = ioread32be(&regs->tgec_id);
-
- return 0;
+ mac_dev->update_speed(mac_dev, phy_dev->speed);
}
-int tgec_set_exception(struct fman_mac *tgec,
- enum fman_mac_exceptions exception, bool enable)
+static int tgec_set_exception(struct fman_mac *tgec,
+ enum fman_mac_exceptions exception, bool enable)
{
struct tgec_regs __iomem *regs = tgec->regs;
u32 bit_mask = 0;
@@ -681,7 +635,7 @@ int tgec_set_exception(struct fman_mac *tgec,
return 0;
}
-int tgec_init(struct fman_mac *tgec)
+static int tgec_init(struct fman_mac *tgec)
{
struct tgec_cfg *cfg;
enet_addr_t eth_addr;
@@ -764,7 +718,7 @@ int tgec_init(struct fman_mac *tgec)
return 0;
}
-int tgec_free(struct fman_mac *tgec)
+static int tgec_free(struct fman_mac *tgec)
{
free_init_resources(tgec);
@@ -774,13 +728,12 @@ int tgec_free(struct fman_mac *tgec)
return 0;
}
-struct fman_mac *tgec_config(struct fman_mac_params *params)
+static struct fman_mac *tgec_config(struct mac_device *mac_dev,
+ struct fman_mac_params *params)
{
struct fman_mac *tgec;
struct tgec_cfg *cfg;
- void __iomem *base_addr;
- base_addr = params->base_addr;
/* allocate memory for the UCC GETH data structure. */
tgec = kzalloc(sizeof(*tgec), GFP_KERNEL);
if (!tgec)
@@ -798,8 +751,8 @@ struct fman_mac *tgec_config(struct fman_mac_params *params)
set_dflts(cfg);
- tgec->regs = base_addr;
- tgec->addr = ENET_ADDR_TO_UINT64(params->addr);
+ tgec->regs = mac_dev->vaddr;
+ tgec->addr = ENET_ADDR_TO_UINT64(mac_dev->addr);
tgec->max_speed = params->max_speed;
tgec->mac_id = params->mac_id;
tgec->exceptions = (TGEC_IMASK_MDIO_SCAN_EVENT |
@@ -819,7 +772,7 @@ struct fman_mac *tgec_config(struct fman_mac_params *params)
TGEC_IMASK_RX_ALIGN_ER);
tgec->exception_cb = params->exception_cb;
tgec->event_cb = params->event_cb;
- tgec->dev_id = params->dev_id;
+ tgec->dev_id = mac_dev;
tgec->fm = params->fm;
/* Save FMan revision */
@@ -827,3 +780,52 @@ struct fman_mac *tgec_config(struct fman_mac_params *params)
return tgec;
}
+
+int tgec_initialization(struct mac_device *mac_dev,
+ struct device_node *mac_node,
+ struct fman_mac_params *params)
+{
+ int err;
+ struct fman_mac *tgec;
+
+ mac_dev->set_promisc = tgec_set_promiscuous;
+ mac_dev->change_addr = tgec_modify_mac_address;
+ mac_dev->add_hash_mac_addr = tgec_add_hash_mac_address;
+ mac_dev->remove_hash_mac_addr = tgec_del_hash_mac_address;
+ mac_dev->set_tx_pause = tgec_set_tx_pause_frames;
+ mac_dev->set_rx_pause = tgec_accept_rx_pause_frames;
+ mac_dev->set_exception = tgec_set_exception;
+ mac_dev->set_allmulti = tgec_set_allmulti;
+ mac_dev->set_tstamp = tgec_set_tstamp;
+ mac_dev->set_multi = fman_set_multi;
+ mac_dev->adjust_link = tgec_adjust_link;
+ mac_dev->enable = tgec_enable;
+ mac_dev->disable = tgec_disable;
+
+ mac_dev->fman_mac = tgec_config(mac_dev, params);
+ if (!mac_dev->fman_mac) {
+ err = -EINVAL;
+ goto _return;
+ }
+
+ tgec = mac_dev->fman_mac;
+ tgec->cfg->max_frame_length = fman_get_max_frm();
+ err = tgec_init(tgec);
+ if (err < 0)
+ goto _return_fm_mac_free;
+
+ /* For 10G MAC, disable Tx ECC exception */
+ err = tgec_set_exception(tgec, FM_MAC_EX_10G_TX_ECC_ER, false);
+ if (err < 0)
+ goto _return_fm_mac_free;
+
+ pr_info("FMan XGEC version: 0x%08x\n",
+ ioread32be(&tgec->regs->tgec_id));
+ goto _return;
+
+_return_fm_mac_free:
+ tgec_free(mac_dev->fman_mac);
+
+_return:
+ return err;
+}
diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.h b/drivers/net/ethernet/freescale/fman/fman_tgec.h
index b28b20b26148..768b8d165e05 100644
--- a/drivers/net/ethernet/freescale/fman/fman_tgec.h
+++ b/drivers/net/ethernet/freescale/fman/fman_tgec.h
@@ -1,33 +1,6 @@
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
/*
- * Copyright 2008-2015 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
*/
#ifndef __TGEC_H
@@ -35,23 +8,10 @@
#include "fman_mac.h"
-struct fman_mac *tgec_config(struct fman_mac_params *params);
-int tgec_set_promiscuous(struct fman_mac *tgec, bool new_val);
-int tgec_modify_mac_address(struct fman_mac *tgec, const enet_addr_t *enet_addr);
-int tgec_cfg_max_frame_len(struct fman_mac *tgec, u16 new_val);
-int tgec_enable(struct fman_mac *tgec, enum comm_mode mode);
-int tgec_disable(struct fman_mac *tgec, enum comm_mode mode);
-int tgec_init(struct fman_mac *tgec);
-int tgec_free(struct fman_mac *tgec);
-int tgec_accept_rx_pause_frames(struct fman_mac *tgec, bool en);
-int tgec_set_tx_pause_frames(struct fman_mac *tgec, u8 priority,
- u16 pause_time, u16 thresh_time);
-int tgec_set_exception(struct fman_mac *tgec,
- enum fman_mac_exceptions exception, bool enable);
-int tgec_add_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr);
-int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr);
-int tgec_get_version(struct fman_mac *tgec, u32 *mac_version);
-int tgec_set_allmulti(struct fman_mac *tgec, bool enable);
-int tgec_set_tstamp(struct fman_mac *tgec, bool enable);
+struct mac_device;
+
+int tgec_initialization(struct mac_device *mac_dev,
+ struct device_node *mac_node,
+ struct fman_mac_params *params);
#endif /* __TGEC_H */
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index 39ae965cd4f6..7b7526fd7da3 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -1,32 +1,6 @@
-/* Copyright 2008-2015 Freescale Semiconductor, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later
+/*
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -54,20 +28,12 @@ MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("FSL FMan MAC API based driver");
struct mac_priv_s {
- struct device *dev;
- void __iomem *vaddr;
u8 cell_index;
struct fman *fman;
- struct device_node *internal_phy_node;
/* List of multicast addresses */
struct list_head mc_addr_list;
struct platform_device *eth_dev;
- struct fixed_phy_status *fixed_link;
u16 speed;
- u16 max_speed;
-
- int (*enable)(struct fman_mac *mac_dev, enum comm_mode mode);
- int (*disable)(struct fman_mac *mac_dev, enum comm_mode mode);
};
struct mac_address {
@@ -75,222 +41,21 @@ struct mac_address {
struct list_head list;
};
-static void mac_exception(void *handle, enum fman_mac_exceptions ex)
+static void mac_exception(struct mac_device *mac_dev,
+ enum fman_mac_exceptions ex)
{
- struct mac_device *mac_dev;
- struct mac_priv_s *priv;
-
- mac_dev = handle;
- priv = mac_dev->priv;
-
if (ex == FM_MAC_EX_10G_RX_FIFO_OVFL) {
/* don't flag RX FIFO after the first */
mac_dev->set_exception(mac_dev->fman_mac,
FM_MAC_EX_10G_RX_FIFO_OVFL, false);
- dev_err(priv->dev, "10G MAC got RX FIFO Error = %x\n", ex);
+ dev_err(mac_dev->dev, "10G MAC got RX FIFO Error = %x\n", ex);
}
- dev_dbg(priv->dev, "%s:%s() -> %d\n", KBUILD_BASENAME ".c",
+ dev_dbg(mac_dev->dev, "%s:%s() -> %d\n", KBUILD_BASENAME ".c",
__func__, ex);
}
-static int set_fman_mac_params(struct mac_device *mac_dev,
- struct fman_mac_params *params)
-{
- struct mac_priv_s *priv = mac_dev->priv;
-
- params->base_addr = (typeof(params->base_addr))
- devm_ioremap(priv->dev, mac_dev->res->start,
- resource_size(mac_dev->res));
- if (!params->base_addr)
- return -ENOMEM;
-
- memcpy(&params->addr, mac_dev->addr, sizeof(mac_dev->addr));
- params->max_speed = priv->max_speed;
- params->phy_if = mac_dev->phy_if;
- params->basex_if = false;
- params->mac_id = priv->cell_index;
- params->fm = (void *)priv->fman;
- params->exception_cb = mac_exception;
- params->event_cb = mac_exception;
- params->dev_id = mac_dev;
- params->internal_phy_node = priv->internal_phy_node;
-
- return 0;
-}
-
-static int tgec_initialization(struct mac_device *mac_dev)
-{
- int err;
- struct mac_priv_s *priv;
- struct fman_mac_params params;
- u32 version;
-
- priv = mac_dev->priv;
-
- err = set_fman_mac_params(mac_dev, &params);
- if (err)
- goto _return;
-
- mac_dev->fman_mac = tgec_config(&params);
- if (!mac_dev->fman_mac) {
- err = -EINVAL;
- goto _return;
- }
-
- err = tgec_cfg_max_frame_len(mac_dev->fman_mac, fman_get_max_frm());
- if (err < 0)
- goto _return_fm_mac_free;
-
- err = tgec_init(mac_dev->fman_mac);
- if (err < 0)
- goto _return_fm_mac_free;
-
- /* For 10G MAC, disable Tx ECC exception */
- err = mac_dev->set_exception(mac_dev->fman_mac,
- FM_MAC_EX_10G_TX_ECC_ER, false);
- if (err < 0)
- goto _return_fm_mac_free;
-
- err = tgec_get_version(mac_dev->fman_mac, &version);
- if (err < 0)
- goto _return_fm_mac_free;
-
- dev_info(priv->dev, "FMan XGEC version: 0x%08x\n", version);
-
- goto _return;
-
-_return_fm_mac_free:
- tgec_free(mac_dev->fman_mac);
-
-_return:
- return err;
-}
-
-static int dtsec_initialization(struct mac_device *mac_dev)
-{
- int err;
- struct mac_priv_s *priv;
- struct fman_mac_params params;
- u32 version;
-
- priv = mac_dev->priv;
-
- err = set_fman_mac_params(mac_dev, &params);
- if (err)
- goto _return;
-
- mac_dev->fman_mac = dtsec_config(&params);
- if (!mac_dev->fman_mac) {
- err = -EINVAL;
- goto _return;
- }
-
- err = dtsec_cfg_max_frame_len(mac_dev->fman_mac, fman_get_max_frm());
- if (err < 0)
- goto _return_fm_mac_free;
-
- err = dtsec_cfg_pad_and_crc(mac_dev->fman_mac, true);
- if (err < 0)
- goto _return_fm_mac_free;
-
- err = dtsec_init(mac_dev->fman_mac);
- if (err < 0)
- goto _return_fm_mac_free;
-
- /* For 1G MAC, disable by default the MIB counters overflow interrupt */
- err = mac_dev->set_exception(mac_dev->fman_mac,
- FM_MAC_EX_1G_RX_MIB_CNT_OVFL, false);
- if (err < 0)
- goto _return_fm_mac_free;
-
- err = dtsec_get_version(mac_dev->fman_mac, &version);
- if (err < 0)
- goto _return_fm_mac_free;
-
- dev_info(priv->dev, "FMan dTSEC version: 0x%08x\n", version);
-
- goto _return;
-
-_return_fm_mac_free:
- dtsec_free(mac_dev->fman_mac);
-
-_return:
- return err;
-}
-
-static int memac_initialization(struct mac_device *mac_dev)
-{
- int err;
- struct mac_priv_s *priv;
- struct fman_mac_params params;
-
- priv = mac_dev->priv;
-
- err = set_fman_mac_params(mac_dev, &params);
- if (err)
- goto _return;
-
- if (priv->max_speed == SPEED_10000)
- params.phy_if = PHY_INTERFACE_MODE_XGMII;
-
- mac_dev->fman_mac = memac_config(&params);
- if (!mac_dev->fman_mac) {
- err = -EINVAL;
- goto _return;
- }
-
- err = memac_cfg_max_frame_len(mac_dev->fman_mac, fman_get_max_frm());
- if (err < 0)
- goto _return_fm_mac_free;
-
- err = memac_cfg_reset_on_init(mac_dev->fman_mac, true);
- if (err < 0)
- goto _return_fm_mac_free;
-
- err = memac_cfg_fixed_link(mac_dev->fman_mac, priv->fixed_link);
- if (err < 0)
- goto _return_fm_mac_free;
-
- err = memac_init(mac_dev->fman_mac);
- if (err < 0)
- goto _return_fm_mac_free;
-
- dev_info(priv->dev, "FMan MEMAC\n");
-
- goto _return;
-
-_return_fm_mac_free:
- memac_free(mac_dev->fman_mac);
-
-_return:
- return err;
-}
-
-static int start(struct mac_device *mac_dev)
-{
- int err;
- struct phy_device *phy_dev = mac_dev->phy_dev;
- struct mac_priv_s *priv = mac_dev->priv;
-
- err = priv->enable(mac_dev->fman_mac, COMM_MODE_RX_AND_TX);
- if (!err && phy_dev)
- phy_start(phy_dev);
-
- return err;
-}
-
-static int stop(struct mac_device *mac_dev)
-{
- struct mac_priv_s *priv = mac_dev->priv;
-
- if (mac_dev->phy_dev)
- phy_stop(mac_dev->phy_dev);
-
- return priv->disable(mac_dev->fman_mac, COMM_MODE_RX_AND_TX);
-}
-
-static int set_multi(struct net_device *net_dev, struct mac_device *mac_dev)
+int fman_set_multi(struct net_device *net_dev, struct mac_device *mac_dev)
{
struct mac_priv_s *priv;
struct mac_address *old_addr, *tmp;
@@ -424,109 +189,6 @@ void fman_get_pause_cfg(struct mac_device *mac_dev, bool *rx_pause,
}
EXPORT_SYMBOL(fman_get_pause_cfg);
-static void adjust_link_void(struct mac_device *mac_dev)
-{
-}
-
-static void adjust_link_dtsec(struct mac_device *mac_dev)
-{
- struct phy_device *phy_dev = mac_dev->phy_dev;
- struct fman_mac *fman_mac;
- bool rx_pause, tx_pause;
- int err;
-
- fman_mac = mac_dev->fman_mac;
- if (!phy_dev->link) {
- dtsec_restart_autoneg(fman_mac);
-
- return;
- }
-
- dtsec_adjust_link(fman_mac, phy_dev->speed);
- fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
- err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause);
- if (err < 0)
- dev_err(mac_dev->priv->dev, "fman_set_mac_active_pause() = %d\n",
- err);
-}
-
-static void adjust_link_memac(struct mac_device *mac_dev)
-{
- struct phy_device *phy_dev = mac_dev->phy_dev;
- struct fman_mac *fman_mac;
- bool rx_pause, tx_pause;
- int err;
-
- fman_mac = mac_dev->fman_mac;
- memac_adjust_link(fman_mac, phy_dev->speed);
-
- fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
- err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause);
- if (err < 0)
- dev_err(mac_dev->priv->dev, "fman_set_mac_active_pause() = %d\n",
- err);
-}
-
-static void setup_dtsec(struct mac_device *mac_dev)
-{
- mac_dev->init = dtsec_initialization;
- mac_dev->set_promisc = dtsec_set_promiscuous;
- mac_dev->change_addr = dtsec_modify_mac_address;
- mac_dev->add_hash_mac_addr = dtsec_add_hash_mac_address;
- mac_dev->remove_hash_mac_addr = dtsec_del_hash_mac_address;
- mac_dev->set_tx_pause = dtsec_set_tx_pause_frames;
- mac_dev->set_rx_pause = dtsec_accept_rx_pause_frames;
- mac_dev->set_exception = dtsec_set_exception;
- mac_dev->set_allmulti = dtsec_set_allmulti;
- mac_dev->set_tstamp = dtsec_set_tstamp;
- mac_dev->set_multi = set_multi;
- mac_dev->start = start;
- mac_dev->stop = stop;
- mac_dev->adjust_link = adjust_link_dtsec;
- mac_dev->priv->enable = dtsec_enable;
- mac_dev->priv->disable = dtsec_disable;
-}
-
-static void setup_tgec(struct mac_device *mac_dev)
-{
- mac_dev->init = tgec_initialization;
- mac_dev->set_promisc = tgec_set_promiscuous;
- mac_dev->change_addr = tgec_modify_mac_address;
- mac_dev->add_hash_mac_addr = tgec_add_hash_mac_address;
- mac_dev->remove_hash_mac_addr = tgec_del_hash_mac_address;
- mac_dev->set_tx_pause = tgec_set_tx_pause_frames;
- mac_dev->set_rx_pause = tgec_accept_rx_pause_frames;
- mac_dev->set_exception = tgec_set_exception;
- mac_dev->set_allmulti = tgec_set_allmulti;
- mac_dev->set_tstamp = tgec_set_tstamp;
- mac_dev->set_multi = set_multi;
- mac_dev->start = start;
- mac_dev->stop = stop;
- mac_dev->adjust_link = adjust_link_void;
- mac_dev->priv->enable = tgec_enable;
- mac_dev->priv->disable = tgec_disable;
-}
-
-static void setup_memac(struct mac_device *mac_dev)
-{
- mac_dev->init = memac_initialization;
- mac_dev->set_promisc = memac_set_promiscuous;
- mac_dev->change_addr = memac_modify_mac_address;
- mac_dev->add_hash_mac_addr = memac_add_hash_mac_address;
- mac_dev->remove_hash_mac_addr = memac_del_hash_mac_address;
- mac_dev->set_tx_pause = memac_set_tx_pause_frames;
- mac_dev->set_rx_pause = memac_accept_rx_pause_frames;
- mac_dev->set_exception = memac_set_exception;
- mac_dev->set_allmulti = memac_set_allmulti;
- mac_dev->set_tstamp = memac_set_tstamp;
- mac_dev->set_multi = set_multi;
- mac_dev->start = start;
- mac_dev->stop = stop;
- mac_dev->adjust_link = adjust_link_memac;
- mac_dev->priv->enable = memac_enable;
- mac_dev->priv->disable = memac_disable;
-}
-
#define DTSEC_SUPPORTED \
(SUPPORTED_10baseT_Half \
| SUPPORTED_10baseT_Full \
@@ -577,7 +239,7 @@ static struct platform_device *dpaa_eth_add_device(int fman_id,
goto no_mem;
}
- pdev->dev.parent = priv->dev;
+ pdev->dev.parent = mac_dev->dev;
ret = platform_device_add_data(pdev, &data, sizeof(data));
if (ret)
@@ -601,9 +263,9 @@ no_mem:
}
static const struct of_device_id mac_match[] = {
- { .compatible = "fsl,fman-dtsec" },
- { .compatible = "fsl,fman-xgec" },
- { .compatible = "fsl,fman-memac" },
+ { .compatible = "fsl,fman-dtsec", .data = dtsec_initialization },
+ { .compatible = "fsl,fman-xgec", .data = tgec_initialization },
+ { .compatible = "fsl,fman-memac", .data = memac_initialization },
{}
};
MODULE_DEVICE_TABLE(of, mac_match);
@@ -611,50 +273,33 @@ MODULE_DEVICE_TABLE(of, mac_match);
static int mac_probe(struct platform_device *_of_dev)
{
int err, i, nph;
+ int (*init)(struct mac_device *mac_dev, struct device_node *mac_node,
+ struct fman_mac_params *params);
struct device *dev;
struct device_node *mac_node, *dev_node;
struct mac_device *mac_dev;
struct platform_device *of_dev;
- struct resource res;
+ struct resource *res;
struct mac_priv_s *priv;
+ struct fman_mac_params params;
u32 val;
u8 fman_id;
phy_interface_t phy_if;
dev = &_of_dev->dev;
mac_node = dev->of_node;
+ init = of_device_get_match_data(dev);
mac_dev = devm_kzalloc(dev, sizeof(*mac_dev), GFP_KERNEL);
- if (!mac_dev) {
- err = -ENOMEM;
- goto _return;
- }
+ if (!mac_dev)
+ return -ENOMEM;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- err = -ENOMEM;
- goto _return;
- }
+ if (!priv)
+ return -ENOMEM;
/* Save private information */
mac_dev->priv = priv;
- priv->dev = dev;
-
- if (of_device_is_compatible(mac_node, "fsl,fman-dtsec")) {
- setup_dtsec(mac_dev);
- priv->internal_phy_node = of_parse_phandle(mac_node,
- "tbi-handle", 0);
- } else if (of_device_is_compatible(mac_node, "fsl,fman-xgec")) {
- setup_tgec(mac_dev);
- } else if (of_device_is_compatible(mac_node, "fsl,fman-memac")) {
- setup_memac(mac_dev);
- priv->internal_phy_node = of_parse_phandle(mac_node,
- "pcsphy-handle", 0);
- } else {
- dev_err(dev, "MAC node (%pOF) contains unsupported MAC\n",
- mac_node);
- err = -EINVAL;
- goto _return;
- }
+ mac_dev->dev = dev;
INIT_LIST_HEAD(&priv->mc_addr_list);
@@ -663,8 +308,7 @@ static int mac_probe(struct platform_device *_of_dev)
if (!dev_node) {
dev_err(dev, "of_get_parent(%pOF) failed\n",
mac_node);
- err = -EINVAL;
- goto _return_of_get_parent;
+ return -EINVAL;
}
of_dev = of_find_device_by_node(dev_node);
@@ -694,42 +338,33 @@ static int mac_probe(struct platform_device *_of_dev)
of_node_put(dev_node);
/* Get the address of the memory mapped registers */
- err = of_address_to_resource(mac_node, 0, &res);
- if (err < 0) {
- dev_err(dev, "of_address_to_resource(%pOF) = %d\n",
- mac_node, err);
- goto _return_of_get_parent;
+ res = platform_get_mem_or_io(_of_dev, 0);
+ if (!res) {
+ dev_err(dev, "could not get registers\n");
+ return -EINVAL;
}
- mac_dev->res = __devm_request_region(dev,
- fman_get_mem_region(priv->fman),
- res.start, resource_size(&res),
- "mac");
- if (!mac_dev->res) {
- dev_err(dev, "__devm_request_mem_region(mac) failed\n");
- err = -EBUSY;
- goto _return_of_get_parent;
+ err = devm_request_resource(dev, fman_get_mem_region(priv->fman), res);
+ if (err) {
+ dev_err_probe(dev, err, "could not request resource\n");
+ return err;
}
- priv->vaddr = devm_ioremap(dev, mac_dev->res->start,
- resource_size(mac_dev->res));
- if (!priv->vaddr) {
+ mac_dev->vaddr = devm_ioremap(dev, res->start, resource_size(res));
+ if (!mac_dev->vaddr) {
dev_err(dev, "devm_ioremap() failed\n");
- err = -EIO;
- goto _return_of_get_parent;
+ return -EIO;
}
+ mac_dev->vaddr_end = mac_dev->vaddr + resource_size(res);
- if (!of_device_is_available(mac_node)) {
- err = -ENODEV;
- goto _return_of_get_parent;
- }
+ if (!of_device_is_available(mac_node))
+ return -ENODEV;
/* Get the cell-index */
err = of_property_read_u32(mac_node, "cell-index", &val);
if (err) {
dev_err(dev, "failed to read cell-index for %pOF\n", mac_node);
- err = -EINVAL;
- goto _return_of_get_parent;
+ return -EINVAL;
}
priv->cell_index = (u8)val;
@@ -743,15 +378,13 @@ static int mac_probe(struct platform_device *_of_dev)
if (unlikely(nph < 0)) {
dev_err(dev, "of_count_phandle_with_args(%pOF, fsl,fman-ports) failed\n",
mac_node);
- err = nph;
- goto _return_of_get_parent;
+ return nph;
}
if (nph != ARRAY_SIZE(mac_dev->port)) {
dev_err(dev, "Not supported number of fman-ports handles of mac node %pOF from device tree\n",
mac_node);
- err = -EINVAL;
- goto _return_of_get_parent;
+ return -EINVAL;
}
for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
@@ -760,8 +393,7 @@ static int mac_probe(struct platform_device *_of_dev)
if (!dev_node) {
dev_err(dev, "of_parse_phandle(%pOF, fsl,fman-ports) failed\n",
mac_node);
- err = -EINVAL;
- goto _return_of_node_put;
+ return -EINVAL;
}
of_dev = of_find_device_by_node(dev_node);
@@ -793,7 +425,7 @@ static int mac_probe(struct platform_device *_of_dev)
mac_dev->phy_if = phy_if;
priv->speed = phy2speed[mac_dev->phy_if];
- priv->max_speed = priv->speed;
+ params.max_speed = priv->speed;
mac_dev->if_support = DTSEC_SUPPORTED;
/* We don't support half-duplex in SGMII mode */
if (mac_dev->phy_if == PHY_INTERFACE_MODE_SGMII)
@@ -801,7 +433,7 @@ static int mac_probe(struct platform_device *_of_dev)
SUPPORTED_100baseT_Half);
/* Gigabit support (no half-duplex) */
- if (priv->max_speed == 1000)
+ if (params.max_speed == 1000)
mac_dev->if_support |= SUPPORTED_1000baseT_Full;
/* The 10G interface only supports one mode */
@@ -810,42 +442,18 @@ static int mac_probe(struct platform_device *_of_dev)
/* Get the rest of the PHY information */
mac_dev->phy_node = of_parse_phandle(mac_node, "phy-handle", 0);
- if (!mac_dev->phy_node && of_phy_is_fixed_link(mac_node)) {
- struct phy_device *phy;
- err = of_phy_register_fixed_link(mac_node);
- if (err)
- goto _return_of_get_parent;
+ params.basex_if = false;
+ params.mac_id = priv->cell_index;
+ params.fm = (void *)priv->fman;
+ params.exception_cb = mac_exception;
+ params.event_cb = mac_exception;
- priv->fixed_link = kzalloc(sizeof(*priv->fixed_link),
- GFP_KERNEL);
- if (!priv->fixed_link) {
- err = -ENOMEM;
- goto _return_of_get_parent;
- }
-
- mac_dev->phy_node = of_node_get(mac_node);
- phy = of_phy_find_device(mac_dev->phy_node);
- if (!phy) {
- err = -EINVAL;
- of_node_put(mac_dev->phy_node);
- goto _return_of_get_parent;
- }
-
- priv->fixed_link->link = phy->link;
- priv->fixed_link->speed = phy->speed;
- priv->fixed_link->duplex = phy->duplex;
- priv->fixed_link->pause = phy->pause;
- priv->fixed_link->asym_pause = phy->asym_pause;
-
- put_device(&phy->mdio.dev);
- }
-
- err = mac_dev->init(mac_dev);
+ err = init(mac_dev, mac_node, &params);
if (err < 0) {
dev_err(dev, "mac_dev->init() = %d\n", err);
of_node_put(mac_dev->phy_node);
- goto _return_of_get_parent;
+ return err;
}
/* pause frame autonegotiation enabled */
@@ -872,13 +480,10 @@ static int mac_probe(struct platform_device *_of_dev)
priv->eth_dev = NULL;
}
- goto _return;
+ return err;
_return_of_node_put:
of_node_put(dev_node);
-_return_of_get_parent:
- kfree(priv->fixed_link);
-_return:
return err;
}
diff --git a/drivers/net/ethernet/freescale/fman/mac.h b/drivers/net/ethernet/freescale/fman/mac.h
index daa285a9b8b2..b95d384271bd 100644
--- a/drivers/net/ethernet/freescale/fman/mac.h
+++ b/drivers/net/ethernet/freescale/fman/mac.h
@@ -1,32 +1,6 @@
-/* Copyright 2008-2015 Freescale Semiconductor, Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-or-later */
+/*
+ * Copyright 2008 - 2015 Freescale Semiconductor Inc.
*/
#ifndef __MAC_H
@@ -45,13 +19,16 @@ struct fman_mac;
struct mac_priv_s;
struct mac_device {
- struct resource *res;
+ void __iomem *vaddr;
+ void __iomem *vaddr_end;
+ struct device *dev;
u8 addr[ETH_ALEN];
struct fman_port *port[2];
u32 if_support;
struct phy_device *phy_dev;
phy_interface_t phy_if;
struct device_node *phy_node;
+ struct net_device *net_dev;
bool autoneg_pause;
bool rx_pause_req;
@@ -61,9 +38,8 @@ struct mac_device {
bool promisc;
bool allmulti;
- int (*init)(struct mac_device *mac_dev);
- int (*start)(struct mac_device *mac_dev);
- int (*stop)(struct mac_device *mac_dev);
+ int (*enable)(struct fman_mac *mac_dev);
+ void (*disable)(struct fman_mac *mac_dev);
void (*adjust_link)(struct mac_device *mac_dev);
int (*set_promisc)(struct fman_mac *mac_dev, bool enable);
int (*change_addr)(struct fman_mac *mac_dev, const enet_addr_t *enet_addr);
@@ -81,6 +57,8 @@ struct mac_device {
int (*remove_hash_mac_addr)(struct fman_mac *mac_dev,
enet_addr_t *eth_addr);
+ void (*update_speed)(struct mac_device *mac_dev, int speed);
+
struct fman_mac *fman_mac;
struct mac_priv_s *priv;
};
@@ -97,5 +75,6 @@ int fman_set_mac_active_pause(struct mac_device *mac_dev, bool rx, bool tx);
void fman_get_pause_cfg(struct mac_device *mac_dev, bool *rx_pause,
bool *tx_pause);
+int fman_set_multi(struct net_device *net_dev, struct mac_device *mac_dev);
#endif /* __MAC_H */
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index b3dae17e067e..8844a9a04fcf 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -791,7 +791,7 @@ static int fs_enet_close(struct net_device *dev)
static void fs_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strscpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
}
static int fs_get_regs_len(struct net_device *dev)
@@ -883,9 +883,6 @@ static const struct ethtool_ops fs_ethtool_ops = {
.set_tunable = fs_set_tunable,
};
-extern int fs_mii_connect(struct net_device *dev);
-extern void fs_mii_disconnect(struct net_device *dev);
-
/**************************************************************************************/
#ifdef CONFIG_FS_ENET_HAS_FEC
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
index 99fe2c210d0f..61f4b6e50d29 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
@@ -98,7 +98,7 @@ static int do_pd_setup(struct fs_enet_private *fep)
return -EINVAL;
fep->fec.fecp = of_iomap(ofdev->dev.of_node, 0);
- if (!fep->fcc.fccp)
+ if (!fep->fec.fecp)
return -EINVAL;
return 0;
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index e7bf1524b68e..b2def295523a 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -3233,7 +3233,7 @@ static int gfar_probe(struct platform_device *ofdev)
/* Register for napi ...We are registering NAPI for each grp */
for (i = 0; i < priv->num_grps; i++) {
netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
- gfar_poll_rx_sq, NAPI_POLL_WEIGHT);
+ gfar_poll_rx_sq);
netif_napi_add_tx_weight(dev, &priv->gfargrp[i].napi_tx,
gfar_poll_tx_sq, 2);
}
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 81fb68730138..b2b0d3c26fcc 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -163,7 +163,7 @@ static int gfar_sset_count(struct net_device *dev, int sset)
static void gfar_gdrvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
- strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
}
/* Return the length of the register structure */
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 823221c912ab..7a4cb4f07c32 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -3712,7 +3712,7 @@ static int ucc_geth_probe(struct platform_device* ofdev)
dev->netdev_ops = &ucc_geth_netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
INIT_WORK(&ugeth->timeout_work, ucc_geth_timeout_work);
- netif_napi_add(dev, &ugeth->napi, ucc_geth_poll, 64);
+ netif_napi_add(dev, &ugeth->napi, ucc_geth_poll);
dev->mtu = 1500;
dev->max_mtu = 1518;
diff --git a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
index 69b2b98b1525..601beb93d3b3 100644
--- a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
+++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
@@ -337,8 +337,8 @@ static void
uec_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
- strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->bus_info, "QUICC ENGINE", sizeof(drvinfo->bus_info));
+ strscpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->bus_info, "QUICC ENGINE", sizeof(drvinfo->bus_info));
}
#ifdef CONFIG_PM
diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c
index ec90da1de030..d7d39a58cd80 100644
--- a/drivers/net/ethernet/freescale/xgmac_mdio.c
+++ b/drivers/net/ethernet/freescale/xgmac_mdio.c
@@ -355,7 +355,7 @@ static int xgmac_mdio_probe(struct platform_device *pdev)
if (ret)
return ret;
- fwnode = pdev->dev.fwnode;
+ fwnode = dev_fwnode(&pdev->dev);
if (is_of_node(fwnode))
ret = of_mdiobus_register(bus, to_of_node(fwnode));
else if (is_acpi_node(fwnode))
diff --git a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
index b0d733e9a7c6..4859493471db 100644
--- a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
+++ b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
@@ -1046,8 +1046,8 @@ static void fjn_rx(struct net_device *dev)
static void netdev_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
snprintf(info->bus_info, sizeof(info->bus_info),
"PCMCIA 0x%lx", dev->base_addr);
}
diff --git a/drivers/net/ethernet/fungible/funeth/funeth_main.c b/drivers/net/ethernet/fungible/funeth/funeth_main.c
index f247b7ad3a88..095f51c4d9d9 100644
--- a/drivers/net/ethernet/fungible/funeth/funeth_main.c
+++ b/drivers/net/ethernet/fungible/funeth/funeth_main.c
@@ -339,8 +339,7 @@ static int fun_alloc_queue_irqs(struct net_device *dev, unsigned int ntx,
return PTR_ERR(irq);
fp->num_rx_irqs++;
- netif_napi_add(dev, &irq->napi, fun_rxq_napi_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(dev, &irq->napi, fun_rxq_napi_poll);
}
netif_info(fp, intr, dev, "Reserved %u/%u IRQs for Tx/Rx queues\n",
@@ -1802,16 +1801,14 @@ static int fun_create_netdev(struct fun_ethdev *ed, unsigned int portid)
if (rc)
goto unreg_devlink;
- if (fp->dl_port.devlink)
- devlink_port_type_eth_set(&fp->dl_port, netdev);
+ devlink_port_type_eth_set(&fp->dl_port, netdev);
return 0;
unreg_devlink:
ed->netdevs[portid] = NULL;
fun_ktls_cleanup(fp);
- if (fp->dl_port.devlink)
- devlink_port_unregister(&fp->dl_port);
+ devlink_port_unregister(&fp->dl_port);
free_stats:
fun_free_stats_area(fp);
free_rss:
@@ -1830,11 +1827,9 @@ static void fun_destroy_netdev(struct net_device *netdev)
struct funeth_priv *fp;
fp = netdev_priv(netdev);
- if (fp->dl_port.devlink) {
- devlink_port_type_clear(&fp->dl_port);
- devlink_port_unregister(&fp->dl_port);
- }
+ devlink_port_type_clear(&fp->dl_port);
unregister_netdev(netdev);
+ devlink_port_unregister(&fp->dl_port);
fun_ktls_cleanup(fp);
fun_free_stats_area(fp);
fun_free_rss(fp);
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 044db3ebb071..d3e3ac242bfc 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -526,8 +526,7 @@ static void gve_add_napi(struct gve_priv *priv, int ntfy_idx,
{
struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
- netif_napi_add(priv->dev, &block->napi, gve_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(priv->dev, &block->napi, gve_poll);
}
static void gve_remove_napi(struct gve_priv *priv, int ntfy_idx)
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index c84ef494bd60..50c3f5d6611f 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -830,8 +830,8 @@ static int hip04_set_coalesce(struct net_device *netdev,
static void hip04_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
- strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
+ strscpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strscpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
}
static const struct ethtool_ops hip04_ethtool_ops = {
@@ -990,7 +990,7 @@ static int hip04_mac_probe(struct platform_device *pdev)
ndev->watchdog_timeo = TX_TIMEOUT;
ndev->priv_flags |= IFF_UNICAST_FLT;
ndev->irq = irq;
- netif_napi_add(ndev, &priv->napi, hip04_rx_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(ndev, &priv->napi, hip04_rx_poll);
hip04_reset_dreq(priv);
hip04_reset_ppe(priv);
diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
index d7e62eca050f..ffcf797dfa90 100644
--- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
@@ -1243,7 +1243,7 @@ static int hix5hd2_dev_probe(struct platform_device *pdev)
if (ret)
goto out_phy_node;
- netif_napi_add(ndev, &priv->napi, hix5hd2_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(ndev, &priv->napi, hix5hd2_poll);
if (HAS_CAP_TSO(priv->hw_cap)) {
ret = hix5hd2_init_sg_desc_queue(priv);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index d94cc8c6681f..7cf10d1e2b31 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -2109,8 +2109,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
rd->fini_process = is_ver1 ? hns_nic_tx_fini_pro :
hns_nic_tx_fini_pro_v2;
- netif_napi_add(priv->netdev, &rd->napi,
- hns_nic_common_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(priv->netdev, &rd->napi, hns_nic_common_poll);
rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
}
for (i = h->q_num; i < h->q_num * 2; i++) {
@@ -2122,8 +2121,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
rd->fini_process = is_ver1 ? hns_nic_rx_fini_pro :
hns_nic_rx_fini_pro_v2;
- netif_napi_add(priv->netdev, &rd->napi,
- hns_nic_common_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(priv->netdev, &rd->napi, hns_nic_common_poll);
rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
index 7d4ae467f3ad..abcd7877f7d2 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
@@ -233,6 +233,17 @@ struct hclgevf_mbx_arq_ring {
__le16 msg_q[HCLGE_MBX_MAX_ARQ_MSG_NUM][HCLGE_MBX_MAX_ARQ_MSG_SIZE];
};
+struct hclge_dev;
+
+#define HCLGE_MBX_OPCODE_MAX 256
+struct hclge_mbx_ops_param {
+ struct hclge_vport *vport;
+ struct hclge_mbx_vf_to_pf_cmd *req;
+ struct hclge_respond_to_vf_msg *resp_msg;
+};
+
+typedef int (*hclge_mbx_ops_fn)(struct hclge_mbx_ops_param *param);
+
#define hclge_mbx_ring_ptr_move_crq(crq) \
(crq->next_to_use = (crq->next_to_use + 1) % crq->desc_num)
#define hclge_mbx_tail_ptr_move_arq(arq) \
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 94f80e1c4020..0179fc288f5f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -97,13 +97,15 @@ enum HNAE3_DEV_CAP_BITS {
HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B,
HNAE3_DEV_SUPPORT_MC_MAC_MNG_B,
HNAE3_DEV_SUPPORT_CQ_B,
+ HNAE3_DEV_SUPPORT_FEC_STATS_B,
+ HNAE3_DEV_SUPPORT_LANE_NUM_B,
};
-#define hnae3_dev_fd_supported(hdev) \
- test_bit(HNAE3_DEV_SUPPORT_FD_B, (hdev)->ae_dev->caps)
+#define hnae3_ae_dev_fd_supported(ae_dev) \
+ test_bit(HNAE3_DEV_SUPPORT_FD_B, (ae_dev)->caps)
-#define hnae3_dev_gro_supported(hdev) \
- test_bit(HNAE3_DEV_SUPPORT_GRO_B, (hdev)->ae_dev->caps)
+#define hnae3_ae_dev_gro_supported(ae_dev) \
+ test_bit(HNAE3_DEV_SUPPORT_GRO_B, (ae_dev)->caps)
#define hnae3_dev_fec_supported(hdev) \
test_bit(HNAE3_DEV_SUPPORT_FEC_B, (hdev)->ae_dev->caps)
@@ -159,6 +161,12 @@ enum HNAE3_DEV_CAP_BITS {
#define hnae3_ae_dev_cq_supported(ae_dev) \
test_bit(HNAE3_DEV_SUPPORT_CQ_B, (ae_dev)->caps)
+#define hnae3_ae_dev_fec_stats_supported(ae_dev) \
+ test_bit(HNAE3_DEV_SUPPORT_FEC_STATS_B, (ae_dev)->caps)
+
+#define hnae3_ae_dev_lane_num_supported(ae_dev) \
+ test_bit(HNAE3_DEV_SUPPORT_LANE_NUM_B, (ae_dev)->caps)
+
enum HNAE3_PF_CAP_BITS {
HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B = 0,
};
@@ -187,6 +195,7 @@ struct hns3_mac_stats {
/* hnae3 loop mode */
enum hnae3_loop {
+ HNAE3_LOOP_EXTERNAL,
HNAE3_LOOP_APP,
HNAE3_LOOP_SERIAL_SERDES,
HNAE3_LOOP_PARALLEL_SERDES,
@@ -223,6 +232,8 @@ enum hnae3_fec_mode {
HNAE3_FEC_AUTO = 0,
HNAE3_FEC_BASER,
HNAE3_FEC_RS,
+ HNAE3_FEC_LLRS,
+ HNAE3_FEC_NONE,
HNAE3_FEC_USER_DEF,
};
@@ -270,6 +281,7 @@ enum hnae3_dbg_cmd {
HNAE3_DBG_CMD_TC_SCH_INFO,
HNAE3_DBG_CMD_QOS_PAUSE_CFG,
HNAE3_DBG_CMD_QOS_PRI_MAP,
+ HNAE3_DBG_CMD_QOS_DSCP_MAP,
HNAE3_DBG_CMD_QOS_BUF_CFG,
HNAE3_DBG_CMD_DEV_INFO,
HNAE3_DBG_CMD_TX_BD,
@@ -308,6 +320,11 @@ enum hnae3_dbg_cmd {
HNAE3_DBG_CMD_UNKNOWN,
};
+enum hnae3_tc_map_mode {
+ HNAE3_TC_MAP_MODE_PRIO,
+ HNAE3_TC_MAP_MODE_DSCP,
+};
+
struct hnae3_vector_info {
u8 __iomem *io_addr;
int vector;
@@ -560,14 +577,17 @@ struct hnae3_ae_ops {
void (*client_stop)(struct hnae3_handle *handle);
int (*get_status)(struct hnae3_handle *handle);
void (*get_ksettings_an_result)(struct hnae3_handle *handle,
- u8 *auto_neg, u32 *speed, u8 *duplex);
+ u8 *auto_neg, u32 *speed, u8 *duplex,
+ u32 *lane_num);
int (*cfg_mac_speed_dup_h)(struct hnae3_handle *handle, int speed,
- u8 duplex);
+ u8 duplex, u8 lane_num);
void (*get_media_type)(struct hnae3_handle *handle, u8 *media_type,
u8 *module_type);
int (*check_port_speed)(struct hnae3_handle *handle, u32 speed);
+ void (*get_fec_stats)(struct hnae3_handle *handle,
+ struct ethtool_fec_stats *fec_stats);
void (*get_fec)(struct hnae3_handle *handle, u8 *fec_ability,
u8 *fec_mode);
int (*set_fec)(struct hnae3_handle *handle, u32 fec_mode);
@@ -737,6 +757,8 @@ struct hnae3_ae_ops {
int (*get_link_diagnosis_info)(struct hnae3_handle *handle,
u32 *status_code);
void (*clean_vf_config)(struct hnae3_ae_dev *ae_dev, int num_vfs);
+ int (*get_dscp_prio)(struct hnae3_handle *handle, u8 dscp,
+ u8 *tc_map_mode, u8 *priority);
};
struct hnae3_dcb_ops {
@@ -745,6 +767,8 @@ struct hnae3_dcb_ops {
int (*ieee_setets)(struct hnae3_handle *, struct ieee_ets *);
int (*ieee_getpfc)(struct hnae3_handle *, struct ieee_pfc *);
int (*ieee_setpfc)(struct hnae3_handle *, struct ieee_pfc *);
+ int (*ieee_setapp)(struct hnae3_handle *h, struct dcb_app *app);
+ int (*ieee_delapp)(struct hnae3_handle *h, struct dcb_app *app);
/* DCBX configuration */
u8 (*getdcbx)(struct hnae3_handle *);
@@ -774,6 +798,8 @@ struct hnae3_tc_info {
bool mqprio_active;
};
+#define HNAE3_MAX_DSCP 64
+#define HNAE3_PRIO_ID_INVALID 0xff
struct hnae3_knic_private_info {
struct net_device *netdev; /* Set by KNIC client when init instance */
u16 rss_size; /* Allocated RSS queues */
@@ -784,6 +810,9 @@ struct hnae3_knic_private_info {
u32 tx_spare_buf_size;
struct hnae3_tc_info tc_info;
+ u8 tc_map_mode;
+ u8 dscp_app_cnt;
+ u8 dscp_prio[HNAE3_MAX_DSCP];
u16 num_tqps; /* total number of TQPs in this handle */
struct hnae3_queue **tqp; /* array base of all TQPs in this instance */
@@ -815,6 +844,7 @@ struct hnae3_roce_private_info {
#define HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK BIT(2)
#define HNAE3_SUPPORT_VF BIT(3)
#define HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK BIT(4)
+#define HNAE3_SUPPORT_EXTERNAL_LOOPBACK BIT(5)
#define HNAE3_USER_UPE BIT(0) /* unicast promisc enabled by user */
#define HNAE3_USER_MPE BIT(1) /* multicast promisc enabled by user */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
index c8b151d29f53..f671a63cecde 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c
@@ -52,9 +52,9 @@ void hclge_comm_cmd_reuse_desc(struct hclge_desc *desc, bool is_read)
static void hclge_comm_set_default_capability(struct hnae3_ae_dev *ae_dev,
bool is_pf)
{
- set_bit(HNAE3_DEV_SUPPORT_FD_B, ae_dev->caps);
set_bit(HNAE3_DEV_SUPPORT_GRO_B, ae_dev->caps);
- if (is_pf && ae_dev->dev_version == HNAE3_DEVICE_VERSION_V2) {
+ if (is_pf) {
+ set_bit(HNAE3_DEV_SUPPORT_FD_B, ae_dev->caps);
set_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps);
set_bit(HNAE3_DEV_SUPPORT_PAUSE_B, ae_dev->caps);
}
@@ -91,6 +91,7 @@ int hclge_comm_firmware_compat_config(struct hnae3_ae_dev *ae_dev,
hnae3_set_bit(compat, HCLGE_COMM_PHY_IMP_EN_B, 1);
hnae3_set_bit(compat, HCLGE_COMM_MAC_STATS_EXT_EN_B, 1);
hnae3_set_bit(compat, HCLGE_COMM_SYNC_RX_RING_HEAD_EN_B, 1);
+ hnae3_set_bit(compat, HCLGE_COMM_LLRS_FEC_EN_B, 1);
req->compat = cpu_to_le32(compat);
}
@@ -150,6 +151,10 @@ static const struct hclge_comm_caps_bit_map hclge_pf_cmd_caps[] = {
HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B},
{HCLGE_COMM_CAP_PORT_VLAN_BYPASS_B, HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B},
{HCLGE_COMM_CAP_CQ_B, HNAE3_DEV_SUPPORT_CQ_B},
+ {HCLGE_COMM_CAP_GRO_B, HNAE3_DEV_SUPPORT_GRO_B},
+ {HCLGE_COMM_CAP_FD_B, HNAE3_DEV_SUPPORT_FD_B},
+ {HCLGE_COMM_CAP_FEC_STATS_B, HNAE3_DEV_SUPPORT_FEC_STATS_B},
+ {HCLGE_COMM_CAP_LANE_NUM_B, HNAE3_DEV_SUPPORT_LANE_NUM_B},
};
static const struct hclge_comm_caps_bit_map hclge_vf_cmd_caps[] = {
@@ -162,6 +167,7 @@ static const struct hclge_comm_caps_bit_map hclge_vf_cmd_caps[] = {
{HCLGE_COMM_CAP_TX_PUSH_B, HNAE3_DEV_SUPPORT_TX_PUSH_B},
{HCLGE_COMM_CAP_RXD_ADV_LAYOUT_B, HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B},
{HCLGE_COMM_CAP_CQ_B, HNAE3_DEV_SUPPORT_CQ_B},
+ {HCLGE_COMM_CAP_GRO_B, HNAE3_DEV_SUPPORT_GRO_B},
};
static void
@@ -220,8 +226,10 @@ int hclge_comm_cmd_query_version_and_capability(struct hnae3_ae_dev *ae_dev,
HNAE3_PCI_REVISION_BIT_SIZE;
ae_dev->dev_version |= ae_dev->pdev->revision;
- if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
+ if (ae_dev->dev_version == HNAE3_DEVICE_VERSION_V2) {
hclge_comm_set_default_capability(ae_dev, is_pf);
+ return 0;
+ }
hclge_comm_parse_capability(ae_dev, is_pf, resp);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h
index 7a7d4cf9bf35..b1f9383b418f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.h
@@ -20,6 +20,7 @@
#define HCLGE_COMM_PHY_IMP_EN_B 2
#define HCLGE_COMM_MAC_STATS_EXT_EN_B 3
#define HCLGE_COMM_SYNC_RX_RING_HEAD_EN_B 4
+#define HCLGE_COMM_LLRS_FEC_EN_B 5
#define hclge_comm_dev_phy_imp_supported(ae_dev) \
test_bit(HNAE3_DEV_SUPPORT_PHY_IMP_B, (ae_dev)->caps)
@@ -102,6 +103,7 @@ enum hclge_opcode_type {
HCLGE_OPC_MAC_TNL_INT_EN = 0x0311,
HCLGE_OPC_CLEAR_MAC_TNL_INT = 0x0312,
HCLGE_OPC_COMMON_LOOPBACK = 0x0315,
+ HCLGE_OPC_QUERY_FEC_STATS = 0x0316,
HCLGE_OPC_CONFIG_FEC_MODE = 0x031A,
HCLGE_OPC_QUERY_ROH_TYPE_INFO = 0x0389,
@@ -339,6 +341,10 @@ enum HCLGE_COMM_CAP_BITS {
HCLGE_COMM_CAP_RXD_ADV_LAYOUT_B = 15,
HCLGE_COMM_CAP_PORT_VLAN_BYPASS_B = 17,
HCLGE_COMM_CAP_CQ_B = 18,
+ HCLGE_COMM_CAP_GRO_B = 20,
+ HCLGE_COMM_CAP_FD_B = 21,
+ HCLGE_COMM_CAP_FEC_STATS_B = 25,
+ HCLGE_COMM_CAP_LANE_NUM_B = 27,
};
enum HCLGE_COMM_API_CAP_BITS {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
index d2ec4c573bf8..3b6dbf158b98 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
@@ -56,6 +56,32 @@ static int hns3_dcbnl_ieee_setpfc(struct net_device *ndev, struct ieee_pfc *pfc)
return -EOPNOTSUPP;
}
+static int hns3_dcbnl_ieee_setapp(struct net_device *ndev, struct dcb_app *app)
+{
+ struct hnae3_handle *h = hns3_get_handle(ndev);
+
+ if (hns3_nic_resetting(ndev))
+ return -EBUSY;
+
+ if (h->kinfo.dcb_ops->ieee_setapp)
+ return h->kinfo.dcb_ops->ieee_setapp(h, app);
+
+ return -EOPNOTSUPP;
+}
+
+static int hns3_dcbnl_ieee_delapp(struct net_device *ndev, struct dcb_app *app)
+{
+ struct hnae3_handle *h = hns3_get_handle(ndev);
+
+ if (hns3_nic_resetting(ndev))
+ return -EBUSY;
+
+ if (h->kinfo.dcb_ops->ieee_delapp)
+ return h->kinfo.dcb_ops->ieee_delapp(h, app);
+
+ return -EOPNOTSUPP;
+}
+
/* DCBX configuration */
static u8 hns3_dcbnl_getdcbx(struct net_device *ndev)
{
@@ -83,6 +109,8 @@ static const struct dcbnl_rtnl_ops hns3_dcbnl_ops = {
.ieee_setets = hns3_dcbnl_ieee_setets,
.ieee_getpfc = hns3_dcbnl_ieee_getpfc,
.ieee_setpfc = hns3_dcbnl_ieee_setpfc,
+ .ieee_setapp = hns3_dcbnl_ieee_setapp,
+ .ieee_delapp = hns3_dcbnl_ieee_delapp,
.getdcbx = hns3_dcbnl_getdcbx,
.setdcbx = hns3_dcbnl_setdcbx,
};
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index 93aeb615191d..66feb23f7b7b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -105,6 +105,13 @@ static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = {
.buf_len = HNS3_DBG_READ_LEN,
.init = hns3_dbg_common_file_init,
},
+ {
+ .name = "qos_dscp_map",
+ .cmd = HNAE3_DBG_CMD_QOS_DSCP_MAP,
+ .dentry = HNS3_DBG_DENTRY_TM,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
{
.name = "qos_buf_cfg",
.cmd = HNAE3_DBG_CMD_QOS_BUF_CFG,
@@ -395,6 +402,12 @@ static struct hns3_dbg_cap_info hns3_dbg_cap[] = {
}, {
.name = "support modify vlan filter state",
.cap_bit = HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B,
+ }, {
+ .name = "support FEC statistics",
+ .cap_bit = HNAE3_DEV_SUPPORT_FEC_STATS_B,
+ }, {
+ .name = "support lane num",
+ .cap_bit = HNAE3_DEV_SUPPORT_LANE_NUM_B,
}
};
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 35d70041b9e8..4cb2421e71a7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -2963,6 +2963,48 @@ static int hns3_nic_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
return h->ae_algo->ops->set_vf_mac(h, vf_id, mac);
}
+#define HNS3_INVALID_DSCP 0xff
+#define HNS3_DSCP_SHIFT 2
+
+static u8 hns3_get_skb_dscp(struct sk_buff *skb)
+{
+ __be16 protocol = skb->protocol;
+ u8 dscp = HNS3_INVALID_DSCP;
+
+ if (protocol == htons(ETH_P_8021Q))
+ protocol = vlan_get_protocol(skb);
+
+ if (protocol == htons(ETH_P_IP))
+ dscp = ipv4_get_dsfield(ip_hdr(skb)) >> HNS3_DSCP_SHIFT;
+ else if (protocol == htons(ETH_P_IPV6))
+ dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> HNS3_DSCP_SHIFT;
+
+ return dscp;
+}
+
+static u16 hns3_nic_select_queue(struct net_device *netdev,
+ struct sk_buff *skb,
+ struct net_device *sb_dev)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ u8 dscp;
+
+ if (h->kinfo.tc_map_mode != HNAE3_TC_MAP_MODE_DSCP ||
+ !h->ae_algo->ops->get_dscp_prio)
+ goto out;
+
+ dscp = hns3_get_skb_dscp(skb);
+ if (unlikely(dscp >= HNAE3_MAX_DSCP))
+ goto out;
+
+ skb->priority = h->kinfo.dscp_prio[dscp];
+ if (skb->priority == HNAE3_PRIO_ID_INVALID)
+ skb->priority = 0;
+
+out:
+ return netdev_pick_tx(netdev, skb, sb_dev);
+}
+
static const struct net_device_ops hns3_nic_netdev_ops = {
.ndo_open = hns3_nic_net_open,
.ndo_stop = hns3_nic_net_stop,
@@ -2988,6 +3030,7 @@ static const struct net_device_ops hns3_nic_netdev_ops = {
.ndo_set_vf_link_state = hns3_nic_set_vf_link_state,
.ndo_set_vf_rate = hns3_nic_set_vf_rate,
.ndo_set_vf_mac = hns3_nic_set_vf_mac,
+ .ndo_select_queue = hns3_nic_select_queue,
};
bool hns3_is_phys_func(struct pci_dev *pdev)
@@ -3271,12 +3314,11 @@ static void hns3_set_default_feature(struct net_device *netdev)
NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_SCTP_CRC | NETIF_F_FRAGLIST;
- if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
+ if (hnae3_ae_dev_gro_supported(ae_dev))
netdev->features |= NETIF_F_GRO_HW;
- if (!(h->flags & HNAE3_SUPPORT_VF))
- netdev->features |= NETIF_F_NTUPLE;
- }
+ if (hnae3_ae_dev_fd_supported(ae_dev))
+ netdev->features |= NETIF_F_NTUPLE;
if (test_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B, ae_dev->caps))
netdev->features |= NETIF_F_GSO_UDP_L4;
@@ -4650,7 +4692,7 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
goto map_ring_fail;
netif_napi_add(priv->netdev, &tqp_vector->napi,
- hns3_nic_common_poll, NAPI_POLL_WEIGHT);
+ hns3_nic_common_poll);
}
return 0;
@@ -5782,6 +5824,57 @@ int hns3_set_channels(struct net_device *netdev,
return 0;
}
+void hns3_external_lb_prepare(struct net_device *ndev, bool if_running)
+{
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct hnae3_handle *h = priv->ae_handle;
+ int i;
+
+ if (!if_running)
+ return;
+
+ netif_carrier_off(ndev);
+ netif_tx_disable(ndev);
+
+ for (i = 0; i < priv->vector_num; i++)
+ hns3_vector_disable(&priv->tqp_vector[i]);
+
+ for (i = 0; i < h->kinfo.num_tqps; i++)
+ hns3_tqp_disable(h->kinfo.tqp[i]);
+
+ /* delay ring buffer clearing to hns3_reset_notify_uninit_enet
+ * during the reset process, because the driver may not be able
+ * to disable the ring through firmware while the netdev is going down.
+ */
+ if (!hns3_nic_resetting(ndev))
+ hns3_nic_reset_all_ring(priv->ae_handle);
+
+ hns3_reset_tx_queue(priv->ae_handle);
+}
+
+void hns3_external_lb_restore(struct net_device *ndev, bool if_running)
+{
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct hnae3_handle *h = priv->ae_handle;
+ int i;
+
+ if (!if_running)
+ return;
+
+ hns3_nic_reset_all_ring(priv->ae_handle);
+
+ for (i = 0; i < priv->vector_num; i++)
+ hns3_vector_enable(&priv->tqp_vector[i]);
+
+ for (i = 0; i < h->kinfo.num_tqps; i++)
+ hns3_tqp_enable(h->kinfo.tqp[i]);
+
+ netif_tx_wake_all_queues(ndev);
+
+ if (h->ae_algo->ops->get_status(h))
+ netif_carrier_on(ndev);
+}
+
static const struct hns3_hw_error_info hns3_hw_err[] = {
{ .type = HNAE3_PPU_POISON_ERROR,
.msg = "PPU poison" },
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index 4a3253692dcc..133a054af6b7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -744,4 +744,7 @@ u16 hns3_get_max_available_channels(struct hnae3_handle *h);
void hns3_cq_period_mode_init(struct hns3_nic_priv *priv,
enum dim_cq_period_mode tx_mode,
enum dim_cq_period_mode rx_mode);
+
+void hns3_external_lb_prepare(struct net_device *ndev, bool if_running);
+void hns3_external_lb_restore(struct net_device *ndev, bool if_running);
#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index 4c7988e308a2..cdf76fb58d45 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -69,7 +69,6 @@ static const struct hns3_stats hns3_rxq_stats[] = {
#define HNS3_TQP_STATS_COUNT (HNS3_TXQ_STATS_COUNT + HNS3_RXQ_STATS_COUNT)
-#define HNS3_SELF_TEST_TYPE_NUM 4
#define HNS3_NIC_LB_TEST_PKT_NUM 1
#define HNS3_NIC_LB_TEST_RING_ID 0
#define HNS3_NIC_LB_TEST_PACKET_SIZE 128
@@ -95,6 +94,7 @@ static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en)
case HNAE3_LOOP_PARALLEL_SERDES:
case HNAE3_LOOP_APP:
case HNAE3_LOOP_PHY:
+ case HNAE3_LOOP_EXTERNAL:
ret = h->ae_algo->ops->set_loopback(h, loop, en);
break;
default:
@@ -304,6 +304,10 @@ out:
static void hns3_set_selftest_param(struct hnae3_handle *h, int (*st_param)[2])
{
+ st_param[HNAE3_LOOP_EXTERNAL][0] = HNAE3_LOOP_EXTERNAL;
+ st_param[HNAE3_LOOP_EXTERNAL][1] =
+ h->flags & HNAE3_SUPPORT_EXTERNAL_LOOPBACK;
+
st_param[HNAE3_LOOP_APP][0] = HNAE3_LOOP_APP;
st_param[HNAE3_LOOP_APP][1] =
h->flags & HNAE3_SUPPORT_APP_LOOPBACK;
@@ -322,17 +326,11 @@ static void hns3_set_selftest_param(struct hnae3_handle *h, int (*st_param)[2])
h->flags & HNAE3_SUPPORT_PHY_LOOPBACK;
}
-static void hns3_selftest_prepare(struct net_device *ndev,
- bool if_running, int (*st_param)[2])
+static void hns3_selftest_prepare(struct net_device *ndev, bool if_running)
{
struct hns3_nic_priv *priv = netdev_priv(ndev);
struct hnae3_handle *h = priv->ae_handle;
- if (netif_msg_ifdown(h))
- netdev_info(ndev, "self test start\n");
-
- hns3_set_selftest_param(h, st_param);
-
if (if_running)
ndev->netdev_ops->ndo_stop(ndev);
@@ -371,18 +369,15 @@ static void hns3_selftest_restore(struct net_device *ndev, bool if_running)
if (if_running)
ndev->netdev_ops->ndo_open(ndev);
-
- if (netif_msg_ifdown(h))
- netdev_info(ndev, "self test end\n");
}
static void hns3_do_selftest(struct net_device *ndev, int (*st_param)[2],
struct ethtool_test *eth_test, u64 *data)
{
- int test_index = 0;
+ int test_index = HNAE3_LOOP_APP;
u32 i;
- for (i = 0; i < HNS3_SELF_TEST_TYPE_NUM; i++) {
+ for (i = HNAE3_LOOP_APP; i < HNAE3_LOOP_NONE; i++) {
enum hnae3_loop loop_type = (enum hnae3_loop)st_param[i][0];
if (!st_param[i][1])
@@ -401,6 +396,20 @@ static void hns3_do_selftest(struct net_device *ndev, int (*st_param)[2],
}
}
+static void hns3_do_external_lb(struct net_device *ndev,
+ struct ethtool_test *eth_test, u64 *data)
+{
+ data[HNAE3_LOOP_EXTERNAL] = hns3_lp_up(ndev, HNAE3_LOOP_EXTERNAL);
+ if (!data[HNAE3_LOOP_EXTERNAL])
+ data[HNAE3_LOOP_EXTERNAL] = hns3_lp_run_test(ndev, HNAE3_LOOP_EXTERNAL);
+ hns3_lp_down(ndev, HNAE3_LOOP_EXTERNAL);
+
+ if (data[HNAE3_LOOP_EXTERNAL])
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ eth_test->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
+}
+
/**
* hns3_self_test - self test
* @ndev: net device
@@ -410,7 +419,9 @@ static void hns3_do_selftest(struct net_device *ndev, int (*st_param)[2],
static void hns3_self_test(struct net_device *ndev,
struct ethtool_test *eth_test, u64 *data)
{
- int st_param[HNS3_SELF_TEST_TYPE_NUM][2];
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct hnae3_handle *h = priv->ae_handle;
+ int st_param[HNAE3_LOOP_NONE][2];
bool if_running = netif_running(ndev);
if (hns3_nic_resetting(ndev)) {
@@ -418,13 +429,29 @@ static void hns3_self_test(struct net_device *ndev,
return;
}
- /* Only do offline selftest, or pass by default */
- if (eth_test->flags != ETH_TEST_FL_OFFLINE)
+ if (!(eth_test->flags & ETH_TEST_FL_OFFLINE))
return;
- hns3_selftest_prepare(ndev, if_running, st_param);
+ if (netif_msg_ifdown(h))
+ netdev_info(ndev, "self test start\n");
+
+ hns3_set_selftest_param(h, st_param);
+
+ /* The external loopback test requires the link to be up and in full
+ * duplex, so run it first to reduce the total test time.
+ */
+ if (eth_test->flags & ETH_TEST_FL_EXTERNAL_LB) {
+ hns3_external_lb_prepare(ndev, if_running);
+ hns3_do_external_lb(ndev, eth_test, data);
+ hns3_external_lb_restore(ndev, if_running);
+ }
+
+ hns3_selftest_prepare(ndev, if_running);
hns3_do_selftest(ndev, st_param, eth_test, data);
hns3_selftest_restore(ndev, if_running);
+
+ if (netif_msg_ifdown(h))
+ netdev_info(ndev, "self test end\n");
}
static void hns3_update_limit_promisc_mode(struct net_device *netdev,
@@ -712,7 +739,8 @@ static void hns3_get_ksettings(struct hnae3_handle *h,
ops->get_ksettings_an_result(h,
&cmd->base.autoneg,
&cmd->base.speed,
- &cmd->base.duplex);
+ &cmd->base.duplex,
+ &cmd->lanes);
/* 2.get link mode */
if (ops->get_link_mode)
@@ -794,6 +822,7 @@ static int hns3_check_ksettings_param(const struct net_device *netdev,
const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
u8 module_type = HNAE3_MODULE_TYPE_UNKNOWN;
u8 media_type = HNAE3_MEDIA_TYPE_UNKNOWN;
+ u32 lane_num;
u8 autoneg;
u32 speed;
u8 duplex;
@@ -806,9 +835,9 @@ static int hns3_check_ksettings_param(const struct net_device *netdev,
return 0;
if (ops->get_ksettings_an_result) {
- ops->get_ksettings_an_result(handle, &autoneg, &speed, &duplex);
+ ops->get_ksettings_an_result(handle, &autoneg, &speed, &duplex, &lane_num);
if (cmd->base.autoneg == autoneg && cmd->base.speed == speed &&
- cmd->base.duplex == duplex)
+ cmd->base.duplex == duplex && cmd->lanes == lane_num)
return 0;
}
@@ -845,10 +874,14 @@ static int hns3_set_link_ksettings(struct net_device *netdev,
if (cmd->base.speed == SPEED_1000 && cmd->base.duplex == DUPLEX_HALF)
return -EINVAL;
+ if (cmd->lanes && !hnae3_ae_dev_lane_num_supported(ae_dev))
+ return -EOPNOTSUPP;
+
netif_dbg(handle, drv, netdev,
- "set link(%s): autoneg=%u, speed=%u, duplex=%u\n",
+ "set link(%s): autoneg=%u, speed=%u, duplex=%u, lanes=%u\n",
netdev->phydev ? "phy" : "mac",
- cmd->base.autoneg, cmd->base.speed, cmd->base.duplex);
+ cmd->base.autoneg, cmd->base.speed, cmd->base.duplex,
+ cmd->lanes);
/* Only support ksettings_set for netdev with phy attached for now */
if (netdev->phydev) {
@@ -886,7 +919,7 @@ static int hns3_set_link_ksettings(struct net_device *netdev,
if (ops->cfg_mac_speed_dup_h)
ret = ops->cfg_mac_speed_dup_h(handle, cmd->base.speed,
- cmd->base.duplex);
+ cmd->base.duplex, (u8)(cmd->lanes));
return ret;
}
@@ -1612,6 +1645,19 @@ static void hns3_set_msglevel(struct net_device *netdev, u32 msg_level)
h->msg_enable = msg_level;
}
+static void hns3_get_fec_stats(struct net_device *netdev,
+ struct ethtool_fec_stats *fec_stats)
+{
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+
+ if (!hnae3_ae_dev_fec_stats_supported(ae_dev) || !ops->get_fec_stats)
+ return;
+
+ ops->get_fec_stats(handle, fec_stats);
+}
+
/* Translate local fec value into ethtool value. */
static unsigned int loc_to_eth_fec(u8 loc_fec)
{
@@ -1621,12 +1667,12 @@ static unsigned int loc_to_eth_fec(u8 loc_fec)
eth_fec |= ETHTOOL_FEC_AUTO;
if (loc_fec & BIT(HNAE3_FEC_RS))
eth_fec |= ETHTOOL_FEC_RS;
+ if (loc_fec & BIT(HNAE3_FEC_LLRS))
+ eth_fec |= ETHTOOL_FEC_LLRS;
if (loc_fec & BIT(HNAE3_FEC_BASER))
eth_fec |= ETHTOOL_FEC_BASER;
-
- /* if nothing is set, then FEC is off */
- if (!eth_fec)
- eth_fec = ETHTOOL_FEC_OFF;
+ if (loc_fec & BIT(HNAE3_FEC_NONE))
+ eth_fec |= ETHTOOL_FEC_OFF;
return eth_fec;
}
@@ -1637,12 +1683,13 @@ static unsigned int eth_to_loc_fec(unsigned int eth_fec)
u32 loc_fec = 0;
if (eth_fec & ETHTOOL_FEC_OFF)
- return loc_fec;
-
+ loc_fec |= BIT(HNAE3_FEC_NONE);
if (eth_fec & ETHTOOL_FEC_AUTO)
loc_fec |= BIT(HNAE3_FEC_AUTO);
if (eth_fec & ETHTOOL_FEC_RS)
loc_fec |= BIT(HNAE3_FEC_RS);
+ if (eth_fec & ETHTOOL_FEC_LLRS)
+ loc_fec |= BIT(HNAE3_FEC_LLRS);
if (eth_fec & ETHTOOL_FEC_BASER)
loc_fec |= BIT(HNAE3_FEC_BASER);
@@ -1668,6 +1715,8 @@ static int hns3_get_fecparam(struct net_device *netdev,
fec->fec = loc_to_eth_fec(fec_ability);
fec->active_fec = loc_to_eth_fec(fec_mode);
+ if (!fec->active_fec)
+ fec->active_fec = ETHTOOL_FEC_OFF;
return 0;
}
@@ -2051,6 +2100,7 @@ static const struct ethtool_ops hns3vf_ethtool_ops = {
static const struct ethtool_ops hns3_ethtool_ops = {
.supported_coalesce_params = HNS3_ETHTOOL_COALESCE,
.supported_ring_params = HNS3_ETHTOOL_RING,
+ .cap_link_lanes_supported = true,
.self_test = hns3_self_test,
.get_drvinfo = hns3_get_drvinfo,
.get_link = hns3_get_link,
@@ -2081,6 +2131,7 @@ static const struct ethtool_ops hns3_ethtool_ops = {
.set_msglevel = hns3_set_msglevel,
.get_fecparam = hns3_get_fecparam,
.set_fecparam = hns3_set_fecparam,
+ .get_fec_stats = hns3_get_fec_stats,
.get_module_info = hns3_get_module_info,
.get_module_eeprom = hns3_get_module_eeprom,
.get_priv_flags = hns3_get_priv_flags,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index f9d89511eb32..43cada51d8cb 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -321,7 +321,9 @@ struct hclge_config_mac_speed_dup_cmd {
#define HCLGE_CFG_MAC_SPEED_CHANGE_EN_B 0
u8 mac_change_fec_en;
- u8 rsv[22];
+ u8 rsv[4];
+ u8 lane_num;
+ u8 rsv1[17];
};
#define HCLGE_TQP_ENABLE_B 0
@@ -347,7 +349,9 @@ struct hclge_sfp_info_cmd {
u8 autoneg_ability; /* whether support autoneg */
__le32 speed_ability; /* speed ability for current media */
__le32 module_type;
- u8 rsv[8];
+ u8 fec_ability;
+ u8 lane_num;
+ u8 rsv[6];
};
#define HCLGE_MAC_CFG_FEC_AUTO_EN_B 0
@@ -359,12 +363,27 @@ struct hclge_sfp_info_cmd {
#define HCLGE_MAC_FEC_OFF 0
#define HCLGE_MAC_FEC_BASER 1
#define HCLGE_MAC_FEC_RS 2
+#define HCLGE_MAC_FEC_LLRS 3
struct hclge_config_fec_cmd {
u8 fec_mode;
u8 default_config;
u8 rsv[22];
};
+#define HCLGE_FEC_STATS_CMD_NUM 4
+
+struct hclge_query_fec_stats_cmd {
+ /* fec rs mode total stats */
+ __le32 rs_fec_corr_blocks;
+ __le32 rs_fec_uncorr_blocks;
+ __le32 rs_fec_error_blocks;
+ /* fec base-r mode per lanes stats */
+ u8 base_r_lane_num;
+ u8 rsv[3];
+ __le32 base_r_fec_corr_blocks;
+ __le32 base_r_fec_uncorr_blocks;
+};
+
#define HCLGE_MAC_UPLINK_PORT 0x100
struct hclge_config_max_frm_size_cmd {
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
index 69b8673436ca..c4aded65e848 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
@@ -359,6 +359,93 @@ static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
}
+static int hclge_ieee_setapp(struct hnae3_handle *h, struct dcb_app *app)
+{
+ struct hclge_vport *vport = hclge_get_vport(h);
+ struct net_device *netdev = h->kinfo.netdev;
+ struct hclge_dev *hdev = vport->back;
+ struct dcb_app old_app;
+ int ret;
+
+ if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP ||
+ app->protocol >= HNAE3_MAX_DSCP ||
+ app->priority >= HNAE3_MAX_USER_PRIO)
+ return -EINVAL;
+
+ dev_info(&hdev->pdev->dev, "setapp dscp=%u priority=%u\n",
+ app->protocol, app->priority);
+
+ if (app->priority == h->kinfo.dscp_prio[app->protocol])
+ return 0;
+
+ ret = dcb_ieee_setapp(netdev, app);
+ if (ret)
+ return ret;
+
+ old_app.selector = IEEE_8021QAZ_APP_SEL_DSCP;
+ old_app.protocol = app->protocol;
+ old_app.priority = h->kinfo.dscp_prio[app->protocol];
+
+ h->kinfo.dscp_prio[app->protocol] = app->priority;
+ ret = hclge_dscp_to_tc_map(hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to set dscp to tc map, ret = %d\n", ret);
+ h->kinfo.dscp_prio[app->protocol] = old_app.priority;
+ (void)dcb_ieee_delapp(netdev, app);
+ return ret;
+ }
+
+ vport->nic.kinfo.tc_map_mode = HNAE3_TC_MAP_MODE_DSCP;
+ if (old_app.priority == HNAE3_PRIO_ID_INVALID)
+ h->kinfo.dscp_app_cnt++;
+ else
+ ret = dcb_ieee_delapp(netdev, &old_app);
+
+ return ret;
+}
+
+static int hclge_ieee_delapp(struct hnae3_handle *h, struct dcb_app *app)
+{
+ struct hclge_vport *vport = hclge_get_vport(h);
+ struct net_device *netdev = h->kinfo.netdev;
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+
+ if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP ||
+ app->protocol >= HNAE3_MAX_DSCP ||
+ app->priority >= HNAE3_MAX_USER_PRIO ||
+ app->priority != h->kinfo.dscp_prio[app->protocol])
+ return -EINVAL;
+
+ dev_info(&hdev->pdev->dev, "delapp dscp=%u priority=%u\n",
+ app->protocol, app->priority);
+
+ ret = dcb_ieee_delapp(netdev, app);
+ if (ret)
+ return ret;
+
+ h->kinfo.dscp_prio[app->protocol] = HNAE3_PRIO_ID_INVALID;
+ ret = hclge_dscp_to_tc_map(hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to del dscp to tc map, ret = %d\n", ret);
+ h->kinfo.dscp_prio[app->protocol] = app->priority;
+ (void)dcb_ieee_setapp(netdev, app);
+ return ret;
+ }
+
+ if (h->kinfo.dscp_app_cnt)
+ h->kinfo.dscp_app_cnt--;
+
+ if (!h->kinfo.dscp_app_cnt) {
+ vport->nic.kinfo.tc_map_mode = HNAE3_TC_MAP_MODE_PRIO;
+ ret = hclge_up_to_tc_map(hdev);
+ }
+
+ return ret;
+}
+
/* DCBX configuration */
static u8 hclge_getdcbx(struct hnae3_handle *h)
{
@@ -543,6 +630,8 @@ static const struct hnae3_dcb_ops hns3_dcb_ops = {
.ieee_setets = hclge_ieee_setets,
.ieee_getpfc = hclge_ieee_getpfc,
.ieee_setpfc = hclge_ieee_setpfc,
+ .ieee_setapp = hclge_ieee_setapp,
+ .ieee_delapp = hclge_ieee_delapp,
.getdcbx = hclge_getdcbx,
.setdcbx = hclge_setdcbx,
.setup_tc = hclge_setup_tc,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index 9b870e79c290..142415c84c6b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -14,6 +14,8 @@ static const char * const hclge_mac_state_str[] = {
"TO_ADD", "TO_DEL", "ACTIVE"
};
+static const char * const tc_map_mode_str[] = { "PRIO", "DSCP" };
+
static const struct hclge_dbg_reg_type_info hclge_dbg_reg_info[] = {
{ .cmd = HNAE3_DBG_CMD_REG_BIOS_COMMON,
.dfx_msg = &hclge_dbg_bios_common_reg[0],
@@ -1115,10 +1117,11 @@ static int hclge_dbg_dump_qos_pause_cfg(struct hclge_dev *hdev, char *buf,
return 0;
}
+#define HCLGE_DBG_TC_MASK 0x0F
+
static int hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev, char *buf,
int len)
{
-#define HCLGE_DBG_TC_MASK 0x0F
#define HCLGE_DBG_TC_BIT_WIDTH 4
struct hclge_qos_pri_map_cmd *pri_map;
@@ -1152,6 +1155,58 @@ static int hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev, char *buf,
return 0;
}
+static int hclge_dbg_dump_qos_dscp_map(struct hclge_dev *hdev, char *buf,
+ int len)
+{
+ struct hnae3_knic_private_info *kinfo = &hdev->vport[0].nic.kinfo;
+ struct hclge_desc desc[HCLGE_DSCP_MAP_TC_BD_NUM];
+ u8 *req0 = (u8 *)desc[0].data;
+ u8 *req1 = (u8 *)desc[1].data;
+ u8 dscp_tc[HNAE3_MAX_DSCP];
+ int pos, ret;
+ u8 i, j;
+
+ pos = scnprintf(buf, len, "tc map mode: %s\n",
+ tc_map_mode_str[kinfo->tc_map_mode]);
+
+ if (kinfo->tc_map_mode != HNAE3_TC_MAP_MODE_DSCP)
+ return 0;
+
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QOS_MAP, true);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_QOS_MAP, true);
+ ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_DSCP_MAP_TC_BD_NUM);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to dump qos dscp map, ret = %d\n", ret);
+ return ret;
+ }
+
+ pos += scnprintf(buf + pos, len - pos, "\nDSCP PRIO TC\n");
+
+ /* The low 32 dscp settings use bd0, the high 32 dscp settings use bd1 */
+ for (i = 0; i < HNAE3_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM; i++) {
+ j = i + HNAE3_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM;
+ /* Each dscp setting takes 4 bits, so each byte holds two dscp
+ * settings
+ */
+ dscp_tc[i] = req0[i >> 1] >> HCLGE_DSCP_TC_SHIFT(i);
+ dscp_tc[j] = req1[i >> 1] >> HCLGE_DSCP_TC_SHIFT(i);
+ dscp_tc[i] &= HCLGE_DBG_TC_MASK;
+ dscp_tc[j] &= HCLGE_DBG_TC_MASK;
+ }
+
+ for (i = 0; i < HNAE3_MAX_DSCP; i++) {
+ if (kinfo->dscp_prio[i] == HNAE3_PRIO_ID_INVALID)
+ continue;
+
+ pos += scnprintf(buf + pos, len - pos, " %2u %u %u\n",
+ i, kinfo->dscp_prio[i], dscp_tc[i]);
+ }
+
+ return 0;
+}
+
static int hclge_dbg_dump_tx_buf_cfg(struct hclge_dev *hdev, char *buf, int len)
{
struct hclge_tx_buff_alloc_cmd *tx_buf_cmd;
@@ -1517,7 +1572,7 @@ static int hclge_dbg_dump_fd_tcam(struct hclge_dev *hdev, char *buf, int len)
char *tcam_buf;
int pos = 0;
- if (!hnae3_dev_fd_supported(hdev)) {
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
dev_err(&hdev->pdev->dev,
"Only FD-supported dev supports dump fd tcam\n");
return -EOPNOTSUPP;
@@ -1585,6 +1640,9 @@ static int hclge_dbg_dump_fd_counter(struct hclge_dev *hdev, char *buf, int len)
u64 cnt;
u8 i;
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
+ return -EOPNOTSUPP;
+
pos += scnprintf(buf + pos, len - pos,
"func_id\thit_times\n");
@@ -2373,6 +2431,10 @@ static const struct hclge_dbg_func hclge_dbg_cmd_func[] = {
.cmd = HNAE3_DBG_CMD_QOS_PRI_MAP,
.dbg_dump = hclge_dbg_dump_qos_pri_map,
},
+ {
+ .cmd = HNAE3_DBG_CMD_QOS_DSCP_MAP,
+ .dbg_dump = hclge_dbg_dump_qos_dscp_map,
+ },
{
.cmd = HNAE3_DBG_CMD_QOS_BUF_CFG,
.dbg_dump = hclge_dbg_dump_qos_buf_cfg,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index fae79764dc44..6962a9d69cf8 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -71,6 +71,7 @@ static void hclge_sync_mac_table(struct hclge_dev *hdev);
static void hclge_restore_hw_table(struct hclge_dev *hdev);
static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
static void hclge_sync_fd_table(struct hclge_dev *hdev);
+static void hclge_update_fec_stats(struct hclge_dev *hdev);
static struct hnae3_ae_algo ae_algo;
@@ -148,10 +149,11 @@ static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
HCLGE_TQP_INTR_RL_REG};
static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
- "App Loopback test",
- "Serdes serial Loopback test",
- "Serdes parallel Loopback test",
- "Phy Loopback test"
+ "External Loopback test",
+ "App Loopback test",
+ "Serdes serial Loopback test",
+ "Serdes parallel Loopback test",
+ "Phy Loopback test"
};
static const struct hclge_comm_stats_str g_mac_stats_string[] = {
@@ -679,6 +681,8 @@ static void hclge_update_stats_for_all(struct hclge_dev *hdev)
}
}
+ hclge_update_fec_stats(hdev);
+
status = hclge_mac_update_stats(hdev);
if (status)
dev_err(&hdev->pdev->dev,
@@ -715,7 +719,8 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK | \
HNAE3_SUPPORT_PHY_LOOPBACK | \
HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK | \
- HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
+ HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK | \
+ HNAE3_SUPPORT_EXTERNAL_LOOPBACK)
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
@@ -737,9 +742,12 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
}
- count += 2;
+ count += 1;
handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
+ count += 1;
handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
+ count += 1;
+ handle->flags |= HNAE3_SUPPORT_EXTERNAL_LOOPBACK;
if ((hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
hdev->hw.mac.phydev->drv->set_loopback) ||
@@ -770,6 +778,11 @@ static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
size, p);
p = hclge_comm_tqps_get_strings(handle, p);
} else if (stringset == ETH_SS_TEST) {
+ if (handle->flags & HNAE3_SUPPORT_EXTERNAL_LOOPBACK) {
+ memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_EXTERNAL],
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
ETH_GSTRING_LEN);
@@ -1003,6 +1016,27 @@ static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
return -EINVAL;
}
+static void hclge_update_fec_support(struct hclge_mac *mac)
+{
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT, mac->supported);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
+
+ if (mac->fec_ability & BIT(HNAE3_FEC_BASER))
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
+ mac->supported);
+ if (mac->fec_ability & BIT(HNAE3_FEC_RS))
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
+ mac->supported);
+ if (mac->fec_ability & BIT(HNAE3_FEC_LLRS))
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
+ mac->supported);
+ if (mac->fec_ability & BIT(HNAE3_FEC_NONE))
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
+ mac->supported);
+}
+
static void hclge_convert_setting_sr(u16 speed_ability,
unsigned long *link_mode)
{
@@ -1101,34 +1135,36 @@ static void hclge_convert_setting_kr(u16 speed_ability,
static void hclge_convert_setting_fec(struct hclge_mac *mac)
{
- linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
- linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
+ /* If the firmware has reported fec_ability, no need to derive it by speed */
+ if (mac->fec_ability)
+ goto out;
switch (mac->speed) {
case HCLGE_MAC_SPEED_10G:
case HCLGE_MAC_SPEED_40G:
- linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
- mac->supported);
- mac->fec_ability =
- BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
+ mac->fec_ability = BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO) |
+ BIT(HNAE3_FEC_NONE);
break;
case HCLGE_MAC_SPEED_25G:
case HCLGE_MAC_SPEED_50G:
- linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
- mac->supported);
- mac->fec_ability =
- BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
- BIT(HNAE3_FEC_AUTO);
+ mac->fec_ability = BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
+ BIT(HNAE3_FEC_AUTO) | BIT(HNAE3_FEC_NONE);
break;
case HCLGE_MAC_SPEED_100G:
+ mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO) |
+ BIT(HNAE3_FEC_NONE);
+ break;
case HCLGE_MAC_SPEED_200G:
- linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
- mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
+ mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO) |
+ BIT(HNAE3_FEC_LLRS);
break;
default:
mac->fec_ability = 0;
break;
}
+
+out:
+ hclge_update_fec_support(mac);
}
static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
@@ -1574,7 +1610,7 @@ static int hclge_configure(struct hclge_dev *hdev)
if (cfg.vlan_fliter_cap == HCLGE_VLAN_FLTR_CAN_MDF)
set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps);
- if (hnae3_dev_fd_supported(hdev)) {
+ if (hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
hdev->fd_en = true;
hdev->fd_active_type = HCLGE_FD_RULE_NONE;
}
@@ -1617,7 +1653,7 @@ static int hclge_config_gro(struct hclge_dev *hdev)
struct hclge_desc desc;
int ret;
- if (!hnae3_dev_gro_supported(hdev))
+ if (!hnae3_ae_dev_gro_supported(hdev->ae_dev))
return 0;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
@@ -2589,7 +2625,7 @@ static int hclge_convert_to_fw_speed(u32 speed_drv, u32 *speed_fw)
}
static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
- u8 duplex)
+ u8 duplex, u8 lane_num)
{
struct hclge_config_mac_speed_dup_cmd *req;
struct hclge_desc desc;
@@ -2613,6 +2649,7 @@ static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
speed_fw);
hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
1);
+ req->lane_num = lane_num;
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret) {
@@ -2624,33 +2661,35 @@ static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
return 0;
}
-int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
+int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex, u8 lane_num)
{
struct hclge_mac *mac = &hdev->hw.mac;
int ret;
duplex = hclge_check_speed_dup(duplex, speed);
if (!mac->support_autoneg && mac->speed == speed &&
- mac->duplex == duplex)
+ mac->duplex == duplex && (mac->lane_num == lane_num || lane_num == 0))
return 0;
- ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
+ ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex, lane_num);
if (ret)
return ret;
hdev->hw.mac.speed = speed;
hdev->hw.mac.duplex = duplex;
+ if (lane_num)
+ hdev->hw.mac.lane_num = lane_num;
return 0;
}
static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
- u8 duplex)
+ u8 duplex, u8 lane_num)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
+ return hclge_cfg_mac_speed_dup(hdev, speed, duplex, lane_num);
}
static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
@@ -2730,6 +2769,157 @@ static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
return 0;
}
+static void hclge_parse_fec_stats_lanes(struct hclge_dev *hdev,
+ struct hclge_desc *desc, u32 desc_len)
+{
+ u32 lane_size = HCLGE_FEC_STATS_MAX_LANES * 2;
+ u32 desc_index = 0;
+ u32 data_index = 0;
+ u32 i;
+
+ for (i = 0; i < lane_size; i++) {
+ if (data_index >= HCLGE_DESC_DATA_LEN) {
+ desc_index++;
+ data_index = 0;
+ }
+
+ if (desc_index >= desc_len)
+ return;
+
+ hdev->fec_stats.per_lanes[i] +=
+ le32_to_cpu(desc[desc_index].data[data_index]);
+ data_index++;
+ }
+}
+
+static void hclge_parse_fec_stats(struct hclge_dev *hdev,
+ struct hclge_desc *desc, u32 desc_len)
+{
+ struct hclge_query_fec_stats_cmd *req;
+
+ req = (struct hclge_query_fec_stats_cmd *)desc[0].data;
+
+ hdev->fec_stats.base_r_lane_num = req->base_r_lane_num;
+ hdev->fec_stats.rs_corr_blocks +=
+ le32_to_cpu(req->rs_fec_corr_blocks);
+ hdev->fec_stats.rs_uncorr_blocks +=
+ le32_to_cpu(req->rs_fec_uncorr_blocks);
+ hdev->fec_stats.rs_error_blocks +=
+ le32_to_cpu(req->rs_fec_error_blocks);
+ hdev->fec_stats.base_r_corr_blocks +=
+ le32_to_cpu(req->base_r_fec_corr_blocks);
+ hdev->fec_stats.base_r_uncorr_blocks +=
+ le32_to_cpu(req->base_r_fec_uncorr_blocks);
+
+ hclge_parse_fec_stats_lanes(hdev, &desc[1], desc_len - 1);
+}
+
+static int hclge_update_fec_stats_hw(struct hclge_dev *hdev)
+{
+ struct hclge_desc desc[HCLGE_FEC_STATS_CMD_NUM];
+ int ret;
+ u32 i;
+
+ for (i = 0; i < HCLGE_FEC_STATS_CMD_NUM; i++) {
+ hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_FEC_STATS,
+ true);
+ if (i != (HCLGE_FEC_STATS_CMD_NUM - 1))
+ desc[i].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ }
+
+ ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_FEC_STATS_CMD_NUM);
+ if (ret)
+ return ret;
+
+ hclge_parse_fec_stats(hdev, desc, HCLGE_FEC_STATS_CMD_NUM);
+
+ return 0;
+}
+
+static void hclge_update_fec_stats(struct hclge_dev *hdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+ int ret;
+
+ if (!hnae3_ae_dev_fec_stats_supported(ae_dev) ||
+ test_and_set_bit(HCLGE_STATE_FEC_STATS_UPDATING, &hdev->state))
+ return;
+
+ ret = hclge_update_fec_stats_hw(hdev);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "failed to update fec stats, ret = %d\n", ret);
+
+ clear_bit(HCLGE_STATE_FEC_STATS_UPDATING, &hdev->state);
+}
+
+static void hclge_get_fec_stats_total(struct hclge_dev *hdev,
+ struct ethtool_fec_stats *fec_stats)
+{
+ fec_stats->corrected_blocks.total = hdev->fec_stats.rs_corr_blocks;
+ fec_stats->uncorrectable_blocks.total =
+ hdev->fec_stats.rs_uncorr_blocks;
+}
+
+static void hclge_get_fec_stats_lanes(struct hclge_dev *hdev,
+ struct ethtool_fec_stats *fec_stats)
+{
+ u32 i;
+
+ if (hdev->fec_stats.base_r_lane_num == 0 ||
+ hdev->fec_stats.base_r_lane_num > HCLGE_FEC_STATS_MAX_LANES) {
+ dev_err(&hdev->pdev->dev,
+ "fec stats lane number(%llu) is invalid\n",
+ hdev->fec_stats.base_r_lane_num);
+ return;
+ }
+
+ for (i = 0; i < hdev->fec_stats.base_r_lane_num; i++) {
+ fec_stats->corrected_blocks.lanes[i] =
+ hdev->fec_stats.base_r_corr_per_lanes[i];
+ fec_stats->uncorrectable_blocks.lanes[i] =
+ hdev->fec_stats.base_r_uncorr_per_lanes[i];
+ }
+}
+
+static void hclge_comm_get_fec_stats(struct hclge_dev *hdev,
+ struct ethtool_fec_stats *fec_stats)
+{
+ u32 fec_mode = hdev->hw.mac.fec_mode;
+
+ switch (fec_mode) {
+ case BIT(HNAE3_FEC_RS):
+ case BIT(HNAE3_FEC_LLRS):
+ hclge_get_fec_stats_total(hdev, fec_stats);
+ break;
+ case BIT(HNAE3_FEC_BASER):
+ hclge_get_fec_stats_lanes(hdev, fec_stats);
+ break;
+ default:
+ dev_err(&hdev->pdev->dev,
+ "fec stats is not supported by current fec mode(0x%x)\n",
+ fec_mode);
+ break;
+ }
+}
+
+static void hclge_get_fec_stats(struct hnae3_handle *handle,
+ struct ethtool_fec_stats *fec_stats)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ u32 fec_mode = hdev->hw.mac.fec_mode;
+
+ if (fec_mode == BIT(HNAE3_FEC_NONE) ||
+ fec_mode == BIT(HNAE3_FEC_AUTO) ||
+ fec_mode == BIT(HNAE3_FEC_USER_DEF))
+ return;
+
+ hclge_update_fec_stats(hdev);
+
+ hclge_comm_get_fec_stats(hdev, fec_stats);
+}
+
static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
{
struct hclge_config_fec_cmd *req;
@@ -2744,6 +2934,9 @@ static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
if (fec_mode & BIT(HNAE3_FEC_RS))
hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
+ if (fec_mode & BIT(HNAE3_FEC_LLRS))
+ hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
+ HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_LLRS);
if (fec_mode & BIT(HNAE3_FEC_BASER))
hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
@@ -2796,7 +2989,7 @@ static int hclge_mac_init(struct hclge_dev *hdev)
hdev->support_sfp_query = true;
hdev->hw.mac.duplex = HCLGE_MAC_FULL;
ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
- hdev->hw.mac.duplex);
+ hdev->hw.mac.duplex, hdev->hw.mac.lane_num);
if (ret)
return ret;
@@ -2988,6 +3181,9 @@ static void hclge_update_fec_advertising(struct hclge_mac *mac)
if (mac->fec_mode & BIT(HNAE3_FEC_RS))
linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
mac->advertising);
+ else if (mac->fec_mode & BIT(HNAE3_FEC_LLRS))
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT,
+ mac->advertising);
else if (mac->fec_mode & BIT(HNAE3_FEC_BASER))
linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
mac->advertising);
@@ -3037,7 +3233,6 @@ static void hclge_update_port_capability(struct hclge_dev *hdev,
struct hclge_mac *mac)
{
if (hnae3_dev_fec_supported(hdev))
- /* update fec ability by speed */
hclge_convert_setting_fec(mac);
/* firmware can not identify back plane type, the media type
@@ -3119,10 +3314,12 @@ static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
mac->autoneg = resp->autoneg;
mac->support_autoneg = resp->autoneg_ability;
mac->speed_type = QUERY_ACTIVE_SPEED;
+ mac->lane_num = resp->lane_num;
if (!resp->active_fec)
mac->fec_mode = 0;
else
mac->fec_mode = BIT(resp->active_fec);
+ mac->fec_ability = resp->fec_ability;
} else {
mac->speed_type = QUERY_SFP_SPEED;
}
@@ -3302,13 +3499,13 @@ static int hclge_update_port_info(struct hclge_dev *hdev)
return 0;
}
return hclge_cfg_mac_speed_dup(hdev, mac->speed,
- HCLGE_MAC_FULL);
+ HCLGE_MAC_FULL, mac->lane_num);
} else {
if (speed == HCLGE_MAC_SPEED_UNKNOWN)
return 0; /* do nothing if no SFP */
/* must config full duplex for SFP */
- return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
+ return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL, 0);
}
}
@@ -5334,7 +5531,7 @@ static int hclge_init_fd_config(struct hclge_dev *hdev)
struct hclge_fd_key_cfg *key_cfg;
int ret;
- if (!hnae3_dev_fd_supported(hdev))
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
return 0;
ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
@@ -6339,7 +6536,7 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle,
u8 action;
int ret;
- if (!hnae3_dev_fd_supported(hdev)) {
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
dev_err(&hdev->pdev->dev,
"flow table director is not supported\n");
return -EOPNOTSUPP;
@@ -6395,7 +6592,7 @@ static int hclge_del_fd_entry(struct hnae3_handle *handle,
struct ethtool_rx_flow_spec *fs;
int ret;
- if (!hnae3_dev_fd_supported(hdev))
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
return -EOPNOTSUPP;
fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
@@ -6431,9 +6628,6 @@ static void hclge_clear_fd_rules_in_list(struct hclge_dev *hdev,
struct hlist_node *node;
u16 location;
- if (!hnae3_dev_fd_supported(hdev))
- return;
-
spin_lock_bh(&hdev->fd_rule_lock);
for_each_set_bit(location, hdev->fd_bmap,
@@ -6458,6 +6652,9 @@ static void hclge_clear_fd_rules_in_list(struct hclge_dev *hdev,
static void hclge_del_all_fd_entries(struct hclge_dev *hdev)
{
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
+ return;
+
hclge_clear_fd_rules_in_list(hdev, true);
hclge_fd_disable_user_def(hdev);
}
@@ -6473,7 +6670,7 @@ static int hclge_restore_fd_entries(struct hnae3_handle *handle)
* return value. If error is returned here, the reset process will
* fail.
*/
- if (!hnae3_dev_fd_supported(hdev))
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
return 0;
/* if fd is disabled, should not restore it when reset */
@@ -6497,7 +6694,7 @@ static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- if (!hnae3_dev_fd_supported(hdev) || hclge_is_cls_flower_active(handle))
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev) || hclge_is_cls_flower_active(handle))
return -EOPNOTSUPP;
cmd->rule_cnt = hdev->hclge_fd_rule_num;
@@ -6715,7 +6912,7 @@ static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
struct hclge_dev *hdev = vport->back;
struct ethtool_rx_flow_spec *fs;
- if (!hnae3_dev_fd_supported(hdev))
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
return -EOPNOTSUPP;
fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
@@ -6778,7 +6975,7 @@ static int hclge_get_all_rules(struct hnae3_handle *handle,
struct hlist_node *node2;
int cnt = 0;
- if (!hnae3_dev_fd_supported(hdev))
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
return -EOPNOTSUPP;
cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
@@ -6878,7 +7075,7 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
struct hclge_fd_rule *rule;
u16 bit_id;
- if (!hnae3_dev_fd_supported(hdev))
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
return -EOPNOTSUPP;
/* when there is already fd rule existed add by user,
@@ -7167,6 +7364,12 @@ static int hclge_add_cls_flower(struct hnae3_handle *handle,
struct hclge_fd_rule *rule;
int ret;
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev)) {
+ dev_err(&hdev->pdev->dev,
+ "cls flower is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
ret = hclge_check_cls_flower(hdev, cls_flower, tc);
if (ret) {
dev_err(&hdev->pdev->dev,
@@ -7220,6 +7423,9 @@ static int hclge_del_cls_flower(struct hnae3_handle *handle,
struct hclge_fd_rule *rule;
int ret;
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
+ return -EOPNOTSUPP;
+
spin_lock_bh(&hdev->fd_rule_lock);
rule = hclge_find_cls_flower(hdev, cls_flower->cookie);
@@ -7282,6 +7488,9 @@ out:
static void hclge_sync_fd_table(struct hclge_dev *hdev)
{
+ if (!hnae3_ae_dev_fd_supported(hdev->ae_dev))
+ return;
+
if (test_and_clear_bit(HCLGE_STATE_FD_CLEAR_ALL, &hdev->state)) {
bool clear_list = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
@@ -7705,7 +7914,7 @@ static int hclge_set_loopback(struct hnae3_handle *handle,
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
- int ret;
+ int ret = 0;
/* Loopback can be enabled in three places: SSU, MAC, and serdes. By
* default, SSU loopback is enabled, so if the SMAC and the DMAC are
@@ -7732,6 +7941,8 @@ static int hclge_set_loopback(struct hnae3_handle *handle,
case HNAE3_LOOP_PHY:
ret = hclge_set_phy_loopback(hdev, en);
break;
+ case HNAE3_LOOP_EXTERNAL:
+ break;
default:
ret = -ENOTSUPP;
dev_err(&hdev->pdev->dev,
@@ -10793,7 +11004,7 @@ static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
}
static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
- u8 *auto_neg, u32 *speed, u8 *duplex)
+ u8 *auto_neg, u32 *speed, u8 *duplex, u32 *lane_num)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
@@ -10804,6 +11015,8 @@ static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
*duplex = hdev->hw.mac.duplex;
if (auto_neg)
*auto_neg = hdev->hw.mac.autoneg;
+ if (lane_num)
+ *lane_num = hdev->hw.mac.lane_num;
}
static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
@@ -11443,6 +11656,10 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
if (ret)
goto err_mdiobus_unreg;
+ ret = hclge_update_port_info(hdev);
+ if (ret)
+ goto err_mdiobus_unreg;
+
INIT_KFIFO(hdev->mac_tnl_log);
hclge_dcb_ops_set(hdev);
@@ -11510,6 +11727,7 @@ out:
static void hclge_stats_clear(struct hclge_dev *hdev)
{
memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
+ memset(&hdev->fec_stats, 0, sizeof(hdev->fec_stats));
}
static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
@@ -12763,6 +12981,21 @@ static void hclge_clean_vport_config(struct hnae3_ae_dev *ae_dev, int num_vfs)
}
}
+static int hclge_get_dscp_prio(struct hnae3_handle *h, u8 dscp, u8 *tc_mode,
+ u8 *priority)
+{
+ if (dscp >= HNAE3_MAX_DSCP)
+ return -EINVAL;
+
+ if (tc_mode)
+ *tc_mode = h->kinfo.tc_map_mode;
+ if (priority)
+ *priority = h->kinfo.dscp_prio[dscp] == HNAE3_PRIO_ID_INVALID ? 0 :
+ h->kinfo.dscp_prio[dscp];
+
+ return 0;
+}
+
static const struct hnae3_ae_ops hclge_ops = {
.init_ae_dev = hclge_init_ae_dev,
.uninit_ae_dev = hclge_uninit_ae_dev,
@@ -12786,6 +13019,7 @@ static const struct hnae3_ae_ops hclge_ops = {
.cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
.get_media_type = hclge_get_media_type,
.check_port_speed = hclge_check_port_speed,
+ .get_fec_stats = hclge_get_fec_stats,
.get_fec = hclge_get_fec,
.set_fec = hclge_set_fec,
.get_rss_key_size = hclge_comm_get_rss_key_size,
@@ -12865,6 +13099,7 @@ static const struct hnae3_ae_ops hclge_ops = {
.get_ts_info = hclge_ptp_get_ts_info,
.get_link_diagnosis_info = hclge_get_link_diagnosis_info,
.clean_vf_config = hclge_clean_vport_config,
+ .get_dscp_prio = hclge_get_dscp_prio,
};
static struct hnae3_ae_algo ae_algo = {
@@ -12872,7 +13107,7 @@ static struct hnae3_ae_algo ae_algo = {
.pdev_id_table = ae_algo_pci_tbl,
};
-static int hclge_init(void)
+static int __init hclge_init(void)
{
pr_info("%s is initializing\n", HCLGE_NAME);
@@ -12887,7 +13122,7 @@ static int hclge_init(void)
return 0;
}
-static void hclge_exit(void)
+static void __exit hclge_exit(void)
{
hnae3_unregister_ae_algo_prepare(&ae_algo);
hnae3_unregister_ae_algo(&ae_algo);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index 18caddd541f8..495b639b0dc2 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -216,6 +216,7 @@ enum HCLGE_DEV_STATE {
HCLGE_STATE_FD_USER_DEF_CHANGED,
HCLGE_STATE_PTP_EN,
HCLGE_STATE_PTP_TX_HANDLING,
+ HCLGE_STATE_FEC_STATS_UPDATING,
HCLGE_STATE_MAX
};
@@ -258,6 +259,7 @@ struct hclge_mac {
u8 duplex;
u8 support_autoneg;
u8 speed_type; /* 0: sfp speed, 1: active speed */
+ u8 lane_num;
u32 speed;
u32 max_speed;
u32 speed_ability; /* speed ability supported by current media */
@@ -488,6 +490,26 @@ struct hclge_mac_stats {
#define HCLGE_STATS_TIMER_INTERVAL 300UL
+/* fec stats, opcode id: 0x0316 */
+#define HCLGE_FEC_STATS_MAX_LANES 8
+struct hclge_fec_stats {
+ /* fec rs mode total stats */
+ u64 rs_corr_blocks;
+ u64 rs_uncorr_blocks;
+ u64 rs_error_blocks;
+ /* fec base-r mode per-lane stats */
+ u64 base_r_lane_num;
+ u64 base_r_corr_blocks;
+ u64 base_r_uncorr_blocks;
+ union {
+ struct {
+ u64 base_r_corr_per_lanes[HCLGE_FEC_STATS_MAX_LANES];
+ u64 base_r_uncorr_per_lanes[HCLGE_FEC_STATS_MAX_LANES];
+ };
+ u64 per_lanes[HCLGE_FEC_STATS_MAX_LANES * 2];
+ };
+};
+
struct hclge_vlan_type_cfg {
u16 rx_ot_fst_vlan_type;
u16 rx_ot_sec_vlan_type;
@@ -826,6 +848,7 @@ struct hclge_dev {
struct hclge_hw hw;
struct hclge_misc_vector misc_vector;
struct hclge_mac_stats mac_stats;
+ struct hclge_fec_stats fec_stats;
unsigned long state;
unsigned long flr_state;
unsigned long last_reset_time;
@@ -1070,7 +1093,7 @@ static inline int hclge_get_queue_id(struct hnae3_queue *queue)
}
int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport);
-int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex);
+int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex, u8 lane_num);
int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
u16 vlan_id, bool is_kill);
int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable);
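The hclge_fec_stats layout added above overlays the named per-lane counters with a flat per_lanes[] array through an anonymous union, so a firmware response can be copied in one loop while readers keep the named fields. A standalone sketch of that layout, in plain userspace C with made-up names (not part of the patch):

/* Minimal sketch of the union trick used by struct hclge_fec_stats:
 * the named per-lane arrays and the flat per_lanes[] array alias the
 * same storage.
 */
#include <stdio.h>
#include <stdint.h>

#define MAX_LANES 8

struct fec_lane_stats {
	union {
		struct {
			uint64_t corr_per_lane[MAX_LANES];
			uint64_t uncorr_per_lane[MAX_LANES];
		};
		uint64_t per_lanes[MAX_LANES * 2];
	};
};

int main(void)
{
	struct fec_lane_stats s = { 0 };

	/* fill via the flat view, e.g. from a firmware response buffer */
	for (int i = 0; i < MAX_LANES * 2; i++)
		s.per_lanes[i] = i;

	/* read via the named view */
	printf("lane0 corrected=%llu uncorrected=%llu\n",
	       (unsigned long long)s.corr_per_lane[0],
	       (unsigned long long)s.uncorr_per_lane[0]);
	return 0;
}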
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
index e1012f7f9b73..a7b06c63143c 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -779,17 +779,284 @@ static void hclge_handle_vf_tbl(struct hclge_vport *vport,
}
}
+static int
+hclge_mbx_map_ring_to_vector_handler(struct hclge_mbx_ops_param *param)
+{
+ return hclge_map_unmap_ring_to_vf_vector(param->vport, true,
+ param->req);
+}
+
+static int
+hclge_mbx_unmap_ring_to_vector_handler(struct hclge_mbx_ops_param *param)
+{
+ return hclge_map_unmap_ring_to_vf_vector(param->vport, false,
+ param->req);
+}
+
+static int
+hclge_mbx_get_ring_vector_map_handler(struct hclge_mbx_ops_param *param)
+{
+ int ret;
+
+ ret = hclge_get_vf_ring_vector_map(param->vport, param->req,
+ param->resp_msg);
+ if (ret)
+ dev_err(&param->vport->back->pdev->dev,
+ "PF fail(%d) to get VF ring vector map\n",
+ ret);
+ return ret;
+}
+
+static int hclge_mbx_set_promisc_mode_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_set_vf_promisc_mode(param->vport, param->req);
+ return 0;
+}
+
+static int hclge_mbx_set_unicast_handler(struct hclge_mbx_ops_param *param)
+{
+ int ret;
+
+ ret = hclge_set_vf_uc_mac_addr(param->vport, param->req);
+ if (ret)
+ dev_err(&param->vport->back->pdev->dev,
+ "PF fail(%d) to set VF UC MAC Addr\n",
+ ret);
+ return ret;
+}
+
+static int hclge_mbx_set_multicast_handler(struct hclge_mbx_ops_param *param)
+{
+ int ret;
+
+ ret = hclge_set_vf_mc_mac_addr(param->vport, param->req);
+ if (ret)
+ dev_err(&param->vport->back->pdev->dev,
+ "PF fail(%d) to set VF MC MAC Addr\n",
+ ret);
+ return ret;
+}
+
+static int hclge_mbx_set_vlan_handler(struct hclge_mbx_ops_param *param)
+{
+ int ret;
+
+ ret = hclge_set_vf_vlan_cfg(param->vport, param->req, param->resp_msg);
+ if (ret)
+ dev_err(&param->vport->back->pdev->dev,
+ "PF failed(%d) to config VF's VLAN\n",
+ ret);
+ return ret;
+}
+
+static int hclge_mbx_set_alive_handler(struct hclge_mbx_ops_param *param)
+{
+ int ret;
+
+ ret = hclge_set_vf_alive(param->vport, param->req);
+ if (ret)
+ dev_err(&param->vport->back->pdev->dev,
+ "PF failed(%d) to set VF's ALIVE\n",
+ ret);
+ return ret;
+}
+
+static int hclge_mbx_get_qinfo_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_get_vf_queue_info(param->vport, param->resp_msg);
+ return 0;
+}
+
+static int hclge_mbx_get_qdepth_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_get_vf_queue_depth(param->vport, param->resp_msg);
+ return 0;
+}
+
+static int hclge_mbx_get_basic_info_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_get_basic_info(param->vport, param->resp_msg);
+ return 0;
+}
+
+static int hclge_mbx_get_link_status_handler(struct hclge_mbx_ops_param *param)
+{
+ int ret;
+
+ ret = hclge_push_vf_link_status(param->vport);
+ if (ret)
+ dev_err(&param->vport->back->pdev->dev,
+ "failed to inform link stat to VF, ret = %d\n",
+ ret);
+ return ret;
+}
+
+static int hclge_mbx_queue_reset_handler(struct hclge_mbx_ops_param *param)
+{
+ return hclge_mbx_reset_vf_queue(param->vport, param->req,
+ param->resp_msg);
+}
+
+static int hclge_mbx_reset_handler(struct hclge_mbx_ops_param *param)
+{
+ return hclge_reset_vf(param->vport);
+}
+
+static int hclge_mbx_keep_alive_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_vf_keep_alive(param->vport);
+ return 0;
+}
+
+static int hclge_mbx_set_mtu_handler(struct hclge_mbx_ops_param *param)
+{
+ int ret;
+
+ ret = hclge_set_vf_mtu(param->vport, param->req);
+ if (ret)
+ dev_err(&param->vport->back->pdev->dev,
+ "VF fail(%d) to set mtu\n", ret);
+ return ret;
+}
+
+static int hclge_mbx_get_qid_in_pf_handler(struct hclge_mbx_ops_param *param)
+{
+ return hclge_get_queue_id_in_pf(param->vport, param->req,
+ param->resp_msg);
+}
+
+static int hclge_mbx_get_rss_key_handler(struct hclge_mbx_ops_param *param)
+{
+ return hclge_get_rss_key(param->vport, param->req, param->resp_msg);
+}
+
+static int hclge_mbx_get_link_mode_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_get_link_mode(param->vport, param->req);
+ return 0;
+}
+
+static int
+hclge_mbx_get_vf_flr_status_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_rm_vport_all_mac_table(param->vport, false,
+ HCLGE_MAC_ADDR_UC);
+ hclge_rm_vport_all_mac_table(param->vport, false,
+ HCLGE_MAC_ADDR_MC);
+ hclge_rm_vport_all_vlan_table(param->vport, false);
+ return 0;
+}
+
+static int hclge_mbx_vf_uninit_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_rm_vport_all_mac_table(param->vport, true,
+ HCLGE_MAC_ADDR_UC);
+ hclge_rm_vport_all_mac_table(param->vport, true,
+ HCLGE_MAC_ADDR_MC);
+ hclge_rm_vport_all_vlan_table(param->vport, true);
+ return 0;
+}
+
+static int hclge_mbx_get_media_type_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_get_vf_media_type(param->vport, param->resp_msg);
+ return 0;
+}
+
+static int hclge_mbx_push_link_status_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_handle_link_change_event(param->vport->back, param->req);
+ return 0;
+}
+
+static int hclge_mbx_get_mac_addr_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_get_vf_mac_addr(param->vport, param->resp_msg);
+ return 0;
+}
+
+static int hclge_mbx_ncsi_error_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_handle_ncsi_error(param->vport->back);
+ return 0;
+}
+
+static int hclge_mbx_handle_vf_tbl_handler(struct hclge_mbx_ops_param *param)
+{
+ hclge_handle_vf_tbl(param->vport, param->req);
+ return 0;
+}
+
+static const hclge_mbx_ops_fn hclge_mbx_ops_list[HCLGE_MBX_OPCODE_MAX] = {
+ [HCLGE_MBX_RESET] = hclge_mbx_reset_handler,
+ [HCLGE_MBX_SET_UNICAST] = hclge_mbx_set_unicast_handler,
+ [HCLGE_MBX_SET_MULTICAST] = hclge_mbx_set_multicast_handler,
+ [HCLGE_MBX_SET_VLAN] = hclge_mbx_set_vlan_handler,
+ [HCLGE_MBX_MAP_RING_TO_VECTOR] = hclge_mbx_map_ring_to_vector_handler,
+ [HCLGE_MBX_UNMAP_RING_TO_VECTOR] = hclge_mbx_unmap_ring_to_vector_handler,
+ [HCLGE_MBX_SET_PROMISC_MODE] = hclge_mbx_set_promisc_mode_handler,
+ [HCLGE_MBX_GET_QINFO] = hclge_mbx_get_qinfo_handler,
+ [HCLGE_MBX_GET_QDEPTH] = hclge_mbx_get_qdepth_handler,
+ [HCLGE_MBX_GET_BASIC_INFO] = hclge_mbx_get_basic_info_handler,
+ [HCLGE_MBX_GET_RSS_KEY] = hclge_mbx_get_rss_key_handler,
+ [HCLGE_MBX_GET_MAC_ADDR] = hclge_mbx_get_mac_addr_handler,
+ [HCLGE_MBX_GET_LINK_STATUS] = hclge_mbx_get_link_status_handler,
+ [HCLGE_MBX_QUEUE_RESET] = hclge_mbx_queue_reset_handler,
+ [HCLGE_MBX_KEEP_ALIVE] = hclge_mbx_keep_alive_handler,
+ [HCLGE_MBX_SET_ALIVE] = hclge_mbx_set_alive_handler,
+ [HCLGE_MBX_SET_MTU] = hclge_mbx_set_mtu_handler,
+ [HCLGE_MBX_GET_QID_IN_PF] = hclge_mbx_get_qid_in_pf_handler,
+ [HCLGE_MBX_GET_LINK_MODE] = hclge_mbx_get_link_mode_handler,
+ [HCLGE_MBX_GET_MEDIA_TYPE] = hclge_mbx_get_media_type_handler,
+ [HCLGE_MBX_VF_UNINIT] = hclge_mbx_vf_uninit_handler,
+ [HCLGE_MBX_HANDLE_VF_TBL] = hclge_mbx_handle_vf_tbl_handler,
+ [HCLGE_MBX_GET_RING_VECTOR_MAP] = hclge_mbx_get_ring_vector_map_handler,
+ [HCLGE_MBX_GET_VF_FLR_STATUS] = hclge_mbx_get_vf_flr_status_handler,
+ [HCLGE_MBX_PUSH_LINK_STATUS] = hclge_mbx_push_link_status_handler,
+ [HCLGE_MBX_NCSI_ERROR] = hclge_mbx_ncsi_error_handler,
+};
+
+static void hclge_mbx_request_handling(struct hclge_mbx_ops_param *param)
+{
+ hclge_mbx_ops_fn cmd_func = NULL;
+ struct hclge_dev *hdev;
+ int ret = 0;
+
+ hdev = param->vport->back;
+ cmd_func = hclge_mbx_ops_list[param->req->msg.code];
+ if (cmd_func)
+ ret = cmd_func(param);
+ else
+ dev_err(&hdev->pdev->dev,
+ "un-supported mailbox message, code = %u\n",
+ param->req->msg.code);
+
+ /* PF driver should not reply IMP */
+ if (hnae3_get_bit(param->req->mbx_need_resp, HCLGE_MBX_NEED_RESP_B) &&
+ param->req->msg.code < HCLGE_MBX_GET_VF_FLR_STATUS) {
+ param->resp_msg->status = ret;
+ if (time_is_before_jiffies(hdev->last_mbx_scheduled +
+ HCLGE_MBX_SCHED_TIMEOUT))
+ dev_warn(&hdev->pdev->dev,
+ "resp vport%u mbx(%u,%u) late\n",
+ param->req->mbx_src_vfid,
+ param->req->msg.code,
+ param->req->msg.subcode);
+
+ hclge_gen_resp_to_vf(param->vport, param->req, param->resp_msg);
+ }
+}
+
void hclge_mbx_handler(struct hclge_dev *hdev)
{
struct hclge_comm_cmq_ring *crq = &hdev->hw.hw.cmq.crq;
struct hclge_respond_to_vf_msg resp_msg;
struct hclge_mbx_vf_to_pf_cmd *req;
- struct hclge_vport *vport;
+ struct hclge_mbx_ops_param param;
struct hclge_desc *desc;
- bool is_del = false;
unsigned int flag;
- int ret = 0;
+ param.resp_msg = &resp_msg;
/* handle all the mailbox requests in the queue */
while (!hclge_cmd_crq_empty(&hdev->hw)) {
if (test_bit(HCLGE_COMM_STATE_CMD_DISABLE,
@@ -814,152 +1081,16 @@ void hclge_mbx_handler(struct hclge_dev *hdev)
continue;
}
- vport = &hdev->vport[req->mbx_src_vfid];
-
trace_hclge_pf_mbx_get(hdev, req);
/* clear the resp_msg before processing every mailbox message */
memset(&resp_msg, 0, sizeof(resp_msg));
-
- switch (req->msg.code) {
- case HCLGE_MBX_MAP_RING_TO_VECTOR:
- ret = hclge_map_unmap_ring_to_vf_vector(vport, true,
- req);
- break;
- case HCLGE_MBX_UNMAP_RING_TO_VECTOR:
- ret = hclge_map_unmap_ring_to_vf_vector(vport, false,
- req);
- break;
- case HCLGE_MBX_GET_RING_VECTOR_MAP:
- ret = hclge_get_vf_ring_vector_map(vport, req,
- &resp_msg);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "PF fail(%d) to get VF ring vector map\n",
- ret);
- break;
- case HCLGE_MBX_SET_PROMISC_MODE:
- hclge_set_vf_promisc_mode(vport, req);
- break;
- case HCLGE_MBX_SET_UNICAST:
- ret = hclge_set_vf_uc_mac_addr(vport, req);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "PF fail(%d) to set VF UC MAC Addr\n",
- ret);
- break;
- case HCLGE_MBX_SET_MULTICAST:
- ret = hclge_set_vf_mc_mac_addr(vport, req);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "PF fail(%d) to set VF MC MAC Addr\n",
- ret);
- break;
- case HCLGE_MBX_SET_VLAN:
- ret = hclge_set_vf_vlan_cfg(vport, req, &resp_msg);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "PF failed(%d) to config VF's VLAN\n",
- ret);
- break;
- case HCLGE_MBX_SET_ALIVE:
- ret = hclge_set_vf_alive(vport, req);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "PF failed(%d) to set VF's ALIVE\n",
- ret);
- break;
- case HCLGE_MBX_GET_QINFO:
- hclge_get_vf_queue_info(vport, &resp_msg);
- break;
- case HCLGE_MBX_GET_QDEPTH:
- hclge_get_vf_queue_depth(vport, &resp_msg);
- break;
- case HCLGE_MBX_GET_BASIC_INFO:
- hclge_get_basic_info(vport, &resp_msg);
- break;
- case HCLGE_MBX_GET_LINK_STATUS:
- ret = hclge_push_vf_link_status(vport);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "failed to inform link stat to VF, ret = %d\n",
- ret);
- break;
- case HCLGE_MBX_QUEUE_RESET:
- ret = hclge_mbx_reset_vf_queue(vport, req, &resp_msg);
- break;
- case HCLGE_MBX_RESET:
- ret = hclge_reset_vf(vport);
- break;
- case HCLGE_MBX_KEEP_ALIVE:
- hclge_vf_keep_alive(vport);
- break;
- case HCLGE_MBX_SET_MTU:
- ret = hclge_set_vf_mtu(vport, req);
- if (ret)
- dev_err(&hdev->pdev->dev,
- "VF fail(%d) to set mtu\n", ret);
- break;
- case HCLGE_MBX_GET_QID_IN_PF:
- ret = hclge_get_queue_id_in_pf(vport, req, &resp_msg);
- break;
- case HCLGE_MBX_GET_RSS_KEY:
- ret = hclge_get_rss_key(vport, req, &resp_msg);
- break;
- case HCLGE_MBX_GET_LINK_MODE:
- hclge_get_link_mode(vport, req);
- break;
- case HCLGE_MBX_GET_VF_FLR_STATUS:
- case HCLGE_MBX_VF_UNINIT:
- is_del = req->msg.code == HCLGE_MBX_VF_UNINIT;
- hclge_rm_vport_all_mac_table(vport, is_del,
- HCLGE_MAC_ADDR_UC);
- hclge_rm_vport_all_mac_table(vport, is_del,
- HCLGE_MAC_ADDR_MC);
- hclge_rm_vport_all_vlan_table(vport, is_del);
- break;
- case HCLGE_MBX_GET_MEDIA_TYPE:
- hclge_get_vf_media_type(vport, &resp_msg);
- break;
- case HCLGE_MBX_PUSH_LINK_STATUS:
- hclge_handle_link_change_event(hdev, req);
- break;
- case HCLGE_MBX_GET_MAC_ADDR:
- hclge_get_vf_mac_addr(vport, &resp_msg);
- break;
- case HCLGE_MBX_NCSI_ERROR:
- hclge_handle_ncsi_error(hdev);
- break;
- case HCLGE_MBX_HANDLE_VF_TBL:
- hclge_handle_vf_tbl(vport, req);
- break;
- default:
- dev_err(&hdev->pdev->dev,
- "un-supported mailbox message, code = %u\n",
- req->msg.code);
- break;
- }
-
- /* PF driver should not reply IMP */
- if (hnae3_get_bit(req->mbx_need_resp, HCLGE_MBX_NEED_RESP_B) &&
- req->msg.code < HCLGE_MBX_GET_VF_FLR_STATUS) {
- resp_msg.status = ret;
- if (time_is_before_jiffies(hdev->last_mbx_scheduled +
- HCLGE_MBX_SCHED_TIMEOUT))
- dev_warn(&hdev->pdev->dev,
- "resp vport%u mbx(%u,%u) late\n",
- req->mbx_src_vfid,
- req->msg.code,
- req->msg.subcode);
-
- hclge_gen_resp_to_vf(vport, req, &resp_msg);
- }
+ param.vport = &hdev->vport[req->mbx_src_vfid];
+ param.req = req;
+ hclge_mbx_request_handling(&param);
crq->desc[crq->next_to_use].flag = 0;
hclge_mbx_ring_ptr_move_crq(crq);
-
- /* reinitialize ret after complete the mbx message processing */
- ret = 0;
}
/* Write back CMDQ_RQ header pointer, M7 need this pointer */
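The mailbox rework above replaces the long switch in hclge_mbx_handler() with hclge_mbx_ops_list[], an array of handlers indexed by opcode, so adding a new opcode only touches the table. A standalone sketch of that table-driven dispatch pattern, with hypothetical names and types (plain userspace C, not the driver's code):

/* Handlers indexed by opcode; a NULL slot means the opcode is unknown. */
#include <stdio.h>

#define OPCODE_MAX 4

struct mbx_param { int vf_id; };

typedef int (*mbx_handler_t)(struct mbx_param *param);

static int handle_reset(struct mbx_param *p)
{
	printf("reset vf %d\n", p->vf_id);
	return 0;
}

static int handle_keep_alive(struct mbx_param *p)
{
	printf("keep alive vf %d\n", p->vf_id);
	return 0;
}

static const mbx_handler_t handlers[OPCODE_MAX] = {
	[1] = handle_reset,
	[2] = handle_keep_alive,
};

static int dispatch(unsigned int code, struct mbx_param *p)
{
	if (code >= OPCODE_MAX || !handlers[code])
		return -1;	/* unknown opcode */
	return handlers[code](p);
}

int main(void)
{
	struct mbx_param p = { .vf_id = 3 };

	dispatch(1, &p);
	dispatch(3, &p);	/* no handler registered, rejected */
	return 0;
}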
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
index 03d63b6a9b2b..85fb11de43a1 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
@@ -187,7 +187,7 @@ static void hclge_mac_adjust_link(struct net_device *netdev)
speed = netdev->phydev->speed;
duplex = netdev->phydev->duplex;
- ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex);
+ ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex, 0);
if (ret)
netdev_err(netdev, "failed to adjust link.\n");
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index 2f33b036a47a..4a33f65190e2 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -248,7 +248,7 @@ static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
return 0;
}
-static int hclge_up_to_tc_map(struct hclge_dev *hdev)
+int hclge_up_to_tc_map(struct hclge_dev *hdev)
{
struct hclge_desc desc;
u8 *pri = (u8 *)desc.data;
@@ -266,6 +266,47 @@ static int hclge_up_to_tc_map(struct hclge_dev *hdev)
return hclge_cmd_send(&hdev->hw, &desc, 1);
}
+static void hclge_dscp_to_prio_map_init(struct hclge_dev *hdev)
+{
+ u8 i;
+
+ hdev->vport[0].nic.kinfo.tc_map_mode = HNAE3_TC_MAP_MODE_PRIO;
+ hdev->vport[0].nic.kinfo.dscp_app_cnt = 0;
+ for (i = 0; i < HNAE3_MAX_DSCP; i++)
+ hdev->vport[0].nic.kinfo.dscp_prio[i] = HNAE3_PRIO_ID_INVALID;
+}
+
+int hclge_dscp_to_tc_map(struct hclge_dev *hdev)
+{
+ struct hclge_desc desc[HCLGE_DSCP_MAP_TC_BD_NUM];
+ u8 *req0 = (u8 *)desc[0].data;
+ u8 *req1 = (u8 *)desc[1].data;
+ u8 pri_id, tc_id, i, j;
+
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QOS_MAP, false);
+ desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_QOS_MAP, false);
+
+ /* The low 32 dscp settings use bd0, the high 32 use bd1 */
+ for (i = 0; i < HNAE3_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM; i++) {
+ pri_id = hdev->vport[0].nic.kinfo.dscp_prio[i];
+ pri_id = pri_id == HNAE3_PRIO_ID_INVALID ? 0 : pri_id;
+ tc_id = hdev->tm_info.prio_tc[pri_id];
+ /* Each dscp setting takes 4 bits, so each byte holds two dscp
+ * settings
+ */
+ req0[i >> 1] |= tc_id << HCLGE_DSCP_TC_SHIFT(i);
+
+ j = i + HNAE3_MAX_DSCP / HCLGE_DSCP_MAP_TC_BD_NUM;
+ pri_id = hdev->vport[0].nic.kinfo.dscp_prio[j];
+ pri_id = pri_id == HNAE3_PRIO_ID_INVALID ? 0 : pri_id;
+ tc_id = hdev->tm_info.prio_tc[pri_id];
+ req1[i >> 1] |= tc_id << HCLGE_DSCP_TC_SHIFT(i);
+ }
+
+ return hclge_cmd_send(&hdev->hw, desc, HCLGE_DSCP_MAP_TC_BD_NUM);
+}
+
static int hclge_tm_pg_to_pri_map_cfg(struct hclge_dev *hdev,
u8 pg_id, u8 pri_bit_map)
{
@@ -1275,6 +1316,12 @@ static int hclge_tm_map_cfg(struct hclge_dev *hdev)
if (ret)
return ret;
+ if (hdev->vport[0].nic.kinfo.tc_map_mode == HNAE3_TC_MAP_MODE_DSCP) {
+ ret = hclge_dscp_to_tc_map(hdev);
+ if (ret)
+ return ret;
+ }
+
ret = hclge_tm_pg_to_pri_map(hdev);
if (ret)
return ret;
@@ -1646,6 +1693,7 @@ int hclge_tm_schd_init(struct hclge_dev *hdev)
return -EINVAL;
hclge_tm_schd_info_init(hdev);
+ hclge_dscp_to_prio_map_init(hdev);
return hclge_tm_init_hw(hdev, true);
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
index d943943912f7..68f28a98e380 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
@@ -30,6 +30,9 @@ enum hclge_opcode_type;
#define HCLGE_TM_PF_MAX_PRI_NUM 8
#define HCLGE_TM_PF_MAX_QSET_NUM 8
+#define HCLGE_DSCP_MAP_TC_BD_NUM 2
+#define HCLGE_DSCP_TC_SHIFT(n) (((n) & 1) * 4)
+
struct hclge_pg_to_pri_link_cmd {
u8 pg_id;
u8 rsvd1[3];
@@ -262,4 +265,6 @@ int hclge_tm_get_pg_shaper(struct hclge_dev *hdev, u8 pg_id,
struct hclge_tm_shaper_para *para);
int hclge_tm_get_port_shaper(struct hclge_dev *hdev,
struct hclge_tm_shaper_para *para);
+int hclge_up_to_tc_map(struct hclge_dev *hdev);
+int hclge_dscp_to_tc_map(struct hclge_dev *hdev);
#endif
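hclge_dscp_to_tc_map() and HCLGE_DSCP_TC_SHIFT() above pack one 4-bit TC value per DSCP: byte index i >> 1, low nibble for even i, high nibble for odd i, with DSCP 0-31 going to the first descriptor and 32-63 to the second. A small standalone illustration of the same packing, using an example mapping and made-up buffer names (userspace C, not the driver code):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define MAX_DSCP 64
#define BD_NUM   2
#define DSCP_TC_SHIFT(n) (((n) & 1) * 4)

int main(void)
{
	uint8_t tc_of_dscp[MAX_DSCP];
	uint8_t bd[BD_NUM][MAX_DSCP / BD_NUM / 2];	/* two DSCP values per byte */

	memset(bd, 0, sizeof(bd));
	for (int i = 0; i < MAX_DSCP; i++)
		tc_of_dscp[i] = i % 8;	/* example mapping only */

	for (int i = 0; i < MAX_DSCP / BD_NUM; i++) {
		bd[0][i >> 1] |= tc_of_dscp[i] << DSCP_TC_SHIFT(i);
		bd[1][i >> 1] |= tc_of_dscp[i + MAX_DSCP / BD_NUM] << DSCP_TC_SHIFT(i);
	}

	printf("dscp 5 -> tc %u\n", (bd[0][5 >> 1] >> DSCP_TC_SHIFT(5)) & 0xf);
	printf("dscp 40 -> tc %u\n", (bd[1][(40 - 32) >> 1] >> DSCP_TC_SHIFT(40 - 32)) & 0xf);
	return 0;
}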
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 26f87330173e..db6f7cdba958 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -2125,7 +2125,7 @@ static int hclgevf_config_gro(struct hclgevf_dev *hdev)
struct hclge_desc desc;
int ret;
- if (!hnae3_dev_gro_supported(hdev))
+ if (!hnae3_ae_dev_gro_supported(hdev->ae_dev))
return 0;
hclgevf_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG,
@@ -3177,7 +3177,7 @@ static int hclgevf_get_status(struct hnae3_handle *handle)
static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle,
u8 *auto_neg, u32 *speed,
- u8 *duplex)
+ u8 *duplex, u32 *lane_num)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
@@ -3429,7 +3429,7 @@ static struct hnae3_ae_algo ae_algovf = {
.pdev_id_table = ae_algovf_pci_tbl,
};
-static int hclgevf_init(void)
+static int __init hclgevf_init(void)
{
pr_info("%s is initializing\n", HCLGEVF_NAME);
@@ -3444,7 +3444,7 @@ static int hclgevf_init(void)
return 0;
}
-static void hclgevf_exit(void)
+static void __exit hclgevf_exit(void)
{
hnae3_unregister_ae_algo(&ae_algovf);
destroy_workqueue(hclgevf_wq);
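The __init/__exit annotations added to hclge_init()/hclge_exit() and hclgevf_init()/hclgevf_exit() above let the kernel discard module init code once loading is complete and drop the exit path entirely when the driver is built in. A minimal, hypothetical module skeleton showing the same annotations (illustration only, not part of this patch):

#include <linux/module.h>
#include <linux/init.h>

static int __init demo_init(void)
{
	pr_info("demo is initializing\n");
	return 0;
}

static void __exit demo_exit(void)
{
	pr_info("demo is unloading\n");
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");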
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_debugfs.h b/drivers/net/ethernet/huawei/hinic/hinic_debugfs.h
index e9e00cfa1329..e10f739d8339 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_debugfs.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_debugfs.h
@@ -12,7 +12,6 @@
#define TBL_ID_FUNC_CFG_SM_INST 1
#define HINIC_FUNCTION_CONFIGURE_TABLE_SIZE 64
-#define HINIC_FUNCTION_CONFIGURE_TABLE 1
struct hinic_cmd_lt_rd {
u8 status;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
index 93192f58ac88..f4b680286911 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
@@ -55,7 +55,6 @@
#define COALESCE_ALL_QUEUE 0xFFFF
#define COALESCE_MAX_PENDING_LIMIT (255 * COALESCE_PENDING_LIMIT_UNIT)
#define COALESCE_MAX_TIMER_CFG (255 * COALESCE_TIMER_CFG_UNIT)
-#define OBJ_STR_MAX_LEN 32
struct hw2ethtool_link_mode {
enum ethtool_link_mode_bit_indices link_mode_bit;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
index a627237f694b..78190e88cd75 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c
@@ -82,11 +82,6 @@
struct hinic_func_to_io, \
cmdqs)
-enum cmdq_wqe_type {
- WQE_LCMD_TYPE = 0,
- WQE_SCMD_TYPE = 1,
-};
-
enum completion_format {
COMPLETE_DIRECT = 0,
COMPLETE_SGE = 1,
@@ -509,8 +504,8 @@ int hinic_cmdq_direct_resp(struct hinic_cmdqs *cmdqs,
*
* Return 0 - Success, negative - Failure
**/
-int hinic_set_arm_bit(struct hinic_cmdqs *cmdqs,
- enum hinic_set_arm_qtype q_type, u32 q_id)
+static int hinic_set_arm_bit(struct hinic_cmdqs *cmdqs,
+ enum hinic_set_arm_qtype q_type, u32 q_id)
{
struct hinic_cmdq *cmdq = &cmdqs->cmdq[HINIC_CMDQ_SYNC];
struct hinic_hwif *hwif = cmdqs->hwif;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h
index 9c413e963a04..ff09cf0ed52b 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.h
@@ -177,9 +177,6 @@ int hinic_cmdq_direct_resp(struct hinic_cmdqs *cmdqs,
enum hinic_mod_type mod, u8 cmd,
struct hinic_cmdq_buf *buf_in, u64 *out_param);
-int hinic_set_arm_bit(struct hinic_cmdqs *cmdqs,
- enum hinic_set_arm_qtype q_type, u32 q_id);
-
int hinic_init_cmdqs(struct hinic_cmdqs *cmdqs, struct hinic_hwif *hwif,
void __iomem **db_area);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h
index 7e84e4e33fff..d56e7413ace0 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_csr.h
@@ -22,7 +22,6 @@
(HINIC_DMA_ATTR_BASE + (idx) * HINIC_DMA_ATTR_STRIDE)
#define HINIC_PPF_ELECTION_STRIDE 0x4
-#define HINIC_CSR_MAX_PORTS 4
#define HINIC_CSR_PPF_ELECTION_ADDR(idx) \
(HINIC_ELECTION_BASE + (idx) * HINIC_PPF_ELECTION_STRIDE)
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
index 2127a48749a8..94f470556295 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
@@ -29,7 +29,6 @@
#include "hinic_hw_io.h"
#include "hinic_hw_dev.h"
-#define IO_STATUS_TIMEOUT 100
#define OUTBOUND_STATE_TIMEOUT 100
#define DB_STATE_TIMEOUT 100
@@ -42,11 +41,6 @@ enum intr_type {
INTR_MSIX_TYPE,
};
-enum io_status {
- IO_STOPPED = 0,
- IO_RUNNING = 1,
-};
-
/**
* parse_capability - convert device capabilities to NIC capabilities
* @hwdev: the HW device to set and convert device capabilities for
@@ -837,8 +831,8 @@ static int hinic_l2nic_reset(struct hinic_hwdev *hwdev)
return 0;
}
-int hinic_get_interrupt_cfg(struct hinic_hwdev *hwdev,
- struct hinic_msix_config *interrupt_info)
+static int hinic_get_interrupt_cfg(struct hinic_hwdev *hwdev,
+ struct hinic_msix_config *interrupt_info)
{
u16 out_size = sizeof(*interrupt_info);
struct hinic_pfhwdev *pfhwdev;
@@ -1041,13 +1035,6 @@ void hinic_free_hwdev(struct hinic_hwdev *hwdev)
hinic_free_hwif(hwdev->hwif);
}
-int hinic_hwdev_max_num_qps(struct hinic_hwdev *hwdev)
-{
- struct hinic_cap *nic_cap = &hwdev->nic_cap;
-
- return nic_cap->max_qps;
-}
-
/**
* hinic_hwdev_num_qps - return the number QPs available for use
* @hwdev: the NIC HW device
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
index 416492e48274..d2d89b0a5ef0 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
@@ -566,8 +566,6 @@ struct hinic_hwdev *hinic_init_hwdev(struct pci_dev *pdev, struct devlink *devli
void hinic_free_hwdev(struct hinic_hwdev *hwdev);
-int hinic_hwdev_max_num_qps(struct hinic_hwdev *hwdev);
-
int hinic_hwdev_num_qps(struct hinic_hwdev *hwdev);
struct hinic_sq *hinic_hwdev_get_sq(struct hinic_hwdev *hwdev, int i);
@@ -587,9 +585,6 @@ int hinic_hwdev_hw_ci_addr_set(struct hinic_hwdev *hwdev, struct hinic_sq *sq,
void hinic_hwdev_set_msix_state(struct hinic_hwdev *hwdev, u16 msix_index,
enum hinic_msix_state flag);
-int hinic_get_interrupt_cfg(struct hinic_hwdev *hwdev,
- struct hinic_msix_config *interrupt_info);
-
int hinic_set_interrupt_cfg(struct hinic_hwdev *hwdev,
struct hinic_msix_config *interrupt_info);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c
index 0428faa68e80..88567305d06e 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.c
@@ -57,39 +57,6 @@ int hinic_msix_attr_set(struct hinic_hwif *hwif, u16 msix_index,
return 0;
}
-/**
- * hinic_msix_attr_get - get message attribute of msix entry
- * @hwif: the HW interface of a pci function device
- * @msix_index: msix_index
- * @pending_limit: the maximum pending interrupt events (unit 8)
- * @coalesc_timer: coalesc period for interrupt (unit 8 us)
- * @lli_timer: replenishing period for low latency credit (unit 8 us)
- * @lli_credit_limit: maximum credits for low latency msix messages (unit 8)
- * @resend_timer: maximum wait for resending msix (unit coalesc period)
- *
- * Return 0 - Success, negative - Failure
- **/
-int hinic_msix_attr_get(struct hinic_hwif *hwif, u16 msix_index,
- u8 *pending_limit, u8 *coalesc_timer,
- u8 *lli_timer, u8 *lli_credit_limit,
- u8 *resend_timer)
-{
- u32 addr, val;
-
- if (!VALID_MSIX_IDX(&hwif->attr, msix_index))
- return -EINVAL;
-
- addr = HINIC_CSR_MSIX_CTRL_ADDR(msix_index);
- val = hinic_hwif_read_reg(hwif, addr);
-
- *pending_limit = HINIC_MSIX_ATTR_GET(val, PENDING_LIMIT);
- *coalesc_timer = HINIC_MSIX_ATTR_GET(val, COALESC_TIMER);
- *lli_timer = HINIC_MSIX_ATTR_GET(val, LLI_TIMER);
- *lli_credit_limit = HINIC_MSIX_ATTR_GET(val, LLI_CREDIT);
- *resend_timer = HINIC_MSIX_ATTR_GET(val, RESEND_TIMER);
- return 0;
-}
-
/**
* hinic_msix_attr_cnt_clear - clear message attribute counters for msix entry
* @hwif: the HW interface of a pci function device
@@ -115,8 +82,6 @@ int hinic_msix_attr_cnt_clear(struct hinic_hwif *hwif, u16 msix_index)
* hinic_set_pf_action - set action on pf channel
* @hwif: the HW interface of a pci function device
* @action: action on pf channel
- *
- * Return 0 - Success, negative - Failure
**/
void hinic_set_pf_action(struct hinic_hwif *hwif, enum hinic_pf_action action)
{
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h
index c06f2253151e..3d588896a367 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h
@@ -131,10 +131,6 @@
(((u32)(val) & HINIC_MSIX_##member##_MASK) << \
HINIC_MSIX_##member##_SHIFT)
-#define HINIC_MSIX_ATTR_GET(val, member) \
- (((val) >> HINIC_MSIX_##member##_SHIFT) & \
- HINIC_MSIX_##member##_MASK)
-
#define HINIC_MSIX_CNT_RESEND_TIMER_SHIFT 29
#define HINIC_MSIX_CNT_RESEND_TIMER_MASK 0x1
@@ -269,11 +265,6 @@ int hinic_msix_attr_set(struct hinic_hwif *hwif, u16 msix_index,
u8 lli_timer_cfg, u8 lli_credit_limit,
u8 resend_timer);
-int hinic_msix_attr_get(struct hinic_hwif *hwif, u16 msix_index,
- u8 *pending_limit, u8 *coalesc_timer_cfg,
- u8 *lli_timer, u8 *lli_credit_limit,
- u8 *resend_timer);
-
void hinic_set_msix_state(struct hinic_hwif *hwif, u16 msix_idx,
enum hinic_msix_state flag);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c
index 5078c0c73863..3f9c31d29215 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.c
@@ -117,7 +117,6 @@ enum hinic_mbox_tx_status {
#define MBOX_WB_STATUS_MASK 0xFF
#define MBOX_WB_ERROR_CODE_MASK 0xFF00
#define MBOX_WB_STATUS_FINISHED_SUCCESS 0xFF
-#define MBOX_WB_STATUS_FINISHED_WITH_ERR 0xFE
#define MBOX_WB_STATUS_NOT_FINISHED 0x00
#define MBOX_STATUS_FINISHED(wb) \
@@ -130,11 +129,8 @@ enum hinic_mbox_tx_status {
#define SEQ_ID_START_VAL 0
#define SEQ_ID_MAX_VAL 42
-#define DST_AEQ_IDX_DEFAULT_VAL 0
-#define SRC_AEQ_IDX_DEFAULT_VAL 0
#define NO_DMA_ATTRIBUTE_VAL 0
-#define HINIC_MGMT_RSP_AEQN 0
#define HINIC_MBOX_RSP_AEQN 2
#define HINIC_MBOX_RECV_AEQN 0
@@ -146,7 +142,6 @@ enum hinic_mbox_tx_status {
#define IS_PF_OR_PPF_SRC(src_func_idx) ((src_func_idx) < HINIC_MAX_PF_FUNCS)
-#define MBOX_RESPONSE_ERROR 0x1
#define MBOX_MSG_ID_MASK 0xFF
#define MBOX_MSG_ID(func_to_func) ((func_to_func)->send_msg_id)
#define MBOX_MSG_ID_INC(func_to_func_mbox) (MBOX_MSG_ID(func_to_func_mbox) = \
@@ -621,7 +616,7 @@ static bool check_vf_mbox_random_id(struct hinic_mbox_func_to_func *func_to_func
return false;
}
-void hinic_mbox_func_aeqe_handler(void *handle, void *header, u8 size)
+static void hinic_mbox_func_aeqe_handler(void *handle, void *header, u8 size)
{
struct hinic_mbox_func_to_func *func_to_func;
u64 mbox_header = *((u64 *)header);
@@ -649,7 +644,7 @@ void hinic_mbox_func_aeqe_handler(void *handle, void *header, u8 size)
recv_mbox_handler(func_to_func, (u64 *)header, recv_mbox);
}
-void hinic_mbox_self_aeqe_handler(void *handle, void *header, u8 size)
+static void hinic_mbox_self_aeqe_handler(void *handle, void *header, u8 size)
{
struct hinic_mbox_func_to_func *func_to_func;
struct hinic_send_mbox *send_mbox;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.h
index 46953190d29e..33ac7814d3b3 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mbox.h
@@ -150,10 +150,6 @@ void hinic_unregister_pf_mbox_cb(struct hinic_hwdev *hwdev,
void hinic_unregister_vf_mbox_cb(struct hinic_hwdev *hwdev,
enum hinic_mod_type mod);
-void hinic_mbox_func_aeqe_handler(void *handle, void *header, u8 size);
-
-void hinic_mbox_self_aeqe_handler(void *handle, void *header, u8 size);
-
int hinic_func_to_func_init(struct hinic_hwdev *hwdev);
void hinic_func_to_func_free(struct hinic_hwdev *hwdev);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
index 336248aa2e48..537a8098bc4e 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
@@ -472,8 +472,7 @@ int hinic_get_rq_free_wqebbs(struct hinic_rq *rq)
return atomic_read(&wq->delta) - 1;
}
-static void sq_prepare_ctrl(struct hinic_sq_ctrl *ctrl, u16 prod_idx,
- int nr_descs)
+static void sq_prepare_ctrl(struct hinic_sq_ctrl *ctrl, int nr_descs)
{
u32 ctrl_size, task_size, bufdesc_size;
@@ -588,18 +587,16 @@ void hinic_set_tso_inner_l4(struct hinic_sq_task *task, u32 *queue_info,
/**
* hinic_sq_prepare_wqe - prepare wqe before insert to the queue
* @sq: send queue
- * @prod_idx: pi value
* @sq_wqe: wqe to prepare
* @sges: sges for use by the wqe for send for buf addresses
* @nr_sges: number of sges
**/
-void hinic_sq_prepare_wqe(struct hinic_sq *sq, u16 prod_idx,
- struct hinic_sq_wqe *sq_wqe, struct hinic_sge *sges,
- int nr_sges)
+void hinic_sq_prepare_wqe(struct hinic_sq *sq, struct hinic_sq_wqe *sq_wqe,
+ struct hinic_sge *sges, int nr_sges)
{
int i;
- sq_prepare_ctrl(&sq_wqe->ctrl, prod_idx, nr_sges);
+ sq_prepare_ctrl(&sq_wqe->ctrl, nr_sges);
sq_prepare_task(&sq_wqe->task);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
index 0dfa51ad5855..178dcc874370 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
@@ -175,9 +175,8 @@ void hinic_set_tso_inner_l4(struct hinic_sq_task *task,
u32 l4_len,
u32 offset, u32 ip_ident, u32 mss);
-void hinic_sq_prepare_wqe(struct hinic_sq *sq, u16 prod_idx,
- struct hinic_sq_wqe *wqe, struct hinic_sge *sges,
- int nr_sges);
+void hinic_sq_prepare_wqe(struct hinic_sq *sq, struct hinic_sq_wqe *wqe,
+ struct hinic_sge *sges, int nr_sges);
void hinic_sq_write_db(struct hinic_sq *sq, u16 prod_idx, unsigned int wqe_size,
unsigned int cos);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
index 4daf6bf291ec..e1a1735c00c1 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
@@ -175,8 +175,6 @@ static int cmdq_allocate_page(struct hinic_cmdq_pages *cmdq_pages)
/**
* cmdq_free_page - free page from cmdq
* @cmdq_pages: the pages of the cmdq queue struct that hold the page
- *
- * Return 0 - Success, negative - Failure
**/
static void cmdq_free_page(struct hinic_cmdq_pages *cmdq_pages)
{
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h
index f4b6d2c1061f..c6bdeed5606e 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h
@@ -261,23 +261,6 @@
#define HINIC_RSS_TYPE_GET(val, member) \
(((u32)(val) >> HINIC_RSS_TYPE_##member##_SHIFT) & 0x1)
-enum hinic_l4offload_type {
- HINIC_L4_OFF_DISABLE = 0,
- HINIC_TCP_OFFLOAD_ENABLE = 1,
- HINIC_SCTP_OFFLOAD_ENABLE = 2,
- HINIC_UDP_OFFLOAD_ENABLE = 3,
-};
-
-enum hinic_vlan_offload {
- HINIC_VLAN_OFF_DISABLE = 0,
- HINIC_VLAN_OFF_ENABLE = 1,
-};
-
-enum hinic_pkt_parsed {
- HINIC_PKT_NOT_PARSED = 0,
- HINIC_PKT_PARSED = 1,
-};
-
enum hinic_l3_offload_type {
L3TYPE_UNKNOWN = 0,
IPV6_PKT = 1,
@@ -305,18 +288,10 @@ enum hinic_outer_l3type {
HINIC_OUTER_L3TYPE_IPV4_CHKSUM = 3,
};
-enum hinic_media_type {
- HINIC_MEDIA_UNKNOWN = 0,
-};
-
enum hinic_l2type {
HINIC_L2TYPE_ETH = 0,
};
-enum hinc_tunnel_l4type {
- HINIC_TUNNEL_L4TYPE_UNKNOWN = 0,
-};
-
struct hinic_cmdq_header {
u32 header_info;
u32 saved_data;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
index c23ee2ddbce3..e1f54a2f28b2 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
@@ -960,8 +960,6 @@ static void hinic_refresh_nic_cfg(struct hinic_dev *nic_dev)
* @in_size: input size
* @buf_out: output buffer
* @out_size: returned output size
- *
- * Return 0 - Success, negative - Failure
**/
static void link_status_event_handler(void *handle, void *buf_in, u16 in_size,
void *buf_out, u16 *out_size)
@@ -1382,8 +1380,6 @@ err_pci_regions:
return err;
}
-#define HINIC_WAIT_SRIOV_CFG_TIMEOUT 15000
-
static void wait_sriov_cfg_complete(struct hinic_dev *nic_dev)
{
struct hinic_sriov_info *sriov_info = &nic_dev->sriov_info;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
index e5828a658caf..d649c6e323c8 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
@@ -50,7 +50,7 @@
* hinic_rxq_clean_stats - Clean the statistics of specific queue
* @rxq: Logical Rx Queue
**/
-void hinic_rxq_clean_stats(struct hinic_rxq *rxq)
+static void hinic_rxq_clean_stats(struct hinic_rxq *rxq)
{
struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.h b/drivers/net/ethernet/huawei/hinic/hinic_rx.h
index 507dcbae9085..8f7bd6a049bd 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_rx.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.h
@@ -41,8 +41,6 @@ struct hinic_rxq {
struct napi_struct napi;
};
-void hinic_rxq_clean_stats(struct hinic_rxq *rxq);
-
void hinic_rxq_get_stats(struct hinic_rxq *rxq, struct hinic_rxq_stats *stats);
int hinic_init_rxq(struct hinic_rxq *rxq, struct hinic_rq *rq,
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sriov.c b/drivers/net/ethernet/huawei/hinic/hinic_sriov.c
index df555847afb5..a5f08b969e3f 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_sriov.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_sriov.c
@@ -24,6 +24,7 @@ MODULE_PARM_DESC(set_vf_link_state, "Set vf link state, 0 represents link auto,
#define HINIC_VLAN_PRIORITY_SHIFT 13
#define HINIC_ADD_VLAN_IN_MAC 0x8000
#define HINIC_TX_RATE_TABLE_FULL 12
+#define HINIC_MAX_QOS 7
static int hinic_set_mac(struct hinic_hwdev *hwdev, const u8 *mac_addr,
u16 vlan_id, u16 func_id)
@@ -774,7 +775,7 @@ int hinic_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
u16 vlanprio, cur_vlanprio;
sriov_info = &nic_dev->sriov_info;
- if (vf >= sriov_info->num_vfs || vlan > 4095 || qos > 7)
+ if (vf >= sriov_info->num_vfs || vlan >= VLAN_N_VID || qos > HINIC_MAX_QOS)
return -EINVAL;
if (vlan_proto != htons(ETH_P_8021Q))
return -EPROTONOSUPPORT;
@@ -820,7 +821,7 @@ int hinic_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting)
cur_trust = nic_io->vf_infos[vf].trust;
/* same request, so just return success */
- if ((setting && cur_trust) || (!setting && !cur_trust))
+ if (setting == cur_trust)
return 0;
err = hinic_set_vf_trust(adapter->hwdev, vf, setting);
@@ -940,7 +941,7 @@ int hinic_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting)
cur_spoofchk = nic_dev->hwdev->func_to_io.vf_infos[vf].spoofchk;
/* same request, so just return success */
- if ((setting && cur_spoofchk) || (!setting && !cur_spoofchk))
+ if (setting == cur_spoofchk)
return 0;
err = hinic_set_vf_spoofchk(sriov_info->hwdev,
@@ -1131,8 +1132,8 @@ static void hinic_clear_vf_infos(struct hinic_dev *nic_dev, u16 vf_id)
hinic_init_vf_infos(&nic_dev->hwdev->func_to_io, HW_VF_ID_TO_OS(vf_id));
}
-static int hinic_deinit_vf_hw(struct hinic_sriov_info *sriov_info,
- u16 start_vf_id, u16 end_vf_id)
+static void hinic_deinit_vf_hw(struct hinic_sriov_info *sriov_info,
+ u16 start_vf_id, u16 end_vf_id)
{
struct hinic_dev *nic_dev;
u16 func_idx, idx;
@@ -1145,8 +1146,6 @@ static int hinic_deinit_vf_hw(struct hinic_sriov_info *sriov_info,
HINIC_HW_WQ_PAGE_SIZE);
hinic_clear_vf_infos(nic_dev, idx);
}
-
- return 0;
}
int hinic_vf_func_init(struct hinic_hwdev *hwdev)
@@ -1293,7 +1292,7 @@ int hinic_pci_sriov_disable(struct pci_dev *pdev)
return 0;
}
-int hinic_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
+static int hinic_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
{
struct hinic_sriov_info *sriov_info;
int err;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sriov.h b/drivers/net/ethernet/huawei/hinic/hinic_sriov.h
index ba627a362f9a..d4d4e63d31ea 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_sriov.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_sriov.h
@@ -98,8 +98,6 @@ void hinic_notify_all_vfs_link_changed(struct hinic_hwdev *hwdev,
int hinic_pci_sriov_disable(struct pci_dev *dev);
-int hinic_pci_sriov_enable(struct pci_dev *dev, int num_vfs);
-
int hinic_vf_func_init(struct hinic_hwdev *hwdev);
void hinic_vf_func_free(struct hinic_hwdev *hwdev);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
index 3b6c7b585737..e91476c8ff8b 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
@@ -74,7 +74,7 @@ enum hinic_offload_type {
* hinic_txq_clean_stats - Clean the statistics of specific queue
* @txq: Logical Tx Queue
**/
-void hinic_txq_clean_stats(struct hinic_txq *txq)
+static void hinic_txq_clean_stats(struct hinic_txq *txq)
{
struct hinic_txq_stats *txq_stats = &txq->txq_stats;
@@ -530,7 +530,7 @@ netdev_tx_t hinic_lb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
}
process_sq_wqe:
- hinic_sq_prepare_wqe(txq->sq, prod_idx, sq_wqe, txq->sges, nr_sges);
+ hinic_sq_prepare_wqe(txq->sq, sq_wqe, txq->sges, nr_sges);
hinic_sq_write_wqe(txq->sq, prod_idx, sq_wqe, skb, wqe_size);
flush_skbs:
@@ -614,7 +614,7 @@ netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
}
process_sq_wqe:
- hinic_sq_prepare_wqe(txq->sq, prod_idx, sq_wqe, txq->sges, nr_sges);
+ hinic_sq_prepare_wqe(txq->sq, sq_wqe, txq->sges, nr_sges);
err = hinic_tx_offload(skb, &sq_wqe->task, &sq_wqe->ctrl.queue_info);
if (err)
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.h b/drivers/net/ethernet/huawei/hinic/hinic_tx.h
index b3c8657774a7..91dc778362f3 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_tx.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.h
@@ -40,8 +40,6 @@ struct hinic_txq {
struct napi_struct napi;
};
-void hinic_txq_clean_stats(struct hinic_txq *txq);
-
void hinic_txq_get_stats(struct hinic_txq *txq, struct hinic_txq_stats *stats);
netdev_tx_t hinic_lb_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c b/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
index 6cb86032ce46..1db5b6790a41 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
@@ -159,8 +159,8 @@ static int ehea_nway_reset(struct net_device *dev)
static void ehea_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
}
static u32 ehea_get_msglevel(struct net_device *dev)
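The strlcpy() to strscpy() conversions in this and the following drivers keep the same truncation-safe copy, but strscpy() reports truncation directly: it returns the number of bytes copied, or -E2BIG when the source did not fit, instead of strlcpy()'s strlen(src), which forces a full read of the source even on truncation. A rough userspace approximation of that behaviour (not the kernel implementation):

#include <stdio.h>
#include <string.h>
#include <errno.h>

/* strscpy-like helper: copy at most size-1 bytes, always NUL-terminate,
 * return bytes copied or -E2BIG if the source was truncated.
 */
static long strscpy_like(char *dst, const char *src, size_t size)
{
	size_t len;

	if (!size)
		return -E2BIG;
	len = strnlen(src, size);
	if (len == size) {		/* would truncate */
		len = size - 1;
		memcpy(dst, src, len);
		dst[len] = '\0';
		return -E2BIG;
	}
	memcpy(dst, src, len + 1);
	return (long)len;
}

int main(void)
{
	char buf[8];
	long ret = strscpy_like(buf, "a-rather-long-driver-name", sizeof(buf));

	printf("copied \"%s\", ret=%ld (negative means truncated)\n", buf, ret);
	return 0;
}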
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 5dc302880f5f..294bdbbeacc3 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -1546,7 +1546,7 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
kfree(init_attr);
- netif_napi_add(pr->port->netdev, &pr->napi, ehea_poll, 64);
+ netif_napi_add(pr->port->netdev, &pr->napi, ehea_poll);
ret = 0;
goto out;
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index fbea9f7efe8c..9b08e41ccc29 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -2284,8 +2284,8 @@ static void emac_ethtool_get_drvinfo(struct net_device *ndev,
{
struct emac_instance *dev = netdev_priv(ndev);
- strlcpy(info->driver, "ibm_emac", sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->driver, "ibm_emac", sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
snprintf(info->bus_info, sizeof(info->bus_info), "PPC 4xx EMAC-%d %pOF",
dev->cell_index, dev->ofdev->dev.of_node);
}
@@ -2979,11 +2979,9 @@ static int emac_init_config(struct emac_instance *dev)
/* Read MAC-address */
err = of_get_ethdev_address(np, dev->ndev);
- if (err) {
- if (err != -EPROBE_DEFER)
- dev_err(&dev->ofdev->dev, "Can't get valid [local-]mac-address from OF !\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(&dev->ofdev->dev, err,
+ "Can't get valid [local-]mac-address from OF !\n");
/* IAHT and GAHT filter parameterization */
if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) {
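The dev_err_probe() conversion just above folds the open-coded -EPROBE_DEFER special case into a single helper that both logs (quietly for probe deferral) and returns the error. A standalone mimic of that pattern; the EPROBE_DEFER value matches the kernel's, everything else here is made up (userspace C, not driver code):

#include <stdio.h>

#define EPROBE_DEFER 517	/* same numeric value the kernel uses */

/* dev_err_probe-like helper: log unless the error means "try again later",
 * and hand the error back so the caller can simply return it.
 */
static int err_probe_like(const char *dev, int err, const char *msg)
{
	if (err != -EPROBE_DEFER)
		fprintf(stderr, "%s: %s (err=%d)\n", dev, msg, err);
	return err;
}

static int get_mac_address(int simulate_err)
{
	return simulate_err;	/* stand-in for of_get_ethdev_address() */
}

int main(void)
{
	int err = get_mac_address(-EPROBE_DEFER);

	if (err) {
		err_probe_like("emac0", err,
			       "Can't get valid [local-]mac-address from OF !");
		return 1;
	}
	return 0;
}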
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 5c6a04d29f5b..3b14dc93f59d 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -141,6 +141,13 @@ static inline int ibmveth_rxq_csum_good(struct ibmveth_adapter *adapter)
return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_CSUM_GOOD;
}
+static unsigned int ibmveth_real_max_tx_queues(void)
+{
+ unsigned int n_cpu = num_online_cpus();
+
+ return min(n_cpu, IBMVETH_MAX_QUEUES);
+}
+
/* setup the initial settings for a buffer pool */
static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool,
u32 pool_index, u32 pool_size,
@@ -456,6 +463,38 @@ static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
}
}
+static void ibmveth_free_tx_ltb(struct ibmveth_adapter *adapter, int idx)
+{
+ dma_unmap_single(&adapter->vdev->dev, adapter->tx_ltb_dma[idx],
+ adapter->tx_ltb_size, DMA_TO_DEVICE);
+ kfree(adapter->tx_ltb_ptr[idx]);
+ adapter->tx_ltb_ptr[idx] = NULL;
+}
+
+static int ibmveth_allocate_tx_ltb(struct ibmveth_adapter *adapter, int idx)
+{
+ adapter->tx_ltb_ptr[idx] = kzalloc(adapter->tx_ltb_size,
+ GFP_KERNEL);
+ if (!adapter->tx_ltb_ptr[idx]) {
+ netdev_err(adapter->netdev,
+ "unable to allocate tx long term buffer\n");
+ return -ENOMEM;
+ }
+ adapter->tx_ltb_dma[idx] = dma_map_single(&adapter->vdev->dev,
+ adapter->tx_ltb_ptr[idx],
+ adapter->tx_ltb_size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&adapter->vdev->dev, adapter->tx_ltb_dma[idx])) {
+ netdev_err(adapter->netdev,
+ "unable to DMA map tx long term buffer\n");
+ kfree(adapter->tx_ltb_ptr[idx]);
+ adapter->tx_ltb_ptr[idx] = NULL;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter,
union ibmveth_buf_desc rxq_desc, u64 mac_address)
{
@@ -538,6 +577,11 @@ static int ibmveth_open(struct net_device *netdev)
goto out_unmap_buffer_list;
}
+ for (i = 0; i < netdev->real_num_tx_queues; i++) {
+ if (ibmveth_allocate_tx_ltb(adapter, i))
+ goto out_free_tx_ltb;
+ }
+
adapter->rx_queue.index = 0;
adapter->rx_queue.num_slots = rxq_entries;
adapter->rx_queue.toggle = 1;
@@ -595,25 +639,15 @@ static int ibmveth_open(struct net_device *netdev)
rc = -ENOMEM;
- adapter->bounce_buffer = dma_alloc_coherent(&adapter->vdev->dev,
- netdev->mtu + IBMVETH_BUFF_OH,
- &adapter->bounce_buffer_dma, GFP_KERNEL);
- if (!adapter->bounce_buffer) {
- netdev_err(netdev, "unable to alloc bounce buffer\n");
- goto out_free_irq;
- }
-
netdev_dbg(netdev, "initial replenish cycle\n");
ibmveth_interrupt(netdev->irq, netdev);
- netif_start_queue(netdev);
+ netif_tx_start_all_queues(netdev);
netdev_dbg(netdev, "open complete\n");
return 0;
-out_free_irq:
- free_irq(netdev->irq, netdev);
out_free_buffer_pools:
while (--i >= 0) {
if (adapter->rx_buff_pool[i].active)
@@ -623,6 +657,12 @@ out_free_buffer_pools:
out_unmap_filter_list:
dma_unmap_single(dev, adapter->filter_list_dma, 4096,
DMA_BIDIRECTIONAL);
+
+out_free_tx_ltb:
+ while (--i >= 0) {
+ ibmveth_free_tx_ltb(adapter, i);
+ }
+
out_unmap_buffer_list:
dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
DMA_BIDIRECTIONAL);
@@ -651,7 +691,7 @@ static int ibmveth_close(struct net_device *netdev)
napi_disable(&adapter->napi);
if (!adapter->pool_config)
- netif_stop_queue(netdev);
+ netif_tx_stop_all_queues(netdev);
h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);
@@ -685,9 +725,8 @@ static int ibmveth_close(struct net_device *netdev)
ibmveth_free_buffer_pool(adapter,
&adapter->rx_buff_pool[i]);
- dma_free_coherent(&adapter->vdev->dev,
- adapter->netdev->mtu + IBMVETH_BUFF_OH,
- adapter->bounce_buffer, adapter->bounce_buffer_dma);
+ for (i = 0; i < netdev->real_num_tx_queues; i++)
+ ibmveth_free_tx_ltb(adapter, i);
netdev_dbg(netdev, "close complete\n");
@@ -727,8 +766,8 @@ static void ibmveth_init_link_settings(struct net_device *dev)
static void netdev_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strlcpy(info->driver, ibmveth_driver_name, sizeof(info->driver));
- strlcpy(info->version, ibmveth_driver_version, sizeof(info->version));
+ strscpy(info->driver, ibmveth_driver_name, sizeof(info->driver));
+ strscpy(info->version, ibmveth_driver_version, sizeof(info->version));
}
static netdev_features_t ibmveth_fix_features(struct net_device *dev,
@@ -953,6 +992,69 @@ static void ibmveth_get_ethtool_stats(struct net_device *dev,
data[i] = IBMVETH_GET_STAT(adapter, ibmveth_stats[i].offset);
}
+static void ibmveth_get_channels(struct net_device *netdev,
+ struct ethtool_channels *channels)
+{
+ channels->max_tx = ibmveth_real_max_tx_queues();
+ channels->tx_count = netdev->real_num_tx_queues;
+
+ channels->max_rx = netdev->real_num_rx_queues;
+ channels->rx_count = netdev->real_num_rx_queues;
+}
+
+static int ibmveth_set_channels(struct net_device *netdev,
+ struct ethtool_channels *channels)
+{
+ struct ibmveth_adapter *adapter = netdev_priv(netdev);
+ unsigned int old = netdev->real_num_tx_queues,
+ goal = channels->tx_count;
+ int rc, i;
+
+ /* If ndo_open has not been called yet then don't allocate, just set
+ * desired netdev_queue's and return
+ */
+ if (!(netdev->flags & IFF_UP))
+ return netif_set_real_num_tx_queues(netdev, goal);
+
+ /* We have IBMVETH_MAX_QUEUES netdev_queue's allocated
+ * but we may need to alloc/free the ltb's.
+ */
+ netif_tx_stop_all_queues(netdev);
+
+ /* Allocate any queue that we need */
+ for (i = old; i < goal; i++) {
+ if (adapter->tx_ltb_ptr[i])
+ continue;
+
+ rc = ibmveth_allocate_tx_ltb(adapter, i);
+ if (!rc)
+ continue;
+
+ /* if something goes wrong, free everything we just allocated */
+ netdev_err(netdev, "Failed to allocate more tx queues, returning to %d queues\n",
+ old);
+ goal = old;
+ old = i;
+ break;
+ }
+ rc = netif_set_real_num_tx_queues(netdev, goal);
+ if (rc) {
+ netdev_err(netdev, "Failed to set real tx queues, returning to %d queues\n",
+ old);
+ goal = old;
+ old = i;
+ }
+ /* Free any that are no longer needed */
+ for (i = old; i > goal; i--) {
+ if (adapter->tx_ltb_ptr[i - 1])
+ ibmveth_free_tx_ltb(adapter, i - 1);
+ }
+
+ netif_tx_wake_all_queues(netdev);
+
+ return rc;
+}
+
static const struct ethtool_ops netdev_ethtool_ops = {
.get_drvinfo = netdev_get_drvinfo,
.get_link = ethtool_op_get_link,
@@ -961,6 +1063,8 @@ static const struct ethtool_ops netdev_ethtool_ops = {
.get_ethtool_stats = ibmveth_get_ethtool_stats,
.get_link_ksettings = ibmveth_get_link_ksettings,
.set_link_ksettings = ibmveth_set_link_ksettings,
+ .get_channels = ibmveth_get_channels,
+ .set_channels = ibmveth_set_channels
};
static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -969,7 +1073,7 @@ static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
}
static int ibmveth_send(struct ibmveth_adapter *adapter,
- union ibmveth_buf_desc *descs, unsigned long mss)
+ unsigned long desc, unsigned long mss)
{
unsigned long correlator;
unsigned int retry_count;
@@ -982,12 +1086,9 @@ static int ibmveth_send(struct ibmveth_adapter *adapter,
retry_count = 1024;
correlator = 0;
do {
- ret = h_send_logical_lan(adapter->vdev->unit_address,
- descs[0].desc, descs[1].desc,
- descs[2].desc, descs[3].desc,
- descs[4].desc, descs[5].desc,
- correlator, &correlator, mss,
- adapter->fw_large_send_support);
+ ret = h_send_logical_lan(adapter->vdev->unit_address, desc,
+ correlator, &correlator, mss,
+ adapter->fw_large_send_support);
} while ((ret == H_BUSY) && (retry_count--));
if (ret != H_SUCCESS && ret != H_DROPPED) {
@@ -1020,34 +1121,13 @@ static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
struct ibmveth_adapter *adapter = netdev_priv(netdev);
- unsigned int desc_flags;
- union ibmveth_buf_desc descs[6];
- int last, i;
- int force_bounce = 0;
- dma_addr_t dma_addr;
+ unsigned int desc_flags, total_bytes;
+ union ibmveth_buf_desc desc;
+ int i, queue_num = skb_get_queue_mapping(skb);
unsigned long mss = 0;
if (ibmveth_is_packet_unsupported(skb, netdev))
goto out;
-
- /* veth doesn't handle frag_list, so linearize the skb.
- * When GRO is enabled SKB's can have frag_list.
- */
- if (adapter->is_active_trunk &&
- skb_has_frag_list(skb) && __skb_linearize(skb)) {
- netdev->stats.tx_dropped++;
- goto out;
- }
-
- /*
- * veth handles a maximum of 6 segments including the header, so
- * we have to linearize the skb if there are more than this.
- */
- if (skb_shinfo(skb)->nr_frags > 5 && __skb_linearize(skb)) {
- netdev->stats.tx_dropped++;
- goto out;
- }
-
/* veth can't checksum offload UDP */
if (skb->ip_summed == CHECKSUM_PARTIAL &&
((skb->protocol == htons(ETH_P_IP) &&
@@ -1077,56 +1157,6 @@ static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
desc_flags |= IBMVETH_BUF_LRG_SND;
}
-retry_bounce:
- memset(descs, 0, sizeof(descs));
-
- /*
- * If a linear packet is below the rx threshold then
- * copy it into the static bounce buffer. This avoids the
- * cost of a TCE insert and remove.
- */
- if (force_bounce || (!skb_is_nonlinear(skb) &&
- (skb->len < tx_copybreak))) {
- skb_copy_from_linear_data(skb, adapter->bounce_buffer,
- skb->len);
-
- descs[0].fields.flags_len = desc_flags | skb->len;
- descs[0].fields.address = adapter->bounce_buffer_dma;
-
- if (ibmveth_send(adapter, descs, 0)) {
- adapter->tx_send_failed++;
- netdev->stats.tx_dropped++;
- } else {
- netdev->stats.tx_packets++;
- netdev->stats.tx_bytes += skb->len;
- }
-
- goto out;
- }
-
- /* Map the header */
- dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
- skb_headlen(skb), DMA_TO_DEVICE);
- if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
- goto map_failed;
-
- descs[0].fields.flags_len = desc_flags | skb_headlen(skb);
- descs[0].fields.address = dma_addr;
-
- /* Map the frags */
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-
- dma_addr = skb_frag_dma_map(&adapter->vdev->dev, frag, 0,
- skb_frag_size(frag), DMA_TO_DEVICE);
-
- if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
- goto map_failed_frags;
-
- descs[i+1].fields.flags_len = desc_flags | skb_frag_size(frag);
- descs[i+1].fields.address = dma_addr;
- }
-
if (skb->ip_summed == CHECKSUM_PARTIAL && skb_is_gso(skb)) {
if (adapter->fw_large_send_support) {
mss = (unsigned long)skb_shinfo(skb)->gso_size;
@@ -1143,7 +1173,36 @@ retry_bounce:
}
}
- if (ibmveth_send(adapter, descs, mss)) {
+ /* Copy header into mapped buffer */
+ if (unlikely(skb->len > adapter->tx_ltb_size)) {
+ netdev_err(adapter->netdev, "tx: packet size (%u) exceeds ltb (%u)\n",
+ skb->len, adapter->tx_ltb_size);
+ netdev->stats.tx_dropped++;
+ goto out;
+ }
+ memcpy(adapter->tx_ltb_ptr[queue_num], skb->data, skb_headlen(skb));
+ total_bytes = skb_headlen(skb);
+ /* Copy frags into mapped buffers */
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ memcpy(adapter->tx_ltb_ptr[queue_num] + total_bytes,
+ skb_frag_address_safe(frag), skb_frag_size(frag));
+ total_bytes += skb_frag_size(frag);
+ }
+
+ if (unlikely(total_bytes != skb->len)) {
+ netdev_err(adapter->netdev, "tx: incorrect packet len copied into ltb (%u != %u)\n",
+ skb->len, total_bytes);
+ netdev->stats.tx_dropped++;
+ goto out;
+ }
+ desc.fields.flags_len = desc_flags | skb->len;
+ desc.fields.address = adapter->tx_ltb_dma[queue_num];
+	/* finish writing to long_term_buff before VIOS accesses it */
+ dma_wmb();
+
+ if (ibmveth_send(adapter, desc.desc, mss)) {
adapter->tx_send_failed++;
netdev->stats.tx_dropped++;
} else {
@@ -1151,41 +1210,11 @@ retry_bounce:
netdev->stats.tx_bytes += skb->len;
}
- dma_unmap_single(&adapter->vdev->dev,
- descs[0].fields.address,
- descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK,
- DMA_TO_DEVICE);
-
- for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++)
- dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
- descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
- DMA_TO_DEVICE);
-
out:
dev_consume_skb_any(skb);
return NETDEV_TX_OK;
-map_failed_frags:
- last = i+1;
- for (i = 1; i < last; i++)
- dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
- descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
- DMA_TO_DEVICE);
- dma_unmap_single(&adapter->vdev->dev,
- descs[0].fields.address,
- descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK,
- DMA_TO_DEVICE);
-map_failed:
- if (!firmware_has_feature(FW_FEATURE_CMO))
- netdev_err(netdev, "tx: unable to map xmit buffer\n");
- adapter->tx_map_failed++;
- if (skb_linearize(skb)) {
- netdev->stats.tx_dropped++;
- goto out;
- }
- force_bounce = 1;
- goto retry_bounce;
}
static void ibmveth_rx_mss_helper(struct sk_buff *skb, u16 mss, int lrg_pkt)
@@ -1568,6 +1597,8 @@ static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE;
ret += IOMMU_PAGE_ALIGN(netdev->mtu, tbl);
+ /* add size of mapped tx buffers */
+ ret += IOMMU_PAGE_ALIGN(IBMVETH_MAX_TX_BUF_SIZE, tbl);
for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
/* add the size of the active receive buffers */
@@ -1660,8 +1691,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
return -EINVAL;
}
- netdev = alloc_etherdev(sizeof(struct ibmveth_adapter));
-
+ netdev = alloc_etherdev_mqs(sizeof(struct ibmveth_adapter), IBMVETH_MAX_QUEUES, 1);
if (!netdev)
return -ENOMEM;
@@ -1727,6 +1757,17 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
kobject_uevent(kobj, KOBJ_ADD);
}
+ rc = netif_set_real_num_tx_queues(netdev, ibmveth_real_max_tx_queues());
+ if (rc) {
+ netdev_dbg(netdev, "failed to set number of tx queues rc=%d\n",
+ rc);
+ free_netdev(netdev);
+ return rc;
+ }
+ adapter->tx_ltb_size = PAGE_ALIGN(IBMVETH_MAX_TX_BUF_SIZE);
+ for (i = 0; i < IBMVETH_MAX_QUEUES; i++)
+ adapter->tx_ltb_ptr[i] = NULL;
+
netdev_dbg(netdev, "adapter @ 0x%p\n", adapter);
netdev_dbg(netdev, "registering netdev...\n");
diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h
index 27dfff200166..daf6f615c03f 100644
--- a/drivers/net/ethernet/ibm/ibmveth.h
+++ b/drivers/net/ethernet/ibm/ibmveth.h
@@ -46,23 +46,23 @@
#define h_add_logical_lan_buffer(ua, buf) \
plpar_hcall_norets(H_ADD_LOGICAL_LAN_BUFFER, ua, buf)
+/* FW allows us to send 6 descriptors, but we only use one, so mark
+ * the other 5 as unused (0).
+ */
static inline long h_send_logical_lan(unsigned long unit_address,
- unsigned long desc1, unsigned long desc2, unsigned long desc3,
- unsigned long desc4, unsigned long desc5, unsigned long desc6,
- unsigned long corellator_in, unsigned long *corellator_out,
- unsigned long mss, unsigned long large_send_support)
+ unsigned long desc, unsigned long corellator_in,
+ unsigned long *corellator_out, unsigned long mss,
+ unsigned long large_send_support)
{
long rc;
unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
if (large_send_support)
rc = plpar_hcall9(H_SEND_LOGICAL_LAN, retbuf, unit_address,
- desc1, desc2, desc3, desc4, desc5, desc6,
- corellator_in, mss);
+ desc, 0, 0, 0, 0, 0, corellator_in, mss);
else
rc = plpar_hcall9(H_SEND_LOGICAL_LAN, retbuf, unit_address,
- desc1, desc2, desc3, desc4, desc5, desc6,
- corellator_in);
+ desc, 0, 0, 0, 0, 0, corellator_in);
*corellator_out = retbuf[0];
@@ -98,6 +98,8 @@ static inline long h_illan_attributes(unsigned long unit_address,
#define IBMVETH_BUFF_LIST_SIZE 4096
#define IBMVETH_FILT_LIST_SIZE 4096
#define IBMVETH_MAX_BUF_SIZE (1024 * 128)
+#define IBMVETH_MAX_TX_BUF_SIZE (1024 * 64)
+#define IBMVETH_MAX_QUEUES 16U
static int pool_size[] = { 512, 1024 * 2, 1024 * 16, 1024 * 32, 1024 * 64 };
static int pool_count[] = { 256, 512, 256, 256, 256 };
@@ -137,6 +139,9 @@ struct ibmveth_adapter {
unsigned int mcastFilterSize;
void * buffer_list_addr;
void * filter_list_addr;
+ void *tx_ltb_ptr[IBMVETH_MAX_QUEUES];
+ unsigned int tx_ltb_size;
+ dma_addr_t tx_ltb_dma[IBMVETH_MAX_QUEUES];
dma_addr_t buffer_list_dma;
dma_addr_t filter_list_dma;
struct ibmveth_buff_pool rx_buff_pool[IBMVETH_NUM_BUFF_POOLS];
@@ -145,8 +150,6 @@ struct ibmveth_adapter {
int rx_csum;
int large_send;
bool is_active_trunk;
- void *bounce_buffer;
- dma_addr_t bounce_buffer_dma;
u64 fw_ipv6_csum_support;
u64 fw_ipv4_csum_support;
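Illustrative fragment, not part of the patch: with the per-queue long-term
buffers added above, the ibmveth transmit path collapses to a single
descriptor per packet. A minimal sketch of the new send step (frag copies
omitted), using only names introduced in this series:

	/* Copy the frame into the queue's pre-mapped long-term buffer and
	 * describe it with one ibmveth_buf_desc; the other five descriptor
	 * slots are now passed to the hypervisor as 0.
	 */
	union ibmveth_buf_desc desc;

	memcpy(adapter->tx_ltb_ptr[queue_num], skb->data, skb_headlen(skb));
	desc.fields.flags_len = desc_flags | skb->len;
	desc.fields.address = adapter->tx_ltb_dma[queue_num];
	dma_wmb();	/* publish the copy before the hcall reads the buffer */
	ibmveth_send(adapter, desc.desc, mss);

With the new get/set_channels ethtool hooks, the number of transmit queues
(and hence long-term buffers) can be tuned at runtime, e.g. via
"ethtool -L <ifname> tx <n>".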
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 5ab7c0f81e9a..65dbfbec487a 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -1262,7 +1262,7 @@ static int init_napi(struct ibmvnic_adapter *adapter)
for (i = 0; i < adapter->req_rx_queues; i++) {
netdev_dbg(adapter->netdev, "Adding napi[%d]\n", i);
netif_napi_add(adapter->netdev, &adapter->napi[i],
- ibmvnic_poll, NAPI_POLL_WEIGHT);
+ ibmvnic_poll);
}
adapter->num_active_rx_napi = adapter->req_rx_queues;
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 11a884aa5082..560d1d442232 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -2431,8 +2431,8 @@ static void e100_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
struct nic *nic = netdev_priv(netdev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->bus_info, pci_name(nic->pdev),
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->bus_info, pci_name(nic->pdev),
sizeof(info->bus_info));
}
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 32803b0cf1e8..d06d29c6c037 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -531,10 +531,10 @@ static void e1000_get_drvinfo(struct net_device *netdev,
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, e1000_driver_name,
+ strscpy(drvinfo->driver, e1000_driver_name,
sizeof(drvinfo->driver));
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 23299fc56199..61e60e4de600 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -1012,7 +1012,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->netdev_ops = &e1000_netdev_ops;
e1000_set_ethtool_ops(netdev);
netdev->watchdog_timeo = 5 * HZ;
- netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
+ netif_napi_add(netdev, &adapter->napi, e1000_clean);
strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index b80ae9a82224..51a5afe9df2f 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -639,7 +639,7 @@ static void e1000_get_drvinfo(struct net_device *netdev,
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, e1000e_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->driver, e1000e_driver_name, sizeof(drvinfo->driver));
/* EEPROM image version # is reported as firmware version # for
* PCI-E controllers
@@ -650,7 +650,7 @@ static void e1000_get_drvinfo(struct net_device *netdev,
(adapter->eeprom_vers & 0x0FF0) >> 4,
(adapter->eeprom_vers & 0x000F));
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 321f2a95ae3a..49e926959ad3 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -7267,7 +7267,7 @@ static void e1000_print_device_info(struct e1000_adapter *adapter)
ret_val = e1000_read_pba_string_generic(hw, pba_str,
E1000_PBANUM_LENGTH);
if (ret_val)
- strlcpy((char *)pba_str, "Unknown", sizeof(pba_str));
+ strscpy((char *)pba_str, "Unknown", sizeof(pba_str));
e_info("MAC: %d, PHY: %d, PBA No: %s\n",
hw->mac.type, hw->phy.type, pba_str);
}
@@ -7479,8 +7479,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->netdev_ops = &e1000e_netdev_ops;
e1000e_set_ethtool_ops(netdev);
netdev->watchdog_timeo = 5 * HZ;
- netif_napi_add(netdev, &adapter->napi, e1000e_poll, 64);
- strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
+ netif_napi_add(netdev, &adapter->napi, e1000e_poll);
+ strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
netdev->mem_start = mmio_start;
netdev->mem_end = mmio_start + mmio_len;
@@ -7676,7 +7676,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (hw->mac.type >= e1000_pch_cnp)
adapter->flags2 |= FLAG2_ENABLE_S0IX_FLOWS;
- strlcpy(netdev->name, "eth%d", sizeof(netdev->name));
+ strscpy(netdev->name, "eth%d", sizeof(netdev->name));
err = register_netdev(netdev);
if (err)
goto err_register;
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index fd07c3679bb1..060b263348ce 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -2697,9 +2697,14 @@ static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
void e1000_power_up_phy_copper(struct e1000_hw *hw)
{
u16 mii_reg = 0;
+ int ret;
/* The PHY will retain its settings across a power down/up cycle */
- e1e_rphy(hw, MII_BMCR, &mii_reg);
+ ret = e1e_rphy(hw, MII_BMCR, &mii_reg);
+ if (ret) {
+ e_dbg("Error reading PHY register\n");
+ return;
+ }
mii_reg &= ~BMCR_PDOWN;
e1e_wphy(hw, MII_BMCR, mii_reg);
}
@@ -2715,9 +2720,14 @@ void e1000_power_up_phy_copper(struct e1000_hw *hw)
void e1000_power_down_phy_copper(struct e1000_hw *hw)
{
u16 mii_reg = 0;
+ int ret;
/* The PHY will retain its settings across a power down/up cycle */
- e1e_rphy(hw, MII_BMCR, &mii_reg);
+ ret = e1e_rphy(hw, MII_BMCR, &mii_reg);
+ if (ret) {
+ e_dbg("Error reading PHY register\n");
+ return;
+ }
mii_reg |= BMCR_PDOWN;
e1e_wphy(hw, MII_BMCR, mii_reg);
usleep_range(1000, 2000);
@@ -3037,7 +3047,11 @@ s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw)
return 0;
/* Do not apply workaround if in PHY loopback bit 14 set */
- e1e_rphy(hw, MII_BMCR, &data);
+ ret_val = e1e_rphy(hw, MII_BMCR, &data);
+ if (ret_val) {
+ e_dbg("Error reading PHY register\n");
+ return ret_val;
+ }
if (data & BMCR_LOOPBACK)
return 0;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 3362f26d7f99..4a6630586ec9 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -1595,8 +1595,7 @@ static int fm10k_alloc_q_vector(struct fm10k_intfc *interface,
return -ENOMEM;
/* initialize NAPI */
- netif_napi_add(interface->netdev, &q_vector->napi,
- fm10k_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(interface->netdev, &q_vector->napi, fm10k_poll);
/* tie q_vector and interface together */
interface->q_vector[v_idx] = q_vector;
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index d86b6d349ea9..9a60d6b207f7 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -399,6 +399,20 @@ struct i40e_ddp_old_profile_list {
I40E_FLEX_54_MASK | I40E_FLEX_55_MASK | \
I40E_FLEX_56_MASK | I40E_FLEX_57_MASK)
+#define I40E_QINT_TQCTL_VAL(qp, vector, nextq_type) \
+ (I40E_QINT_TQCTL_CAUSE_ENA_MASK | \
+ (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) | \
+ ((vector) << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) | \
+ ((qp) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) | \
+ (I40E_QUEUE_TYPE_##nextq_type << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT))
+
+#define I40E_QINT_RQCTL_VAL(qp, vector, nextq_type) \
+ (I40E_QINT_RQCTL_CAUSE_ENA_MASK | \
+ (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) | \
+ ((vector) << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) | \
+ ((qp) << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) | \
+ (I40E_QUEUE_TYPE_##nextq_type << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT))
+
struct i40e_flex_pit {
struct list_head list;
u16 src_offset;
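Illustrative fragment, not part of the patch: the two helper macros above
fold the interrupt-cause register value that was previously open-coded in
i40e_vsi_configure_msix() and i40e_configure_msi_and_legacy(). For example,
for queue pair nextqp on MSI-X vector 0 with a TX queue next in the chain:

	u32 val = I40E_QINT_RQCTL_VAL(nextqp, 0, TX);
	/* expands to:
	 *	I40E_QINT_RQCTL_CAUSE_ENA_MASK |
	 *	(I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
	 *	(0 << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
	 *	(nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
	 *	(I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT)
	 */

The nextq_type argument is token-pasted onto I40E_QUEUE_TYPE_, so only
existing I40E_QUEUE_TYPE_* suffixes (such as TX and RX) are valid there.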
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 2819e261a126..4f01e2a6b6bb 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -27,6 +27,7 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw)
case I40E_DEV_ID_QSFP_A:
case I40E_DEV_ID_QSFP_B:
case I40E_DEV_ID_QSFP_C:
+ case I40E_DEV_ID_1G_BASE_T_BC:
case I40E_DEV_ID_5G_BASE_T_BC:
case I40E_DEV_ID_10G_BASE_T:
case I40E_DEV_ID_10G_BASE_T4:
@@ -4974,6 +4975,7 @@ i40e_status i40e_write_phy_register(struct i40e_hw *hw,
status = i40e_write_phy_register_clause22(hw, reg, phy_addr,
value);
break;
+ case I40E_DEV_ID_1G_BASE_T_BC:
case I40E_DEV_ID_5G_BASE_T_BC:
case I40E_DEV_ID_10G_BASE_T:
case I40E_DEV_ID_10G_BASE_T4:
@@ -5012,6 +5014,7 @@ i40e_status i40e_read_phy_register(struct i40e_hw *hw,
status = i40e_read_phy_register_clause22(hw, reg, phy_addr,
value);
break;
+ case I40E_DEV_ID_1G_BASE_T_BC:
case I40E_DEV_ID_5G_BASE_T_BC:
case I40E_DEV_ID_10G_BASE_T:
case I40E_DEV_ID_10G_BASE_T4:
diff --git a/drivers/net/ethernet/intel/i40e/i40e_devids.h b/drivers/net/ethernet/intel/i40e/i40e_devids.h
index 2610338002fe..d9c51a238dcc 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_devids.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_devids.h
@@ -24,8 +24,10 @@
#define I40E_DEV_ID_10G_B 0x104F
#define I40E_DEV_ID_10G_SFP 0x104E
#define I40E_DEV_ID_5G_BASE_T_BC 0x101F
+#define I40E_DEV_ID_1G_BASE_T_BC 0x0DD2
#define I40E_IS_X710TL_DEVICE(d) \
- (((d) == I40E_DEV_ID_5G_BASE_T_BC) || \
+ (((d) == I40E_DEV_ID_1G_BASE_T_BC) || \
+ ((d) == I40E_DEV_ID_5G_BASE_T_BC) || \
((d) == I40E_DEV_ID_10G_BASE_T_BC))
#define I40E_DEV_ID_KX_X722 0x37CE
#define I40E_DEV_ID_QSFP_X722 0x37CF
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index e9cd0fa6a0d2..7e75706f76db 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -2001,10 +2001,10 @@ static void i40e_get_drvinfo(struct net_device *netdev,
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
- strlcpy(drvinfo->driver, i40e_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->fw_version, i40e_nvm_version_str(&pf->hw),
+ strscpy(drvinfo->driver, i40e_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->fw_version, i40e_nvm_version_str(&pf->hw),
sizeof(drvinfo->fw_version));
- strlcpy(drvinfo->bus_info, pci_name(pf->pdev),
+ strscpy(drvinfo->bus_info, pci_name(pf->pdev),
sizeof(drvinfo->bus_info));
drvinfo->n_priv_flags = I40E_PRIV_FLAGS_STR_LEN;
if (pf->hw.pf_id == 0)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index e3d9804aeb25..2c07fa8ecfc8 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -66,6 +66,7 @@ static const struct pci_device_id i40e_pci_tbl[] = {
{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
+ {PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_BC), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_BC), 0},
@@ -3878,7 +3879,7 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
wr32(hw, I40E_PFINT_RATEN(vector - 1),
i40e_intrl_usec_to_reg(vsi->int_rate_limit));
- /* Linked list for the queuepairs assigned to this vector */
+	/* beginning of linked list for RX queues assigned to this vector */
wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
for (q = 0; q < q_vector->num_ringpairs; q++) {
u32 nextqp = has_xdp ? qp + vsi->alloc_queue_pairs : qp;
@@ -3894,6 +3895,7 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
wr32(hw, I40E_QINT_RQCTL(qp), val);
if (has_xdp) {
+ /* TX queue with next queue set to TX */
val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
(I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
(vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
@@ -3903,7 +3905,7 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
wr32(hw, I40E_QINT_TQCTL(nextqp), val);
}
-
+		/* TX queue with next queue set to RX, or end of linked list */
val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
(I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
(vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
@@ -3972,7 +3974,6 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
struct i40e_q_vector *q_vector = vsi->q_vectors[0];
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
- u32 val;
/* set the ITR configuration */
q_vector->rx.next_update = jiffies + 1;
@@ -3989,28 +3990,20 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
wr32(hw, I40E_PFINT_LNKLST0, 0);
- /* Associate the queue pair to the vector and enable the queue int */
- val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
- (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
- (nextqp << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)|
- (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
-
- wr32(hw, I40E_QINT_RQCTL(0), val);
+	/* Associate the queue pair to the vector and enable the queue
+	 * interrupt. RX queue in linked list with next queue set to TX
+ */
+ wr32(hw, I40E_QINT_RQCTL(0), I40E_QINT_RQCTL_VAL(nextqp, 0, TX));
if (i40e_enabled_xdp_vsi(vsi)) {
- val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
- (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)|
- (I40E_QUEUE_TYPE_TX
- << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
-
- wr32(hw, I40E_QINT_TQCTL(nextqp), val);
+ /* TX queue in linked list with next queue set to TX */
+ wr32(hw, I40E_QINT_TQCTL(nextqp),
+ I40E_QINT_TQCTL_VAL(nextqp, 0, TX));
}
- val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
- (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
- (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
-
- wr32(hw, I40E_QINT_TQCTL(0), val);
+ /* last TX queue so the next RX queue doesn't matter */
+ wr32(hw, I40E_QINT_TQCTL(0),
+ I40E_QINT_TQCTL_VAL(I40E_QUEUE_END_OF_LIST, 0, RX));
i40e_flush(hw);
}
@@ -10724,7 +10717,7 @@ static void i40e_send_version(struct i40e_pf *pf)
dv.minor_version = 0xff;
dv.build_version = 0xff;
dv.subbuild_version = 0;
- strlcpy(dv.driver_string, UTS_RELEASE, sizeof(dv.driver_string));
+ strscpy(dv.driver_string, UTS_RELEASE, sizeof(dv.driver_string));
i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
}
@@ -11948,8 +11941,7 @@ static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
if (vsi->netdev)
- netif_napi_add(vsi->netdev, &q_vector->napi,
- i40e_napi_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(vsi->netdev, &q_vector->napi, i40e_napi_poll);
/* tie q_vector and vsi together */
vsi->q_vectors[v_idx] = q_vector;
@@ -16072,23 +16064,23 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
switch (hw->bus.speed) {
case i40e_bus_speed_8000:
- strlcpy(speed, "8.0", PCI_SPEED_SIZE); break;
+ strscpy(speed, "8.0", PCI_SPEED_SIZE); break;
case i40e_bus_speed_5000:
- strlcpy(speed, "5.0", PCI_SPEED_SIZE); break;
+ strscpy(speed, "5.0", PCI_SPEED_SIZE); break;
case i40e_bus_speed_2500:
- strlcpy(speed, "2.5", PCI_SPEED_SIZE); break;
+ strscpy(speed, "2.5", PCI_SPEED_SIZE); break;
default:
break;
}
switch (hw->bus.width) {
case i40e_bus_width_pcie_x8:
- strlcpy(width, "8", PCI_WIDTH_SIZE); break;
+ strscpy(width, "8", PCI_WIDTH_SIZE); break;
case i40e_bus_width_pcie_x4:
- strlcpy(width, "4", PCI_WIDTH_SIZE); break;
+ strscpy(width, "4", PCI_WIDTH_SIZE); break;
case i40e_bus_width_pcie_x2:
- strlcpy(width, "2", PCI_WIDTH_SIZE); break;
+ strscpy(width, "2", PCI_WIDTH_SIZE); break;
case i40e_bus_width_pcie_x1:
- strlcpy(width, "1", PCI_WIDTH_SIZE); break;
+ strscpy(width, "1", PCI_WIDTH_SIZE); break;
default:
break;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index 2d3533f38d7b..ffea0c9c82f1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -1390,7 +1390,7 @@ static long i40e_ptp_create_clock(struct i40e_pf *pf)
if (!IS_ERR_OR_NULL(pf->ptp_clock))
return 0;
- strlcpy(pf->ptp_caps.name, i40e_driver_name,
+ strscpy(pf->ptp_caps.name, i40e_driver_name,
sizeof(pf->ptp_caps.name) - 1);
pf->ptp_caps.owner = THIS_MODULE;
pf->ptp_caps.max_adj = 999999999;
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
index e535d4c3da49..a056e1545615 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
@@ -581,9 +581,9 @@ static void iavf_get_drvinfo(struct net_device *netdev,
{
struct iavf_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, iavf_driver_name, 32);
- strlcpy(drvinfo->fw_version, "N/A", 4);
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+ strscpy(drvinfo->driver, iavf_driver_name, 32);
+ strscpy(drvinfo->fw_version, "N/A", 4);
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
drvinfo->n_priv_flags = IAVF_PRIV_FLAGS_STR_LEN;
}
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 0c89f16bf1e2..3fc572341781 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -1266,6 +1266,116 @@ static void iavf_up_complete(struct iavf_adapter *adapter)
mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
}
+/**
+ * iavf_clear_mac_vlan_filters - Remove MAC and VLAN filters not sent to PF
+ * yet and mark the others to be removed.
+ * @adapter: board private structure
+ **/
+static void iavf_clear_mac_vlan_filters(struct iavf_adapter *adapter)
+{
+ struct iavf_vlan_filter *vlf, *vlftmp;
+ struct iavf_mac_filter *f, *ftmp;
+
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+ /* clear the sync flag on all filters */
+ __dev_uc_unsync(adapter->netdev, NULL);
+ __dev_mc_unsync(adapter->netdev, NULL);
+
+ /* remove all MAC filters */
+ list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list,
+ list) {
+ if (f->add) {
+ list_del(&f->list);
+ kfree(f);
+ } else {
+ f->remove = true;
+ }
+ }
+
+ /* remove all VLAN filters */
+ list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list,
+ list) {
+ if (vlf->add) {
+ list_del(&vlf->list);
+ kfree(vlf);
+ } else {
+ vlf->remove = true;
+ }
+ }
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+}
+
+/**
+ * iavf_clear_cloud_filters - Remove cloud filters not sent to PF yet and
+ * mark the others to be removed.
+ * @adapter: board private structure
+ **/
+static void iavf_clear_cloud_filters(struct iavf_adapter *adapter)
+{
+ struct iavf_cloud_filter *cf, *cftmp;
+
+ /* remove all cloud filters */
+ spin_lock_bh(&adapter->cloud_filter_list_lock);
+ list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
+ list) {
+ if (cf->add) {
+ list_del(&cf->list);
+ kfree(cf);
+ adapter->num_cloud_filters--;
+ } else {
+ cf->del = true;
+ }
+ }
+ spin_unlock_bh(&adapter->cloud_filter_list_lock);
+}
+
+/**
+ * iavf_clear_fdir_filters - Remove fdir filters not sent to PF yet and mark
+ * the others to be removed.
+ * @adapter: board private structure
+ **/
+static void iavf_clear_fdir_filters(struct iavf_adapter *adapter)
+{
+ struct iavf_fdir_fltr *fdir, *fdirtmp;
+
+ /* remove all Flow Director filters */
+ spin_lock_bh(&adapter->fdir_fltr_lock);
+ list_for_each_entry_safe(fdir, fdirtmp, &adapter->fdir_list_head,
+ list) {
+ if (fdir->state == IAVF_FDIR_FLTR_ADD_REQUEST) {
+ list_del(&fdir->list);
+ kfree(fdir);
+ adapter->fdir_active_fltr--;
+ } else {
+ fdir->state = IAVF_FDIR_FLTR_DEL_REQUEST;
+ }
+ }
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
+}
+
+/**
+ * iavf_clear_adv_rss_conf - Remove adv rss conf not sent to PF yet and mark
+ * the others to be removed.
+ * @adapter: board private structure
+ **/
+static void iavf_clear_adv_rss_conf(struct iavf_adapter *adapter)
+{
+ struct iavf_adv_rss *rss, *rsstmp;
+
+	/* remove all advanced RSS configuration */
+ spin_lock_bh(&adapter->adv_rss_lock);
+ list_for_each_entry_safe(rss, rsstmp, &adapter->adv_rss_list_head,
+ list) {
+ if (rss->state == IAVF_ADV_RSS_ADD_REQUEST) {
+ list_del(&rss->list);
+ kfree(rss);
+ } else {
+ rss->state = IAVF_ADV_RSS_DEL_REQUEST;
+ }
+ }
+ spin_unlock_bh(&adapter->adv_rss_lock);
+}
+
/**
* iavf_down - Shutdown the connection processing
* @adapter: board private structure
@@ -1275,11 +1385,6 @@ static void iavf_up_complete(struct iavf_adapter *adapter)
void iavf_down(struct iavf_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
- struct iavf_vlan_filter *vlf;
- struct iavf_cloud_filter *cf;
- struct iavf_fdir_fltr *fdir;
- struct iavf_mac_filter *f;
- struct iavf_adv_rss *rss;
if (adapter->state <= __IAVF_DOWN_PENDING)
return;
@@ -1290,43 +1395,10 @@ void iavf_down(struct iavf_adapter *adapter)
iavf_napi_disable_all(adapter);
iavf_irq_disable(adapter);
- spin_lock_bh(&adapter->mac_vlan_list_lock);
-
- /* clear the sync flag on all filters */
- __dev_uc_unsync(adapter->netdev, NULL);
- __dev_mc_unsync(adapter->netdev, NULL);
-
- /* remove all MAC filters */
- list_for_each_entry(f, &adapter->mac_filter_list, list) {
- f->remove = true;
- }
-
- /* remove all VLAN filters */
- list_for_each_entry(vlf, &adapter->vlan_filter_list, list) {
- vlf->remove = true;
- }
-
- spin_unlock_bh(&adapter->mac_vlan_list_lock);
-
- /* remove all cloud filters */
- spin_lock_bh(&adapter->cloud_filter_list_lock);
- list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
- cf->del = true;
- }
- spin_unlock_bh(&adapter->cloud_filter_list_lock);
-
- /* remove all Flow Director filters */
- spin_lock_bh(&adapter->fdir_fltr_lock);
- list_for_each_entry(fdir, &adapter->fdir_list_head, list) {
- fdir->state = IAVF_FDIR_FLTR_DEL_REQUEST;
- }
- spin_unlock_bh(&adapter->fdir_fltr_lock);
-
- /* remove all advance RSS configuration */
- spin_lock_bh(&adapter->adv_rss_lock);
- list_for_each_entry(rss, &adapter->adv_rss_list_head, list)
- rss->state = IAVF_ADV_RSS_DEL_REQUEST;
- spin_unlock_bh(&adapter->adv_rss_lock);
+ iavf_clear_mac_vlan_filters(adapter);
+ iavf_clear_cloud_filters(adapter);
+ iavf_clear_fdir_filters(adapter);
+ iavf_clear_adv_rss_conf(adapter);
if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)) {
/* cancel any current operation */
@@ -1335,11 +1407,16 @@ void iavf_down(struct iavf_adapter *adapter)
* here for this to complete. The watchdog is still running
* and it will take care of this.
*/
- adapter->aq_required = IAVF_FLAG_AQ_DEL_MAC_FILTER;
- adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
- adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
- adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER;
- adapter->aq_required |= IAVF_FLAG_AQ_DEL_ADV_RSS_CFG;
+ if (!list_empty(&adapter->mac_filter_list))
+ adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
+ if (!list_empty(&adapter->vlan_filter_list))
+ adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
+ if (!list_empty(&adapter->cloud_filter_list))
+ adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
+ if (!list_empty(&adapter->fdir_list_head))
+ adapter->aq_required |= IAVF_FLAG_AQ_DEL_FDIR_FILTER;
+ if (!list_empty(&adapter->adv_rss_list_head))
+ adapter->aq_required |= IAVF_FLAG_AQ_DEL_ADV_RSS_CFG;
adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES;
}
@@ -1754,7 +1831,7 @@ static int iavf_alloc_q_vectors(struct iavf_adapter *adapter)
q_vector->reg_idx = q_idx;
cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
netif_napi_add(adapter->netdev, &q_vector->napi,
- iavf_napi_poll, NAPI_POLL_WEIGHT);
+ iavf_napi_poll);
}
return 0;
@@ -4178,6 +4255,7 @@ err_unlock:
static int iavf_close(struct net_device *netdev)
{
struct iavf_adapter *adapter = netdev_priv(netdev);
+ u64 aq_to_restore;
int status;
mutex_lock(&adapter->crit_lock);
@@ -4190,6 +4268,29 @@ static int iavf_close(struct net_device *netdev)
set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
if (CLIENT_ENABLED(adapter))
adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_CLOSE;
+	/* We cannot send IAVF_FLAG_AQ_GET_OFFLOAD_VLAN_V2_CAPS before
+	 * IAVF_FLAG_AQ_DISABLE_QUEUES because in that case there is an rtnl
+	 * deadlock with adminq_task() until iavf_close times out. We must send
+	 * IAVF_FLAG_AQ_GET_CONFIG before IAVF_FLAG_AQ_DISABLE_QUEUES to make
+	 * disabling the queues possible for the VF. Give only the necessary
+	 * flags to iavf_down and save the others to be set right before
+	 * iavf_close() returns, when IAVF_FLAG_AQ_DISABLE_QUEUES has already
+	 * been sent and the iavf is in the DOWN state.
+ */
+ aq_to_restore = adapter->aq_required;
+ adapter->aq_required &= IAVF_FLAG_AQ_GET_CONFIG;
+
+	/* Remove flags which we do not want to send after close, or which we
+	 * want to send before disabling the queues.
+ */
+ aq_to_restore &= ~(IAVF_FLAG_AQ_GET_CONFIG |
+ IAVF_FLAG_AQ_ENABLE_QUEUES |
+ IAVF_FLAG_AQ_CONFIGURE_QUEUES |
+ IAVF_FLAG_AQ_ADD_VLAN_FILTER |
+ IAVF_FLAG_AQ_ADD_MAC_FILTER |
+ IAVF_FLAG_AQ_ADD_CLOUD_FILTER |
+ IAVF_FLAG_AQ_ADD_FDIR_FILTER |
+ IAVF_FLAG_AQ_ADD_ADV_RSS_CFG);
iavf_down(adapter);
iavf_change_state(adapter, __IAVF_DOWN_PENDING);
@@ -4213,6 +4314,10 @@ static int iavf_close(struct net_device *netdev)
msecs_to_jiffies(500));
if (!status)
netdev_warn(netdev, "Device resources not yet released\n");
+
+ mutex_lock(&adapter->crit_lock);
+ adapter->aq_required |= aq_to_restore;
+ mutex_unlock(&adapter->crit_lock);
return 0;
}
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 841fa149c407..001500afc4a6 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -864,6 +864,7 @@ ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp,
struct ice_q_stats stats, u64 *pkts, u64 *bytes);
int ice_up(struct ice_vsi *vsi);
int ice_down(struct ice_vsi *vsi);
+int ice_down_up(struct ice_vsi *vsi);
int ice_vsi_cfg(struct ice_vsi *vsi);
struct ice_vsi *ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi);
int ice_vsi_determine_xdp_res(struct ice_vsi *vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 9939238573a4..1bdc70aa979d 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -1423,6 +1423,56 @@ struct ice_aqc_set_port_id_led {
u8 rsvd[13];
};
+/* Get Port Options (indirect, 0x06EA) */
+struct ice_aqc_get_port_options {
+ u8 lport_num;
+ u8 lport_num_valid;
+ u8 port_options_count;
+#define ICE_AQC_PORT_OPT_COUNT_M GENMASK(3, 0)
+#define ICE_AQC_PORT_OPT_MAX 16
+
+ u8 innermost_phy_index;
+ u8 port_options;
+#define ICE_AQC_PORT_OPT_ACTIVE_M GENMASK(3, 0)
+#define ICE_AQC_PORT_OPT_VALID BIT(7)
+
+ u8 pending_port_option_status;
+#define ICE_AQC_PENDING_PORT_OPT_IDX_M GENMASK(3, 0)
+#define ICE_AQC_PENDING_PORT_OPT_VALID BIT(7)
+
+ u8 rsvd[2];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct ice_aqc_get_port_options_elem {
+ u8 pmd;
+#define ICE_AQC_PORT_OPT_PMD_COUNT_M GENMASK(3, 0)
+
+ u8 max_lane_speed;
+#define ICE_AQC_PORT_OPT_MAX_LANE_M GENMASK(3, 0)
+#define ICE_AQC_PORT_OPT_MAX_LANE_100M 0
+#define ICE_AQC_PORT_OPT_MAX_LANE_1G 1
+#define ICE_AQC_PORT_OPT_MAX_LANE_2500M 2
+#define ICE_AQC_PORT_OPT_MAX_LANE_5G 3
+#define ICE_AQC_PORT_OPT_MAX_LANE_10G 4
+#define ICE_AQC_PORT_OPT_MAX_LANE_25G 5
+#define ICE_AQC_PORT_OPT_MAX_LANE_50G 6
+#define ICE_AQC_PORT_OPT_MAX_LANE_100G 7
+
+ u8 global_scid[2];
+ u8 phy_scid[2];
+ u8 pf2port_cid[2];
+};
+
+/* Set Port Option (direct, 0x06EB) */
+struct ice_aqc_set_port_option {
+ u8 lport_num;
+ u8 lport_num_valid;
+ u8 selected_port_option;
+ u8 rsvd[13];
+};
+
/* Set/Get GPIO (direct, 0x06EC/0x06ED) */
struct ice_aqc_gpio {
__le16 gpio_ctrl_handle;
@@ -1489,6 +1539,12 @@ struct ice_aqc_nvm {
#define ICE_AQC_NVM_PERST_FLAG 1
#define ICE_AQC_NVM_EMPR_FLAG 2
#define ICE_AQC_NVM_EMPR_ENA BIT(0) /* Write Activate reply only */
+ /* For Write Activate, several flags are sent as part of a separate
+ * flags2 field using a separate byte. For simplicity of the software
+	 * interface, we pass the flags as a 16-bit value, so these flags are
+	 * all offset by 8 bits.
+ */
+#define ICE_AQC_NVM_ACTIV_REQ_EMPR BIT(8) /* NVM Write Activate only */
__le16 module_typeid;
__le16 length;
#define ICE_AQC_NVM_ERASE_LEN 0xFFFF
@@ -2082,6 +2138,8 @@ struct ice_aq_desc {
struct ice_aqc_gpio read_write_gpio;
struct ice_aqc_sff_eeprom read_write_sff_param;
struct ice_aqc_set_port_id_led set_port_id_led;
+ struct ice_aqc_get_port_options get_port_options;
+ struct ice_aqc_set_port_option set_port_option;
struct ice_aqc_get_sw_cfg get_sw_conf;
struct ice_aqc_set_port_params set_port_params;
struct ice_aqc_sw_rules sw_rules;
@@ -2243,6 +2301,8 @@ enum ice_adminq_opc {
ice_aqc_opc_read_i2c = 0x06E2,
ice_aqc_opc_write_i2c = 0x06E3,
ice_aqc_opc_set_port_id_led = 0x06E9,
+ ice_aqc_opc_get_port_options = 0x06EA,
+ ice_aqc_opc_set_port_option = 0x06EB,
ice_aqc_opc_set_gpio = 0x06EC,
ice_aqc_opc_get_gpio = 0x06ED,
ice_aqc_opc_sff_eeprom = 0x06EE,
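Illustrative fragment, not part of the patch, and an assumption about how the
NVM write-activate helper splits the 16-bit value: bits below 8 travel in the
ordinary command flags byte, while ICE_AQC_NVM_ACTIV_REQ_EMPR (BIT(8)) and any
future flags2 bits are shifted down into the separate flags2 byte:

	u16 activate_flags = ICE_AQC_NVM_ACTIV_REQ_EMPR;
	u8 cmd_flags = activate_flags & 0xff;	/* classic cmd_flags bits */
	u8 flags2 = activate_flags >> 8;	/* flags2 byte: BIT(0) requests EMPR */

This keeps the software interface a single 16-bit argument, as used by
ice_nvm_write_activate() in the devlink port-split path further below, without
changing the wire layout of the AQ descriptor.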
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index 1e3243808178..9e36f01dfa4f 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -130,8 +130,7 @@ static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx)
* handler here (i.e. resume, reset/rebuild, etc.)
*/
if (vsi->netdev)
- netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(vsi->netdev, &q_vector->napi, ice_napi_poll);
out:
/* tie q_vector and VSI together */
@@ -405,7 +404,7 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
/* Strip the Ethernet CRC bytes before the packet is posted to host
* memory.
*/
- rlan_ctx.crcstrip = 1;
+ rlan_ctx.crcstrip = !(ring->flags & ICE_RX_FLAGS_CRC_STRIP_DIS);
/* L2TSEL flag defines the reported L2 Tags in the receive descriptor
* and it needs to remain 1 for non-DVM capable configurations to not
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 27d0cbbd29da..039342a0ed15 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -8,6 +8,108 @@
#define ICE_PF_RESET_WAIT_COUNT 300
+static const char * const ice_link_mode_str_low[] = {
+ [0] = "100BASE_TX",
+ [1] = "100M_SGMII",
+ [2] = "1000BASE_T",
+ [3] = "1000BASE_SX",
+ [4] = "1000BASE_LX",
+ [5] = "1000BASE_KX",
+ [6] = "1G_SGMII",
+ [7] = "2500BASE_T",
+ [8] = "2500BASE_X",
+ [9] = "2500BASE_KX",
+ [10] = "5GBASE_T",
+ [11] = "5GBASE_KR",
+ [12] = "10GBASE_T",
+ [13] = "10G_SFI_DA",
+ [14] = "10GBASE_SR",
+ [15] = "10GBASE_LR",
+ [16] = "10GBASE_KR_CR1",
+ [17] = "10G_SFI_AOC_ACC",
+ [18] = "10G_SFI_C2C",
+ [19] = "25GBASE_T",
+ [20] = "25GBASE_CR",
+ [21] = "25GBASE_CR_S",
+ [22] = "25GBASE_CR1",
+ [23] = "25GBASE_SR",
+ [24] = "25GBASE_LR",
+ [25] = "25GBASE_KR",
+ [26] = "25GBASE_KR_S",
+ [27] = "25GBASE_KR1",
+ [28] = "25G_AUI_AOC_ACC",
+ [29] = "25G_AUI_C2C",
+ [30] = "40GBASE_CR4",
+ [31] = "40GBASE_SR4",
+ [32] = "40GBASE_LR4",
+ [33] = "40GBASE_KR4",
+ [34] = "40G_XLAUI_AOC_ACC",
+ [35] = "40G_XLAUI",
+ [36] = "50GBASE_CR2",
+ [37] = "50GBASE_SR2",
+ [38] = "50GBASE_LR2",
+ [39] = "50GBASE_KR2",
+ [40] = "50G_LAUI2_AOC_ACC",
+ [41] = "50G_LAUI2",
+ [42] = "50G_AUI2_AOC_ACC",
+ [43] = "50G_AUI2",
+ [44] = "50GBASE_CP",
+ [45] = "50GBASE_SR",
+ [46] = "50GBASE_FR",
+ [47] = "50GBASE_LR",
+ [48] = "50GBASE_KR_PAM4",
+ [49] = "50G_AUI1_AOC_ACC",
+ [50] = "50G_AUI1",
+ [51] = "100GBASE_CR4",
+ [52] = "100GBASE_SR4",
+ [53] = "100GBASE_LR4",
+ [54] = "100GBASE_KR4",
+ [55] = "100G_CAUI4_AOC_ACC",
+ [56] = "100G_CAUI4",
+ [57] = "100G_AUI4_AOC_ACC",
+ [58] = "100G_AUI4",
+ [59] = "100GBASE_CR_PAM4",
+ [60] = "100GBASE_KR_PAM4",
+ [61] = "100GBASE_CP2",
+ [62] = "100GBASE_SR2",
+ [63] = "100GBASE_DR",
+};
+
+static const char * const ice_link_mode_str_high[] = {
+ [0] = "100GBASE_KR2_PAM4",
+ [1] = "100G_CAUI2_AOC_ACC",
+ [2] = "100G_CAUI2",
+ [3] = "100G_AUI2_AOC_ACC",
+ [4] = "100G_AUI2",
+};
+
+/**
+ * ice_dump_phy_type - helper function to dump phy_type
+ * @hw: pointer to the HW structure
+ * @low: 64 bit value for phy_type_low
+ * @high: 64 bit value for phy_type_high
+ * @prefix: prefix string to differentiate multiple dumps
+ */
+static void
+ice_dump_phy_type(struct ice_hw *hw, u64 low, u64 high, const char *prefix)
+{
+ ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_low: 0x%016llx\n", prefix, low);
+
+ for (u32 i = 0; i < BITS_PER_TYPE(typeof(low)); i++) {
+ if (low & BIT_ULL(i))
+ ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n",
+ prefix, i, ice_link_mode_str_low[i]);
+ }
+
+ ice_debug(hw, ICE_DBG_PHY, "%s: phy_type_high: 0x%016llx\n", prefix, high);
+
+ for (u32 i = 0; i < BITS_PER_TYPE(typeof(high)); i++) {
+ if (high & BIT_ULL(i))
+ ice_debug(hw, ICE_DBG_PHY, "%s: bit(%d): %s\n",
+ prefix, i, ice_link_mode_str_high[i]);
+ }
+}
+
/**
* ice_set_mac_type - Sets MAC type
* @hw: pointer to the HW structure
@@ -80,9 +182,23 @@ bool ice_is_e810t(struct ice_hw *hw)
{
switch (hw->device_id) {
case ICE_DEV_ID_E810C_SFP:
- if (hw->subsystem_device_id == ICE_SUBDEV_ID_E810T ||
- hw->subsystem_device_id == ICE_SUBDEV_ID_E810T2)
+ switch (hw->subsystem_device_id) {
+ case ICE_SUBDEV_ID_E810T:
+ case ICE_SUBDEV_ID_E810T2:
+ case ICE_SUBDEV_ID_E810T3:
+ case ICE_SUBDEV_ID_E810T4:
+ case ICE_SUBDEV_ID_E810T6:
+ case ICE_SUBDEV_ID_E810T7:
return true;
+ }
+ break;
+ case ICE_DEV_ID_E810C_QSFP:
+ switch (hw->subsystem_device_id) {
+ case ICE_SUBDEV_ID_E810T2:
+ case ICE_SUBDEV_ID_E810T3:
+ case ICE_SUBDEV_ID_E810T5:
+ return true;
+ }
break;
default:
break;
@@ -183,6 +299,7 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
struct ice_aqc_get_phy_caps *cmd;
u16 pcaps_size = sizeof(*pcaps);
struct ice_aq_desc desc;
+ const char *prefix;
struct ice_hw *hw;
int status;
@@ -204,29 +321,48 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
cmd->param0 |= cpu_to_le16(report_mode);
status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd);
- ice_debug(hw, ICE_DBG_LINK, "get phy caps - report_mode = 0x%x\n",
- report_mode);
- ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
- (unsigned long long)le64_to_cpu(pcaps->phy_type_low));
- ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
- (unsigned long long)le64_to_cpu(pcaps->phy_type_high));
- ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", pcaps->caps);
- ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n",
+ ice_debug(hw, ICE_DBG_LINK, "get phy caps dump\n");
+
+ switch (report_mode) {
+ case ICE_AQC_REPORT_TOPO_CAP_MEDIA:
+ prefix = "phy_caps_media";
+ break;
+ case ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA:
+ prefix = "phy_caps_no_media";
+ break;
+ case ICE_AQC_REPORT_ACTIVE_CFG:
+ prefix = "phy_caps_active";
+ break;
+ case ICE_AQC_REPORT_DFLT_CFG:
+ prefix = "phy_caps_default";
+ break;
+ default:
+ prefix = "phy_caps_invalid";
+ }
+
+ ice_dump_phy_type(hw, le64_to_cpu(pcaps->phy_type_low),
+ le64_to_cpu(pcaps->phy_type_high), prefix);
+
+ ice_debug(hw, ICE_DBG_LINK, "%s: report_mode = 0x%x\n",
+ prefix, report_mode);
+ ice_debug(hw, ICE_DBG_LINK, "%s: caps = 0x%x\n", prefix, pcaps->caps);
+ ice_debug(hw, ICE_DBG_LINK, "%s: low_power_ctrl_an = 0x%x\n", prefix,
pcaps->low_power_ctrl_an);
- ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", pcaps->eee_cap);
- ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n",
+ ice_debug(hw, ICE_DBG_LINK, "%s: eee_cap = 0x%x\n", prefix,
+ pcaps->eee_cap);
+ ice_debug(hw, ICE_DBG_LINK, "%s: eeer_value = 0x%x\n", prefix,
pcaps->eeer_value);
- ice_debug(hw, ICE_DBG_LINK, " link_fec_options = 0x%x\n",
+ ice_debug(hw, ICE_DBG_LINK, "%s: link_fec_options = 0x%x\n", prefix,
pcaps->link_fec_options);
- ice_debug(hw, ICE_DBG_LINK, " module_compliance_enforcement = 0x%x\n",
- pcaps->module_compliance_enforcement);
- ice_debug(hw, ICE_DBG_LINK, " extended_compliance_code = 0x%x\n",
- pcaps->extended_compliance_code);
- ice_debug(hw, ICE_DBG_LINK, " module_type[0] = 0x%x\n",
+ ice_debug(hw, ICE_DBG_LINK, "%s: module_compliance_enforcement = 0x%x\n",
+ prefix, pcaps->module_compliance_enforcement);
+ ice_debug(hw, ICE_DBG_LINK, "%s: extended_compliance_code = 0x%x\n",
+ prefix, pcaps->extended_compliance_code);
+ ice_debug(hw, ICE_DBG_LINK, "%s: module_type[0] = 0x%x\n", prefix,
pcaps->module_type[0]);
- ice_debug(hw, ICE_DBG_LINK, " module_type[1] = 0x%x\n",
+ ice_debug(hw, ICE_DBG_LINK, "%s: module_type[1] = 0x%x\n", prefix,
pcaps->module_type[1]);
- ice_debug(hw, ICE_DBG_LINK, " module_type[2] = 0x%x\n",
+ ice_debug(hw, ICE_DBG_LINK, "%s: module_type[2] = 0x%x\n", prefix,
pcaps->module_type[2]);
if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA) {
@@ -2397,6 +2533,8 @@ ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
info->tmr1_owned = ((number & ICE_TS_TMR1_OWND_M) != 0);
info->tmr1_ena = ((number & ICE_TS_TMR1_ENA_M) != 0);
+ info->ts_ll_read = ((number & ICE_TS_LL_TX_TS_READ_M) != 0);
+
info->ena_ports = logical_id;
info->tmr_own_map = phys_id;
@@ -2414,6 +2552,8 @@ ice_parse_1588_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
info->tmr1_owned);
ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr1_ena = %u\n",
info->tmr1_ena);
+ ice_debug(hw, ICE_DBG_INIT, "dev caps: ts_ll_read = %u\n",
+ info->ts_ll_read);
ice_debug(hw, ICE_DBG_INIT, "dev caps: ieee_1588 ena_ports = %u\n",
info->ena_ports);
ice_debug(hw, ICE_DBG_INIT, "dev caps: tmr_own_map = %u\n",
@@ -2775,6 +2915,26 @@ ice_aq_set_port_params(struct ice_port_info *pi, bool double_vlan,
return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
+/**
+ * ice_is_100m_speed_supported
+ * @hw: pointer to the HW struct
+ *
+ * returns true if 100M speeds are supported by the device,
+ * false otherwise.
+ */
+bool ice_is_100m_speed_supported(struct ice_hw *hw)
+{
+ switch (hw->device_id) {
+ case ICE_DEV_ID_E822C_SGMII:
+ case ICE_DEV_ID_E822L_SGMII:
+ case ICE_DEV_ID_E823L_1GBE:
+ case ICE_DEV_ID_E823C_SGMII:
+ return true;
+ default:
+ return false;
+ }
+}
+
/**
* ice_get_link_speed_based_on_phy_type - returns link speed
* @phy_type_low: lower part of phy_type
@@ -3534,6 +3694,121 @@ ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
+/**
+ * ice_aq_get_port_options
+ * @hw: pointer to the HW struct
+ * @options: buffer for the resultant port options
+ * @option_count: input - size of the buffer in port options structures,
+ * output - number of returned port options
+ * @lport: logical port to call the command with (optional)
+ * @lport_valid: when false, FW uses the port owned by the PF instead of
+ *               lport; must be true when the PF owns more than one port
+ * @active_option_idx: index of active port option in returned buffer
+ * @active_option_valid: active option in returned buffer is valid
+ * @pending_option_idx: index of pending port option in returned buffer
+ * @pending_option_valid: pending option in returned buffer is valid
+ *
+ * Calls Get Port Options AQC (0x06ea) and verifies result.
+ */
+int
+ice_aq_get_port_options(struct ice_hw *hw,
+ struct ice_aqc_get_port_options_elem *options,
+ u8 *option_count, u8 lport, bool lport_valid,
+ u8 *active_option_idx, bool *active_option_valid,
+ u8 *pending_option_idx, bool *pending_option_valid)
+{
+ struct ice_aqc_get_port_options *cmd;
+ struct ice_aq_desc desc;
+ int status;
+ u8 i;
+
+ /* options buffer shall be able to hold max returned options */
+ if (*option_count < ICE_AQC_PORT_OPT_COUNT_M)
+ return -EINVAL;
+
+ cmd = &desc.params.get_port_options;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_port_options);
+
+ if (lport_valid)
+ cmd->lport_num = lport;
+ cmd->lport_num_valid = lport_valid;
+
+ status = ice_aq_send_cmd(hw, &desc, options,
+ *option_count * sizeof(*options), NULL);
+ if (status)
+ return status;
+
+ /* verify direct FW response & set output parameters */
+ *option_count = FIELD_GET(ICE_AQC_PORT_OPT_COUNT_M,
+ cmd->port_options_count);
+ ice_debug(hw, ICE_DBG_PHY, "options: %x\n", *option_count);
+ *active_option_valid = FIELD_GET(ICE_AQC_PORT_OPT_VALID,
+ cmd->port_options);
+ if (*active_option_valid) {
+ *active_option_idx = FIELD_GET(ICE_AQC_PORT_OPT_ACTIVE_M,
+ cmd->port_options);
+ if (*active_option_idx > (*option_count - 1))
+ return -EIO;
+ ice_debug(hw, ICE_DBG_PHY, "active idx: %x\n",
+ *active_option_idx);
+ }
+
+ *pending_option_valid = FIELD_GET(ICE_AQC_PENDING_PORT_OPT_VALID,
+ cmd->pending_port_option_status);
+ if (*pending_option_valid) {
+ *pending_option_idx = FIELD_GET(ICE_AQC_PENDING_PORT_OPT_IDX_M,
+ cmd->pending_port_option_status);
+ if (*pending_option_idx > (*option_count - 1))
+ return -EIO;
+ ice_debug(hw, ICE_DBG_PHY, "pending idx: %x\n",
+ *pending_option_idx);
+ }
+
+ /* mask output options fields */
+ for (i = 0; i < *option_count; i++) {
+ options[i].pmd = FIELD_GET(ICE_AQC_PORT_OPT_PMD_COUNT_M,
+ options[i].pmd);
+ options[i].max_lane_speed = FIELD_GET(ICE_AQC_PORT_OPT_MAX_LANE_M,
+ options[i].max_lane_speed);
+ ice_debug(hw, ICE_DBG_PHY, "pmds: %x max speed: %x\n",
+ options[i].pmd, options[i].max_lane_speed);
+ }
+
+ return 0;
+}
+
+/**
+ * ice_aq_set_port_option
+ * @hw: pointer to the HW struct
+ * @lport: logical port to call the command with
+ * @lport_valid: when false, FW uses the port owned by the PF instead of
+ *               lport; must be true when the PF owns more than one port
+ * @new_option: new port option to be written
+ *
+ * Calls Set Port Options AQC (0x06eb).
+ */
+int
+ice_aq_set_port_option(struct ice_hw *hw, u8 lport, u8 lport_valid,
+ u8 new_option)
+{
+ struct ice_aqc_set_port_option *cmd;
+ struct ice_aq_desc desc;
+
+ if (new_option > ICE_AQC_PORT_OPT_COUNT_M)
+ return -EINVAL;
+
+ cmd = &desc.params.set_port_option;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_option);
+
+ if (lport_valid)
+ cmd->lport_num = lport;
+
+ cmd->lport_num_valid = lport_valid;
+ cmd->selected_port_option = new_option;
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+}
+
/**
* ice_aq_sff_eeprom
* @hw: pointer to the HW struct
@@ -5028,6 +5303,29 @@ ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
return 0;
}
+/**
+ * ice_is_fw_api_min_ver
+ * @hw: pointer to the hardware structure
+ * @maj: major version
+ * @min: minor version
+ * @patch: patch version
+ *
+ * Checks if the firmware API version is at least the given major/minor/patch
+ */
+static bool ice_is_fw_api_min_ver(struct ice_hw *hw, u8 maj, u8 min, u8 patch)
+{
+ if (hw->api_maj_ver == maj) {
+ if (hw->api_min_ver > min)
+ return true;
+ if (hw->api_min_ver == min && hw->api_patch >= patch)
+ return true;
+ } else if (hw->api_maj_ver > maj) {
+ return true;
+ }
+
+ return false;
+}
+
/**
* ice_fw_supports_link_override
* @hw: pointer to the hardware structure
@@ -5036,17 +5334,9 @@ ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
*/
bool ice_fw_supports_link_override(struct ice_hw *hw)
{
- if (hw->api_maj_ver == ICE_FW_API_LINK_OVERRIDE_MAJ) {
- if (hw->api_min_ver > ICE_FW_API_LINK_OVERRIDE_MIN)
- return true;
- if (hw->api_min_ver == ICE_FW_API_LINK_OVERRIDE_MIN &&
- hw->api_patch >= ICE_FW_API_LINK_OVERRIDE_PATCH)
- return true;
- } else if (hw->api_maj_ver > ICE_FW_API_LINK_OVERRIDE_MAJ) {
- return true;
- }
-
- return false;
+ return ice_is_fw_api_min_ver(hw, ICE_FW_API_LINK_OVERRIDE_MAJ,
+ ICE_FW_API_LINK_OVERRIDE_MIN,
+ ICE_FW_API_LINK_OVERRIDE_PATCH);
}
/**
@@ -5179,16 +5469,9 @@ bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
if (hw->mac_type != ICE_MAC_E810)
return false;
- if (hw->api_maj_ver == ICE_FW_API_LLDP_FLTR_MAJ) {
- if (hw->api_min_ver > ICE_FW_API_LLDP_FLTR_MIN)
- return true;
- if (hw->api_min_ver == ICE_FW_API_LLDP_FLTR_MIN &&
- hw->api_patch >= ICE_FW_API_LLDP_FLTR_PATCH)
- return true;
- } else if (hw->api_maj_ver > ICE_FW_API_LLDP_FLTR_MAJ) {
- return true;
- }
- return false;
+ return ice_is_fw_api_min_ver(hw, ICE_FW_API_LLDP_FLTR_MAJ,
+ ICE_FW_API_LLDP_FLTR_MIN,
+ ICE_FW_API_LLDP_FLTR_PATCH);
}
/**
@@ -5225,14 +5508,7 @@ ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
*/
bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw)
{
- if (hw->api_maj_ver == ICE_FW_API_REPORT_DFLT_CFG_MAJ) {
- if (hw->api_min_ver > ICE_FW_API_REPORT_DFLT_CFG_MIN)
- return true;
- if (hw->api_min_ver == ICE_FW_API_REPORT_DFLT_CFG_MIN &&
- hw->api_patch >= ICE_FW_API_REPORT_DFLT_CFG_PATCH)
- return true;
- } else if (hw->api_maj_ver > ICE_FW_API_REPORT_DFLT_CFG_MAJ) {
- return true;
- }
- return false;
+ return ice_is_fw_api_min_ver(hw, ICE_FW_API_REPORT_DFLT_CFG_MAJ,
+ ICE_FW_API_REPORT_DFLT_CFG_MIN,
+ ICE_FW_API_REPORT_DFLT_CFG_PATCH);
}
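Illustrative fragment, not part of the patch: ice_is_fw_api_min_ver() added
above implements an "at least this version" check, so the three wrappers keep
their original semantics. With hypothetical version numbers:

	/* Assuming a required API version of 1.7.0:
	 *   running 1.6.9 -> false
	 *   running 1.7.0 -> true
	 *   running 1.7.5 -> true
	 *   running 2.0.0 -> true
	 */
	if (ice_is_fw_api_min_ver(hw, 1, 7, 0))
		use_new_aq_feature(hw);	/* hypothetical caller */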
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index 61b7c60db689..8b6712b92e84 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -151,6 +151,15 @@ int
ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
struct ice_sq_cd *cd);
int
+ice_aq_get_port_options(struct ice_hw *hw,
+ struct ice_aqc_get_port_options_elem *options,
+ u8 *option_count, u8 lport, bool lport_valid,
+ u8 *active_option_idx, bool *active_option_valid,
+ u8 *pending_option_idx, bool *pending_option_valid);
+int
+ice_aq_set_port_option(struct ice_hw *hw, u8 lport, u8 lport_valid,
+ u8 new_option);
+int
ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
bool write, struct ice_sq_cd *cd);
@@ -204,6 +213,7 @@ ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
int
ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
bool *value, struct ice_sq_cd *cd);
+bool ice_is_100m_speed_supported(struct ice_hw *hw);
int
ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
struct ice_sq_cd *cd);
diff --git a/drivers/net/ethernet/intel/ice/ice_devids.h b/drivers/net/ethernet/intel/ice/ice_devids.h
index b41bc3dc1745..6d560d1c74a4 100644
--- a/drivers/net/ethernet/intel/ice/ice_devids.h
+++ b/drivers/net/ethernet/intel/ice/ice_devids.h
@@ -24,6 +24,11 @@
#define ICE_DEV_ID_E810C_SFP 0x1593
#define ICE_SUBDEV_ID_E810T 0x000E
#define ICE_SUBDEV_ID_E810T2 0x000F
+#define ICE_SUBDEV_ID_E810T3 0x0010
+#define ICE_SUBDEV_ID_E810T4 0x0011
+#define ICE_SUBDEV_ID_E810T5 0x0012
+#define ICE_SUBDEV_ID_E810T6 0x02E9
+#define ICE_SUBDEV_ID_E810T7 0x02EA
/* Intel(R) Ethernet Controller E810-XXV for backplane */
#define ICE_DEV_ID_E810_XXV_BACKPLANE 0x1599
/* Intel(R) Ethernet Controller E810-XXV for QSFP */
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c
index 3337314a7b35..e6ec20079ced 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.c
+++ b/drivers/net/ethernet/intel/ice/ice_devlink.c
@@ -9,6 +9,8 @@
#include "ice_eswitch.h"
#include "ice_fw_update.h"
+static int ice_active_port_option = -1;
+
/* context for devlink info version reporting */
struct ice_info_ctx {
char buf[128];
@@ -466,12 +468,259 @@ ice_devlink_reload_empr_finish(struct devlink *devlink,
return 0;
}
+/**
+ * ice_devlink_port_opt_speed_str - convert speed to a string
+ * @speed: speed value
+ */
+static const char *ice_devlink_port_opt_speed_str(u8 speed)
+{
+ switch (speed & ICE_AQC_PORT_OPT_MAX_LANE_M) {
+ case ICE_AQC_PORT_OPT_MAX_LANE_100M:
+ return "0.1";
+ case ICE_AQC_PORT_OPT_MAX_LANE_1G:
+ return "1";
+ case ICE_AQC_PORT_OPT_MAX_LANE_2500M:
+ return "2.5";
+ case ICE_AQC_PORT_OPT_MAX_LANE_5G:
+ return "5";
+ case ICE_AQC_PORT_OPT_MAX_LANE_10G:
+ return "10";
+ case ICE_AQC_PORT_OPT_MAX_LANE_25G:
+ return "25";
+ case ICE_AQC_PORT_OPT_MAX_LANE_50G:
+ return "50";
+ case ICE_AQC_PORT_OPT_MAX_LANE_100G:
+ return "100";
+ }
+
+ return "-";
+}
+
+#define ICE_PORT_OPT_DESC_LEN 50
+/**
+ * ice_devlink_port_options_print - Print available port split options
+ * @pf: the PF to print split port options
+ *
+ * Prints a table with available port split options and max port speeds
+ */
+static void ice_devlink_port_options_print(struct ice_pf *pf)
+{
+ u8 i, j, options_count, cnt, speed, pending_idx, active_idx;
+ struct ice_aqc_get_port_options_elem *options, *opt;
+ struct device *dev = ice_pf_to_dev(pf);
+ bool active_valid, pending_valid;
+ char desc[ICE_PORT_OPT_DESC_LEN];
+ const char *str;
+ int status;
+
+ options = kcalloc(ICE_AQC_PORT_OPT_MAX * ICE_MAX_PORT_PER_PCI_DEV,
+ sizeof(*options), GFP_KERNEL);
+ if (!options)
+ return;
+
+ for (i = 0; i < ICE_MAX_PORT_PER_PCI_DEV; i++) {
+ opt = options + i * ICE_AQC_PORT_OPT_MAX;
+ options_count = ICE_AQC_PORT_OPT_MAX;
+ active_valid = 0;
+
+ status = ice_aq_get_port_options(&pf->hw, opt, &options_count,
+ i, true, &active_idx,
+ &active_valid, &pending_idx,
+ &pending_valid);
+ if (status) {
+ dev_dbg(dev, "Couldn't read port option for port %d, err %d\n",
+ i, status);
+ goto err;
+ }
+ }
+
+ dev_dbg(dev, "Available port split options and max port speeds (Gbps):\n");
+ dev_dbg(dev, "Status Split Quad 0 Quad 1\n");
+ dev_dbg(dev, " count L0 L1 L2 L3 L4 L5 L6 L7\n");
+
+ for (i = 0; i < options_count; i++) {
+ cnt = 0;
+
+ if (i == ice_active_port_option)
+ str = "Active";
+ else if ((i == pending_idx) && pending_valid)
+ str = "Pending";
+ else
+ str = "";
+
+ cnt += snprintf(&desc[cnt], ICE_PORT_OPT_DESC_LEN - cnt,
+ "%-8s", str);
+
+ cnt += snprintf(&desc[cnt], ICE_PORT_OPT_DESC_LEN - cnt,
+ "%-6u", options[i].pmd);
+
+ for (j = 0; j < ICE_MAX_PORT_PER_PCI_DEV; ++j) {
+ speed = options[i + j * ICE_AQC_PORT_OPT_MAX].max_lane_speed;
+ str = ice_devlink_port_opt_speed_str(speed);
+ cnt += snprintf(&desc[cnt], ICE_PORT_OPT_DESC_LEN - cnt,
+ "%3s ", str);
+ }
+
+ dev_dbg(dev, "%s\n", desc);
+ }
+
+err:
+ kfree(options);
+}
+
+/**
+ * ice_devlink_aq_set_port_option - Send set port option admin queue command
+ * @pf: the PF to set the port option for
+ * @option_idx: selected port option
+ * @extack: extended netdev ack structure
+ *
+ * Sends set port option admin queue command with selected port option and
+ * calls NVM write activate.
+ */
+static int
+ice_devlink_aq_set_port_option(struct ice_pf *pf, u8 option_idx,
+ struct netlink_ext_ack *extack)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ int status;
+
+ status = ice_aq_set_port_option(&pf->hw, 0, true, option_idx);
+ if (status) {
+ dev_dbg(dev, "ice_aq_set_port_option, err %d aq_err %d\n",
+ status, pf->hw.adminq.sq_last_status);
+ NL_SET_ERR_MSG_MOD(extack, "Port split request failed");
+ return -EIO;
+ }
+
+ status = ice_acquire_nvm(&pf->hw, ICE_RES_WRITE);
+ if (status) {
+ dev_dbg(dev, "ice_acquire_nvm failed, err %d aq_err %d\n",
+ status, pf->hw.adminq.sq_last_status);
+ NL_SET_ERR_MSG_MOD(extack, "Failed to acquire NVM semaphore");
+ return -EIO;
+ }
+
+ status = ice_nvm_write_activate(&pf->hw, ICE_AQC_NVM_ACTIV_REQ_EMPR, NULL);
+ if (status) {
+ dev_dbg(dev, "ice_nvm_write_activate failed, err %d aq_err %d\n",
+ status, pf->hw.adminq.sq_last_status);
+ NL_SET_ERR_MSG_MOD(extack, "Port split request failed to save data");
+ ice_release_nvm(&pf->hw);
+ return -EIO;
+ }
+
+ ice_release_nvm(&pf->hw);
+
+ NL_SET_ERR_MSG_MOD(extack, "Reboot required to finish port split");
+ return 0;
+}
+
+/**
+ * ice_devlink_port_split - .port_split devlink handler
+ * @devlink: devlink instance structure
+ * @port: devlink port structure
+ * @count: number of ports to split to
+ * @extack: extended netdev ack structure
+ *
+ * Callback for the devlink .port_split operation.
+ *
+ * Unfortunately, the devlink expression of available options is limited
+ * to just a number, so search for an FW port option which supports
+ * the specified number. As there could be multiple FW port options with
+ * the same port split count, allow switching between them. When the same
+ * port split count request is issued again, switch to the next FW port
+ * option with the same port split count.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int
+ice_devlink_port_split(struct devlink *devlink, struct devlink_port *port,
+ unsigned int count, struct netlink_ext_ack *extack)
+{
+ struct ice_aqc_get_port_options_elem options[ICE_AQC_PORT_OPT_MAX];
+ u8 i, j, active_idx, pending_idx, new_option;
+ struct ice_pf *pf = devlink_priv(devlink);
+ u8 option_count = ICE_AQC_PORT_OPT_MAX;
+ struct device *dev = ice_pf_to_dev(pf);
+ bool active_valid, pending_valid;
+ int status;
+
+ status = ice_aq_get_port_options(&pf->hw, options, &option_count,
+ 0, true, &active_idx, &active_valid,
+ &pending_idx, &pending_valid);
+ if (status) {
+ dev_dbg(dev, "Couldn't read port split options, err = %d\n",
+ status);
+ NL_SET_ERR_MSG_MOD(extack, "Failed to get available port split options");
+ return -EIO;
+ }
+
+ new_option = ICE_AQC_PORT_OPT_MAX;
+ active_idx = pending_valid ? pending_idx : active_idx;
+ for (i = 1; i <= option_count; i++) {
+ /* In order to allow switching between FW port options with
+ * the same port split count, search for a new option starting
+ * from the active/pending option (with array wrap around).
+ */
+ j = (active_idx + i) % option_count;
+
+ if (count == options[j].pmd) {
+ new_option = j;
+ break;
+ }
+ }
+
+ if (new_option == active_idx) {
+ dev_dbg(dev, "request to split: count: %u is already set and there are no other options\n",
+ count);
+ NL_SET_ERR_MSG_MOD(extack, "Requested split count is already set");
+ ice_devlink_port_options_print(pf);
+ return -EINVAL;
+ }
+
+ if (new_option == ICE_AQC_PORT_OPT_MAX) {
+ dev_dbg(dev, "request to split: count: %u not found\n", count);
+ NL_SET_ERR_MSG_MOD(extack, "Port split requested unsupported port config");
+ ice_devlink_port_options_print(pf);
+ return -EINVAL;
+ }
+
+ status = ice_devlink_aq_set_port_option(pf, new_option, extack);
+ if (status)
+ return status;
+
+ ice_devlink_port_options_print(pf);
+
+ return 0;
+}
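For reference, a minimal userspace sketch (not driver code) of the wrap-around option search used by ice_devlink_port_split() above: starting one past the active/pending index means that repeating the same split count cycles through every FW option offering that count. The pmd values and the pick_option() helper are made up for the example; in the driver the request would typically arrive via a command such as "devlink port split <port> count 4".

#include <stdio.h>

static int pick_option(const unsigned char *pmd, int option_count,
                       int start_idx, unsigned int count)
{
        int i;

        /* search starting one past the current option, wrapping around */
        for (i = 1; i <= option_count; i++) {
                int j = (start_idx + i) % option_count;

                if (pmd[j] == count)
                        return j;
        }
        return -1;      /* no FW option offers this split count */
}

int main(void)
{
        /* sample per-option port counts; indexes 1 and 3 both offer 4 ports */
        unsigned char pmd[] = { 8, 4, 2, 4, 1 };

        /* with option 1 active, asking for 4 again selects option 3 */
        printf("next option: %d\n", pick_option(pmd, 5, 1, 4));
        return 0;
}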
+
+/**
+ * ice_devlink_port_unsplit - .port_unsplit devlink handler
+ * @devlink: devlink instance structure
+ * @port: devlink port structure
+ * @extack: extended netdev ack structure
+ *
+ * Callback for the devlink .port_unsplit operation.
+ * Calls ice_devlink_port_split() with a split count of 1. Note that the FW
+ * may not provide a port option with a split count of 1.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+static int
+ice_devlink_port_unsplit(struct devlink *devlink, struct devlink_port *port,
+ struct netlink_ext_ack *extack)
+{
+ return ice_devlink_port_split(devlink, port, 1, extack);
+}
+
static const struct devlink_ops ice_devlink_ops = {
.supported_flash_update_params = DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK,
.reload_actions = BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE),
/* The ice driver currently does not support driver reinit */
.reload_down = ice_devlink_reload_empr_start,
.reload_up = ice_devlink_reload_empr_finish,
+ .port_split = ice_devlink_port_split,
+ .port_unsplit = ice_devlink_port_unsplit,
.eswitch_mode_get = ice_eswitch_mode_get,
.eswitch_mode_set = ice_eswitch_mode_set,
.info_get = ice_devlink_info_get,
@@ -694,6 +943,39 @@ void ice_devlink_unregister_params(struct ice_pf *pf)
ARRAY_SIZE(ice_devlink_params));
}
+/**
+ * ice_devlink_set_port_split_options - Set port split options
+ * @pf: the PF to set port split options for
+ * @attrs: devlink attributes
+ *
+ * Sets devlink port split options based on available FW port options
+ */
+static void
+ice_devlink_set_port_split_options(struct ice_pf *pf,
+ struct devlink_port_attrs *attrs)
+{
+ struct ice_aqc_get_port_options_elem options[ICE_AQC_PORT_OPT_MAX];
+ u8 i, active_idx, pending_idx, option_count = ICE_AQC_PORT_OPT_MAX;
+ bool active_valid, pending_valid;
+ int status;
+
+ status = ice_aq_get_port_options(&pf->hw, options, &option_count,
+ 0, true, &active_idx, &active_valid,
+ &pending_idx, &pending_valid);
+ if (status) {
+ dev_dbg(ice_pf_to_dev(pf), "Couldn't read port split options, err = %d\n",
+ status);
+ return;
+ }
+
+ /* find the biggest available port split count */
+ for (i = 0; i < option_count; i++)
+ attrs->lanes = max_t(int, attrs->lanes, options[i].pmd);
+
+ attrs->splittable = attrs->lanes ? 1 : 0;
+ ice_active_port_option = active_idx;
+}
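A small sketch (not driver code) of how the devlink "lanes" attribute is derived in ice_devlink_set_port_split_options() above: it is simply the largest split count offered by any FW port option, and "splittable" follows from it being non-zero. The pmd values are invented for the example.

#include <stdio.h>

int main(void)
{
        unsigned char pmd[] = { 8, 4, 2, 1 };   /* ports per FW option (made up) */
        unsigned int lanes = 0;
        unsigned int i;

        for (i = 0; i < sizeof(pmd); i++)
                if (pmd[i] > lanes)
                        lanes = pmd[i];         /* biggest available split count */

        printf("lanes=%u splittable=%u\n", lanes, lanes ? 1U : 0U);
        return 0;
}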
+
/**
* ice_devlink_create_pf_port - Create a devlink port for this PF
* @pf: the PF to create a devlink port for
@@ -722,6 +1004,12 @@ int ice_devlink_create_pf_port(struct ice_pf *pf)
attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
attrs.phys.port_number = pf->hw.bus.func;
+ /* As the FW supports port split options only for the whole device,
+ * set the port split options only for the first PF.
+ */
+ if (pf->hw.pf_id == 0)
+ ice_devlink_set_port_split_options(pf, &attrs);
+
ice_devlink_set_switch_id(pf, &attrs.switch_id);
devlink_port_attrs_set(devlink_port, &attrs);
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c
index e35371e61e07..f9f15acae90a 100644
--- a/drivers/net/ethernet/intel/ice/ice_eswitch.c
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c
@@ -292,8 +292,8 @@ static int ice_eswitch_setup_reprs(struct ice_pf *pf)
if (max_vsi_num < vsi->vsi_num)
max_vsi_num = vsi->vsi_num;
- netif_napi_add(vf->repr->netdev, &vf->repr->q_vector->napi, ice_napi_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(vf->repr->netdev, &vf->repr->q_vector->napi,
+ ice_napi_poll);
netif_keep_dst(vf->repr->netdev);
}
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index a6fff8ebaf9d..b7be84bbe72d 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -136,6 +136,11 @@ static const struct ice_stats ice_gstrings_pf_stats[] = {
ICE_PF_STAT("mac_remote_faults.nic", stats.mac_remote_faults),
ICE_PF_STAT("fdir_sb_match.nic", stats.fd_sb_match),
ICE_PF_STAT("fdir_sb_status.nic", stats.fd_sb_status),
+ ICE_PF_STAT("tx_hwtstamp_skipped", ptp.tx_hwtstamp_skipped),
+ ICE_PF_STAT("tx_hwtstamp_timeouts", ptp.tx_hwtstamp_timeouts),
+ ICE_PF_STAT("tx_hwtstamp_flushed", ptp.tx_hwtstamp_flushed),
+ ICE_PF_STAT("tx_hwtstamp_discarded", ptp.tx_hwtstamp_discarded),
+ ICE_PF_STAT("late_cached_phc_updates", ptp.late_cached_phc_updates),
};
static const u32 ice_regs_dump_list[] = {
@@ -1284,10 +1289,7 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
}
if (test_bit(ICE_FLAG_LEGACY_RX, change_flags)) {
/* down and up VSI so that changes of Rx cfg are reflected. */
- if (!test_and_set_bit(ICE_VSI_DOWN, vsi->state)) {
- ice_down(vsi);
- ice_up(vsi);
- }
+ ice_down_up(vsi);
}
/* don't allow modification of this flag when a single VF is in
* promiscuous mode because it's not supported
@@ -1468,20 +1470,22 @@ ice_get_ethtool_stats(struct net_device *netdev,
/**
* ice_mask_min_supported_speeds
+ * @hw: pointer to the HW structure
* @phy_types_high: PHY type high
* @phy_types_low: PHY type low to apply minimum supported speeds mask
*
* Apply minimum supported speeds mask to PHY type low. These are the speeds
* for ethtool supported link mode.
*/
-static
-void ice_mask_min_supported_speeds(u64 phy_types_high, u64 *phy_types_low)
+static void
+ice_mask_min_supported_speeds(struct ice_hw *hw,
+ u64 phy_types_high, u64 *phy_types_low)
{
/* if QSFP connection with 100G speed, minimum supported speed is 25G */
if (*phy_types_low & ICE_PHY_TYPE_LOW_MASK_100G ||
phy_types_high & ICE_PHY_TYPE_HIGH_MASK_100G)
*phy_types_low &= ~ICE_PHY_TYPE_LOW_MASK_MIN_25G;
- else
+ else if (!ice_is_100m_speed_supported(hw))
*phy_types_low &= ~ICE_PHY_TYPE_LOW_MASK_MIN_1G;
}
@@ -1531,7 +1535,8 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
phy_types_low = le64_to_cpu(pf->nvm_phy_type_lo);
phy_types_high = le64_to_cpu(pf->nvm_phy_type_hi);
- ice_mask_min_supported_speeds(phy_types_high, &phy_types_low);
+ ice_mask_min_supported_speeds(&pf->hw, phy_types_high,
+ &phy_types_low);
/* determine advertised modes based on link override only
* if it's supported and if the FW doesn't abstract the
* driver from having to account for link overrides
@@ -2826,6 +2831,7 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
tx_rings[i].count = new_tx_cnt;
tx_rings[i].desc = NULL;
tx_rings[i].tx_buf = NULL;
+ tx_rings[i].tx_tstamps = &pf->ptp.port.tx;
err = ice_setup_tx_ring(&tx_rings[i]);
if (err) {
while (i--)
@@ -2884,6 +2890,7 @@ process_rx:
/* clone ring and setup updated count */
rx_rings[i] = *vsi->rx_rings[i];
rx_rings[i].count = new_rx_cnt;
+ rx_rings[i].cached_phctime = pf->ptp.cached_phc_time;
rx_rings[i].desc = NULL;
rx_rings[i].rx_buf = NULL;
/* this is to allow wr32 to have something to write to
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c
index c9f7393b783d..ee5b36941ba3 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.c
+++ b/drivers/net/ethernet/intel/ice/ice_lag.c
@@ -61,13 +61,13 @@ static void ice_lag_set_backup(struct ice_lag *lag)
*/
static void ice_display_lag_info(struct ice_lag *lag)
{
- const char *name, *peer, *upper, *role, *bonded, *master;
+ const char *name, *peer, *upper, *role, *bonded, *primary;
struct device *dev = &lag->pf->pdev->dev;
name = lag->netdev ? netdev_name(lag->netdev) : "unset";
peer = lag->peer_netdev ? netdev_name(lag->peer_netdev) : "unset";
upper = lag->upper_netdev ? netdev_name(lag->upper_netdev) : "unset";
- master = lag->master ? "TRUE" : "FALSE";
+ primary = lag->primary ? "TRUE" : "FALSE";
bonded = lag->bonded ? "BONDED" : "UNBONDED";
switch (lag->role) {
@@ -87,8 +87,8 @@ static void ice_display_lag_info(struct ice_lag *lag)
role = "ERROR";
}
- dev_dbg(dev, "%s %s, peer:%s, upper:%s, role:%s, master:%s\n", name,
- bonded, peer, upper, role, master);
+ dev_dbg(dev, "%s %s, peer:%s, upper:%s, role:%s, primary:%s\n", name,
+ bonded, peer, upper, role, primary);
}
/**
@@ -119,7 +119,7 @@ static void ice_lag_info_event(struct ice_lag *lag, void *ptr)
}
if (strcmp(bonding_info->slave.slave_name, lag_netdev_name)) {
- netdev_dbg(lag->netdev, "Bonding event recv, but slave info not for us\n");
+ netdev_dbg(lag->netdev, "Bonding event recv, but secondary info not for us\n");
goto lag_out;
}
@@ -164,8 +164,8 @@ ice_lag_link(struct ice_lag *lag, struct netdev_notifier_changeupper_info *info)
lag->bonded = true;
lag->role = ICE_LAG_UNSET;
- /* if this is the first element in an LAG mark as master */
- lag->master = !!(peers == 1);
+ /* if this is the first element in an LAG mark as primary */
+ lag->primary = !!(peers == 1);
}
/**
@@ -264,7 +264,7 @@ static void ice_lag_changeupper_event(struct ice_lag *lag, void *ptr)
netdev_dbg(netdev, "bonding %s\n", info->linking ? "LINK" : "UNLINK");
if (!netif_is_lag_master(info->upper_dev)) {
- netdev_dbg(netdev, "changeupper rcvd, but not master. bail\n");
+ netdev_dbg(netdev, "changeupper rcvd, but not primary. bail\n");
return;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.h b/drivers/net/ethernet/intel/ice/ice_lag.h
index c2e3688dd8fd..51b5cf467ce2 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.h
+++ b/drivers/net/ethernet/intel/ice/ice_lag.h
@@ -24,7 +24,7 @@ struct ice_lag {
struct net_device *upper_netdev; /* upper bonding netdev */
struct notifier_block notif_block;
u8 bonded:1; /* currently bonded */
- u8 master:1; /* this is a master */
+ u8 primary:1; /* this is primary */
u8 handler:1; /* did we register a rx_netdev_handler */
/* each thing blocking bonding will increment this value by one.
* If this value is zero, then bonding is allowed.
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 58d483e2f539..938ba8c215cb 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -1524,6 +1524,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
ring->netdev = vsi->netdev;
ring->dev = dev;
ring->count = vsi->num_rx_desc;
+ ring->cached_phctime = pf->ptp.cached_phc_time;
WRITE_ONCE(vsi->rx_rings[i], ring);
}
@@ -1563,6 +1564,22 @@ void ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena)
kfree(lut);
}
+/**
+ * ice_vsi_cfg_crc_strip - Configure CRC stripping for a VSI
+ * @vsi: VSI to be configured
+ * @disable: set to true to have FCS / CRC in the frame data
+ */
+void ice_vsi_cfg_crc_strip(struct ice_vsi *vsi, bool disable)
+{
+ int i;
+
+ ice_for_each_rxq(vsi, i)
+ if (disable)
+ vsi->rx_rings[i]->flags |= ICE_RX_FLAGS_CRC_STRIP_DIS;
+ else
+ vsi->rx_rings[i]->flags &= ~ICE_RX_FLAGS_CRC_STRIP_DIS;
+}
+
/**
* ice_vsi_cfg_rss_lut_key - Configure RSS params for a VSI
* @vsi: VSI to be configured
@@ -2971,9 +2988,6 @@ int ice_vsi_release(struct ice_vsi *vsi)
clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
}
- if (vsi->type == ICE_VSI_PF)
- ice_devlink_destroy_pf_port(pf);
-
if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
ice_rss_clean(vsi);
@@ -3031,6 +3045,9 @@ int ice_vsi_release(struct ice_vsi *vsi)
}
}
+ if (vsi->type == ICE_VSI_PF)
+ ice_devlink_destroy_pf_port(pf);
+
if (vsi->type == ICE_VSI_VF &&
vsi->agg_node && vsi->agg_node->valid)
vsi->agg_node->num_vsis--;
@@ -3278,6 +3295,12 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
*/
if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
ice_vsi_cfg_rss_lut_key(vsi);
+
+ /* disable or enable CRC stripping */
+ if (vsi->netdev)
+ ice_vsi_cfg_crc_strip(vsi, !!(vsi->netdev->features &
+ NETIF_F_RXFCS));
+
break;
case ICE_VSI_VF:
ret = ice_vsi_alloc_q_vectors(vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index 8712b1d2ceec..ec4bf0c89857 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -89,6 +89,8 @@ void ice_vsi_free_tx_rings(struct ice_vsi *vsi);
void ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena);
+void ice_vsi_cfg_crc_strip(struct ice_vsi *vsi, bool disable);
+
void ice_update_tx_ring_stats(struct ice_tx_ring *ring, u64 pkts, u64 bytes);
void ice_update_rx_ring_stats(struct ice_rx_ring *ring, u64 pkts, u64 bytes);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index e109cb93886b..0f6718719453 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -3093,7 +3093,8 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
if (oicr & PFINT_OICR_TSYN_TX_M) {
ena_mask &= ~PFINT_OICR_TSYN_TX_M;
- ice_ptp_process_ts(pf);
+ if (!hw->reset_ongoing)
+ ret = IRQ_WAKE_THREAD;
}
if (oicr & PFINT_OICR_TSYN_EVNT_M) {
@@ -3128,7 +3129,8 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
ice_service_task_schedule(pf);
}
}
- ret = IRQ_HANDLED;
+ if (!ret)
+ ret = IRQ_HANDLED;
ice_service_task_schedule(pf);
ice_irq_dynamic_ena(hw, NULL, NULL);
@@ -3136,6 +3138,24 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data)
return ret;
}
+/**
+ * ice_misc_intr_thread_fn - misc interrupt thread function
+ * @irq: interrupt number
+ * @data: pointer to a q_vector
+ */
+static irqreturn_t ice_misc_intr_thread_fn(int __always_unused irq, void *data)
+{
+ irqreturn_t ret = IRQ_HANDLED;
+ struct ice_pf *pf = data;
+ bool irq_handled;
+
+ irq_handled = ice_ptp_process_ts(pf);
+ if (!irq_handled)
+ ret = IRQ_WAKE_THREAD;
+
+ return ret;
+}
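A minimal userspace simulation (not kernel code) of the hard-handler/thread split used above: the hard handler only signals that deferred work is needed by returning IRQ_WAKE_THREAD, and the thread function stands in for ice_ptp_process_ts(), asking to be woken again while timestamps remain outstanding. The enum, the flag and the dispatch in main() are all simulated.

#include <stdbool.h>
#include <stdio.h>

enum irqreturn { IRQ_HANDLED, IRQ_WAKE_THREAD };

static bool tsyn_tx_cause = true;       /* pretend OICR reported a Tx timestamp */

static enum irqreturn hard_handler(void)
{
        /* no slow PHY register reads here; defer them to the thread */
        return tsyn_tx_cause ? IRQ_WAKE_THREAD : IRQ_HANDLED;
}

static enum irqreturn thread_fn(void)
{
        bool all_handled = true;        /* result of ice_ptp_process_ts() in the driver */

        return all_handled ? IRQ_HANDLED : IRQ_WAKE_THREAD;
}

int main(void)
{
        if (hard_handler() == IRQ_WAKE_THREAD)
                printf("thread ran, ret=%d\n", thread_fn());
        return 0;
}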
+
/**
* ice_dis_ctrlq_interrupts - disable control queue interrupts
* @hw: pointer to HW structure
@@ -3248,10 +3268,12 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf)
pf->num_avail_sw_msix -= 1;
pf->oicr_idx = (u16)oicr_idx;
- err = devm_request_irq(dev, pf->msix_entries[pf->oicr_idx].vector,
- ice_misc_intr, 0, pf->int_name, pf);
+ err = devm_request_threaded_irq(dev,
+ pf->msix_entries[pf->oicr_idx].vector,
+ ice_misc_intr, ice_misc_intr_thread_fn,
+ 0, pf->int_name, pf);
if (err) {
- dev_err(dev, "devm_request_irq for %s failed: %d\n",
+ dev_err(dev, "devm_request_threaded_irq for %s failed: %d\n",
pf->int_name, err);
ice_free_res(pf->irq_tracker, 1, ICE_RES_MISC_VEC_ID);
pf->num_avail_sw_msix += 1;
@@ -3288,7 +3310,7 @@ static void ice_napi_add(struct ice_vsi *vsi)
ice_for_each_q_vector(vsi, v_idx)
netif_napi_add(vsi->netdev, &vsi->q_vectors[v_idx]->napi,
- ice_napi_poll, NAPI_POLL_WEIGHT);
+ ice_napi_poll);
}
/**
@@ -3391,6 +3413,11 @@ static void ice_set_netdev_features(struct net_device *netdev)
if (is_dvm_ena)
netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX |
NETIF_F_HW_VLAN_STAG_TX;
+
+ /* Leave CRC / FCS stripping enabled by default, but allow the value to
+ * be changed at runtime
+ */
+ netdev->hw_features |= NETIF_F_RXFCS;
}
/**
@@ -3922,88 +3949,135 @@ static int ice_init_pf(struct ice_pf *pf)
return 0;
}
+/**
+ * ice_reduce_msix_usage - Reduce usage of MSI-X vectors
+ * @pf: board private structure
+ * @v_remain: number of remaining MSI-X vectors to be distributed
+ *
+ * Reduce the usage of MSI-X vectors when the entire request cannot be
+ * fulfilled. pf->num_lan_msix and pf->num_rdma_msix values are set based on
+ * the number of remaining vectors.
+ */
+static void ice_reduce_msix_usage(struct ice_pf *pf, int v_remain)
+{
+ int v_rdma;
+
+ if (!ice_is_rdma_ena(pf)) {
+ pf->num_lan_msix = v_remain;
+ return;
+ }
+
+ /* RDMA needs at least 1 interrupt in addition to AEQ MSIX */
+ v_rdma = ICE_RDMA_NUM_AEQ_MSIX + 1;
+
+ if (v_remain < ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_RDMA_MSIX) {
+ dev_warn(ice_pf_to_dev(pf), "Not enough MSI-X vectors to support RDMA.\n");
+ clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
+
+ pf->num_rdma_msix = 0;
+ pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX;
+ } else if ((v_remain < ICE_MIN_LAN_TXRX_MSIX + v_rdma) ||
+ (v_remain - v_rdma < v_rdma)) {
+ /* Support minimum RDMA and give remaining vectors to LAN MSIX */
+ pf->num_rdma_msix = ICE_MIN_RDMA_MSIX;
+ pf->num_lan_msix = v_remain - ICE_MIN_RDMA_MSIX;
+ } else {
+ /* Split remaining MSIX with RDMA after accounting for AEQ MSIX
+ */
+ pf->num_rdma_msix = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 +
+ ICE_RDMA_NUM_AEQ_MSIX;
+ pf->num_lan_msix = v_remain - pf->num_rdma_msix;
+ }
+}
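A worked userspace sketch (not driver code) of the distribution performed by ice_reduce_msix_usage() above. The MIN_*/AEQ constants below are illustrative stand-ins for the driver's ICE_MIN_LAN_TXRX_MSIX, ICE_MIN_RDMA_MSIX and ICE_RDMA_NUM_AEQ_MSIX values, and the remaining-vector count in main() is made up.

#include <stdio.h>

#define MIN_LAN_TXRX_MSIX       1       /* illustrative stand-ins, not the */
#define MIN_RDMA_MSIX           2       /* driver's actual ICE_* values    */
#define RDMA_NUM_AEQ_MSIX       4

static void reduce(int v_remain, int rdma_ena, int *lan, int *rdma)
{
        int v_rdma = RDMA_NUM_AEQ_MSIX + 1;     /* AEQ plus at least one more */

        if (!rdma_ena) {
                *lan = v_remain;
                *rdma = 0;
        } else if (v_remain < MIN_LAN_TXRX_MSIX + MIN_RDMA_MSIX) {
                *rdma = 0;                      /* RDMA gets disabled */
                *lan = MIN_LAN_TXRX_MSIX;
        } else if (v_remain < MIN_LAN_TXRX_MSIX + v_rdma ||
                   v_remain - v_rdma < v_rdma) {
                *rdma = MIN_RDMA_MSIX;          /* minimum RDMA, rest to LAN */
                *lan = v_remain - MIN_RDMA_MSIX;
        } else {
                *rdma = (v_remain - RDMA_NUM_AEQ_MSIX) / 2 + RDMA_NUM_AEQ_MSIX;
                *lan = v_remain - *rdma;        /* split the remainder */
        }
}

int main(void)
{
        int lan, rdma;

        reduce(20, 1, &lan, &rdma);     /* 20 vectors left -> lan=8, rdma=12 */
        printf("lan=%d rdma=%d\n", lan, rdma);
        return 0;
}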
+
/**
* ice_ena_msix_range - Request a range of MSIX vectors from the OS
* @pf: board private structure
*
- * compute the number of MSIX vectors required (v_budget) and request from
- * the OS. Return the number of vectors reserved or negative on failure
+ * Compute the number of MSIX vectors wanted and request from the OS. Adjust
+ * device usage if there are not enough vectors. Return the number of vectors
+ * reserved or negative on failure.
*/
static int ice_ena_msix_range(struct ice_pf *pf)
{
- int num_cpus, v_left, v_actual, v_other, v_budget = 0;
+ int num_cpus, hw_num_msix, v_other, v_wanted, v_actual;
struct device *dev = ice_pf_to_dev(pf);
- int needed, err, i;
+ int err, i;
- v_left = pf->hw.func_caps.common_cap.num_msix_vectors;
+ hw_num_msix = pf->hw.func_caps.common_cap.num_msix_vectors;
num_cpus = num_online_cpus();
- /* reserve for LAN miscellaneous handler */
- needed = ICE_MIN_LAN_OICR_MSIX;
- if (v_left < needed)
- goto no_hw_vecs_left_err;
- v_budget += needed;
- v_left -= needed;
+ /* LAN miscellaneous handler */
+ v_other = ICE_MIN_LAN_OICR_MSIX;
- /* reserve for flow director */
- if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
- needed = ICE_FDIR_MSIX;
- if (v_left < needed)
- goto no_hw_vecs_left_err;
- v_budget += needed;
- v_left -= needed;
- }
+ /* Flow Director */
+ if (test_bit(ICE_FLAG_FD_ENA, pf->flags))
+ v_other += ICE_FDIR_MSIX;
- /* reserve for switchdev */
- needed = ICE_ESWITCH_MSIX;
- if (v_left < needed)
- goto no_hw_vecs_left_err;
- v_budget += needed;
- v_left -= needed;
+ /* switchdev */
+ v_other += ICE_ESWITCH_MSIX;
- /* total used for non-traffic vectors */
- v_other = v_budget;
+ v_wanted = v_other;
- /* reserve vectors for LAN traffic */
- needed = num_cpus;
- if (v_left < needed)
- goto no_hw_vecs_left_err;
- pf->num_lan_msix = needed;
- v_budget += needed;
- v_left -= needed;
+ /* LAN traffic */
+ pf->num_lan_msix = num_cpus;
+ v_wanted += pf->num_lan_msix;
- /* reserve vectors for RDMA auxiliary driver */
+ /* RDMA auxiliary driver */
if (ice_is_rdma_ena(pf)) {
- needed = num_cpus + ICE_RDMA_NUM_AEQ_MSIX;
- if (v_left < needed)
- goto no_hw_vecs_left_err;
- pf->num_rdma_msix = needed;
- v_budget += needed;
- v_left -= needed;
+ pf->num_rdma_msix = num_cpus + ICE_RDMA_NUM_AEQ_MSIX;
+ v_wanted += pf->num_rdma_msix;
}
- pf->msix_entries = devm_kcalloc(dev, v_budget,
+ if (v_wanted > hw_num_msix) {
+ int v_remain;
+
+ dev_warn(dev, "not enough device MSI-X vectors. wanted = %d, available = %d\n",
+ v_wanted, hw_num_msix);
+
+ if (hw_num_msix < ICE_MIN_MSIX) {
+ err = -ERANGE;
+ goto exit_err;
+ }
+
+ v_remain = hw_num_msix - v_other;
+ if (v_remain < ICE_MIN_LAN_TXRX_MSIX) {
+ v_other = ICE_MIN_MSIX - ICE_MIN_LAN_TXRX_MSIX;
+ v_remain = ICE_MIN_LAN_TXRX_MSIX;
+ }
+
+ ice_reduce_msix_usage(pf, v_remain);
+ v_wanted = pf->num_lan_msix + pf->num_rdma_msix + v_other;
+
+ dev_notice(dev, "Reducing request to %d MSI-X vectors for LAN traffic.\n",
+ pf->num_lan_msix);
+ if (ice_is_rdma_ena(pf))
+ dev_notice(dev, "Reducing request to %d MSI-X vectors for RDMA.\n",
+ pf->num_rdma_msix);
+ }
+
+ pf->msix_entries = devm_kcalloc(dev, v_wanted,
sizeof(*pf->msix_entries), GFP_KERNEL);
if (!pf->msix_entries) {
err = -ENOMEM;
goto exit_err;
}
- for (i = 0; i < v_budget; i++)
+ for (i = 0; i < v_wanted; i++)
pf->msix_entries[i].entry = i;
/* actually reserve the vectors */
v_actual = pci_enable_msix_range(pf->pdev, pf->msix_entries,
- ICE_MIN_MSIX, v_budget);
+ ICE_MIN_MSIX, v_wanted);
if (v_actual < 0) {
dev_err(dev, "unable to reserve MSI-X vectors\n");
err = v_actual;
goto msix_err;
}
- if (v_actual < v_budget) {
+ if (v_actual < v_wanted) {
dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n",
- v_budget, v_actual);
+ v_wanted, v_actual);
if (v_actual < ICE_MIN_MSIX) {
/* error if we can't get minimum vectors */
@@ -4012,38 +4086,11 @@ static int ice_ena_msix_range(struct ice_pf *pf)
goto msix_err;
} else {
int v_remain = v_actual - v_other;
- int v_rdma = 0, v_min_rdma = 0;
- if (ice_is_rdma_ena(pf)) {
- /* Need at least 1 interrupt in addition to
- * AEQ MSIX
- */
- v_rdma = ICE_RDMA_NUM_AEQ_MSIX + 1;
- v_min_rdma = ICE_MIN_RDMA_MSIX;
- }
+ if (v_remain < ICE_MIN_LAN_TXRX_MSIX)
+ v_remain = ICE_MIN_LAN_TXRX_MSIX;
- if (v_actual == ICE_MIN_MSIX ||
- v_remain < ICE_MIN_LAN_TXRX_MSIX + v_min_rdma) {
- dev_warn(dev, "Not enough MSI-X vectors to support RDMA.\n");
- clear_bit(ICE_FLAG_RDMA_ENA, pf->flags);
-
- pf->num_rdma_msix = 0;
- pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX;
- } else if ((v_remain < ICE_MIN_LAN_TXRX_MSIX + v_rdma) ||
- (v_remain - v_rdma < v_rdma)) {
- /* Support minimum RDMA and give remaining
- * vectors to LAN MSIX
- */
- pf->num_rdma_msix = v_min_rdma;
- pf->num_lan_msix = v_remain - v_min_rdma;
- } else {
- /* Split remaining MSIX with RDMA after
- * accounting for AEQ MSIX
- */
- pf->num_rdma_msix = (v_remain - ICE_RDMA_NUM_AEQ_MSIX) / 2 +
- ICE_RDMA_NUM_AEQ_MSIX;
- pf->num_lan_msix = v_remain - pf->num_rdma_msix;
- }
+ ice_reduce_msix_usage(pf, v_remain);
dev_notice(dev, "Enabled %d MSI-X vectors for LAN traffic.\n",
pf->num_lan_msix);
@@ -4058,12 +4105,7 @@ static int ice_ena_msix_range(struct ice_pf *pf)
msix_err:
devm_kfree(dev, pf->msix_entries);
- goto exit_err;
-no_hw_vecs_left_err:
- dev_err(dev, "not enough device MSI-X vectors. requested = %d, available = %d\n",
- needed, v_left);
- err = -ERANGE;
exit_err:
pf->num_rdma_msix = 0;
pf->num_lan_msix = 0;
@@ -4557,6 +4599,10 @@ static int ice_register_netdev(struct ice_pf *pf)
if (!vsi || !vsi->netdev)
return -EIO;
+ err = ice_devlink_create_pf_port(pf);
+ if (err)
+ goto err_devlink_create;
+
err = register_netdev(vsi->netdev);
if (err)
goto err_register_netdev;
@@ -4564,17 +4610,13 @@ static int ice_register_netdev(struct ice_pf *pf)
set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
netif_carrier_off(vsi->netdev);
netif_tx_stop_all_queues(vsi->netdev);
- err = ice_devlink_create_pf_port(pf);
- if (err)
- goto err_devlink_create;
devlink_port_type_eth_set(&pf->devlink_port, vsi->netdev);
return 0;
-err_devlink_create:
- unregister_netdev(vsi->netdev);
- clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
err_register_netdev:
+ ice_devlink_destroy_pf_port(pf);
+err_devlink_create:
free_netdev(vsi->netdev);
vsi->netdev = NULL;
clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
@@ -4682,8 +4724,6 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
ice_set_safe_mode_caps(hw);
}
- hw->ucast_shared = true;
-
err = ice_init_pf(pf);
if (err) {
dev_err(dev, "ice_init_pf failed: %d\n", err);
@@ -5742,6 +5782,9 @@ ice_fdb_del(struct ndmsg *ndm, __always_unused struct nlattr *tb[],
NETIF_F_HW_VLAN_STAG_RX | \
NETIF_F_HW_VLAN_STAG_TX)
+#define NETIF_VLAN_STRIPPING_FEATURES (NETIF_F_HW_VLAN_CTAG_RX | \
+ NETIF_F_HW_VLAN_STAG_RX)
+
#define NETIF_VLAN_FILTERING_FEATURES (NETIF_F_HW_VLAN_CTAG_FILTER | \
NETIF_F_HW_VLAN_STAG_FILTER)
@@ -5828,6 +5871,14 @@ ice_fix_features(struct net_device *netdev, netdev_features_t features)
NETIF_F_HW_VLAN_STAG_TX);
}
+ if (!(netdev->features & NETIF_F_RXFCS) &&
+ (features & NETIF_F_RXFCS) &&
+ (features & NETIF_VLAN_STRIPPING_FEATURES) &&
+ !ice_vsi_has_non_zero_vlans(np->vsi)) {
+ netdev_warn(netdev, "Disabling VLAN stripping as FCS/CRC stripping is also disabled and there is no VLAN configured\n");
+ features &= ~NETIF_VLAN_STRIPPING_FEATURES;
+ }
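A small userspace sketch (not driver code) of the fixup rule added to ice_fix_features() above: when the user turns NETIF_F_RXFCS on while VLAN stripping is still requested and no VLAN is configured, the stripping request is dropped instead of failing the operation. Feature bits and helper names are simplified for the example.

#include <stdbool.h>
#include <stdio.h>

struct features {
        bool rxfcs;             /* keep FCS/CRC in the frame data */
        bool vlan_strip;        /* HW VLAN stripping */
};

static struct features fix_features(struct features cur, struct features req,
                                    bool has_nonzero_vlans)
{
        if (!cur.rxfcs && req.rxfcs && req.vlan_strip && !has_nonzero_vlans)
                req.vlan_strip = false;         /* silently drop stripping */
        return req;
}

int main(void)
{
        struct features cur = { .rxfcs = false, .vlan_strip = true };
        struct features req = { .rxfcs = true,  .vlan_strip = true };
        struct features out = fix_features(cur, req, false);

        printf("vlan_strip after fixup: %d\n", out.vlan_strip);        /* 0 */
        return 0;
}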
+
return features;
}
@@ -5921,6 +5972,13 @@ ice_set_vlan_features(struct net_device *netdev, netdev_features_t features)
current_vlan_features = netdev->features & NETIF_VLAN_OFFLOAD_FEATURES;
requested_vlan_features = features & NETIF_VLAN_OFFLOAD_FEATURES;
if (current_vlan_features ^ requested_vlan_features) {
+ if ((features & NETIF_F_RXFCS) &&
+ (features & NETIF_VLAN_STRIPPING_FEATURES)) {
+ dev_err(ice_pf_to_dev(vsi->back),
+ "To enable VLAN stripping, you must first enable FCS/CRC stripping\n");
+ return -EIO;
+ }
+
err = ice_set_vlan_offload_features(vsi, features);
if (err)
return err;
@@ -6002,6 +6060,23 @@ ice_set_features(struct net_device *netdev, netdev_features_t features)
if (ret)
return ret;
+ /* Turn on reception of the FCS (aka CRC); after this flag is set,
+ * the packet data will have the 4 byte CRC appended
+ */
+ if (changed & NETIF_F_RXFCS) {
+ if ((features & NETIF_F_RXFCS) &&
+ (features & NETIF_VLAN_STRIPPING_FEATURES)) {
+ dev_err(ice_pf_to_dev(vsi->back),
+ "To disable FCS/CRC stripping, you must first disable VLAN stripping\n");
+ return -EIO;
+ }
+
+ ice_vsi_cfg_crc_strip(vsi, !!(features & NETIF_F_RXFCS));
+ ret = ice_down_up(vsi);
+ if (ret)
+ return ret;
+ }
+
if (changed & NETIF_F_NTUPLE) {
bool ena = !!(features & NETIF_F_NTUPLE);
@@ -6698,6 +6773,31 @@ int ice_down(struct ice_vsi *vsi)
return 0;
}
+/**
+ * ice_down_up - shutdown the VSI connection and bring it up
+ * @vsi: the VSI to be reconnected
+ */
+int ice_down_up(struct ice_vsi *vsi)
+{
+ int ret;
+
+ /* if DOWN already set, nothing to do */
+ if (test_and_set_bit(ICE_VSI_DOWN, vsi->state))
+ return 0;
+
+ ret = ice_down(vsi);
+ if (ret)
+ return ret;
+
+ ret = ice_up(vsi);
+ if (ret) {
+ netdev_err(vsi->netdev, "reallocating resources failed during netdev features change, may need to reload driver\n");
+ return ret;
+ }
+
+ return 0;
+}
+
/**
* ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources
* @vsi: VSI having resources allocated
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.c b/drivers/net/ethernet/intel/ice/ice_nvm.c
index 13cdb5ea594d..c262dc886e6a 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.c
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.c
@@ -1114,14 +1114,18 @@ int ice_nvm_validate_checksum(struct ice_hw *hw)
* Update the control word with the required banks' validity bits
* and dumps the Shadow RAM to flash (0x0707)
*
- * cmd_flags controls which banks to activate, and the preservation level to
- * use when activating the NVM bank.
+ * cmd_flags controls which banks to activate, the preservation level to use
+ * when activating the NVM bank, and whether an EMP reset is required for
+ * activation.
+ *
+ * Note that the 16-bit cmd_flags value is split between two separate 1-byte
+ * flag values in the descriptor.
*
* On successful return of the firmware command, the response_flags variable
* is updated with the flags reported by firmware indicating certain status,
* such as whether EMP reset is enabled.
*/
-int ice_nvm_write_activate(struct ice_hw *hw, u8 cmd_flags, u8 *response_flags)
+int ice_nvm_write_activate(struct ice_hw *hw, u16 cmd_flags, u8 *response_flags)
{
struct ice_aqc_nvm *cmd;
struct ice_aq_desc desc;
@@ -1130,7 +1134,8 @@ int ice_nvm_write_activate(struct ice_hw *hw, u8 cmd_flags, u8 *response_flags)
cmd = &desc.params.nvm;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_nvm_write_activate);
- cmd->cmd_flags = cmd_flags;
+ cmd->cmd_flags = (u8)(cmd_flags & 0xFF);
+ cmd->offset_high = (u8)((cmd_flags >> 8) & 0xFF);
err = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
if (!err && response_flags)
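A tiny sketch (not driver code) of the byte split performed by ice_nvm_write_activate() above: the low byte of the 16-bit cmd_flags goes into the descriptor's cmd_flags field and the high byte into offset_high. The flag value used here is arbitrary.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint16_t cmd_flags = 0x0103;                            /* arbitrary example */
        uint8_t low  = (uint8_t)(cmd_flags & 0xFF);             /* -> cmd_flags byte */
        uint8_t high = (uint8_t)((cmd_flags >> 8) & 0xFF);      /* -> offset_high byte */

        printf("low=0x%02x high=0x%02x\n", low, high);          /* low=0x03 high=0x01 */
        return 0;
}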
diff --git a/drivers/net/ethernet/intel/ice/ice_nvm.h b/drivers/net/ethernet/intel/ice/ice_nvm.h
index 856d1ad4398b..774c2317967d 100644
--- a/drivers/net/ethernet/intel/ice/ice_nvm.h
+++ b/drivers/net/ethernet/intel/ice/ice_nvm.h
@@ -34,7 +34,7 @@ ice_aq_update_nvm(struct ice_hw *hw, u16 module_typeid, u32 offset,
int
ice_aq_erase_nvm(struct ice_hw *hw, u16 module_typeid, struct ice_sq_cd *cd);
int ice_nvm_validate_checksum(struct ice_hw *hw);
-int ice_nvm_write_activate(struct ice_hw *hw, u8 cmd_flags, u8 *response_flags);
+int ice_nvm_write_activate(struct ice_hw *hw, u16 cmd_flags, u8 *response_flags);
int ice_aq_nvm_update_empr(struct ice_hw *hw);
int
ice_nvm_set_pkg_data(struct ice_hw *hw, bool del_pkg_data_flag, u8 *data,
diff --git a/drivers/net/ethernet/intel/ice/ice_protocol_type.h b/drivers/net/ethernet/intel/ice/ice_protocol_type.h
index 560efc7654c7..02a4e1cf624e 100644
--- a/drivers/net/ethernet/intel/ice/ice_protocol_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_protocol_type.h
@@ -44,6 +44,7 @@ enum ice_protocol_type {
ICE_GTP,
ICE_GTP_NO_PAY,
ICE_PPPOE,
+ ICE_L2TPV3,
ICE_VLAN_EX,
ICE_VLAN_IN,
ICE_VXLAN_GPE,
@@ -111,6 +112,7 @@ enum ice_prot_id {
#define ICE_UDP_ILOS_HW 53
#define ICE_GRE_OF_HW 64
#define ICE_PPPOE_HW 103
+#define ICE_L2TPV3_HW 104
#define ICE_UDP_OF_HW 52 /* UDP Tunnels */
#define ICE_META_DATA_ID_HW 255 /* this is used for tunnel and VLAN type */
@@ -217,6 +219,11 @@ struct ice_pppoe_hdr {
__be16 ppp_prot_id; /* control and data only */
};
+struct ice_l2tpv3_sess_hdr {
+ __be32 session_id;
+ __be64 cookie;
+};
+
struct ice_nvgre_hdr {
__be16 flags;
__be16 protocol;
@@ -235,6 +242,7 @@ union ice_prot_hdr {
struct ice_nvgre_hdr nvgre_hdr;
struct ice_udp_gtp_hdr gtp_hdr;
struct ice_pppoe_hdr pppoe_hdr;
+ struct ice_l2tpv3_sess_hdr l2tpv3_sess_hdr;
};
/* This is mapping table entry that maps every word within a given protocol
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index 72b663108a4a..011b727ab190 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -490,56 +490,6 @@ ice_ptp_read_src_clk_reg(struct ice_pf *pf, struct ptp_system_timestamp *sts)
return ((u64)hi << 32) | lo;
}
-/**
- * ice_ptp_update_cached_phctime - Update the cached PHC time values
- * @pf: Board specific private structure
- *
- * This function updates the system time values which are cached in the PF
- * structure and the Rx rings.
- *
- * This function must be called periodically to ensure that the cached value
- * is never more than 2 seconds old. It must also be called whenever the PHC
- * time has been changed.
- *
- * Return:
- * * 0 - OK, successfully updated
- * * -EAGAIN - PF was busy, need to reschedule the update
- */
-static int ice_ptp_update_cached_phctime(struct ice_pf *pf)
-{
- u64 systime;
- int i;
-
- if (test_and_set_bit(ICE_CFG_BUSY, pf->state))
- return -EAGAIN;
-
- /* Read the current PHC time */
- systime = ice_ptp_read_src_clk_reg(pf, NULL);
-
- /* Update the cached PHC time stored in the PF structure */
- WRITE_ONCE(pf->ptp.cached_phc_time, systime);
-
- ice_for_each_vsi(pf, i) {
- struct ice_vsi *vsi = pf->vsi[i];
- int j;
-
- if (!vsi)
- continue;
-
- if (vsi->type != ICE_VSI_PF)
- continue;
-
- ice_for_each_rxq(vsi, j) {
- if (!vsi->rx_rings[j])
- continue;
- WRITE_ONCE(vsi->rx_rings[j]->cached_phctime, systime);
- }
- }
- clear_bit(ICE_CFG_BUSY, pf->state);
-
- return 0;
-}
-
/**
* ice_ptp_extend_32b_ts - Convert a 32b nanoseconds timestamp to 64b
* @cached_phc_time: recently cached copy of PHC time
@@ -636,11 +586,399 @@ static u64 ice_ptp_extend_32b_ts(u64 cached_phc_time, u32 in_tstamp)
static u64 ice_ptp_extend_40b_ts(struct ice_pf *pf, u64 in_tstamp)
{
const u64 mask = GENMASK_ULL(31, 0);
+ unsigned long discard_time;
+
+ /* Discard the hardware timestamp if the cached PHC time is too old */
+ discard_time = pf->ptp.cached_phc_jiffies + msecs_to_jiffies(2000);
+ if (time_is_before_jiffies(discard_time)) {
+ pf->ptp.tx_hwtstamp_discarded++;
+ return 0;
+ }
return ice_ptp_extend_32b_ts(pf->ptp.cached_phc_time,
(in_tstamp >> 8) & mask);
}
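A minimal userspace sketch (not driver code) of the staleness check added to ice_ptp_extend_40b_ts() above: the low byte of the 40-bit raw value is shifted out before extension, and the result is discarded (and counted) when the cached PHC time is more than two seconds old. Jiffies are modelled with a plain millisecond counter and the sample values are made up.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool phc_cache_stale(uint64_t now_ms, uint64_t cached_at_ms)
{
        /* equivalent of time_is_before_jiffies(cached_at + 2000 msecs) */
        return now_ms > cached_at_ms + 2000;
}

int main(void)
{
        uint64_t raw40 = 0x12345678ABULL;       /* 40-bit value from the PHY */
        uint32_t upper32 = (uint32_t)((raw40 >> 8) & 0xFFFFFFFFULL);

        printf("value to extend: 0x%08x\n", upper32);           /* 0x12345678 */
        printf("stale? %d\n", phc_cache_stale(8000, 4500));     /* 1 -> discard */
        return 0;
}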
+/**
+ * ice_ptp_tx_tstamp - Process Tx timestamps for a port
+ * @tx: the PTP Tx timestamp tracker
+ *
+ * Process timestamps captured by the PHY associated with this port. To do
+ * this, loop over each index with a waiting skb.
+ *
+ * If a given index has a valid timestamp, perform the following steps:
+ *
+ * 1) copy the timestamp out of the PHY register
+ * 2) clear the timestamp valid bit in the PHY register
+ * 3) unlock the index by clearing the associated in_use bit
+ * 4) extend the 40b timestamp value to get a 64-bit timestamp
+ * 5) send that timestamp to the stack
+ *
+ * After looping, if we still have waiting SKBs, return false so the caller
+ * knows to poll again. This may cause us to effectively poll even when not
+ * strictly necessary. We do this because it's possible a new timestamp was
+ * requested around the same time as the interrupt.
+ * In some cases hardware might not interrupt us again when the timestamp is
+ * captured.
+ *
+ * Note that we only take the tracking lock when clearing the bit and when
+ * checking if we need to re-queue this task. The only place where bits can be
+ * set is the hard xmit routine where an SKB has a request flag set. The only
+ * places where we clear bits are this function, or the periodic cleanup
+ * thread. If the cleanup thread clears a bit we're processing we catch it
+ * when we lock to clear the bit and then grab the SKB pointer. If a Tx thread
+ * starts a new timestamp, we might not begin processing it right away but we
+ * will notice it at the end when we re-queue the task. If a Tx thread starts
+ * a new timestamp just after this function exits without re-queuing,
+ * the interrupt when the timestamp finishes should trigger. Avoiding holding
+ * the lock for the entire function is important in order to ensure that Tx
+ * threads do not get blocked while waiting for the lock.
+ */
+static bool ice_ptp_tx_tstamp(struct ice_ptp_tx *tx)
+{
+ struct ice_ptp_port *ptp_port;
+ bool ts_handled = true;
+ struct ice_pf *pf;
+ u8 idx;
+
+ if (!tx->init)
+ return false;
+
+ ptp_port = container_of(tx, struct ice_ptp_port, tx);
+ pf = ptp_port_to_pf(ptp_port);
+
+ for_each_set_bit(idx, tx->in_use, tx->len) {
+ struct skb_shared_hwtstamps shhwtstamps = {};
+ u8 phy_idx = idx + tx->quad_offset;
+ u64 raw_tstamp, tstamp;
+ struct sk_buff *skb;
+ int err;
+
+ ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx);
+
+ err = ice_read_phy_tstamp(&pf->hw, tx->quad, phy_idx,
+ &raw_tstamp);
+ if (err)
+ continue;
+
+ ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx);
+
+ /* Check if the timestamp is invalid or stale */
+ if (!(raw_tstamp & ICE_PTP_TS_VALID) ||
+ raw_tstamp == tx->tstamps[idx].cached_tstamp)
+ continue;
+
+ /* The timestamp is valid, so we'll go ahead and clear this
+ * index and then send the timestamp up to the stack.
+ */
+ spin_lock(&tx->lock);
+ tx->tstamps[idx].cached_tstamp = raw_tstamp;
+ clear_bit(idx, tx->in_use);
+ skb = tx->tstamps[idx].skb;
+ tx->tstamps[idx].skb = NULL;
+ spin_unlock(&tx->lock);
+
+ /* it's (unlikely but) possible we raced with the cleanup
+ * thread for discarding old timestamp requests.
+ */
+ if (!skb)
+ continue;
+
+ /* Extend the timestamp using cached PHC time */
+ tstamp = ice_ptp_extend_40b_ts(pf, raw_tstamp);
+ if (tstamp) {
+ shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
+ ice_trace(tx_tstamp_complete, skb, idx);
+ }
+
+ skb_tstamp_tx(skb, &shhwtstamps);
+ dev_kfree_skb_any(skb);
+ }
+
+ /* Check if we still have work to do. If so, re-queue this task to
+ * poll for remaining timestamps.
+ */
+ spin_lock(&tx->lock);
+ if (!bitmap_empty(tx->in_use, tx->len))
+ ts_handled = false;
+ spin_unlock(&tx->lock);
+
+ return ts_handled;
+}
+
+/**
+ * ice_ptp_alloc_tx_tracker - Initialize tracking for Tx timestamps
+ * @tx: Tx tracking structure to initialize
+ *
+ * Assumes that the length has already been initialized. Do not call this
+ * directly; use ice_ptp_init_tx_e822() or ice_ptp_init_tx_e810() instead.
+ */
+static int
+ice_ptp_alloc_tx_tracker(struct ice_ptp_tx *tx)
+{
+ tx->tstamps = kcalloc(tx->len, sizeof(*tx->tstamps), GFP_KERNEL);
+ if (!tx->tstamps)
+ return -ENOMEM;
+
+ tx->in_use = bitmap_zalloc(tx->len, GFP_KERNEL);
+ if (!tx->in_use) {
+ kfree(tx->tstamps);
+ tx->tstamps = NULL;
+ return -ENOMEM;
+ }
+
+ spin_lock_init(&tx->lock);
+
+ tx->init = 1;
+
+ return 0;
+}
+
+/**
+ * ice_ptp_flush_tx_tracker - Flush any remaining timestamps from the tracker
+ * @pf: Board private structure
+ * @tx: the tracker to flush
+ */
+static void
+ice_ptp_flush_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
+{
+ u8 idx;
+
+ for (idx = 0; idx < tx->len; idx++) {
+ u8 phy_idx = idx + tx->quad_offset;
+
+ spin_lock(&tx->lock);
+ if (tx->tstamps[idx].skb) {
+ dev_kfree_skb_any(tx->tstamps[idx].skb);
+ tx->tstamps[idx].skb = NULL;
+ pf->ptp.tx_hwtstamp_flushed++;
+ }
+ clear_bit(idx, tx->in_use);
+ spin_unlock(&tx->lock);
+
+ /* Clear any potential residual timestamp in the PHY block */
+ if (!pf->hw.reset_ongoing)
+ ice_clear_phy_tstamp(&pf->hw, tx->quad, phy_idx);
+ }
+}
+
+/**
+ * ice_ptp_release_tx_tracker - Release allocated memory for Tx tracker
+ * @pf: Board private structure
+ * @tx: Tx tracking structure to release
+ *
+ * Free memory associated with the Tx timestamp tracker.
+ */
+static void
+ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
+{
+ tx->init = 0;
+
+ ice_ptp_flush_tx_tracker(pf, tx);
+
+ kfree(tx->tstamps);
+ tx->tstamps = NULL;
+
+ bitmap_free(tx->in_use);
+ tx->in_use = NULL;
+
+ tx->len = 0;
+}
+
+/**
+ * ice_ptp_init_tx_e822 - Initialize tracking for Tx timestamps
+ * @pf: Board private structure
+ * @tx: the Tx tracking structure to initialize
+ * @port: the port this structure tracks
+ *
+ * Initialize the Tx timestamp tracker for this port. For generic MAC devices,
+ * the timestamp block is shared for all ports in the same quad. To avoid
+ * ports using the same timestamp index, logically break the block of
+ * registers into chunks based on the port number.
+ */
+static int
+ice_ptp_init_tx_e822(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
+{
+ tx->quad = port / ICE_PORTS_PER_QUAD;
+ tx->quad_offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT;
+ tx->len = INDEX_PER_PORT;
+
+ return ice_ptp_alloc_tx_tracker(tx);
+}
+
+/**
+ * ice_ptp_init_tx_e810 - Initialize tracking for Tx timestamps
+ * @pf: Board private structure
+ * @tx: the Tx tracking structure to initialize
+ *
+ * Initialize the Tx timestamp tracker for this PF. For E810 devices, each
+ * port has its own block of timestamps, independent of the other ports.
+ */
+static int
+ice_ptp_init_tx_e810(struct ice_pf *pf, struct ice_ptp_tx *tx)
+{
+ tx->quad = pf->hw.port_info->lport;
+ tx->quad_offset = 0;
+ tx->len = INDEX_PER_QUAD;
+
+ return ice_ptp_alloc_tx_tracker(tx);
+}
+
+/**
+ * ice_ptp_tx_tstamp_cleanup - Cleanup old timestamp requests that got dropped
+ * @pf: pointer to the PF struct
+ * @tx: PTP Tx tracker to clean up
+ *
+ * Loop through the Tx timestamp requests and see if any of them have been
+ * waiting for a long time. Discard any SKBs that have been waiting for more
+ * than 2 seconds. This is long enough to be reasonably sure that the
+ * timestamp will never be captured. This might happen if the packet gets
+ * discarded before it reaches the PHY timestamping block.
+ */
+static void ice_ptp_tx_tstamp_cleanup(struct ice_pf *pf, struct ice_ptp_tx *tx)
+{
+ struct ice_hw *hw = &pf->hw;
+ u8 idx;
+
+ if (!tx->init)
+ return;
+
+ for_each_set_bit(idx, tx->in_use, tx->len) {
+ struct sk_buff *skb;
+ u64 raw_tstamp;
+
+ /* Check if this SKB has been waiting for too long */
+ if (time_is_after_jiffies(tx->tstamps[idx].start + 2 * HZ))
+ continue;
+
+ /* Read tstamp to be able to use this register again */
+ ice_read_phy_tstamp(hw, tx->quad, idx + tx->quad_offset,
+ &raw_tstamp);
+
+ spin_lock(&tx->lock);
+ skb = tx->tstamps[idx].skb;
+ tx->tstamps[idx].skb = NULL;
+ clear_bit(idx, tx->in_use);
+ spin_unlock(&tx->lock);
+
+ /* Count the number of Tx timestamps which have timed out */
+ pf->ptp.tx_hwtstamp_timeouts++;
+
+ /* Free the SKB after we've cleared the bit */
+ dev_kfree_skb_any(skb);
+ }
+}
+
+/**
+ * ice_ptp_update_cached_phctime - Update the cached PHC time values
+ * @pf: Board specific private structure
+ *
+ * This function updates the system time values which are cached in the PF
+ * structure and the Rx rings.
+ *
+ * This function must be called periodically to ensure that the cached value
+ * is never more than 2 seconds old.
+ *
+ * Note that the cached copy in the PF PTP structure is always updated, even
+ * if we can't update the copy in the Rx rings.
+ *
+ * Return:
+ * * 0 - OK, successfully updated
+ * * -EAGAIN - PF was busy, need to reschedule the update
+ */
+static int ice_ptp_update_cached_phctime(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ unsigned long update_before;
+ u64 systime;
+ int i;
+
+ update_before = pf->ptp.cached_phc_jiffies + msecs_to_jiffies(2000);
+ if (pf->ptp.cached_phc_time &&
+ time_is_before_jiffies(update_before)) {
+ unsigned long time_taken = jiffies - pf->ptp.cached_phc_jiffies;
+
+ dev_warn(dev, "%u msecs passed between updates to cached PHC time\n",
+ jiffies_to_msecs(time_taken));
+ pf->ptp.late_cached_phc_updates++;
+ }
+
+ /* Read the current PHC time */
+ systime = ice_ptp_read_src_clk_reg(pf, NULL);
+
+ /* Update the cached PHC time stored in the PF structure */
+ WRITE_ONCE(pf->ptp.cached_phc_time, systime);
+ WRITE_ONCE(pf->ptp.cached_phc_jiffies, jiffies);
+
+ if (test_and_set_bit(ICE_CFG_BUSY, pf->state))
+ return -EAGAIN;
+
+ ice_for_each_vsi(pf, i) {
+ struct ice_vsi *vsi = pf->vsi[i];
+ int j;
+
+ if (!vsi)
+ continue;
+
+ if (vsi->type != ICE_VSI_PF)
+ continue;
+
+ ice_for_each_rxq(vsi, j) {
+ if (!vsi->rx_rings[j])
+ continue;
+ WRITE_ONCE(vsi->rx_rings[j]->cached_phctime, systime);
+ }
+ }
+ clear_bit(ICE_CFG_BUSY, pf->state);
+
+ return 0;
+}
+
+/**
+ * ice_ptp_reset_cached_phctime - Reset cached PHC time after an update
+ * @pf: Board specific private structure
+ *
+ * This function must be called when the cached PHC time is no longer valid,
+ * such as after a time adjustment. It discards any outstanding Tx timestamps,
+ * and updates the cached PHC time for both the PF and Rx rings. If updating
+ * the PHC time cannot be done immediately, a warning message is logged and
+ * the work item is scheduled.
+ *
+ * These steps are required in order to ensure that we do not accidentally
+ * report a timestamp extended by the wrong cached PHC copy. Outstanding Tx
+ * timestamps were captured against the old clock value, so they are flushed
+ * rather than extended against the new one. If the immediate update fails
+ * with -EAGAIN because ICE_CFG_BUSY is set, the Rx rings keep their previous
+ * cached value until the queued work item runs, which may briefly produce
+ * invalid Rx timestamps; there is not much more we can do in that case.
+ */
+static void ice_ptp_reset_cached_phctime(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ int err;
+
+ /* Update the cached PHC time immediately if possible, otherwise
+ * schedule the work item to execute soon.
+ */
+ err = ice_ptp_update_cached_phctime(pf);
+ if (err) {
+ /* If another thread is updating the Rx rings, we won't
+ * properly reset them here. This could lead to reporting of
+ * invalid timestamps, but there isn't much we can do.
+ */
+ dev_warn(dev, "%s: ICE_CFG_BUSY, unable to immediately update cached PHC time\n",
+ __func__);
+
+ /* Queue the work item to update the Rx rings when possible */
+ kthread_queue_delayed_work(pf->ptp.kworker, &pf->ptp.work,
+ msecs_to_jiffies(10));
+ }
+
+ /* Flush any outstanding Tx timestamps */
+ ice_ptp_flush_tx_tracker(pf, &pf->ptp.port.tx);
+}
+
/**
* ice_ptp_read_time - Read the time from the device
* @pf: Board private structure
@@ -900,6 +1238,9 @@ static void ice_ptp_wait_for_offset_valid(struct kthread_work *work)
hw = &pf->hw;
dev = ice_pf_to_dev(pf);
+ if (ice_is_reset_in_progress(pf->state))
+ return;
+
if (ice_ptp_check_offset_valid(port)) {
/* Offsets not ready yet, try again later */
kthread_queue_delayed_work(pf->ptp.kworker,
@@ -1509,7 +1850,7 @@ ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts)
ice_ptp_unlock(hw);
if (!err)
- ice_ptp_update_cached_phctime(pf);
+ ice_ptp_reset_cached_phctime(pf);
/* Reenable periodic outputs */
ice_ptp_enable_all_clkout(pf);
@@ -1588,7 +1929,7 @@ static int ice_ptp_adjtime(struct ptp_clock_info *info, s64 delta)
return err;
}
- ice_ptp_update_cached_phctime(pf);
+ ice_ptp_reset_cached_phctime(pf);
return 0;
}
@@ -1796,26 +2137,31 @@ void
ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring,
union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb)
{
+ struct skb_shared_hwtstamps *hwtstamps;
+ u64 ts_ns, cached_time;
u32 ts_high;
- u64 ts_ns;
- /* Populate timesync data into skb */
- if (rx_desc->wb.time_stamp_low & ICE_PTP_TS_VALID) {
- struct skb_shared_hwtstamps *hwtstamps;
+ if (!(rx_desc->wb.time_stamp_low & ICE_PTP_TS_VALID))
+ return;
- /* Use ice_ptp_extend_32b_ts directly, using the ring-specific
- * cached PHC value, rather than accessing the PF. This also
- * allows us to simply pass the upper 32bits of nanoseconds
- * directly. Calling ice_ptp_extend_40b_ts is unnecessary as
- * it would just discard these bits itself.
- */
- ts_high = le32_to_cpu(rx_desc->wb.flex_ts.ts_high);
- ts_ns = ice_ptp_extend_32b_ts(rx_ring->cached_phctime, ts_high);
+ cached_time = READ_ONCE(rx_ring->cached_phctime);
- hwtstamps = skb_hwtstamps(skb);
- memset(hwtstamps, 0, sizeof(*hwtstamps));
- hwtstamps->hwtstamp = ns_to_ktime(ts_ns);
- }
+ /* Do not report a timestamp if we don't have a cached PHC time */
+ if (!cached_time)
+ return;
+
+ /* Use ice_ptp_extend_32b_ts directly, using the ring-specific cached
+ * PHC value, rather than accessing the PF. This also allows us to
+ * simply pass the upper 32bits of nanoseconds directly. Calling
+ * ice_ptp_extend_40b_ts is unnecessary as it would just discard these
+ * bits itself.
+ */
+ ts_high = le32_to_cpu(rx_desc->wb.flex_ts.ts_high);
+ ts_ns = ice_ptp_extend_32b_ts(cached_time, ts_high);
+
+ hwtstamps = skb_hwtstamps(skb);
+ memset(hwtstamps, 0, sizeof(*hwtstamps));
+ hwtstamps->hwtstamp = ns_to_ktime(ts_ns);
}
/**
@@ -1870,50 +2216,27 @@ ice_ptp_setup_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info)
ice_ptp_disable_sma_pins_e810t(pf, info);
}
-/**
- * ice_ptp_setup_pins_e810t - Setup PTP pins in sysfs
- * @pf: pointer to the PF instance
- * @info: PTP clock capabilities
- */
-static void
-ice_ptp_setup_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info)
-{
- /* Check if SMA controller is in the netlist */
- if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL) &&
- !ice_is_pca9575_present(&pf->hw))
- ice_clear_feature_support(pf, ICE_F_SMA_CTRL);
-
- if (!ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) {
- info->n_ext_ts = N_EXT_TS_E810_NO_SMA;
- info->n_per_out = N_PER_OUT_E810T_NO_SMA;
- return;
- }
-
- info->n_per_out = N_PER_OUT_E810T;
-
- if (ice_is_feature_supported(pf, ICE_F_PTP_EXTTS)) {
- info->n_ext_ts = N_EXT_TS_E810;
- info->n_pins = NUM_PTP_PINS_E810T;
- info->verify = ice_verify_pin_e810t;
- }
-
- /* Complete setup of the SMA pins */
- ice_ptp_setup_sma_pins_e810t(pf, info);
-}
-
/**
* ice_ptp_setup_pins_e810 - Setup PTP pins in sysfs
* @pf: pointer to the PF instance
* @info: PTP clock capabilities
*/
-static void ice_ptp_setup_pins_e810(struct ice_pf *pf, struct ptp_clock_info *info)
+static void
+ice_ptp_setup_pins_e810(struct ice_pf *pf, struct ptp_clock_info *info)
{
info->n_per_out = N_PER_OUT_E810;
- if (!ice_is_feature_supported(pf, ICE_F_PTP_EXTTS))
- return;
+ if (ice_is_feature_supported(pf, ICE_F_PTP_EXTTS))
+ info->n_ext_ts = N_EXT_TS_E810;
- info->n_ext_ts = N_EXT_TS_E810;
+ if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) {
+ info->n_ext_ts = N_EXT_TS_E810;
+ info->n_pins = NUM_PTP_PINS_E810T;
+ info->verify = ice_verify_pin_e810t;
+
+ /* Complete setup of the SMA pins */
+ ice_ptp_setup_sma_pins_e810t(pf, info);
+ }
}
/**
@@ -1950,11 +2273,7 @@ static void
ice_ptp_set_funcs_e810(struct ice_pf *pf, struct ptp_clock_info *info)
{
info->enable = ice_ptp_gpio_enable_e810;
-
- if (ice_is_e810t(&pf->hw))
- ice_ptp_setup_pins_e810t(pf, info);
- else
- ice_ptp_setup_pins_e810(pf, info);
+ ice_ptp_setup_pins_e810(pf, info);
}
/**
@@ -2015,112 +2334,6 @@ static long ice_ptp_create_clock(struct ice_pf *pf)
return 0;
}
-/**
- * ice_ptp_tx_tstamp_work - Process Tx timestamps for a port
- * @work: pointer to the kthread_work struct
- *
- * Process timestamps captured by the PHY associated with this port. To do
- * this, loop over each index with a waiting skb.
- *
- * If a given index has a valid timestamp, perform the following steps:
- *
- * 1) copy the timestamp out of the PHY register
- * 4) clear the timestamp valid bit in the PHY register
- * 5) unlock the index by clearing the associated in_use bit.
- * 2) extend the 40b timestamp value to get a 64bit timestamp
- * 3) send that timestamp to the stack
- *
- * After looping, if we still have waiting SKBs, then re-queue the work. This
- * may cause us effectively poll even when not strictly necessary. We do this
- * because it's possible a new timestamp was requested around the same time as
- * the interrupt. In some cases hardware might not interrupt us again when the
- * timestamp is captured.
- *
- * Note that we only take the tracking lock when clearing the bit and when
- * checking if we need to re-queue this task. The only place where bits can be
- * set is the hard xmit routine where an SKB has a request flag set. The only
- * places where we clear bits are this work function, or the periodic cleanup
- * thread. If the cleanup thread clears a bit we're processing we catch it
- * when we lock to clear the bit and then grab the SKB pointer. If a Tx thread
- * starts a new timestamp, we might not begin processing it right away but we
- * will notice it at the end when we re-queue the work item. If a Tx thread
- * starts a new timestamp just after this function exits without re-queuing,
- * the interrupt when the timestamp finishes should trigger. Avoiding holding
- * the lock for the entire function is important in order to ensure that Tx
- * threads do not get blocked while waiting for the lock.
- */
-static void ice_ptp_tx_tstamp_work(struct kthread_work *work)
-{
- struct ice_ptp_port *ptp_port;
- struct ice_ptp_tx *tx;
- struct ice_pf *pf;
- struct ice_hw *hw;
- u8 idx;
-
- tx = container_of(work, struct ice_ptp_tx, work);
- if (!tx->init)
- return;
-
- ptp_port = container_of(tx, struct ice_ptp_port, tx);
- pf = ptp_port_to_pf(ptp_port);
- hw = &pf->hw;
-
- for_each_set_bit(idx, tx->in_use, tx->len) {
- struct skb_shared_hwtstamps shhwtstamps = {};
- u8 phy_idx = idx + tx->quad_offset;
- u64 raw_tstamp, tstamp;
- struct sk_buff *skb;
- int err;
-
- ice_trace(tx_tstamp_fw_req, tx->tstamps[idx].skb, idx);
-
- err = ice_read_phy_tstamp(hw, tx->quad, phy_idx,
- &raw_tstamp);
- if (err)
- continue;
-
- ice_trace(tx_tstamp_fw_done, tx->tstamps[idx].skb, idx);
-
- /* Check if the timestamp is invalid or stale */
- if (!(raw_tstamp & ICE_PTP_TS_VALID) ||
- raw_tstamp == tx->tstamps[idx].cached_tstamp)
- continue;
-
- /* The timestamp is valid, so we'll go ahead and clear this
- * index and then send the timestamp up to the stack.
- */
- spin_lock(&tx->lock);
- tx->tstamps[idx].cached_tstamp = raw_tstamp;
- clear_bit(idx, tx->in_use);
- skb = tx->tstamps[idx].skb;
- tx->tstamps[idx].skb = NULL;
- spin_unlock(&tx->lock);
-
- /* it's (unlikely but) possible we raced with the cleanup
- * thread for discarding old timestamp requests.
- */
- if (!skb)
- continue;
-
- /* Extend the timestamp using cached PHC time */
- tstamp = ice_ptp_extend_40b_ts(pf, raw_tstamp);
- shhwtstamps.hwtstamp = ns_to_ktime(tstamp);
-
- ice_trace(tx_tstamp_complete, skb, idx);
-
- skb_tstamp_tx(skb, &shhwtstamps);
- dev_kfree_skb_any(skb);
- }
-
- /* Check if we still have work to do. If so, re-queue this task to
- * poll for remaining timestamps.
- */
- spin_lock(&tx->lock);
- if (!bitmap_empty(tx->in_use, tx->len))
- kthread_queue_work(pf->ptp.kworker, &tx->work);
- spin_unlock(&tx->lock);
-}
-
/**
* ice_ptp_request_ts - Request an available Tx timestamp index
* @tx: the PTP Tx timestamp tracker to request from
@@ -2161,177 +2374,17 @@ s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
}
/**
- * ice_ptp_process_ts - Spawn kthread work to handle timestamps
+ * ice_ptp_process_ts - Process the PTP Tx timestamps
* @pf: Board private structure
*
- * Queue work required to process the PTP Tx timestamps outside of interrupt
- * context.
+ * Returns true if timestamps are processed.
*/
-void ice_ptp_process_ts(struct ice_pf *pf)
+bool ice_ptp_process_ts(struct ice_pf *pf)
{
if (pf->ptp.port.tx.init)
- kthread_queue_work(pf->ptp.kworker, &pf->ptp.port.tx.work);
-}
+ return ice_ptp_tx_tstamp(&pf->ptp.port.tx);
-/**
- * ice_ptp_alloc_tx_tracker - Initialize tracking for Tx timestamps
- * @tx: Tx tracking structure to initialize
- *
- * Assumes that the length has already been initialized. Do not call directly,
- * use the ice_ptp_init_tx_e822 or ice_ptp_init_tx_e810 instead.
- */
-static int
-ice_ptp_alloc_tx_tracker(struct ice_ptp_tx *tx)
-{
- tx->tstamps = kcalloc(tx->len, sizeof(*tx->tstamps), GFP_KERNEL);
- if (!tx->tstamps)
- return -ENOMEM;
-
- tx->in_use = bitmap_zalloc(tx->len, GFP_KERNEL);
- if (!tx->in_use) {
- kfree(tx->tstamps);
- tx->tstamps = NULL;
- return -ENOMEM;
- }
-
- spin_lock_init(&tx->lock);
- kthread_init_work(&tx->work, ice_ptp_tx_tstamp_work);
-
- tx->init = 1;
-
- return 0;
-}
-
-/**
- * ice_ptp_flush_tx_tracker - Flush any remaining timestamps from the tracker
- * @pf: Board private structure
- * @tx: the tracker to flush
- */
-static void
-ice_ptp_flush_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
-{
- u8 idx;
-
- for (idx = 0; idx < tx->len; idx++) {
- u8 phy_idx = idx + tx->quad_offset;
-
- spin_lock(&tx->lock);
- if (tx->tstamps[idx].skb) {
- dev_kfree_skb_any(tx->tstamps[idx].skb);
- tx->tstamps[idx].skb = NULL;
- }
- clear_bit(idx, tx->in_use);
- spin_unlock(&tx->lock);
-
- /* Clear any potential residual timestamp in the PHY block */
- if (!pf->hw.reset_ongoing)
- ice_clear_phy_tstamp(&pf->hw, tx->quad, phy_idx);
- }
-}
-
-/**
- * ice_ptp_release_tx_tracker - Release allocated memory for Tx tracker
- * @pf: Board private structure
- * @tx: Tx tracking structure to release
- *
- * Free memory associated with the Tx timestamp tracker.
- */
-static void
-ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx)
-{
- tx->init = 0;
-
- kthread_cancel_work_sync(&tx->work);
-
- ice_ptp_flush_tx_tracker(pf, tx);
-
- kfree(tx->tstamps);
- tx->tstamps = NULL;
-
- bitmap_free(tx->in_use);
- tx->in_use = NULL;
-
- tx->len = 0;
-}
-
-/**
- * ice_ptp_init_tx_e822 - Initialize tracking for Tx timestamps
- * @pf: Board private structure
- * @tx: the Tx tracking structure to initialize
- * @port: the port this structure tracks
- *
- * Initialize the Tx timestamp tracker for this port. For generic MAC devices,
- * the timestamp block is shared for all ports in the same quad. To avoid
- * ports using the same timestamp index, logically break the block of
- * registers into chunks based on the port number.
- */
-static int
-ice_ptp_init_tx_e822(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port)
-{
- tx->quad = port / ICE_PORTS_PER_QUAD;
- tx->quad_offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT;
- tx->len = INDEX_PER_PORT;
-
- return ice_ptp_alloc_tx_tracker(tx);
-}
-
-/**
- * ice_ptp_init_tx_e810 - Initialize tracking for Tx timestamps
- * @pf: Board private structure
- * @tx: the Tx tracking structure to initialize
- *
- * Initialize the Tx timestamp tracker for this PF. For E810 devices, each
- * port has its own block of timestamps, independent of the other ports.
- */
-static int
-ice_ptp_init_tx_e810(struct ice_pf *pf, struct ice_ptp_tx *tx)
-{
- tx->quad = pf->hw.port_info->lport;
- tx->quad_offset = 0;
- tx->len = INDEX_PER_QUAD;
-
- return ice_ptp_alloc_tx_tracker(tx);
-}
-
-/**
- * ice_ptp_tx_tstamp_cleanup - Cleanup old timestamp requests that got dropped
- * @hw: pointer to the hw struct
- * @tx: PTP Tx tracker to clean up
- *
- * Loop through the Tx timestamp requests and see if any of them have been
- * waiting for a long time. Discard any SKBs that have been waiting for more
- * than 2 seconds. This is long enough to be reasonably sure that the
- * timestamp will never be captured. This might happen if the packet gets
- * discarded before it reaches the PHY timestamping block.
- */
-static void ice_ptp_tx_tstamp_cleanup(struct ice_hw *hw, struct ice_ptp_tx *tx)
-{
- u8 idx;
-
- if (!tx->init)
- return;
-
- for_each_set_bit(idx, tx->in_use, tx->len) {
- struct sk_buff *skb;
- u64 raw_tstamp;
-
- /* Check if this SKB has been waiting for too long */
- if (time_is_after_jiffies(tx->tstamps[idx].start + 2 * HZ))
- continue;
-
- /* Read tstamp to be able to use this register again */
- ice_read_phy_tstamp(hw, tx->quad, idx + tx->quad_offset,
- &raw_tstamp);
-
- spin_lock(&tx->lock);
- skb = tx->tstamps[idx].skb;
- tx->tstamps[idx].skb = NULL;
- clear_bit(idx, tx->in_use);
- spin_unlock(&tx->lock);
-
- /* Free the SKB after we've cleared the bit */
- dev_kfree_skb_any(skb);
- }
+ return false;
}
static void ice_ptp_periodic_work(struct kthread_work *work)
@@ -2345,7 +2398,7 @@ static void ice_ptp_periodic_work(struct kthread_work *work)
err = ice_ptp_update_cached_phctime(pf);
- ice_ptp_tx_tstamp_cleanup(&pf->hw, &pf->ptp.port.tx);
+ ice_ptp_tx_tstamp_cleanup(pf, &pf->ptp.port.tx);
/* Run twice a second or reschedule if phc update failed */
kthread_queue_delayed_work(ptp->kworker, &ptp->work,
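With the kthread work item gone, ice_ptp_process_ts() does the timestamp processing inline and reports whether everything outstanding was handled. A minimal sketch of how a caller could act on that return value (the caller shown is hypothetical, not taken from this series):

    bool done = ice_ptp_process_ts(pf);

    if (!done) {
            /* Some Tx timestamps are still pending in the PHY; the
             * caller can poll again later (or keep the Tx timestamp
             * interrupt armed) instead of silently giving up.
             */
    }
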
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h
index 10e396abf130..028349295b71 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.h
@@ -105,7 +105,6 @@ struct ice_tx_tstamp {
/**
* struct ice_ptp_tx - Tracking structure for all Tx timestamp requests on a port
- * @work: work function to handle processing of Tx timestamps
* @lock: lock to prevent concurrent write to in_use bitmap
* @tstamps: array of len to store outstanding requests
* @in_use: bitmap of len to indicate which slots are in use
@@ -117,7 +116,6 @@ struct ice_tx_tstamp {
* window, timestamps are temporarily disabled.
*/
struct ice_ptp_tx {
- struct kthread_work work;
spinlock_t lock; /* lock protecting in_use bitmap */
struct ice_tx_tstamp *tstamps;
unsigned long *in_use;
@@ -163,6 +161,7 @@ struct ice_ptp_port {
* @work: delayed work function for periodic tasks
* @extts_work: work function for handling external Tx timestamps
* @cached_phc_time: a cached copy of the PHC time for timestamp extension
+ * @cached_phc_jiffies: jiffies when cached_phc_time was last updated
* @ext_ts_chan: the external timestamp channel in use
* @ext_ts_irq: the external timestamp IRQ in use
* @kworker: kwork thread for handling periodic work
@@ -171,12 +170,19 @@ struct ice_ptp_port {
* @clock: pointer to registered PTP clock device
* @tstamp_config: hardware timestamping configuration
* @reset_time: kernel time after clock stop on reset
+ * @tx_hwtstamp_skipped: number of Tx time stamp requests skipped
+ * @tx_hwtstamp_timeouts: number of Tx skbs discarded with no time stamp
+ * @tx_hwtstamp_flushed: number of Tx skbs flushed due to interface closed
+ * @tx_hwtstamp_discarded: number of Tx skbs discarded due to cached PHC time
+ * being too old to correctly extend timestamp
+ * @late_cached_phc_updates: number of times cached PHC update is late
*/
struct ice_ptp {
struct ice_ptp_port port;
struct kthread_delayed_work work;
struct kthread_work extts_work;
u64 cached_phc_time;
+ unsigned long cached_phc_jiffies;
u8 ext_ts_chan;
u8 ext_ts_irq;
struct kthread_worker *kworker;
@@ -185,6 +191,11 @@ struct ice_ptp {
struct ptp_clock *clock;
struct hwtstamp_config tstamp_config;
u64 reset_time;
+ u32 tx_hwtstamp_skipped;
+ u32 tx_hwtstamp_timeouts;
+ u32 tx_hwtstamp_flushed;
+ u32 tx_hwtstamp_discarded;
+ u32 late_cached_phc_updates;
};
#define __ptp_port_to_ptp(p) \
@@ -224,8 +235,8 @@ struct ice_ptp {
#define N_EXT_TS_E810 3
#define N_PER_OUT_E810 4
#define N_PER_OUT_E810T 3
-#define N_PER_OUT_E810T_NO_SMA 2
-#define N_EXT_TS_E810_NO_SMA 2
+#define N_PER_OUT_NO_SMA_E810T 2
+#define N_EXT_TS_NO_SMA_E810T 2
#define ETH_GLTSYN_ENA(_i) (0x03000348 + ((_i) * 4))
#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
@@ -236,7 +247,7 @@ void ice_ptp_cfg_timestamp(struct ice_pf *pf, bool ena);
int ice_get_ptp_clock_index(struct ice_pf *pf);
s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb);
-void ice_ptp_process_ts(struct ice_pf *pf);
+bool ice_ptp_process_ts(struct ice_pf *pf);
void
ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring,
@@ -269,7 +280,10 @@ ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
return -1;
}
-static inline void ice_ptp_process_ts(struct ice_pf *pf) { }
+static inline bool ice_ptp_process_ts(struct ice_pf *pf)
+{
+ return true;
+}
static inline void
ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring,
union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb) { }
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
index 6dff97d53d81..772b1f566d6e 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021, Intel Corporation. */
+#include <linux/delay.h>
#include "ice_common.h"
#include "ice_ptp_hw.h"
#include "ice_ptp_consts.h"
@@ -2586,6 +2587,89 @@ static int ice_write_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 val)
return 0;
}
+/**
+ * ice_read_phy_tstamp_ll_e810 - Read the PHY timestamp registers through the FW
+ * @hw: pointer to the HW struct
+ * @idx: the timestamp index to read
+ * @hi: 8 bit timestamp high value
+ * @lo: 32 bit timestamp low value
+ *
+ * Read an 8-bit timestamp high value and a 32-bit timestamp low value out of the
+ * timestamp block of the external PHY on the E810 device using the low latency
+ * timestamp read.
+ */
+static int
+ice_read_phy_tstamp_ll_e810(struct ice_hw *hw, u8 idx, u8 *hi, u32 *lo)
+{
+ u32 val;
+ u8 i;
+
+ /* Write TS index to read to the PF register so the FW can read it */
+ val = FIELD_PREP(TS_LL_READ_TS_IDX, idx) | TS_LL_READ_TS;
+ wr32(hw, PF_SB_ATQBAL, val);
+
+ /* Read the register repeatedly until the FW provides us the TS */
+ for (i = TS_LL_READ_RETRIES; i > 0; i--) {
+ val = rd32(hw, PF_SB_ATQBAL);
+
+ /* When the bit is cleared, the TS is ready in the register */
+ if (!(FIELD_GET(TS_LL_READ_TS, val))) {
+ /* High 8 bit value of the TS is on the bits 16:23 */
+ *hi = FIELD_GET(TS_LL_READ_TS_HIGH, val);
+
+ /* Read the low 32 bit value and set the TS valid bit */
+ *lo = rd32(hw, PF_SB_ATQBAH) | TS_VALID;
+ return 0;
+ }
+
+ udelay(10);
+ }
+
+ /* FW failed to provide the TS in time */
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read PTP timestamp using low latency read\n");
+ return -EINVAL;
+}
+
+/**
+ * ice_read_phy_tstamp_sbq_e810 - Read the PHY timestamp registers through the sbq
+ * @hw: pointer to the HW struct
+ * @lport: the lport to read from
+ * @idx: the timestamp index to read
+ * @hi: 8 bit timestamp high value
+ * @lo: 32 bit timestamp low value
+ *
+ * Read an 8-bit timestamp high value and a 32-bit timestamp low value out of the
+ * timestamp block of the external PHY on the E810 device using the sideband queue.
+ */
+static int
+ice_read_phy_tstamp_sbq_e810(struct ice_hw *hw, u8 lport, u8 idx, u8 *hi,
+ u32 *lo)
+{
+ u32 hi_addr = TS_EXT(HIGH_TX_MEMORY_BANK_START, lport, idx);
+ u32 lo_addr = TS_EXT(LOW_TX_MEMORY_BANK_START, lport, idx);
+ u32 lo_val, hi_val;
+ int err;
+
+ err = ice_read_phy_reg_e810(hw, lo_addr, &lo_val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read low PTP timestamp register, err %d\n",
+ err);
+ return err;
+ }
+
+ err = ice_read_phy_reg_e810(hw, hi_addr, &hi_val);
+ if (err) {
+ ice_debug(hw, ICE_DBG_PTP, "Failed to read high PTP timestamp register, err %d\n",
+ err);
+ return err;
+ }
+
+ *lo = lo_val;
+ *hi = (u8)hi_val;
+
+ return 0;
+}
+
/**
* ice_read_phy_tstamp_e810 - Read a PHY timestamp out of the external PHY
* @hw: pointer to the HW struct
@@ -2599,25 +2683,17 @@ static int ice_write_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 val)
static int
ice_read_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx, u64 *tstamp)
{
- u32 lo_addr, hi_addr, lo, hi;
+ u32 lo = 0;
+ u8 hi = 0;
int err;
- lo_addr = TS_EXT(LOW_TX_MEMORY_BANK_START, lport, idx);
- hi_addr = TS_EXT(HIGH_TX_MEMORY_BANK_START, lport, idx);
+ if (hw->dev_caps.ts_dev_info.ts_ll_read)
+ err = ice_read_phy_tstamp_ll_e810(hw, idx, &hi, &lo);
+ else
+ err = ice_read_phy_tstamp_sbq_e810(hw, lport, idx, &hi, &lo);
- err = ice_read_phy_reg_e810(hw, lo_addr, &lo);
- if (err) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to read low PTP timestamp register, err %d\n",
- err);
+ if (err)
return err;
- }
-
- err = ice_read_phy_reg_e810(hw, hi_addr, &hi);
- if (err) {
- ice_debug(hw, ICE_DBG_PTP, "Failed to read high PTP timestamp register, err %d\n",
- err);
- return err;
- }
/* For E810 devices, the timestamp is reported with the lower 32 bits
* in the low register, and the upper 8 bits in the high register.
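For reference, combining the two halves read above can be modelled in stand-alone C; the real driver helper may mask the raw value further, so treat this only as an illustration of the 8-bit-high/32-bit-low split described in the comment:

    #include <stdint.h>
    #include <stdio.h>

    /* E810 reports the lower 32 bits of a Tx timestamp in the "low"
     * register and the upper 8 bits in the "high" register, giving a
     * 40-bit raw value.
     */
    static uint64_t assemble_raw_ts(uint8_t hi, uint32_t lo)
    {
            return ((uint64_t)hi << 32) | lo;
    }

    int main(void)
    {
            /* example values only */
            printf("0x%010llx\n",
                   (unsigned long long)assemble_raw_ts(0xab, 0x12345678));
            return 0;
    }
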
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
index 1246e4ee4b5d..2bda64c76abc 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
@@ -402,6 +402,7 @@ bool ice_is_pca9575_present(struct ice_hw *hw);
#define INCVAL_HIGH_M 0xFF
/* Timestamp block macros */
+#define TS_VALID BIT(0)
#define TS_LOW_M 0xFFFFFFFF
#define TS_HIGH_M 0xFF
#define TS_HIGH_S 32
@@ -413,6 +414,12 @@ bool ice_is_pca9575_present(struct ice_hw *hw);
#define BYTES_PER_IDX_ADDR_L_U 8
#define BYTES_PER_IDX_ADDR_L 4
+/* Tx timestamp low latency read definitions */
+#define TS_LL_READ_RETRIES 200
+#define TS_LL_READ_TS_HIGH GENMASK(23, 16)
+#define TS_LL_READ_TS_IDX GENMASK(29, 24)
+#define TS_LL_READ_TS BIT(31)
+
/* Internal PHY timestamp address */
#define TS_L(a, idx) ((a) + ((idx) * BYTES_PER_IDX_ADDR_L_U))
#define TS_H(a, idx) ((a) + ((idx) * BYTES_PER_IDX_ADDR_L_U + \
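The TS_LL_READ_* additions are ordinary BIT()/GENMASK() fields of the 32-bit PF_SB_ATQBAL doorbell used by the low latency read above; in the kernel, FIELD_PREP()/FIELD_GET() from <linux/bitfield.h> do the packing and extraction. A small stand-alone model of the same bit manipulation:

    #include <assert.h>
    #include <stdint.h>

    #define TS_IDX_SHIFT  24           /* TS_LL_READ_TS_IDX: bits 29:24  */
    #define TS_HIGH_SHIFT 16           /* TS_LL_READ_TS_HIGH: bits 23:16 */
    #define TS_BUSY       (1u << 31)   /* TS_LL_READ_TS                  */

    static uint32_t pack_request(uint8_t idx)
    {
            /* ask the FW to fetch timestamp slot @idx */
            return ((uint32_t)(idx & 0x3f) << TS_IDX_SHIFT) | TS_BUSY;
    }

    static uint8_t unpack_ts_high(uint32_t val)
    {
            /* once the busy bit clears, bits 23:16 hold the high byte */
            return (val >> TS_HIGH_SHIFT) & 0xff;
    }

    int main(void)
    {
            assert(pack_request(5) == 0x85000000u);
            assert(unpack_ts_high(0x00ab0000u) == 0xab);
            return 0;
    }
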
diff --git a/drivers/net/ethernet/intel/ice/ice_repr.c b/drivers/net/ethernet/intel/ice/ice_repr.c
index 0dac67cd9c77..bd31748aae1b 100644
--- a/drivers/net/ethernet/intel/ice/ice_repr.c
+++ b/drivers/net/ethernet/intel/ice/ice_repr.c
@@ -377,10 +377,10 @@ static void ice_repr_rem(struct ice_vf *vf)
if (!vf->repr)
return;
- ice_devlink_destroy_vf_port(vf);
kfree(vf->repr->q_vector);
vf->repr->q_vector = NULL;
unregister_netdev(vf->repr->netdev);
+ ice_devlink_destroy_vf_port(vf);
free_netdev(vf->repr->netdev);
vf->repr->netdev = NULL;
#ifdef CONFIG_ICE_SWITCHDEV
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index 7947223536e3..118595763bba 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -1212,7 +1212,7 @@ int ice_sched_init_port(struct ice_port_info *pi)
hw = pi->hw;
/* Query the Default Topology from FW */
- buf = devm_kzalloc(ice_hw_to_dev(hw), ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
+ buf = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -1290,7 +1290,7 @@ err_init_port:
pi->root = NULL;
}
- devm_kfree(ice_hw_to_dev(hw), buf);
+ kfree(buf);
return status;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 3808034f7e7e..9b762f7972ce 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -42,6 +42,7 @@ enum {
ICE_PKT_GTP_NOPAY = BIT(8),
ICE_PKT_KMALLOC = BIT(9),
ICE_PKT_PPPOE = BIT(10),
+ ICE_PKT_L2TPV3 = BIT(11),
};
struct ice_dummy_pkt_offsets {
@@ -1258,6 +1259,65 @@ ICE_DECLARE_PKT_TEMPLATE(pppoe_ipv6_udp) = {
0x00, 0x00, /* 2 bytes for 4 bytes alignment */
};
+ICE_DECLARE_PKT_OFFSETS(ipv4_l2tpv3) = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_ETYPE_OL, 12 },
+ { ICE_IPV4_OFOS, 14 },
+ { ICE_L2TPV3, 34 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+ICE_DECLARE_PKT_TEMPLATE(ipv4_l2tpv3) = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x08, 0x00, /* ICE_ETYPE_OL 12 */
+
+ 0x45, 0x00, 0x00, 0x20, /* ICE_IPV4_IL 14 */
+ 0x00, 0x00, 0x40, 0x00,
+ 0x40, 0x73, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 34 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
+};
+
+ICE_DECLARE_PKT_OFFSETS(ipv6_l2tpv3) = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_ETYPE_OL, 12 },
+ { ICE_IPV6_OFOS, 14 },
+ { ICE_L2TPV3, 54 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+ICE_DECLARE_PKT_TEMPLATE(ipv6_l2tpv3) = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
+
+ 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_IL 14 */
+ 0x00, 0x0c, 0x73, 0x40,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_L2TPV3 54 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, /* 2 bytes for 4 bytes alignment */
+};
+
static const struct ice_dummy_pkt_profile ice_dummy_pkt_profiles[] = {
ICE_PKT_PROFILE(ipv6_gtp, ICE_PKT_TUN_GTPU | ICE_PKT_OUTER_IPV6 |
ICE_PKT_GTP_NOPAY),
@@ -1297,6 +1357,8 @@ static const struct ice_dummy_pkt_profile ice_dummy_pkt_profiles[] = {
ICE_PKT_PROFILE(udp_tun_ipv6_tcp, ICE_PKT_TUN_UDP |
ICE_PKT_INNER_IPV6 |
ICE_PKT_INNER_TCP),
+ ICE_PKT_PROFILE(ipv6_l2tpv3, ICE_PKT_L2TPV3 | ICE_PKT_OUTER_IPV6),
+ ICE_PKT_PROFILE(ipv4_l2tpv3, ICE_PKT_L2TPV3),
ICE_PKT_PROFILE(udp_tun_tcp, ICE_PKT_TUN_UDP | ICE_PKT_INNER_TCP),
ICE_PKT_PROFILE(udp_tun_ipv6_udp, ICE_PKT_TUN_UDP |
ICE_PKT_INNER_IPV6),
@@ -2274,9 +2336,7 @@ int ice_get_initial_sw_cfg(struct ice_hw *hw)
int status;
u16 i;
- rbuf = devm_kzalloc(ice_hw_to_dev(hw), ICE_SW_CFG_MAX_BUF_LEN,
- GFP_KERNEL);
-
+ rbuf = kzalloc(ICE_SW_CFG_MAX_BUF_LEN, GFP_KERNEL);
if (!rbuf)
return -ENOMEM;
@@ -2324,7 +2384,7 @@ int ice_get_initial_sw_cfg(struct ice_hw *hw)
}
} while (req_desc && !status);
- devm_kfree(ice_hw_to_dev(hw), rbuf);
+ kfree(rbuf);
return status;
}
@@ -3449,31 +3509,15 @@ bool ice_vlan_fltr_exist(struct ice_hw *hw, u16 vlan_id, u16 vsi_handle)
* ice_add_mac - Add a MAC address based filter rule
* @hw: pointer to the hardware structure
* @m_list: list of MAC addresses and forwarding information
- *
- * IMPORTANT: When the ucast_shared flag is set to false and m_list has
- * multiple unicast addresses, the function assumes that all the
- * addresses are unique in a given add_mac call. It doesn't
- * check for duplicates in this case, removing duplicates from a given
- * list should be taken care of in the caller of this function.
*/
int ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
{
- struct ice_sw_rule_lkup_rx_tx *s_rule, *r_iter;
struct ice_fltr_list_entry *m_list_itr;
- struct list_head *rule_head;
- u16 total_elem_left, s_rule_size;
- struct ice_switch_info *sw;
- struct mutex *rule_lock; /* Lock to protect filter rule list */
- u16 num_unicast = 0;
int status = 0;
- u8 elem_sent;
if (!m_list || !hw)
return -EINVAL;
- s_rule = NULL;
- sw = hw->switch_info;
- rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
list_for_each_entry(m_list_itr, m_list, list_entry) {
u8 *add = &m_list_itr->fltr_info.l_data.mac.mac_addr[0];
u16 vsi_handle;
@@ -3492,106 +3536,13 @@ int ice_add_mac(struct ice_hw *hw, struct list_head *m_list)
if (m_list_itr->fltr_info.lkup_type != ICE_SW_LKUP_MAC ||
is_zero_ether_addr(add))
return -EINVAL;
- if (is_unicast_ether_addr(add) && !hw->ucast_shared) {
- /* Don't overwrite the unicast address */
- mutex_lock(rule_lock);
- if (ice_find_rule_entry(hw, ICE_SW_LKUP_MAC,
- &m_list_itr->fltr_info)) {
- mutex_unlock(rule_lock);
- return -EEXIST;
- }
- mutex_unlock(rule_lock);
- num_unicast++;
- } else if (is_multicast_ether_addr(add) ||
- (is_unicast_ether_addr(add) && hw->ucast_shared)) {
- m_list_itr->status =
- ice_add_rule_internal(hw, ICE_SW_LKUP_MAC,
- m_list_itr);
- if (m_list_itr->status)
- return m_list_itr->status;
- }
+
+ m_list_itr->status = ice_add_rule_internal(hw, ICE_SW_LKUP_MAC,
+ m_list_itr);
+ if (m_list_itr->status)
+ return m_list_itr->status;
}
- mutex_lock(rule_lock);
- /* Exit if no suitable entries were found for adding bulk switch rule */
- if (!num_unicast) {
- status = 0;
- goto ice_add_mac_exit;
- }
-
- rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
-
- /* Allocate switch rule buffer for the bulk update for unicast */
- s_rule_size = ICE_SW_RULE_RX_TX_ETH_HDR_SIZE(s_rule);
- s_rule = devm_kcalloc(ice_hw_to_dev(hw), num_unicast, s_rule_size,
- GFP_KERNEL);
- if (!s_rule) {
- status = -ENOMEM;
- goto ice_add_mac_exit;
- }
-
- r_iter = s_rule;
- list_for_each_entry(m_list_itr, m_list, list_entry) {
- struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
- u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
-
- if (is_unicast_ether_addr(mac_addr)) {
- ice_fill_sw_rule(hw, &m_list_itr->fltr_info, r_iter,
- ice_aqc_opc_add_sw_rules);
- r_iter = (typeof(s_rule))((u8 *)r_iter + s_rule_size);
- }
- }
-
- /* Call AQ bulk switch rule update for all unicast addresses */
- r_iter = s_rule;
- /* Call AQ switch rule in AQ_MAX chunk */
- for (total_elem_left = num_unicast; total_elem_left > 0;
- total_elem_left -= elem_sent) {
- struct ice_sw_rule_lkup_rx_tx *entry = r_iter;
-
- elem_sent = min_t(u8, total_elem_left,
- (ICE_AQ_MAX_BUF_LEN / s_rule_size));
- status = ice_aq_sw_rules(hw, entry, elem_sent * s_rule_size,
- elem_sent, ice_aqc_opc_add_sw_rules,
- NULL);
- if (status)
- goto ice_add_mac_exit;
- r_iter = (typeof(s_rule))
- ((u8 *)r_iter + (elem_sent * s_rule_size));
- }
-
- /* Fill up rule ID based on the value returned from FW */
- r_iter = s_rule;
- list_for_each_entry(m_list_itr, m_list, list_entry) {
- struct ice_fltr_info *f_info = &m_list_itr->fltr_info;
- u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
- struct ice_fltr_mgmt_list_entry *fm_entry;
-
- if (is_unicast_ether_addr(mac_addr)) {
- f_info->fltr_rule_id = le16_to_cpu(r_iter->index);
- f_info->fltr_act = ICE_FWD_TO_VSI;
- /* Create an entry to track this MAC address */
- fm_entry = devm_kzalloc(ice_hw_to_dev(hw),
- sizeof(*fm_entry), GFP_KERNEL);
- if (!fm_entry) {
- status = -ENOMEM;
- goto ice_add_mac_exit;
- }
- fm_entry->fltr_info = *f_info;
- fm_entry->vsi_count = 1;
- /* The book keeping entries will get removed when
- * base driver calls remove filter AQ command
- */
-
- list_add(&fm_entry->list_entry, rule_head);
- r_iter = (typeof(s_rule))((u8 *)r_iter + s_rule_size);
- }
- }
-
-ice_add_mac_exit:
- mutex_unlock(rule_lock);
- if (s_rule)
- devm_kfree(ice_hw_to_dev(hw), s_rule);
return status;
}
@@ -3978,38 +3929,6 @@ ice_check_if_dflt_vsi(struct ice_port_info *pi, u16 vsi_handle,
return ret;
}
-/**
- * ice_find_ucast_rule_entry - Search for a unicast MAC filter rule entry
- * @hw: pointer to the hardware structure
- * @recp_id: lookup type for which the specified rule needs to be searched
- * @f_info: rule information
- *
- * Helper function to search for a unicast rule entry - this is to be used
- * to remove unicast MAC filter that is not shared with other VSIs on the
- * PF switch.
- *
- * Returns pointer to entry storing the rule if found
- */
-static struct ice_fltr_mgmt_list_entry *
-ice_find_ucast_rule_entry(struct ice_hw *hw, u8 recp_id,
- struct ice_fltr_info *f_info)
-{
- struct ice_switch_info *sw = hw->switch_info;
- struct ice_fltr_mgmt_list_entry *list_itr;
- struct list_head *list_head;
-
- list_head = &sw->recp_list[recp_id].filt_rules;
- list_for_each_entry(list_itr, list_head, list_entry) {
- if (!memcmp(&f_info->l_data, &list_itr->fltr_info.l_data,
- sizeof(f_info->l_data)) &&
- f_info->fwd_id.hw_vsi_id ==
- list_itr->fltr_info.fwd_id.hw_vsi_id &&
- f_info->flag == list_itr->fltr_info.flag)
- return list_itr;
- }
- return NULL;
-}
-
/**
* ice_remove_mac - remove a MAC address based filter rule
* @hw: pointer to the hardware structure
@@ -4026,15 +3945,12 @@ ice_find_ucast_rule_entry(struct ice_hw *hw, u8 recp_id,
int ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
{
struct ice_fltr_list_entry *list_itr, *tmp;
- struct mutex *rule_lock; /* Lock to protect filter rule list */
if (!m_list)
return -EINVAL;
- rule_lock = &hw->switch_info->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
list_for_each_entry_safe(list_itr, tmp, m_list, list_entry) {
enum ice_sw_lkup_type l_type = list_itr->fltr_info.lkup_type;
- u8 *add = &list_itr->fltr_info.l_data.mac.mac_addr[0];
u16 vsi_handle;
if (l_type != ICE_SW_LKUP_MAC)
@@ -4046,19 +3962,7 @@ int ice_remove_mac(struct ice_hw *hw, struct list_head *m_list)
list_itr->fltr_info.fwd_id.hw_vsi_id =
ice_get_hw_vsi_num(hw, vsi_handle);
- if (is_unicast_ether_addr(add) && !hw->ucast_shared) {
- /* Don't remove the unicast address that belongs to
- * another VSI on the switch, since it is not being
- * shared...
- */
- mutex_lock(rule_lock);
- if (!ice_find_ucast_rule_entry(hw, ICE_SW_LKUP_MAC,
- &list_itr->fltr_info)) {
- mutex_unlock(rule_lock);
- return -ENOENT;
- }
- mutex_unlock(rule_lock);
- }
+
list_itr->status = ice_remove_rule_internal(hw,
ICE_SW_LKUP_MAC,
list_itr);
@@ -4648,6 +4552,7 @@ static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
{ ICE_GTP, { 8, 10, 12, 14, 16, 18, 20, 22 } },
{ ICE_GTP_NO_PAY, { 8, 10, 12, 14 } },
{ ICE_PPPOE, { 0, 2, 4, 6 } },
+ { ICE_L2TPV3, { 0, 2, 4, 6, 8, 10 } },
{ ICE_VLAN_EX, { 2, 0 } },
{ ICE_VLAN_IN, { 2, 0 } },
};
@@ -4671,6 +4576,7 @@ static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
{ ICE_GTP, ICE_UDP_OF_HW },
{ ICE_GTP_NO_PAY, ICE_UDP_ILOS_HW },
{ ICE_PPPOE, ICE_PPPOE_HW },
+ { ICE_L2TPV3, ICE_L2TPV3_HW },
{ ICE_VLAN_EX, ICE_VLAN_OF_HW },
{ ICE_VLAN_IN, ICE_VLAN_OL_HW },
};
@@ -5754,7 +5660,8 @@ ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
if (lkups[i].h_u.pppoe_hdr.ppp_prot_id ==
htons(PPP_IPV6))
match |= ICE_PKT_OUTER_IPV6;
- }
+ } else if (lkups[i].type == ICE_L2TPV3)
+ match |= ICE_PKT_L2TPV3;
}
while (ret->match && (match & ret->match) != ret->match)
@@ -5855,6 +5762,9 @@ ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
case ICE_PPPOE:
len = sizeof(struct ice_pppoe_hdr);
break;
+ case ICE_L2TPV3:
+ len = sizeof(struct ice_l2tpv3_sess_hdr);
+ break;
default:
return -EINVAL;
}
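As a cross-check of the ipv4_l2tpv3 dummy template added above, the fixed IPv4 header bytes it encodes can be reproduced with the ordinary userspace struct iphdr; this is illustration only, the driver never builds headers this way:

    #include <stdio.h>
    #include <string.h>
    #include <arpa/inet.h>
    #include <netinet/ip.h>

    int main(void)
    {
            struct iphdr ip;

            memset(&ip, 0, sizeof(ip));
            ip.version  = 4;              /* 0x45: IPv4 ...              */
            ip.ihl      = 5;              /* ... with a 20-byte header   */
            ip.tot_len  = htons(20 + 12); /* 0x00 0x20: 20 + 12 bytes    */
            ip.frag_off = htons(0x4000);  /* 0x40 0x00: Don't Fragment   */
            ip.ttl      = 64;             /* 0x40                        */
            ip.protocol = 115;            /* 0x73: L2TPv3 over IP        */

            printf("first byte 0x%02x, protocol %u\n",
                   ((unsigned char *)&ip)[0], ip.protocol);
            return 0;
    }
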
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
index a298862857a8..f68c555be4e9 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
@@ -36,6 +36,10 @@ ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
ICE_TC_FLWR_FIELD_ENC_DEST_IPV6))
lkups_cnt++;
+ if (flags & (ICE_TC_FLWR_FIELD_ENC_IP_TOS |
+ ICE_TC_FLWR_FIELD_ENC_IP_TTL))
+ lkups_cnt++;
+
if (flags & ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT)
lkups_cnt++;
@@ -47,11 +51,11 @@ ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
lkups_cnt++;
/* is VLAN specified? */
- if (flags & ICE_TC_FLWR_FIELD_VLAN)
+ if (flags & (ICE_TC_FLWR_FIELD_VLAN | ICE_TC_FLWR_FIELD_VLAN_PRIO))
lkups_cnt++;
/* is CVLAN specified? */
- if (flags & ICE_TC_FLWR_FIELD_CVLAN)
+ if (flags & (ICE_TC_FLWR_FIELD_CVLAN | ICE_TC_FLWR_FIELD_CVLAN_PRIO))
lkups_cnt++;
/* are PPPoE options specified? */
@@ -64,6 +68,13 @@ ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
ICE_TC_FLWR_FIELD_DEST_IPV6 | ICE_TC_FLWR_FIELD_SRC_IPV6))
lkups_cnt++;
+ if (flags & (ICE_TC_FLWR_FIELD_IP_TOS | ICE_TC_FLWR_FIELD_IP_TTL))
+ lkups_cnt++;
+
+ /* are L2TPv3 options specified? */
+ if (flags & ICE_TC_FLWR_FIELD_L2TPV3_SESSID)
+ lkups_cnt++;
+
/* is L4 (TCP/UDP/any other L4 protocol fields) specified? */
if (flags & (ICE_TC_FLWR_FIELD_DEST_L4_PORT |
ICE_TC_FLWR_FIELD_SRC_L4_PORT))
@@ -257,6 +268,50 @@ ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr,
i++;
}
+ if (fltr->inner_headers.l2_key.n_proto == htons(ETH_P_IP) &&
+ (flags & (ICE_TC_FLWR_FIELD_ENC_IP_TOS |
+ ICE_TC_FLWR_FIELD_ENC_IP_TTL))) {
+ list[i].type = ice_proto_type_from_ipv4(false);
+
+ if (flags & ICE_TC_FLWR_FIELD_ENC_IP_TOS) {
+ list[i].h_u.ipv4_hdr.tos = hdr->l3_key.tos;
+ list[i].m_u.ipv4_hdr.tos = hdr->l3_mask.tos;
+ }
+
+ if (flags & ICE_TC_FLWR_FIELD_ENC_IP_TTL) {
+ list[i].h_u.ipv4_hdr.time_to_live = hdr->l3_key.ttl;
+ list[i].m_u.ipv4_hdr.time_to_live = hdr->l3_mask.ttl;
+ }
+
+ i++;
+ }
+
+ if (fltr->inner_headers.l2_key.n_proto == htons(ETH_P_IPV6) &&
+ (flags & (ICE_TC_FLWR_FIELD_ENC_IP_TOS |
+ ICE_TC_FLWR_FIELD_ENC_IP_TTL))) {
+ struct ice_ipv6_hdr *hdr_h, *hdr_m;
+
+ hdr_h = &list[i].h_u.ipv6_hdr;
+ hdr_m = &list[i].m_u.ipv6_hdr;
+ list[i].type = ice_proto_type_from_ipv6(false);
+
+ if (flags & ICE_TC_FLWR_FIELD_ENC_IP_TOS) {
+ be32p_replace_bits(&hdr_h->be_ver_tc_flow,
+ hdr->l3_key.tos,
+ ICE_IPV6_HDR_TC_MASK);
+ be32p_replace_bits(&hdr_m->be_ver_tc_flow,
+ hdr->l3_mask.tos,
+ ICE_IPV6_HDR_TC_MASK);
+ }
+
+ if (flags & ICE_TC_FLWR_FIELD_ENC_IP_TTL) {
+ hdr_h->hop_limit = hdr->l3_key.ttl;
+ hdr_m->hop_limit = hdr->l3_mask.ttl;
+ }
+
+ i++;
+ }
+
if ((flags & ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT) &&
hdr->l3_key.ip_proto == IPPROTO_UDP) {
list[i].type = ICE_UDP_OF;
@@ -334,7 +389,7 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
}
/* copy VLAN info */
- if (flags & ICE_TC_FLWR_FIELD_VLAN) {
+ if (flags & (ICE_TC_FLWR_FIELD_VLAN | ICE_TC_FLWR_FIELD_VLAN_PRIO)) {
vlan_tpid = be16_to_cpu(headers->vlan_hdr.vlan_tpid);
rule_info->vlan_type =
ice_check_supported_vlan_tpid(vlan_tpid);
@@ -343,15 +398,45 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
list[i].type = ICE_VLAN_EX;
else
list[i].type = ICE_VLAN_OFOS;
- list[i].h_u.vlan_hdr.vlan = headers->vlan_hdr.vlan_id;
- list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0xFFFF);
+
+ if (flags & ICE_TC_FLWR_FIELD_VLAN) {
+ list[i].h_u.vlan_hdr.vlan = headers->vlan_hdr.vlan_id;
+ list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0x0FFF);
+ }
+
+ if (flags & ICE_TC_FLWR_FIELD_VLAN_PRIO) {
+ if (flags & ICE_TC_FLWR_FIELD_VLAN) {
+ list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0xEFFF);
+ } else {
+ list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0xE000);
+ list[i].h_u.vlan_hdr.vlan = 0;
+ }
+ list[i].h_u.vlan_hdr.vlan |=
+ headers->vlan_hdr.vlan_prio;
+ }
+
i++;
}
- if (flags & ICE_TC_FLWR_FIELD_CVLAN) {
+ if (flags & (ICE_TC_FLWR_FIELD_CVLAN | ICE_TC_FLWR_FIELD_CVLAN_PRIO)) {
list[i].type = ICE_VLAN_IN;
- list[i].h_u.vlan_hdr.vlan = headers->cvlan_hdr.vlan_id;
- list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0xFFFF);
+
+ if (flags & ICE_TC_FLWR_FIELD_CVLAN) {
+ list[i].h_u.vlan_hdr.vlan = headers->cvlan_hdr.vlan_id;
+ list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0x0FFF);
+ }
+
+ if (flags & ICE_TC_FLWR_FIELD_CVLAN_PRIO) {
+ if (flags & ICE_TC_FLWR_FIELD_CVLAN) {
+ list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0xEFFF);
+ } else {
+ list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0xE000);
+ list[i].h_u.vlan_hdr.vlan = 0;
+ }
+ list[i].h_u.vlan_hdr.vlan |=
+ headers->cvlan_hdr.vlan_prio;
+ }
+
i++;
}
@@ -420,6 +505,61 @@ ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
i++;
}
+ if (headers->l2_key.n_proto == htons(ETH_P_IP) &&
+ (flags & (ICE_TC_FLWR_FIELD_IP_TOS | ICE_TC_FLWR_FIELD_IP_TTL))) {
+ list[i].type = ice_proto_type_from_ipv4(inner);
+
+ if (flags & ICE_TC_FLWR_FIELD_IP_TOS) {
+ list[i].h_u.ipv4_hdr.tos = headers->l3_key.tos;
+ list[i].m_u.ipv4_hdr.tos = headers->l3_mask.tos;
+ }
+
+ if (flags & ICE_TC_FLWR_FIELD_IP_TTL) {
+ list[i].h_u.ipv4_hdr.time_to_live =
+ headers->l3_key.ttl;
+ list[i].m_u.ipv4_hdr.time_to_live =
+ headers->l3_mask.ttl;
+ }
+
+ i++;
+ }
+
+ if (headers->l2_key.n_proto == htons(ETH_P_IPV6) &&
+ (flags & (ICE_TC_FLWR_FIELD_IP_TOS | ICE_TC_FLWR_FIELD_IP_TTL))) {
+ struct ice_ipv6_hdr *hdr_h, *hdr_m;
+
+ hdr_h = &list[i].h_u.ipv6_hdr;
+ hdr_m = &list[i].m_u.ipv6_hdr;
+ list[i].type = ice_proto_type_from_ipv6(inner);
+
+ if (flags & ICE_TC_FLWR_FIELD_IP_TOS) {
+ be32p_replace_bits(&hdr_h->be_ver_tc_flow,
+ headers->l3_key.tos,
+ ICE_IPV6_HDR_TC_MASK);
+ be32p_replace_bits(&hdr_m->be_ver_tc_flow,
+ headers->l3_mask.tos,
+ ICE_IPV6_HDR_TC_MASK);
+ }
+
+ if (flags & ICE_TC_FLWR_FIELD_IP_TTL) {
+ hdr_h->hop_limit = headers->l3_key.ttl;
+ hdr_m->hop_limit = headers->l3_mask.ttl;
+ }
+
+ i++;
+ }
+
+ if (flags & ICE_TC_FLWR_FIELD_L2TPV3_SESSID) {
+ list[i].type = ICE_L2TPV3;
+
+ list[i].h_u.l2tpv3_sess_hdr.session_id =
+ headers->l2tpv3_hdr.session_id;
+ list[i].m_u.l2tpv3_sess_hdr.session_id =
+ cpu_to_be32(0xFFFFFFFF);
+
+ i++;
+ }
+
/* copy L4 (src, dest) port */
if (flags & (ICE_TC_FLWR_FIELD_DEST_L4_PORT |
ICE_TC_FLWR_FIELD_SRC_L4_PORT)) {
@@ -838,6 +978,40 @@ ice_tc_set_ipv6(struct flow_match_ipv6_addrs *match,
return 0;
}
+/**
+ * ice_tc_set_tos_ttl - Parse IP ToS/TTL from TC flower filter
+ * @match: Pointer to flow match structure
+ * @fltr: Pointer to filter structure
+ * @headers: inner or outer header fields
+ * @is_encap: set true for tunnel
+ */
+static void
+ice_tc_set_tos_ttl(struct flow_match_ip *match,
+ struct ice_tc_flower_fltr *fltr,
+ struct ice_tc_flower_lyr_2_4_hdrs *headers,
+ bool is_encap)
+{
+ if (match->mask->tos) {
+ if (is_encap)
+ fltr->flags |= ICE_TC_FLWR_FIELD_ENC_IP_TOS;
+ else
+ fltr->flags |= ICE_TC_FLWR_FIELD_IP_TOS;
+
+ headers->l3_key.tos = match->key->tos;
+ headers->l3_mask.tos = match->mask->tos;
+ }
+
+ if (match->mask->ttl) {
+ if (is_encap)
+ fltr->flags |= ICE_TC_FLWR_FIELD_ENC_IP_TTL;
+ else
+ fltr->flags |= ICE_TC_FLWR_FIELD_IP_TTL;
+
+ headers->l3_key.ttl = match->key->ttl;
+ headers->l3_mask.ttl = match->mask->ttl;
+ }
+}
+
/**
* ice_tc_set_port - Parse ports from TC flower filter
* @match: Flow match structure
@@ -967,10 +1141,7 @@ ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule,
struct flow_match_ip match;
flow_rule_match_enc_ip(rule, &match);
- headers->l3_key.tos = match.key->tos;
- headers->l3_key.ttl = match.key->ttl;
- headers->l3_mask.tos = match.mask->tos;
- headers->l3_mask.ttl = match.mask->ttl;
+ ice_tc_set_tos_ttl(&match, fltr, headers, true);
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS) &&
@@ -1039,9 +1210,11 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) |
+ BIT(FLOW_DISSECTOR_KEY_IP) |
BIT(FLOW_DISSECTOR_KEY_ENC_IP) |
BIT(FLOW_DISSECTOR_KEY_PORTS) |
- BIT(FLOW_DISSECTOR_KEY_PPPOE))) {
+ BIT(FLOW_DISSECTOR_KEY_PPPOE) |
+ BIT(FLOW_DISSECTOR_KEY_L2TPV3))) {
NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported key used");
return -EOPNOTSUPP;
}
@@ -1137,16 +1310,22 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
if (match.mask->vlan_id) {
if (match.mask->vlan_id == VLAN_VID_MASK) {
fltr->flags |= ICE_TC_FLWR_FIELD_VLAN;
+ headers->vlan_hdr.vlan_id =
+ cpu_to_be16(match.key->vlan_id &
+ VLAN_VID_MASK);
} else {
NL_SET_ERR_MSG_MOD(fltr->extack, "Bad VLAN mask");
return -EINVAL;
}
}
- headers->vlan_hdr.vlan_id =
- cpu_to_be16(match.key->vlan_id & VLAN_VID_MASK);
- if (match.mask->vlan_priority)
- headers->vlan_hdr.vlan_prio = match.key->vlan_priority;
+ if (match.mask->vlan_priority) {
+ fltr->flags |= ICE_TC_FLWR_FIELD_VLAN_PRIO;
+ headers->vlan_hdr.vlan_prio =
+ cpu_to_be16((match.key->vlan_priority <<
+ VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK);
+ }
+
if (match.mask->vlan_tpid)
headers->vlan_hdr.vlan_tpid = match.key->vlan_tpid;
}
@@ -1164,6 +1343,9 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
if (match.mask->vlan_id) {
if (match.mask->vlan_id == VLAN_VID_MASK) {
fltr->flags |= ICE_TC_FLWR_FIELD_CVLAN;
+ headers->cvlan_hdr.vlan_id =
+ cpu_to_be16(match.key->vlan_id &
+ VLAN_VID_MASK);
} else {
NL_SET_ERR_MSG_MOD(fltr->extack,
"Bad CVLAN mask");
@@ -1171,10 +1353,12 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
}
}
- headers->cvlan_hdr.vlan_id =
- cpu_to_be16(match.key->vlan_id & VLAN_VID_MASK);
- if (match.mask->vlan_priority)
- headers->cvlan_hdr.vlan_prio = match.key->vlan_priority;
+ if (match.mask->vlan_priority) {
+ fltr->flags |= ICE_TC_FLWR_FIELD_CVLAN_PRIO;
+ headers->cvlan_hdr.vlan_prio =
+ cpu_to_be16((match.key->vlan_priority <<
+ VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK);
+ }
}
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PPPOE)) {
@@ -1217,6 +1401,22 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
return -EINVAL;
}
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
+ struct flow_match_ip match;
+
+ flow_rule_match_ip(rule, &match);
+ ice_tc_set_tos_ttl(&match, fltr, headers, false);
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_L2TPV3)) {
+ struct flow_match_l2tpv3 match;
+
+ flow_rule_match_l2tpv3(rule, &match);
+
+ fltr->flags |= ICE_TC_FLWR_FIELD_L2TPV3_SESSID;
+ headers->l2tpv3_hdr.session_id = match.key->session_id;
+ }
+
if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
struct flow_match_ports match;
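The 0x0FFF, 0xE000 and 0xEFFF masks used above follow directly from the 802.1Q TCI layout: PCP in bits 15:13, DEI in bit 12, VID in bits 11:0 (the macro values below mirror <linux/if_vlan.h>). A stand-alone illustration of which key/mask pair each combination of flags produces:

    #include <stdio.h>

    #define VLAN_VID_MASK   0x0fff   /* VLAN id only        */
    #define VLAN_PRIO_MASK  0xe000   /* priority (PCP) only */
    #define VLAN_PRIO_SHIFT 13

    int main(void)
    {
            unsigned int tci = (5u << VLAN_PRIO_SHIFT) | 100; /* PCP 5, VID 100 */

            printf("VID only: key 0x%04x mask 0x%04x\n",
                   tci & VLAN_VID_MASK, VLAN_VID_MASK);
            printf("PCP only: key 0x%04x mask 0x%04x\n",
                   tci & VLAN_PRIO_MASK, VLAN_PRIO_MASK);
            printf("both:     key 0x%04x mask 0x%04x\n",
                   tci & (VLAN_VID_MASK | VLAN_PRIO_MASK),
                   VLAN_VID_MASK | VLAN_PRIO_MASK);
            return 0;
    }
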
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.h b/drivers/net/ethernet/intel/ice/ice_tc_lib.h
index 91cd3d3778c7..92642faad595 100644
--- a/drivers/net/ethernet/intel/ice/ice_tc_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.h
@@ -26,9 +26,18 @@
#define ICE_TC_FLWR_FIELD_CVLAN BIT(19)
#define ICE_TC_FLWR_FIELD_PPPOE_SESSID BIT(20)
#define ICE_TC_FLWR_FIELD_PPP_PROTO BIT(21)
+#define ICE_TC_FLWR_FIELD_IP_TOS BIT(22)
+#define ICE_TC_FLWR_FIELD_IP_TTL BIT(23)
+#define ICE_TC_FLWR_FIELD_ENC_IP_TOS BIT(24)
+#define ICE_TC_FLWR_FIELD_ENC_IP_TTL BIT(25)
+#define ICE_TC_FLWR_FIELD_L2TPV3_SESSID BIT(26)
+#define ICE_TC_FLWR_FIELD_VLAN_PRIO BIT(27)
+#define ICE_TC_FLWR_FIELD_CVLAN_PRIO BIT(28)
#define ICE_TC_FLOWER_MASK_32 0xFFFFFFFF
+#define ICE_IPV6_HDR_TC_MASK 0xFF00000
+
struct ice_indr_block_priv {
struct net_device *netdev;
struct ice_netdev_priv *np;
@@ -42,7 +51,7 @@ struct ice_tc_flower_action {
struct ice_tc_vlan_hdr {
__be16 vlan_id; /* Only last 12 bits valid */
- u16 vlan_prio; /* Only last 3 bits valid (valid values: 0..7) */
+ __be16 vlan_prio; /* Only last 3 bits valid (valid values: 0..7) */
__be16 vlan_tpid;
};
@@ -80,6 +89,10 @@ struct ice_tc_l3_hdr {
u8 ttl;
};
+struct ice_tc_l2tpv3_hdr {
+ __be32 session_id;
+};
+
struct ice_tc_l4_hdr {
__be16 dst_port;
__be16 src_port;
@@ -92,6 +105,7 @@ struct ice_tc_flower_lyr_2_4_hdrs {
struct ice_tc_vlan_hdr vlan_hdr;
struct ice_tc_vlan_hdr cvlan_hdr;
struct ice_tc_pppoe_hdr pppoe_hdr;
+ struct ice_tc_l2tpv3_hdr l2tpv3_hdr;
/* L3 (IPv4[6]) layer fields with their mask */
struct ice_tc_l3_hdr l3_key;
struct ice_tc_l3_hdr l3_mask;
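ICE_IPV6_HDR_TC_MASK (0xFF00000, i.e. bits 27:20) picks the Traffic Class byte out of the IPv6 version/TC/flow-label word: version in bits 31:28, TC in bits 27:20, flow label in bits 19:0. The filter code applies it with be32p_replace_bits() on the big-endian word; a stand-alone model of the same operation on a host-order value:

    #include <stdint.h>
    #include <stdio.h>

    #define IPV6_TC_MASK  0x0ff00000u
    #define IPV6_TC_SHIFT 20

    /* write @tc into the Traffic Class field of the ver/tc/flow word */
    static uint32_t set_traffic_class(uint32_t ver_tc_flow, uint8_t tc)
    {
            return (ver_tc_flow & ~IPV6_TC_MASK) |
                   ((uint32_t)tc << IPV6_TC_SHIFT);
    }

    int main(void)
    {
            uint32_t word = 0x60000000;   /* version 6, TC 0, flow 0 */

            printf("0x%08x\n",
                   (unsigned int)set_traffic_class(word, 0xb8)); /* 0x6b800000 */
            return 0;
    }
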
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index dd2285d4bef4..dbe80e5053a8 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -2258,8 +2258,10 @@ ice_tstamp(struct ice_tx_ring *tx_ring, struct sk_buff *skb,
/* Grab an open timestamp slot */
idx = ice_ptp_request_ts(tx_ring->tx_tstamps, skb);
- if (idx < 0)
+ if (idx < 0) {
+ tx_ring->vsi->back->ptp.tx_hwtstamp_skipped++;
return;
+ }
off->cd_qw1 |= (u64)(ICE_TX_DESC_DTYPE_CTX |
(ICE_TX_CTX_DESC_TSYN << ICE_TXD_CTX_QW1_CMD_S) |
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index ca902af54bb4..932b5661ec4d 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -295,10 +295,11 @@ struct ice_rx_ring {
struct xsk_buff_pool *xsk_pool;
struct sk_buff *skb;
dma_addr_t dma; /* physical address of ring */
-#define ICE_RX_FLAGS_RING_BUILD_SKB BIT(1)
u64 cached_phctime;
u8 dcb_tc; /* Traffic class of ring */
u8 ptp_rx;
+#define ICE_RX_FLAGS_RING_BUILD_SKB BIT(1)
+#define ICE_RX_FLAGS_CRC_STRIP_DIS BIT(2)
u8 flags;
} ____cacheline_internodealigned_in_smp;
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 861b64322959..e1abfcee96dc 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -347,6 +347,7 @@ struct ice_ts_func_info {
#define ICE_TS_DEV_ENA_M BIT(24)
#define ICE_TS_TMR0_ENA_M BIT(25)
#define ICE_TS_TMR1_ENA_M BIT(26)
+#define ICE_TS_LL_TX_TS_READ_M BIT(28)
struct ice_ts_dev_info {
/* Device specific info */
@@ -359,6 +360,7 @@ struct ice_ts_dev_info {
u8 ena;
u8 tmr0_ena;
u8 tmr1_ena;
+ u8 ts_ll_read;
};
/* Function specific capabilities */
@@ -564,6 +566,8 @@ enum ice_rl_type {
#define ICE_SCHED_INVAL_PROF_ID 0xFFFF
#define ICE_SCHED_DFLT_BURST_SIZE (15 * 1024) /* in bytes (15k) */
+#define ICE_MAX_PORT_PER_PCI_DEV 8
+
/* Data structure for saving BW information */
enum ice_bw_type {
ICE_BW_TYPE_PRIO,
@@ -885,8 +889,6 @@ struct ice_hw {
/* INTRL granularity in 1 us */
u8 intrl_gran;
- u8 ucast_shared; /* true if VSIs can share unicast addr */
-
#define ICE_PHY_PER_NAC 1
#define ICE_MAX_QUAD 2
#define ICE_NUM_QUAD_TYPE 2
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index c14fc871dd41..e5f3e7680dc6 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -850,14 +850,14 @@ static void igb_get_drvinfo(struct net_device *netdev,
{
struct igb_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver));
/* EEPROM image version # is reported as firmware version # for
* 82575 controllers
*/
- strlcpy(drvinfo->fw_version, adapter->fw_version,
+ strscpy(drvinfo->fw_version, adapter->fw_version,
sizeof(drvinfo->fw_version));
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
drvinfo->n_priv_flags = IGB_PRIV_FLAGS_STR_LEN;
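The strlcpy() to strscpy() conversion here does not change what ends up in the buffer; the difference is the return value (strlcpy() returns strlen(src), which can exceed the buffer size, while strscpy() returns the number of bytes copied or -E2BIG on truncation) and that strscpy() never reads past what it needs from the source. strscpy() is kernel-only, so the following is just a userspace model of its contract:

    #include <stdio.h>
    #include <string.h>

    #define E2BIG 7

    /* Minimal model of the kernel's strscpy(): copy at most size-1 bytes,
     * always NUL-terminate, return bytes copied (excluding the NUL) or
     * -E2BIG when the source had to be truncated.
     */
    static long strscpy_model(char *dst, const char *src, size_t size)
    {
            size_t len;

            if (size == 0)
                    return -E2BIG;

            len = strnlen(src, size);
            if (len == size) {              /* does not fit: truncate */
                    memcpy(dst, src, size - 1);
                    dst[size - 1] = '\0';
                    return -E2BIG;
            }

            memcpy(dst, src, len + 1);
            return (long)len;
    }

    int main(void)
    {
            char buf[8];

            printf("%ld\n", strscpy_model(buf, "igb", sizeof(buf)));          /* 3 */
            printf("%ld\n", strscpy_model(buf, "0000:af:00.0", sizeof(buf))); /* -7 */
            return 0;
    }
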
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 2796e81d2726..f8e32833226c 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -1211,8 +1211,7 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter,
return -ENOMEM;
/* initialize NAPI */
- netif_napi_add(adapter->netdev, &q_vector->napi,
- igb_poll, 64);
+ netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll);
/* tie q_vector and adapter together */
adapter->q_vector[v_idx] = q_vector;
@@ -3138,7 +3137,7 @@ static s32 igb_init_i2c(struct igb_adapter *adapter)
adapter->i2c_algo.data = adapter;
adapter->i2c_adap.algo_data = &adapter->i2c_algo;
adapter->i2c_adap.dev.parent = &adapter->pdev->dev;
- strlcpy(adapter->i2c_adap.name, "igb BB",
+ strscpy(adapter->i2c_adap.name, "igb BB",
sizeof(adapter->i2c_adap.name));
status = i2c_bit_add_bus(&adapter->i2c_adap);
return status;
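The dropped "64" was the NAPI poll weight. The three-argument netif_napi_add() now applies the default weight itself, and a driver that genuinely needs a non-default value is expected to call netif_napi_add_weighted() instead; a kernel-side sketch of both forms (assuming the current netdevice.h helpers, not a stand-alone program):

    /* default weight -- what this series converts the Intel drivers to */
    netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll);

    /* explicit weight, only when there is a documented reason for it */
    netif_napi_add_weighted(adapter->netdev, &q_vector->napi, igb_poll, 64);
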
diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c
index 9d4322b74163..83b97989a6bd 100644
--- a/drivers/net/ethernet/intel/igbvf/ethtool.c
+++ b/drivers/net/ethernet/intel/igbvf/ethtool.c
@@ -169,8 +169,8 @@ static void igbvf_get_drvinfo(struct net_device *netdev,
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, igbvf_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strscpy(drvinfo->driver, igbvf_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index f4e91db89fe5..3a32809510fc 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -1109,7 +1109,7 @@ static int igbvf_alloc_queues(struct igbvf_adapter *adapter)
return -ENOMEM;
}
- netif_napi_add(netdev, &adapter->rx_ring->napi, igbvf_poll, 64);
+ netif_napi_add(netdev, &adapter->rx_ring->napi, igbvf_poll);
return 0;
}
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
index 5c66b97c0cfa..4f9d7f013a95 100644
--- a/drivers/net/ethernet/intel/igc/igc_defines.h
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -610,7 +610,6 @@
#define IGC_MDIC_OP_WRITE 0x04000000
#define IGC_MDIC_OP_READ 0x08000000
#define IGC_MDIC_READY 0x10000000
-#define IGC_MDIC_INT_EN 0x20000000
#define IGC_MDIC_ERROR 0x40000000
#define IGC_N0_QUEUE -1
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index ebff0e04045d..34889be63e78 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -2129,65 +2129,102 @@ static bool igc_alloc_rx_buffers_zc(struct igc_ring *ring, u16 count)
return ok;
}
-static int igc_xdp_init_tx_buffer(struct igc_tx_buffer *buffer,
- struct xdp_frame *xdpf,
- struct igc_ring *ring)
-{
- dma_addr_t dma;
-
- dma = dma_map_single(ring->dev, xdpf->data, xdpf->len, DMA_TO_DEVICE);
- if (dma_mapping_error(ring->dev, dma)) {
- netdev_err_once(ring->netdev, "Failed to map DMA for TX\n");
- return -ENOMEM;
- }
-
- buffer->type = IGC_TX_BUFFER_TYPE_XDP;
- buffer->xdpf = xdpf;
- buffer->protocol = 0;
- buffer->bytecount = xdpf->len;
- buffer->gso_segs = 1;
- buffer->time_stamp = jiffies;
- dma_unmap_len_set(buffer, len, xdpf->len);
- dma_unmap_addr_set(buffer, dma, dma);
- return 0;
-}
-
/* This function requires __netif_tx_lock is held by the caller. */
static int igc_xdp_init_tx_descriptor(struct igc_ring *ring,
struct xdp_frame *xdpf)
{
- struct igc_tx_buffer *buffer;
- union igc_adv_tx_desc *desc;
- u32 cmd_type, olinfo_status;
- int err;
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
+ u8 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
+ u16 count, index = ring->next_to_use;
+ struct igc_tx_buffer *head = &ring->tx_buffer_info[index];
+ struct igc_tx_buffer *buffer = head;
+ union igc_adv_tx_desc *desc = IGC_TX_DESC(ring, index);
+ u32 olinfo_status, len = xdpf->len, cmd_type;
+ void *data = xdpf->data;
+ u16 i;
- if (!igc_desc_unused(ring))
+ count = TXD_USE_COUNT(len);
+ for (i = 0; i < nr_frags; i++)
+ count += TXD_USE_COUNT(skb_frag_size(&sinfo->frags[i]));
+
+ if (igc_maybe_stop_tx(ring, count + 3)) {
+ /* this is a hard error */
return -EBUSY;
+ }
- buffer = &ring->tx_buffer_info[ring->next_to_use];
- err = igc_xdp_init_tx_buffer(buffer, xdpf, ring);
- if (err)
- return err;
+ i = 0;
+ head->bytecount = xdp_get_frame_len(xdpf);
+ head->type = IGC_TX_BUFFER_TYPE_XDP;
+ head->gso_segs = 1;
+ head->xdpf = xdpf;
- cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
- IGC_ADVTXD_DCMD_IFCS | IGC_TXD_DCMD |
- buffer->bytecount;
- olinfo_status = buffer->bytecount << IGC_ADVTXD_PAYLEN_SHIFT;
-
- desc = IGC_TX_DESC(ring, ring->next_to_use);
- desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+ olinfo_status = head->bytecount << IGC_ADVTXD_PAYLEN_SHIFT;
desc->read.olinfo_status = cpu_to_le32(olinfo_status);
- desc->read.buffer_addr = cpu_to_le64(dma_unmap_addr(buffer, dma));
- netdev_tx_sent_queue(txring_txq(ring), buffer->bytecount);
+ for (;;) {
+ dma_addr_t dma;
- buffer->next_to_watch = desc;
+ dma = dma_map_single(ring->dev, data, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(ring->dev, dma)) {
+ netdev_err_once(ring->netdev,
+ "Failed to map DMA for TX\n");
+ goto unmap;
+ }
- ring->next_to_use++;
- if (ring->next_to_use == ring->count)
- ring->next_to_use = 0;
+ dma_unmap_len_set(buffer, len, len);
+ dma_unmap_addr_set(buffer, dma, dma);
+
+ cmd_type = IGC_ADVTXD_DTYP_DATA | IGC_ADVTXD_DCMD_DEXT |
+ IGC_ADVTXD_DCMD_IFCS | len;
+
+ desc->read.cmd_type_len = cpu_to_le32(cmd_type);
+ desc->read.buffer_addr = cpu_to_le64(dma);
+
+ buffer->protocol = 0;
+
+ if (++index == ring->count)
+ index = 0;
+
+ if (i == nr_frags)
+ break;
+
+ buffer = &ring->tx_buffer_info[index];
+ desc = IGC_TX_DESC(ring, index);
+ desc->read.olinfo_status = 0;
+
+ data = skb_frag_address(&sinfo->frags[i]);
+ len = skb_frag_size(&sinfo->frags[i]);
+ i++;
+ }
+ desc->read.cmd_type_len |= cpu_to_le32(IGC_TXD_DCMD);
+
+ netdev_tx_sent_queue(txring_txq(ring), head->bytecount);
+ /* set the timestamp */
+ head->time_stamp = jiffies;
+ /* set next_to_watch value indicating a packet is present */
+ head->next_to_watch = desc;
+ ring->next_to_use = index;
return 0;
+
+unmap:
+ for (;;) {
+ buffer = &ring->tx_buffer_info[index];
+ if (dma_unmap_len(buffer, len))
+ dma_unmap_page(ring->dev,
+ dma_unmap_addr(buffer, dma),
+ dma_unmap_len(buffer, len),
+ DMA_TO_DEVICE);
+ dma_unmap_len_set(buffer, len, 0);
+ if (buffer == head)
+ break;
+
+ if (!index)
+ index += ring->count;
+ index--;
+ }
+
+ return -ENOMEM;
}
static struct igc_ring *igc_xdp_get_tx_ring(struct igc_adapter *adapter,
@@ -2369,6 +2406,7 @@ static int igc_clean_rx_irq(struct igc_q_vector *q_vector, const int budget)
xdp_prepare_buff(&xdp, pktbuf - igc_rx_offset(rx_ring),
igc_rx_offset(rx_ring) + pkt_offset,
size, true);
+ xdp_buff_clear_frags_flag(&xdp);
skb = igc_xdp_run_prog(adapter, &xdp);
}
@@ -4356,8 +4394,7 @@ static int igc_alloc_q_vector(struct igc_adapter *adapter,
return -ENOMEM;
/* initialize NAPI */
- netif_napi_add(adapter->netdev, &q_vector->napi,
- igc_poll, 64);
+ netif_napi_add(adapter->netdev, &q_vector->napi, igc_poll);
/* tie q_vector and adapter together */
adapter->q_vector[v_idx] = q_vector;
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
index 46efcfab7234..efa980514944 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
@@ -456,9 +456,9 @@ ixgb_get_drvinfo(struct net_device *netdev,
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, ixgb_driver_name,
+ strscpy(drvinfo->driver, ixgb_driver_name,
sizeof(drvinfo->driver));
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
}
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index 45be9a1ab6af..b4d47e7a76c8 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -414,7 +414,7 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->netdev_ops = &ixgb_netdev_ops;
ixgb_set_ethtool_ops(netdev);
netdev->watchdog_timeo = 5 * HZ;
- netif_napi_add(netdev, &adapter->napi, ixgb_clean, 64);
+ netif_napi_add(netdev, &adapter->napi, ixgb_clean);
strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 04f453eabef6..e88e3dfac8c2 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -1106,12 +1106,12 @@ static void ixgbe_get_drvinfo(struct net_device *netdev,
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->fw_version, adapter->eeprom_id,
+ strscpy(drvinfo->fw_version, adapter->eeprom_id,
sizeof(drvinfo->fw_version));
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
drvinfo->n_priv_flags = IXGBE_PRIV_FLAGS_STR_LEN;
@@ -1964,15 +1964,13 @@ static bool ixgbe_check_lbtest_frame(struct ixgbe_rx_buffer *rx_buffer,
frame_size >>= 1;
- data = kmap(rx_buffer->page) + rx_buffer->page_offset;
+ data = page_address(rx_buffer->page) + rx_buffer->page_offset;
if (data[3] != 0xFF ||
data[frame_size + 10] != 0xBE ||
data[frame_size + 12] != 0xAF)
match = false;
- kunmap(rx_buffer->page);
-
return match;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index 0fcd82036d4e..7311bd545acf 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -1004,7 +1004,7 @@ int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
ixgbe_driver_name,
UTS_RELEASE);
/* Firmware Version */
- strlcpy(info->firmware_version, adapter->eeprom_id,
+ strscpy(info->firmware_version, adapter->eeprom_id,
sizeof(info->firmware_version));
/* Model */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index 86b11164655e..f8156fe4b1dc 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -874,8 +874,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
#endif
/* initialize NAPI */
- netif_napi_add(adapter->netdev, &q_vector->napi,
- ixgbe_poll, 64);
+ netif_napi_add(adapter->netdev, &q_vector->napi, ixgbe_poll);
/* tie q_vector and adapter together */
adapter->q_vector[v_idx] = q_vector;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index d1e430b8c8aa..298cfbfcb7b6 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -10849,7 +10849,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->netdev_ops = &ixgbe_netdev_ops;
ixgbe_set_ethtool_ops(netdev);
netdev->watchdog_timeo = 5 * HZ;
- strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
+ strscpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
/* Setup hw api */
hw->mac.ops = *ii->mac_ops;
@@ -11140,7 +11140,7 @@ skip_sriov:
err = ixgbe_read_pba_string_generic(hw, part_str, sizeof(part_str));
if (err)
- strlcpy(part_str, "Unknown", sizeof(part_str));
+ strscpy(part_str, "Unknown", sizeof(part_str));
if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n",
hw->mac.type, hw->phy.type, hw->phy.sfp_type,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 7f7ea468ffa9..2b00db92b08f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -3712,7 +3712,9 @@ struct ixgbe_info {
#define IXGBE_KRM_LINK_S1(P) ((P) ? 0x8200 : 0x4200)
#define IXGBE_KRM_LINK_CTRL_1(P) ((P) ? 0x820C : 0x420C)
#define IXGBE_KRM_AN_CNTL_1(P) ((P) ? 0x822C : 0x422C)
+#define IXGBE_KRM_AN_CNTL_4(P) ((P) ? 0x8238 : 0x4238)
#define IXGBE_KRM_AN_CNTL_8(P) ((P) ? 0x8248 : 0x4248)
+#define IXGBE_KRM_PCS_KX_AN(P) ((P) ? 0x9918 : 0x5918)
#define IXGBE_KRM_SGMII_CTRL(P) ((P) ? 0x82A0 : 0x42A0)
#define IXGBE_KRM_LP_BASE_PAGE_HIGH(P) ((P) ? 0x836C : 0x436C)
#define IXGBE_KRM_DSP_TXFFE_STATE_4(P) ((P) ? 0x8634 : 0x4634)
@@ -3722,6 +3724,7 @@ struct ixgbe_info {
#define IXGBE_KRM_PMD_FLX_MASK_ST20(P) ((P) ? 0x9054 : 0x5054)
#define IXGBE_KRM_TX_COEFF_CTRL_1(P) ((P) ? 0x9520 : 0x5520)
#define IXGBE_KRM_RX_ANA_CTL(P) ((P) ? 0x9A00 : 0x5A00)
+#define IXGBE_KRM_FLX_TMRS_CTRL_ST31(P) ((P) ? 0x9180 : 0x5180)
#define IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_DA ~(0x3 << 20)
#define IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_SR BIT(20)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
index 35c2b9b8bd19..aa4bf6c9a2f7 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
@@ -1721,9 +1721,59 @@ static s32 ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed)
return IXGBE_ERR_LINK_SETUP;
}
- status = mac->ops.write_iosf_sb_reg(hw,
- IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
- IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+ (void)mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+
+ /* change mode enforcement rules to hybrid */
+ (void)mac->ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_FLX_TMRS_CTRL_ST31(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ reg_val |= 0x0400;
+
+ (void)mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_FLX_TMRS_CTRL_ST31(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+
+ /* manually control the config */
+ (void)mac->ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ reg_val |= 0x20002240;
+
+ (void)mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+
+ /* move the AN base page values */
+ (void)mac->ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_PCS_KX_AN(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ reg_val |= 0x1;
+
+ (void)mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_PCS_KX_AN(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+
+ /* set the AN37 over CB mode */
+ (void)mac->ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_AN_CNTL_4(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ reg_val |= 0x20000000;
+
+ (void)mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_AN_CNTL_4(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
+
+ /* restart AN manually */
+ (void)mac->ops.read_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val);
+ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART;
+
+ (void)mac->ops.write_iosf_sb_reg(hw,
+ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id),
+ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val);
/* Toggle port SW reset by AN reset. */
status = ixgbe_restart_an_internal_phy_x550em(hw);
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index fed46872af2b..ccfa6b91aac6 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -213,8 +213,8 @@ static void ixgbevf_get_drvinfo(struct net_device *netdev,
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, ixgbevf_driver_name, sizeof(drvinfo->driver));
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ strscpy(drvinfo->driver, ixgbevf_driver_name, sizeof(drvinfo->driver));
+ strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
drvinfo->n_priv_flags = IXGBEVF_PRIV_FLAGS_STR_LEN;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 2f12fbe229c1..99933e89717a 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -2733,7 +2733,7 @@ static int ixgbevf_alloc_q_vector(struct ixgbevf_adapter *adapter, int v_idx,
return -ENOMEM;
/* initialize NAPI */
- netif_napi_add(adapter->netdev, &q_vector->napi, ixgbevf_poll, 64);
+ netif_napi_add(adapter->netdev, &q_vector->napi, ixgbevf_poll);
/* tie q_vector and adapter together */
adapter->q_vector[v_idx] = q_vector;
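The recurring netif_napi_add() changes in this series drop the poll-weight argument: the three-argument form applies the default NAPI budget (NAPI_POLL_WEIGHT) internally, while netif_napi_add_weight() remains available for the few drivers that genuinely need a different budget. A hedged sketch of a post-conversion call site (names are illustrative):

    #include <linux/netdevice.h>

    /* Illustrative helper: the three-argument netif_napi_add() is
     * equivalent to the old four-argument call with NAPI_POLL_WEIGHT.
     */
    static void example_napi_setup(struct net_device *netdev,
    			       struct napi_struct *napi,
    			       int (*poll)(struct napi_struct *, int))
    {
    	netif_napi_add(netdev, napi, poll);
    	/* a non-default budget would instead use:
    	 * netif_napi_add_weight(netdev, napi, poll, 16);
    	 */
    }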
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index f43d6616bc0d..1732ec3c3dbd 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -2332,9 +2332,9 @@ jme_get_drvinfo(struct net_device *netdev,
{
struct jme_adapter *jme = netdev_priv(netdev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(jme->pdev), sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, pci_name(jme->pdev), sizeof(info->bus_info));
}
static int
@@ -3009,7 +3009,7 @@ jme_init_one(struct pci_dev *pdev,
jwrite32(jme, JME_APMC, apmc);
}
- netif_napi_add(netdev, &jme->napi, jme_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(netdev, &jme->napi, jme_poll);
spin_lock_init(&jme->phy_lock);
spin_lock_init(&jme->macaddr_lock);
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index df9a8eefa007..2b9335cb4bb3 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -416,7 +416,8 @@ static void korina_abort_rx(struct net_device *dev)
}
/* transmit packet */
-static int korina_send_packet(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t korina_send_packet(struct sk_buff *skb,
+ struct net_device *dev)
{
struct korina_private *lp = netdev_priv(dev);
u32 chain_prev, chain_next;
@@ -938,9 +939,9 @@ static void netdev_get_drvinfo(struct net_device *dev,
{
struct korina_private *lp = netdev_priv(dev);
- strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
- strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, lp->dev->name, sizeof(info->bus_info));
+ strscpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strscpy(info->version, DRV_VERSION, sizeof(info->version));
+ strscpy(info->bus_info, lp->dev->name, sizeof(info->bus_info));
}
static int netdev_get_link_ksettings(struct net_device *dev,
@@ -1354,7 +1355,7 @@ static int korina_probe(struct platform_device *pdev)
dev->netdev_ops = &korina_netdev_ops;
dev->ethtool_ops = &netdev_ethtool_ops;
dev->watchdog_timeo = TX_TIMEOUT;
- netif_napi_add(dev, &lp->napi, korina_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(dev, &lp->napi, korina_poll);
lp->mii_if.dev = dev;
lp->mii_if.mdio_read = korina_mdio_read;
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index 7cedbe1fdfd7..59aab4086dcc 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -470,7 +470,7 @@ ltq_etop_stop(struct net_device *dev)
return 0;
}
-static int
+static netdev_tx_t
ltq_etop_tx(struct sk_buff *skb, struct net_device *dev)
{
int queue = skb_get_queue_mapping(skb);
diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c
index 57f27cc7724e..8d646c7f8c82 100644
--- a/drivers/net/ethernet/lantiq_xrx200.c
+++ b/drivers/net/ethernet/lantiq_xrx200.c
@@ -620,8 +620,7 @@ static int xrx200_probe(struct platform_device *pdev)
PMAC_HD_CTL);
/* setup NAPI */
- netif_napi_add(net_dev, &priv->chan_rx.napi, xrx200_poll_rx,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(net_dev, &priv->chan_rx.napi, xrx200_poll_rx);
netif_napi_add_tx(net_dev, &priv->chan_tx.napi,
xrx200_tx_housekeeping);
diff --git a/drivers/net/ethernet/litex/litex_liteeth.c b/drivers/net/ethernet/litex/litex_liteeth.c
index fdd99f0de424..35f24e0f0934 100644
--- a/drivers/net/ethernet/litex/litex_liteeth.c
+++ b/drivers/net/ethernet/litex/litex_liteeth.c
@@ -152,7 +152,8 @@ static int liteeth_stop(struct net_device *netdev)
return 0;
}
-static int liteeth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+static netdev_tx_t liteeth_start_xmit(struct sk_buff *skb,
+ struct net_device *netdev)
{
struct liteeth *priv = netdev_priv(netdev);
void __iomem *txbuffer;
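The korina, lantiq_etop and liteeth changes above switch the transmit handlers to the netdev_tx_t return type that the core expects from .ndo_start_xmit. A minimal hedged sketch of the expected shape (the function and its behaviour are illustrative, not taken from these drivers):

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    /* Illustrative xmit handler: return NETDEV_TX_OK once the skb has
     * been consumed (queued or dropped), or NETDEV_TX_BUSY to make the
     * core requeue it.
     */
    static netdev_tx_t example_start_xmit(struct sk_buff *skb,
    				      struct net_device *dev)
    {
    	if (!netif_carrier_ok(dev)) {
    		dev_kfree_skb_any(skb);
    		return NETDEV_TX_OK;	/* skb consumed (dropped) */
    	}

    	/* ... hand the skb to hardware here ... */

    	return NETDEV_TX_OK;
    }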
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index b6be0552a6c1..707993b445d1 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1603,12 +1603,12 @@ mv643xx_eth_set_link_ksettings(struct net_device *dev,
static void mv643xx_eth_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
- strlcpy(drvinfo->driver, mv643xx_eth_driver_name,
+ strscpy(drvinfo->driver, mv643xx_eth_driver_name,
sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, mv643xx_eth_driver_version,
+ strscpy(drvinfo->version, mv643xx_eth_driver_version,
sizeof(drvinfo->version));
- strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
- strlcpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
+ strscpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
+ strscpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
}
static int mv643xx_eth_get_coalesce(struct net_device *dev,
@@ -3183,7 +3183,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
INIT_WORK(&mp->tx_timeout_task, tx_timeout_task);
- netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(dev, &mp->napi, mv643xx_eth_poll);
timer_setup(&mp->rx_oom, oom_timer_wrapper, 0);
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 0caa2df87c04..ff3e361e06e7 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -4656,11 +4656,11 @@ mvneta_ethtool_get_coalesce(struct net_device *dev,
static void mvneta_ethtool_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
- strlcpy(drvinfo->driver, MVNETA_DRIVER_NAME,
+ strscpy(drvinfo->driver, MVNETA_DRIVER_NAME,
sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, MVNETA_DRIVER_VERSION,
+ strscpy(drvinfo->version, MVNETA_DRIVER_VERSION,
sizeof(drvinfo->version));
- strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
+ strscpy(drvinfo->bus_info, dev_name(&dev->dev),
sizeof(drvinfo->bus_info));
}
@@ -5600,14 +5600,13 @@ static int mvneta_probe(struct platform_device *pdev)
* operation, so only single NAPI should be initialized.
*/
if (pp->neta_armada3700) {
- netif_napi_add(dev, &pp->napi, mvneta_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add(dev, &pp->napi, mvneta_poll);
} else {
for_each_present_cpu(cpu) {
struct mvneta_pcpu_port *port =
per_cpu_ptr(pp->ports, cpu);
- netif_napi_add(dev, &port->napi, mvneta_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(dev, &port->napi, mvneta_poll);
port->pp = pp;
}
}
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
index ad73a488fc5f..11e603686a27 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
@@ -1530,6 +1530,7 @@ u32 mvpp2_read(struct mvpp2 *priv, u32 offset);
void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name);
void mvpp2_dbgfs_cleanup(struct mvpp2 *priv);
+void mvpp2_dbgfs_exit(void);
void mvpp23_rx_fifo_fc_en(struct mvpp2 *priv, int port, bool en);
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
index 4a3baa7e0142..75e83ea2a926 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_debugfs.c
@@ -691,6 +691,13 @@ static int mvpp2_dbgfs_port_init(struct dentry *parent,
return 0;
}
+static struct dentry *mvpp2_root;
+
+void mvpp2_dbgfs_exit(void)
+{
+ debugfs_remove(mvpp2_root);
+}
+
void mvpp2_dbgfs_cleanup(struct mvpp2 *priv)
{
debugfs_remove_recursive(priv->dbgfs_dir);
@@ -700,10 +707,9 @@ void mvpp2_dbgfs_cleanup(struct mvpp2 *priv)
void mvpp2_dbgfs_init(struct mvpp2 *priv, const char *name)
{
- struct dentry *mvpp2_dir, *mvpp2_root;
+ struct dentry *mvpp2_dir;
int ret, i;
- mvpp2_root = debugfs_lookup(MVPP2_DRIVER_NAME, NULL);
if (!mvpp2_root)
mvpp2_root = debugfs_create_dir(MVPP2_DRIVER_NAME, NULL);
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index b84128b549b4..eb0fb8128096 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -5425,11 +5425,11 @@ mvpp2_ethtool_get_coalesce(struct net_device *dev,
static void mvpp2_ethtool_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
- strlcpy(drvinfo->driver, MVPP2_DRIVER_NAME,
+ strscpy(drvinfo->driver, MVPP2_DRIVER_NAME,
sizeof(drvinfo->driver));
- strlcpy(drvinfo->version, MVPP2_DRIVER_VERSION,
+ strscpy(drvinfo->version, MVPP2_DRIVER_VERSION,
sizeof(drvinfo->version));
- strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
+ strscpy(drvinfo->bus_info, dev_name(&dev->dev),
sizeof(drvinfo->bus_info));
}
@@ -5770,8 +5770,7 @@ static int mvpp2_simple_queue_vectors_init(struct mvpp2_port *port,
v->irq = irq_of_parse_and_map(port_node, 0);
if (v->irq <= 0)
return -EINVAL;
- netif_napi_add(port->dev, &v->napi, mvpp2_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(port->dev, &v->napi, mvpp2_poll);
port->nqvecs = 1;
@@ -5831,8 +5830,7 @@ static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port,
goto err;
}
- netif_napi_add(port->dev, &v->napi, mvpp2_poll,
- NAPI_POLL_WEIGHT);
+ netif_napi_add(port->dev, &v->napi, mvpp2_poll);
}
return 0;
@@ -7706,7 +7704,18 @@ static struct platform_driver mvpp2_driver = {
},
};
-module_platform_driver(mvpp2_driver);
+static int __init mvpp2_driver_init(void)
+{
+ return platform_driver_register(&mvpp2_driver);
+}
+module_init(mvpp2_driver_init);
+
+static void __exit mvpp2_driver_exit(void)
+{
+ platform_driver_unregister(&mvpp2_driver);
+ mvpp2_dbgfs_exit();
+}
+module_exit(mvpp2_driver_exit);
MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com");
MODULE_AUTHOR("Marcin Wojtas ");
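The mvpp2 hunk above open-codes the init/exit pair that module_platform_driver() would otherwise generate, so that module unload can also call mvpp2_dbgfs_exit() and remove the driver-wide debugfs directory now cached in the static mvpp2_root. For reference, the macro being replaced expands to roughly this pattern (generic sketch, not mvpp2 code):

    /* module_platform_driver(my_driver) expands to approximately this
     * boilerplate; open-coding it lets the exit path do extra cleanup
     * after the driver has been unregistered.
     */
    static int __init my_driver_init(void)
    {
    	return platform_driver_register(&my_driver);
    }
    module_init(my_driver_init);

    static void __exit my_driver_exit(void)
    {
    	platform_driver_unregister(&my_driver);
    }
    module_exit(my_driver_exit);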
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
index 97f080c66dd4..9089adcb75f9 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
@@ -410,7 +410,7 @@ static void octep_napi_add(struct octep_device *oct)
for (i = 0; i < oct->num_oqs; i++) {
netdev_dbg(oct->netdev, "Adding NAPI on Q-%d\n", i);
netif_napi_add(oct->netdev, &oct->ioq_vector[i]->napi,
- octep_napi_poll, 64);
+ octep_napi_poll);
oct->oq[i]->napi = &oct->ioq_vector[i]->napi;
}
}
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
index d9ae0937d17a..392d9b0da0d7 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
@@ -158,8 +158,7 @@ static int octep_setup_oq(struct octep_device *oct, int q_no)
goto desc_dma_alloc_err;
}
- oq->buff_info = (struct octep_rx_buffer *)
- vzalloc(oq->max_count * OCTEP_OQ_RECVBUF_SIZE);
+ oq->buff_info = vzalloc(oq->max_count * OCTEP_OQ_RECVBUF_SIZE);
if (unlikely(!oq->buff_info)) {
dev_err(&oct->pdev->dev,
"Failed to allocate buffer info for OQ-%d\n", q_no);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/Makefile b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
index 40203560b291..3cf4c8285c90 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
@@ -11,4 +11,4 @@ rvu_mbox-y := mbox.o rvu_trace.o
rvu_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \
rvu_reg.o rvu_npc.o rvu_debugfs.o ptp.o rvu_npc_fs.o \
rvu_cpt.o rvu_devlink.o rpm.o rvu_cn10k.o rvu_switch.o \
- rvu_sdp.o rvu_npc_hash.o
+ rvu_sdp.o rvu_npc_hash.o mcs.o mcs_rvu_if.o mcs_cnf10kb.o
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index d7762577e285..8d5d5a0f68c4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -293,20 +293,74 @@ M(NIX_BANDPROF_ALLOC, 0x801d, nix_bandprof_alloc, nix_bandprof_alloc_req, \
M(NIX_BANDPROF_FREE, 0x801e, nix_bandprof_free, nix_bandprof_free_req, \
msg_rsp) \
M(NIX_BANDPROF_GET_HWINFO, 0x801f, nix_bandprof_get_hwinfo, msg_req, \
- nix_bandprof_get_hwinfo_rsp)
+ nix_bandprof_get_hwinfo_rsp) \
+/* MCS mbox IDs (range 0xA000 - 0xBFFF) */ \
+M(MCS_ALLOC_RESOURCES, 0xa000, mcs_alloc_resources, mcs_alloc_rsrc_req, \
+ mcs_alloc_rsrc_rsp) \
+M(MCS_FREE_RESOURCES, 0xa001, mcs_free_resources, mcs_free_rsrc_req, msg_rsp) \
+M(MCS_FLOWID_ENTRY_WRITE, 0xa002, mcs_flowid_entry_write, mcs_flowid_entry_write_req, \
+ msg_rsp) \
+M(MCS_SECY_PLCY_WRITE, 0xa003, mcs_secy_plcy_write, mcs_secy_plcy_write_req, \
+ msg_rsp) \
+M(MCS_RX_SC_CAM_WRITE, 0xa004, mcs_rx_sc_cam_write, mcs_rx_sc_cam_write_req, \
+ msg_rsp) \
+M(MCS_SA_PLCY_WRITE, 0xa005, mcs_sa_plcy_write, mcs_sa_plcy_write_req, \
+ msg_rsp) \
+M(MCS_TX_SC_SA_MAP_WRITE, 0xa006, mcs_tx_sc_sa_map_write, mcs_tx_sc_sa_map, \
+ msg_rsp) \
+M(MCS_RX_SC_SA_MAP_WRITE, 0xa007, mcs_rx_sc_sa_map_write, mcs_rx_sc_sa_map, \
+ msg_rsp) \
+M(MCS_FLOWID_ENA_ENTRY, 0xa008, mcs_flowid_ena_entry, mcs_flowid_ena_dis_entry, \
+ msg_rsp) \
+M(MCS_PN_TABLE_WRITE, 0xa009, mcs_pn_table_write, mcs_pn_table_write_req, \
+ msg_rsp) \
+M(MCS_SET_ACTIVE_LMAC, 0xa00a, mcs_set_active_lmac, mcs_set_active_lmac, \
+ msg_rsp) \
+M(MCS_GET_HW_INFO, 0xa00b, mcs_get_hw_info, msg_req, mcs_hw_info) \
+M(MCS_GET_FLOWID_STATS, 0xa00c, mcs_get_flowid_stats, mcs_stats_req, \
+ mcs_flowid_stats) \
+M(MCS_GET_SECY_STATS, 0xa00d, mcs_get_secy_stats, mcs_stats_req, \
+ mcs_secy_stats) \
+M(MCS_GET_SC_STATS, 0xa00e, mcs_get_sc_stats, mcs_stats_req, mcs_sc_stats) \
+M(MCS_GET_SA_STATS, 0xa00f, mcs_get_sa_stats, mcs_stats_req, mcs_sa_stats) \
+M(MCS_GET_PORT_STATS, 0xa010, mcs_get_port_stats, mcs_stats_req, \
+ mcs_port_stats) \
+M(MCS_CLEAR_STATS, 0xa011, mcs_clear_stats, mcs_clear_stats, msg_rsp) \
+M(MCS_INTR_CFG, 0xa012, mcs_intr_cfg, mcs_intr_cfg, msg_rsp) \
+M(MCS_SET_LMAC_MODE, 0xa013, mcs_set_lmac_mode, mcs_set_lmac_mode, msg_rsp) \
+M(MCS_SET_PN_THRESHOLD, 0xa014, mcs_set_pn_threshold, mcs_set_pn_threshold, \
+ msg_rsp) \
+M(MCS_ALLOC_CTRL_PKT_RULE, 0xa015, mcs_alloc_ctrl_pkt_rule, \
+ mcs_alloc_ctrl_pkt_rule_req, \
+ mcs_alloc_ctrl_pkt_rule_rsp) \
+M(MCS_FREE_CTRL_PKT_RULE, 0xa016, mcs_free_ctrl_pkt_rule, \
+ mcs_free_ctrl_pkt_rule_req, msg_rsp) \
+M(MCS_CTRL_PKT_RULE_WRITE, 0xa017, mcs_ctrl_pkt_rule_write, \
+ mcs_ctrl_pkt_rule_write_req, msg_rsp) \
+M(MCS_PORT_RESET, 0xa018, mcs_port_reset, mcs_port_reset_req, msg_rsp) \
+M(MCS_PORT_CFG_SET, 0xa019, mcs_port_cfg_set, mcs_port_cfg_set_req, msg_rsp)\
+M(MCS_PORT_CFG_GET, 0xa020, mcs_port_cfg_get, mcs_port_cfg_get_req, \
+ mcs_port_cfg_get_rsp) \
+M(MCS_CUSTOM_TAG_CFG_GET, 0xa021, mcs_custom_tag_cfg_get, \
+ mcs_custom_tag_cfg_get_req, \
+ mcs_custom_tag_cfg_get_rsp)
-/* Messages initiated by AF (range 0xC00 - 0xDFF) */
+/* Messages initiated by AF (range 0xC00 - 0xEFF) */
#define MBOX_UP_CGX_MESSAGES \
M(CGX_LINK_EVENT, 0xC00, cgx_link_event, cgx_link_info_msg, msg_rsp)
#define MBOX_UP_CPT_MESSAGES \
M(CPT_INST_LMTST, 0xD00, cpt_inst_lmtst, cpt_inst_lmtst_req, msg_rsp)
+#define MBOX_UP_MCS_MESSAGES \
+M(MCS_INTR_NOTIFY, 0xE00, mcs_intr_notify, mcs_intr_info, msg_rsp)
+
enum {
#define M(_name, _id, _1, _2, _3) MBOX_MSG_ ## _name = _id,
MBOX_MESSAGES
MBOX_UP_CGX_MESSAGES
MBOX_UP_CPT_MESSAGES
+MBOX_UP_MCS_MESSAGES
#undef M
};
@@ -1471,6 +1525,7 @@ enum ptp_op {
PTP_OP_GET_CLOCK = 1,
PTP_OP_GET_TSTMP = 2,
PTP_OP_SET_THRESH = 3,
+ PTP_OP_EXTTS_ON = 4,
};
struct ptp_req {
@@ -1478,6 +1533,7 @@ struct ptp_req {
u8 op;
s64 scaled_ppm;
u64 thresh;
+ int extts_on;
};
struct ptp_rsp {
@@ -1655,4 +1711,415 @@ enum cgx_af_status {
LMAC_AF_ERR_EXACT_MATCH_TBL_LOOK_UP_FAILED = -1110,
};
+enum mcs_direction {
+ MCS_RX,
+ MCS_TX,
+};
+
+enum mcs_rsrc_type {
+ MCS_RSRC_TYPE_FLOWID,
+ MCS_RSRC_TYPE_SECY,
+ MCS_RSRC_TYPE_SC,
+ MCS_RSRC_TYPE_SA,
+};
+
+struct mcs_alloc_rsrc_req {
+ struct mbox_msghdr hdr;
+ u8 rsrc_type;
+ u8 rsrc_cnt; /* Resources count */
+ u8 mcs_id; /* MCS block ID */
+ u8 dir; /* Macsec ingress or egress side */
+ u8 all; /* Allocate all resource type one each */
+ u64 rsvd;
+};
+
+struct mcs_alloc_rsrc_rsp {
+ struct mbox_msghdr hdr;
+ u8 flow_ids[128]; /* Index of reserved entries */
+ u8 secy_ids[128];
+ u8 sc_ids[128];
+ u8 sa_ids[256];
+ u8 rsrc_type;
+ u8 rsrc_cnt; /* No of entries reserved */
+ u8 mcs_id;
+ u8 dir;
+ u8 all;
+ u8 rsvd[256]; /* reserved fields for future expansion */
+};
+
+struct mcs_free_rsrc_req {
+ struct mbox_msghdr hdr;
+ u8 rsrc_id; /* Index of the entry to be freed */
+ u8 rsrc_type;
+ u8 mcs_id;
+ u8 dir;
+ u8 all; /* Free all the cam resources */
+ u64 rsvd;
+};
+
+struct mcs_flowid_entry_write_req {
+ struct mbox_msghdr hdr;
+ u64 data[4];
+ u64 mask[4];
+ u64 sci; /* CNF10K-B for tx_secy_mem_map */
+ u8 flow_id;
+ u8 secy_id; /* secyid for which flowid is mapped */
+ u8 sc_id; /* Valid if dir = MCS_TX, SC_CAM id mapped to flowid */
+ u8 ena; /* Enable tcam entry */
+ u8 ctrl_pkt;
+ u8 mcs_id;
+ u8 dir;
+ u64 rsvd;
+};
+
+struct mcs_secy_plcy_write_req {
+ struct mbox_msghdr hdr;
+ u64 plcy;
+ u8 secy_id;
+ u8 mcs_id;
+ u8 dir;
+ u64 rsvd;
+};
+
+/* RX SC_CAM mapping */
+struct mcs_rx_sc_cam_write_req {
+ struct mbox_msghdr hdr;
+ u64 sci; /* SCI */
+ u64 secy_id; /* secy index mapped to SC */
+ u8 sc_id; /* SC CAM entry index */
+ u8 mcs_id;
+ u64 rsvd;
+};
+
+struct mcs_sa_plcy_write_req {
+ struct mbox_msghdr hdr;
+ u64 plcy[2][9]; /* Support 2 SA policy */
+ u8 sa_index[2];
+ u8 sa_cnt;
+ u8 mcs_id;
+ u8 dir;
+ u64 rsvd;
+};
+
+struct mcs_tx_sc_sa_map {
+ struct mbox_msghdr hdr;
+ u8 sa_index0;
+ u8 sa_index1;
+ u8 rekey_ena;
+ u8 sa_index0_vld;
+ u8 sa_index1_vld;
+ u8 tx_sa_active;
+ u64 sectag_sci;
+ u8 sc_id; /* used as index for SA_MEM_MAP */
+ u8 mcs_id;
+ u64 rsvd;
+};
+
+struct mcs_rx_sc_sa_map {
+ struct mbox_msghdr hdr;
+ u8 sa_index;
+ u8 sa_in_use;
+ u8 sc_id;
+ u8 an; /* value range 0-3, sc_id + an used as index SA_MEM_MAP */
+ u8 mcs_id;
+ u64 rsvd;
+};
+
+struct mcs_flowid_ena_dis_entry {
+ struct mbox_msghdr hdr;
+ u8 flow_id;
+ u8 ena;
+ u8 mcs_id;
+ u8 dir;
+ u64 rsvd;
+};
+
+struct mcs_pn_table_write_req {
+ struct mbox_msghdr hdr;
+ u64 next_pn;
+ u8 pn_id;
+ u8 mcs_id;
+ u8 dir;
+ u64 rsvd;
+};
+
+struct mcs_hw_info {
+ struct mbox_msghdr hdr;
+ u8 num_mcs_blks; /* Number of MCS blocks */
+ u8 tcam_entries; /* RX/TX Tcam entries per mcs block */
+ u8 secy_entries; /* RX/TX SECY entries per mcs block */
+ u8 sc_entries; /* RX/TX SC CAM entries per mcs block */
+ u8 sa_entries; /* PN table entries = SA entries */
+ u64 rsvd[16];
+};
+
+struct mcs_set_active_lmac {
+ struct mbox_msghdr hdr;
+ u32 lmac_bmap; /* bitmap of active lmac per mcs block */
+ u8 mcs_id;
+ u16 chan_base; /* MCS channel base */
+ u64 rsvd;
+};
+
+struct mcs_set_lmac_mode {
+ struct mbox_msghdr hdr;
+ u8 mode; /* 1:Bypass 0:Operational */
+ u8 lmac_id;
+ u8 mcs_id;
+ u64 rsvd;
+};
+
+struct mcs_port_reset_req {
+ struct mbox_msghdr hdr;
+ u8 reset;
+ u8 mcs_id;
+ u8 port_id;
+ u64 rsvd;
+};
+
+struct mcs_port_cfg_set_req {
+ struct mbox_msghdr hdr;
+ u8 cstm_tag_rel_mode_sel;
+ u8 custom_hdr_enb;
+ u8 fifo_skid;
+ u8 port_mode;
+ u8 port_id;
+ u8 mcs_id;
+ u64 rsvd;
+};
+
+struct mcs_port_cfg_get_req {
+ struct mbox_msghdr hdr;
+ u8 port_id;
+ u8 mcs_id;
+ u64 rsvd;
+};
+
+struct mcs_port_cfg_get_rsp {
+ struct mbox_msghdr hdr;
+ u8 cstm_tag_rel_mode_sel;
+ u8 custom_hdr_enb;
+ u8 fifo_skid;
+ u8 port_mode;
+ u8 port_id;
+ u8 mcs_id;
+ u64 rsvd;
+};
+
+struct mcs_custom_tag_cfg_get_req {
+ struct mbox_msghdr hdr;
+ u8 mcs_id;
+ u8 dir;
+ u64 rsvd;
+};
+
+struct mcs_custom_tag_cfg_get_rsp {
+ struct mbox_msghdr hdr;
+ u16 cstm_etype[8];
+ u8 cstm_indx[8];
+ u8 cstm_etype_en;
+ u8 mcs_id;
+ u8 dir;
+ u64 rsvd;
+};
+
+/* MCS mailbox error codes
+ * Range 1201 - 1300.
+ */
+enum mcs_af_status {
+ MCS_AF_ERR_INVALID_MCSID = -1201,
+ MCS_AF_ERR_NOT_MAPPED = -1202,
+};
+
+struct mcs_set_pn_threshold {
+ struct mbox_msghdr hdr;
+ u64 threshold;
+ u8 xpn; /* '1' for setting xpn threshold */
+ u8 mcs_id;
+ u8 dir;
+ u64 rsvd;
+};
+
+enum mcs_ctrl_pkt_rulew_type {
+ MCS_CTRL_PKT_RULE_TYPE_ETH,
+ MCS_CTRL_PKT_RULE_TYPE_DA,
+ MCS_CTRL_PKT_RULE_TYPE_RANGE,
+ MCS_CTRL_PKT_RULE_TYPE_COMBO,
+ MCS_CTRL_PKT_RULE_TYPE_MAC,
+};
+
+struct mcs_alloc_ctrl_pkt_rule_req {
+ struct mbox_msghdr hdr;
+ u8 rule_type;
+ u8 mcs_id; /* MCS block ID */
+ u8 dir; /* Macsec ingress or egress side */
+ u64 rsvd;
+};
+
+struct mcs_alloc_ctrl_pkt_rule_rsp {
+ struct mbox_msghdr hdr;
+ u8 rule_idx;
+ u8 rule_type;
+ u8 mcs_id;
+ u8 dir;
+ u64 rsvd;
+};
+
+struct mcs_free_ctrl_pkt_rule_req {
+ struct mbox_msghdr hdr;
+ u8 rule_idx;
+ u8 rule_type;
+ u8 mcs_id;
+ u8 dir;
+ u8 all;
+ u64 rsvd;
+};
+
+struct mcs_ctrl_pkt_rule_write_req {
+ struct mbox_msghdr hdr;
+ u64 data0;
+ u64 data1;
+ u64 data2;
+ u8 rule_idx;
+ u8 rule_type;
+ u8 mcs_id;
+ u8 dir;
+ u64 rsvd;
+};
+
+struct mcs_stats_req {
+ struct mbox_msghdr hdr;
+ u8 id;
+ u8 mcs_id;
+ u8 dir;
+ u64 rsvd;
+};
+
+struct mcs_flowid_stats {
+ struct mbox_msghdr hdr;
+ u64 tcam_hit_cnt;
+ u64 rsvd;
+};
+
+struct mcs_secy_stats {
+ struct mbox_msghdr hdr;
+ u64 ctl_pkt_bcast_cnt;
+ u64 ctl_pkt_mcast_cnt;
+ u64 ctl_pkt_ucast_cnt;
+ u64 ctl_octet_cnt;
+ u64 unctl_pkt_bcast_cnt;
+ u64 unctl_pkt_mcast_cnt;
+ u64 unctl_pkt_ucast_cnt;
+ u64 unctl_octet_cnt;
+ /* Valid only for RX */
+ u64 octet_decrypted_cnt;
+ u64 octet_validated_cnt;
+ u64 pkt_port_disabled_cnt;
+ u64 pkt_badtag_cnt;
+ u64 pkt_nosa_cnt;
+ u64 pkt_nosaerror_cnt;
+ u64 pkt_tagged_ctl_cnt;
+ u64 pkt_untaged_cnt;
+ u64 pkt_ctl_cnt; /* CN10K-B */
+ u64 pkt_notag_cnt; /* CNF10K-B */
+ /* Valid only for TX */
+ u64 octet_encrypted_cnt;
+ u64 octet_protected_cnt;
+ u64 pkt_noactivesa_cnt;
+ u64 pkt_toolong_cnt;
+ u64 pkt_untagged_cnt;
+ u64 rsvd[4];
+};
+
+struct mcs_port_stats {
+ struct mbox_msghdr hdr;
+ u64 tcam_miss_cnt;
+ u64 parser_err_cnt;
+ u64 preempt_err_cnt; /* CNF10K-B */
+ u64 sectag_insert_err_cnt;
+ u64 rsvd[4];
+};
+
+/* Only for CN10K-B */
+struct mcs_sa_stats {
+ struct mbox_msghdr hdr;
+ /* RX */
+ u64 pkt_invalid_cnt;
+ u64 pkt_nosaerror_cnt;
+ u64 pkt_notvalid_cnt;
+ u64 pkt_ok_cnt;
+ u64 pkt_nosa_cnt;
+ /* TX */
+ u64 pkt_encrypt_cnt;
+ u64 pkt_protected_cnt;
+ u64 rsvd[4];
+};
+
+struct mcs_sc_stats {
+ struct mbox_msghdr hdr;
+ /* RX */
+ u64 hit_cnt;
+ u64 pkt_invalid_cnt;
+ u64 pkt_late_cnt;
+ u64 pkt_notvalid_cnt;
+ u64 pkt_unchecked_cnt;
+ u64 pkt_delay_cnt; /* CNF10K-B */
+ u64 pkt_ok_cnt; /* CNF10K-B */
+ u64 octet_decrypt_cnt; /* CN10K-B */
+ u64 octet_validate_cnt; /* CN10K-B */
+ /* TX */
+ u64 pkt_encrypt_cnt;
+ u64 pkt_protected_cnt;
+ u64 octet_encrypt_cnt; /* CN10K-B */
+ u64 octet_protected_cnt; /* CN10K-B */
+ u64 rsvd[4];
+};
+
+struct mcs_clear_stats {
+ struct mbox_msghdr hdr;
+#define MCS_FLOWID_STATS 0
+#define MCS_SECY_STATS 1
+#define MCS_SC_STATS 2
+#define MCS_SA_STATS 3
+#define MCS_PORT_STATS 4
+ u8 type; /* FLOWID, SECY, SC, SA, PORT */
+ u8 id; /* type = PORT, If id = FF(invalid) port no is derived from pcifunc */
+ u8 mcs_id;
+ u8 dir;
+ u8 all; /* All resources stats mapped to PF are cleared */
+};
+
+struct mcs_intr_cfg {
+ struct mbox_msghdr hdr;
+#define MCS_CPM_RX_SECTAG_V_EQ1_INT BIT_ULL(0)
+#define MCS_CPM_RX_SECTAG_E_EQ0_C_EQ1_INT BIT_ULL(1)
+#define MCS_CPM_RX_SECTAG_SL_GTE48_INT BIT_ULL(2)
+#define MCS_CPM_RX_SECTAG_ES_EQ1_SC_EQ1_INT BIT_ULL(3)
+#define MCS_CPM_RX_SECTAG_SC_EQ1_SCB_EQ1_INT BIT_ULL(4)
+#define MCS_CPM_RX_PACKET_XPN_EQ0_INT BIT_ULL(5)
+#define MCS_CPM_RX_PN_THRESH_REACHED_INT BIT_ULL(6)
+#define MCS_CPM_TX_PACKET_XPN_EQ0_INT BIT_ULL(7)
+#define MCS_CPM_TX_PN_THRESH_REACHED_INT BIT_ULL(8)
+#define MCS_CPM_TX_SA_NOT_VALID_INT BIT_ULL(9)
+#define MCS_BBE_RX_DFIFO_OVERFLOW_INT BIT_ULL(10)
+#define MCS_BBE_RX_PLFIFO_OVERFLOW_INT BIT_ULL(11)
+#define MCS_BBE_TX_DFIFO_OVERFLOW_INT BIT_ULL(12)
+#define MCS_BBE_TX_PLFIFO_OVERFLOW_INT BIT_ULL(13)
+#define MCS_PAB_RX_CHAN_OVERFLOW_INT BIT_ULL(14)
+#define MCS_PAB_TX_CHAN_OVERFLOW_INT BIT_ULL(15)
+ u64 intr_mask; /* Interrupt enable mask */
+ u8 mcs_id;
+ u8 lmac_id;
+ u64 rsvd;
+};
+
+struct mcs_intr_info {
+ struct mbox_msghdr hdr;
+ u64 intr_mask;
+ int sa_id;
+ u8 mcs_id;
+ u8 lmac_id;
+ u64 rsvd;
+};
+
#endif /* MBOX_H */
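The MCS_* additions to the MBOX_MESSAGES table above follow the existing X-macro pattern: the same table is re-expanded with different definitions of M() to produce the message-ID enum (as in the enum block in the hunk), handler prototypes and dispatch code elsewhere in the AF driver. A self-contained sketch of how one new entry expands under the enum definition of M() (the wrapper macro name below is hypothetical):

    #define MBOX_MESSAGES_EXAMPLE \
    M(MCS_GET_HW_INFO, 0xa00b, mcs_get_hw_info, msg_req, mcs_hw_info)

    enum {
    #define M(_name, _id, _1, _2, _3) MBOX_MSG_ ## _name = _id,
    MBOX_MESSAGES_EXAMPLE	/* expands to: MBOX_MSG_MCS_GET_HW_INFO = 0xa00b, */
    #undef M
    };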
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs.c b/drivers/net/ethernet/marvell/octeontx2/af/mcs.c
new file mode 100644
index 000000000000..5ba618aed6ad
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs.c
@@ -0,0 +1,1601 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell MCS driver
+ *
+ * Copyright (C) 2022 Marvell.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "mcs.h"
+#include "mcs_reg.h"
+
+#define DRV_NAME "Marvell MCS Driver"
+
+#define PCI_CFG_REG_BAR_NUM 0
+
+static const struct pci_device_id mcs_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_MCS) },
+ { 0, } /* end of table */
+};
+
+static LIST_HEAD(mcs_list);
+
+void mcs_get_tx_secy_stats(struct mcs *mcs, struct mcs_secy_stats *stats, int id)
+{
+ u64 reg;
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLBCPKTSX(id);
+ stats->ctl_pkt_bcast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLMCPKTSX(id);
+ stats->ctl_pkt_mcast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLOCTETSX(id);
+ stats->ctl_octet_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLUCPKTSX(id);
+ stats->ctl_pkt_ucast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLBCPKTSX(id);
+ stats->unctl_pkt_bcast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLMCPKTSX(id);
+ stats->unctl_pkt_mcast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLOCTETSX(id);
+ stats->unctl_octet_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLUCPKTSX(id);
+ stats->unctl_pkt_ucast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSECYENCRYPTEDX(id);
+ stats->octet_encrypted_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSECYPROTECTEDX(id);
+ stats->octet_protected_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECYNOACTIVESAX(id);
+ stats->pkt_noactivesa_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECYTOOLONGX(id);
+ stats->pkt_toolong_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECYUNTAGGEDX(id);
+ stats->pkt_untagged_cnt = mcs_reg_read(mcs, reg);
+}
+
+void mcs_get_rx_secy_stats(struct mcs *mcs, struct mcs_secy_stats *stats, int id)
+{
+ u64 reg;
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_IFINCTLBCPKTSX(id);
+ stats->ctl_pkt_bcast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_IFINCTLMCPKTSX(id);
+ stats->ctl_pkt_mcast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_IFINCTLOCTETSX(id);
+ stats->ctl_octet_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_IFINCTLUCPKTSX(id);
+ stats->ctl_pkt_ucast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLBCPKTSX(id);
+ stats->unctl_pkt_bcast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLMCPKTSX(id);
+ stats->unctl_pkt_mcast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLOCTETSX(id);
+ stats->unctl_octet_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLUCPKTSX(id);
+ stats->unctl_pkt_ucast_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INOCTETSSECYDECRYPTEDX(id);
+ stats->octet_decrypted_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INOCTETSSECYVALIDATEX(id);
+ stats->octet_validated_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSCTRLPORTDISABLEDX(id);
+ stats->pkt_port_disabled_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYBADTAGX(id);
+ stats->pkt_badtag_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYNOSAX(id);
+ stats->pkt_nosa_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYNOSAERRORX(id);
+ stats->pkt_nosaerror_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYTAGGEDCTLX(id);
+ stats->pkt_tagged_ctl_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYUNTAGGEDORNOTAGX(id);
+ stats->pkt_untaged_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYCTLX(id);
+ stats->pkt_ctl_cnt = mcs_reg_read(mcs, reg);
+
+ if (mcs->hw->mcs_blks > 1) {
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYNOTAGX(id);
+ stats->pkt_notag_cnt = mcs_reg_read(mcs, reg);
+ }
+}
+
+void mcs_get_flowid_stats(struct mcs *mcs, struct mcs_flowid_stats *stats,
+ int id, int dir)
+{
+ u64 reg;
+
+ if (dir == MCS_RX)
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSFLOWIDTCAMHITX(id);
+ else
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSFLOWIDTCAMHITX(id);
+
+ stats->tcam_hit_cnt = mcs_reg_read(mcs, reg);
+}
+
+void mcs_get_port_stats(struct mcs *mcs, struct mcs_port_stats *stats,
+ int id, int dir)
+{
+ u64 reg;
+
+ if (dir == MCS_RX) {
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSFLOWIDTCAMMISSX(id);
+ stats->tcam_miss_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSPARSEERRX(id);
+ stats->parser_err_cnt = mcs_reg_read(mcs, reg);
+ if (mcs->hw->mcs_blks > 1) {
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSEARLYPREEMPTERRX(id);
+ stats->preempt_err_cnt = mcs_reg_read(mcs, reg);
+ }
+ } else {
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSFLOWIDTCAMMISSX(id);
+ stats->tcam_miss_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSPARSEERRX(id);
+ stats->parser_err_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECTAGINSERTIONERRX(id);
+ stats->sectag_insert_err_cnt = mcs_reg_read(mcs, reg);
+ }
+}
+
+void mcs_get_sa_stats(struct mcs *mcs, struct mcs_sa_stats *stats, int id, int dir)
+{
+ u64 reg;
+
+ if (dir == MCS_RX) {
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSAINVALIDX(id);
+ stats->pkt_invalid_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSANOTUSINGSAERRORX(id);
+ stats->pkt_nosaerror_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSANOTVALIDX(id);
+ stats->pkt_notvalid_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSAOKX(id);
+ stats->pkt_ok_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSAUNUSEDSAX(id);
+ stats->pkt_nosa_cnt = mcs_reg_read(mcs, reg);
+ } else {
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSAENCRYPTEDX(id);
+ stats->pkt_encrypt_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSAPROTECTEDX(id);
+ stats->pkt_protected_cnt = mcs_reg_read(mcs, reg);
+ }
+}
+
+void mcs_get_sc_stats(struct mcs *mcs, struct mcs_sc_stats *stats,
+ int id, int dir)
+{
+ u64 reg;
+
+ if (dir == MCS_RX) {
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCCAMHITX(id);
+ stats->hit_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCINVALIDX(id);
+ stats->pkt_invalid_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCLATEORDELAYEDX(id);
+ stats->pkt_late_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCNOTVALIDX(id);
+ stats->pkt_notvalid_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCUNCHECKEDOROKX(id);
+ stats->pkt_unchecked_cnt = mcs_reg_read(mcs, reg);
+
+ if (mcs->hw->mcs_blks > 1) {
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCDELAYEDX(id);
+ stats->pkt_delay_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INPKTSSCOKX(id);
+ stats->pkt_ok_cnt = mcs_reg_read(mcs, reg);
+ }
+ if (mcs->hw->mcs_blks == 1) {
+ reg = MCSX_CSE_RX_MEM_SLAVE_INOCTETSSCDECRYPTEDX(id);
+ stats->octet_decrypt_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_RX_MEM_SLAVE_INOCTETSSCVALIDATEX(id);
+ stats->octet_validate_cnt = mcs_reg_read(mcs, reg);
+ }
+ } else {
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSCENCRYPTEDX(id);
+ stats->pkt_encrypt_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSCPROTECTEDX(id);
+ stats->pkt_protected_cnt = mcs_reg_read(mcs, reg);
+
+ if (mcs->hw->mcs_blks == 1) {
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSCENCRYPTEDX(id);
+ stats->octet_encrypt_cnt = mcs_reg_read(mcs, reg);
+
+ reg = MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSCPROTECTEDX(id);
+ stats->octet_protected_cnt = mcs_reg_read(mcs, reg);
+ }
+ }
+}
+
+void mcs_clear_stats(struct mcs *mcs, u8 type, u8 id, int dir)
+{
+ struct mcs_flowid_stats flowid_st;
+ struct mcs_port_stats port_st;
+ struct mcs_secy_stats secy_st;
+ struct mcs_sc_stats sc_st;
+ struct mcs_sa_stats sa_st;
+ u64 reg;
+
+ if (dir == MCS_RX)
+ reg = MCSX_CSE_RX_SLAVE_CTRL;
+ else
+ reg = MCSX_CSE_TX_SLAVE_CTRL;
+
+ mcs_reg_write(mcs, reg, BIT_ULL(0));
+
+ switch (type) {
+ case MCS_FLOWID_STATS:
+ mcs_get_flowid_stats(mcs, &flowid_st, id, dir);
+ break;
+ case MCS_SECY_STATS:
+ if (dir == MCS_RX)
+ mcs_get_rx_secy_stats(mcs, &secy_st, id);
+ else
+ mcs_get_tx_secy_stats(mcs, &secy_st, id);
+ break;
+ case MCS_SC_STATS:
+ mcs_get_sc_stats(mcs, &sc_st, id, dir);
+ break;
+ case MCS_SA_STATS:
+ mcs_get_sa_stats(mcs, &sa_st, id, dir);
+ break;
+ case MCS_PORT_STATS:
+ mcs_get_port_stats(mcs, &port_st, id, dir);
+ break;
+ }
+
+ mcs_reg_write(mcs, reg, 0x0);
+}
+
+int mcs_clear_all_stats(struct mcs *mcs, u16 pcifunc, int dir)
+{
+ struct mcs_rsrc_map *map;
+ int id;
+
+ if (dir == MCS_RX)
+ map = &mcs->rx;
+ else
+ map = &mcs->tx;
+
+ /* Clear FLOWID stats */
+ for (id = 0; id < map->flow_ids.max; id++) {
+ if (map->flowid2pf_map[id] != pcifunc)
+ continue;
+ mcs_clear_stats(mcs, MCS_FLOWID_STATS, id, dir);
+ }
+
+ /* Clear SECY stats */
+ for (id = 0; id < map->secy.max; id++) {
+ if (map->secy2pf_map[id] != pcifunc)
+ continue;
+ mcs_clear_stats(mcs, MCS_SECY_STATS, id, dir);
+ }
+
+ /* Clear SC stats */
+ for (id = 0; id < map->secy.max; id++) {
+ if (map->sc2pf_map[id] != pcifunc)
+ continue;
+ mcs_clear_stats(mcs, MCS_SC_STATS, id, dir);
+ }
+
+ /* Clear SA stats */
+ for (id = 0; id < map->sa.max; id++) {
+ if (map->sa2pf_map[id] != pcifunc)
+ continue;
+ mcs_clear_stats(mcs, MCS_SA_STATS, id, dir);
+ }
+ return 0;
+}
+
+void mcs_pn_table_write(struct mcs *mcs, u8 pn_id, u64 next_pn, u8 dir)
+{
+ u64 reg;
+
+ if (dir == MCS_RX)
+ reg = MCSX_CPM_RX_SLAVE_SA_PN_TABLE_MEMX(pn_id);
+ else
+ reg = MCSX_CPM_TX_SLAVE_SA_PN_TABLE_MEMX(pn_id);
+ mcs_reg_write(mcs, reg, next_pn);
+}
+
+void cn10kb_mcs_tx_sa_mem_map_write(struct mcs *mcs, struct mcs_tx_sc_sa_map *map)
+{
+ u64 reg, val;
+
+ val = (map->sa_index0 & 0xFF) |
+ (map->sa_index1 & 0xFF) << 9 |
+ (map->rekey_ena & 0x1) << 18 |
+ (map->sa_index0_vld & 0x1) << 19 |
+ (map->sa_index1_vld & 0x1) << 20 |
+ (map->tx_sa_active & 0x1) << 21 |
+ map->sectag_sci << 22;
+ reg = MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(map->sc_id);
+ mcs_reg_write(mcs, reg, val);
+
+ val = map->sectag_sci >> 42;
+ reg = MCSX_CPM_TX_SLAVE_SA_MAP_MEM_1X(map->sc_id);
+ mcs_reg_write(mcs, reg, val);
+}
+
+void cn10kb_mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *map)
+{
+ u64 val, reg;
+
+ val = (map->sa_index & 0xFF) | map->sa_in_use << 9;
+
+ reg = MCSX_CPM_RX_SLAVE_SA_MAP_MEMX((4 * map->sc_id) + map->an);
+ mcs_reg_write(mcs, reg, val);
+}
+
+void mcs_sa_plcy_write(struct mcs *mcs, u64 *plcy, int sa_id, int dir)
+{
+ int reg_id;
+ u64 reg;
+
+ if (dir == MCS_RX) {
+ for (reg_id = 0; reg_id < 8; reg_id++) {
+ reg = MCSX_CPM_RX_SLAVE_SA_PLCY_MEMX(reg_id, sa_id);
+ mcs_reg_write(mcs, reg, plcy[reg_id]);
+ }
+ } else {
+ for (reg_id = 0; reg_id < 9; reg_id++) {
+ reg = MCSX_CPM_TX_SLAVE_SA_PLCY_MEMX(reg_id, sa_id);
+ mcs_reg_write(mcs, reg, plcy[reg_id]);
+ }
+ }
+}
+
+void mcs_ena_dis_sc_cam_entry(struct mcs *mcs, int sc_id, int ena)
+{
+ u64 reg, val;
+
+ reg = MCSX_CPM_RX_SLAVE_SC_CAM_ENA(0);
+ if (sc_id > 63)
+ reg = MCSX_CPM_RX_SLAVE_SC_CAM_ENA(1);
+
+ if (ena)
+ val = mcs_reg_read(mcs, reg) | BIT_ULL(sc_id);
+ else
+ val = mcs_reg_read(mcs, reg) & ~BIT_ULL(sc_id);
+
+ mcs_reg_write(mcs, reg, val);
+}
+
+void mcs_rx_sc_cam_write(struct mcs *mcs, u64 sci, u64 secy, int sc_id)
+{
+ mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_SC_CAMX(0, sc_id), sci);
+ mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_SC_CAMX(1, sc_id), secy);
+ /* Enable SC CAM */
+ mcs_ena_dis_sc_cam_entry(mcs, sc_id, true);
+}
+
+void mcs_secy_plcy_write(struct mcs *mcs, u64 plcy, int secy_id, int dir)
+{
+ u64 reg;
+
+ if (dir == MCS_RX)
+ reg = MCSX_CPM_RX_SLAVE_SECY_PLCY_MEM_0X(secy_id);
+ else
+ reg = MCSX_CPM_TX_SLAVE_SECY_PLCY_MEMX(secy_id);
+
+ mcs_reg_write(mcs, reg, plcy);
+
+ if (mcs->hw->mcs_blks == 1 && dir == MCS_RX)
+ mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_SECY_PLCY_MEM_1X(secy_id), 0x0ull);
+}
+
+void cn10kb_mcs_flowid_secy_map(struct mcs *mcs, struct secy_mem_map *map, int dir)
+{
+ u64 reg, val;
+
+ val = (map->secy & 0x7F) | (map->ctrl_pkt & 0x1) << 8;
+ if (dir == MCS_RX) {
+ reg = MCSX_CPM_RX_SLAVE_SECY_MAP_MEMX(map->flow_id);
+ } else {
+ val |= (map->sc & 0x7F) << 9;
+ reg = MCSX_CPM_TX_SLAVE_SECY_MAP_MEM_0X(map->flow_id);
+ }
+
+ mcs_reg_write(mcs, reg, val);
+}
+
+void mcs_ena_dis_flowid_entry(struct mcs *mcs, int flow_id, int dir, int ena)
+{
+ u64 reg, val;
+
+ if (dir == MCS_RX) {
+ reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_ENA_0;
+ if (flow_id > 63)
+ reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_ENA_1;
+ } else {
+ reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_ENA_0;
+ if (flow_id > 63)
+ reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_ENA_1;
+ }
+
+ /* Enable/Disable the tcam entry */
+ if (ena)
+ val = mcs_reg_read(mcs, reg) | BIT_ULL(flow_id);
+ else
+ val = mcs_reg_read(mcs, reg) & ~BIT_ULL(flow_id);
+
+ mcs_reg_write(mcs, reg, val);
+}
+
+void mcs_flowid_entry_write(struct mcs *mcs, u64 *data, u64 *mask, int flow_id, int dir)
+{
+ int reg_id;
+ u64 reg;
+
+ if (dir == MCS_RX) {
+ for (reg_id = 0; reg_id < 4; reg_id++) {
+ reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_DATAX(reg_id, flow_id);
+ mcs_reg_write(mcs, reg, data[reg_id]);
+ reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_MASKX(reg_id, flow_id);
+ mcs_reg_write(mcs, reg, mask[reg_id]);
+ }
+ } else {
+ for (reg_id = 0; reg_id < 4; reg_id++) {
+ reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_DATAX(reg_id, flow_id);
+ mcs_reg_write(mcs, reg, data[reg_id]);
+ reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_MASKX(reg_id, flow_id);
+ mcs_reg_write(mcs, reg, mask[reg_id]);
+ }
+ }
+}
+
+int mcs_install_flowid_bypass_entry(struct mcs *mcs)
+{
+ int flow_id, secy_id, reg_id;
+ struct secy_mem_map map;
+ u64 reg, plcy = 0;
+
+ /* Flow entry */
+ flow_id = mcs->hw->tcam_entries - MCS_RSRC_RSVD_CNT;
+ for (reg_id = 0; reg_id < 4; reg_id++) {
+ reg = MCSX_CPM_RX_SLAVE_FLOWID_TCAM_MASKX(reg_id, flow_id);
+ mcs_reg_write(mcs, reg, GENMASK_ULL(63, 0));
+ }
+ for (reg_id = 0; reg_id < 4; reg_id++) {
+ reg = MCSX_CPM_TX_SLAVE_FLOWID_TCAM_MASKX(reg_id, flow_id);
+ mcs_reg_write(mcs, reg, GENMASK_ULL(63, 0));
+ }
+ /* secy */
+ secy_id = mcs->hw->secy_entries - MCS_RSRC_RSVD_CNT;
+
+ /* Set validate frames to NULL and enable control port */
+ plcy = 0x7ull;
+ if (mcs->hw->mcs_blks > 1)
+ plcy = BIT_ULL(0) | 0x3ull << 4;
+ mcs_secy_plcy_write(mcs, plcy, secy_id, MCS_RX);
+
+ /* Enable control port and set mtu to max */
+ plcy = BIT_ULL(0) | GENMASK_ULL(43, 28);
+ if (mcs->hw->mcs_blks > 1)
+ plcy = BIT_ULL(0) | GENMASK_ULL(63, 48);
+ mcs_secy_plcy_write(mcs, plcy, secy_id, MCS_TX);
+
+ /* Map flowid to secy */
+ map.secy = secy_id;
+ map.ctrl_pkt = 0;
+ map.flow_id = flow_id;
+ mcs->mcs_ops->mcs_flowid_secy_map(mcs, &map, MCS_RX);
+ map.sc = secy_id;
+ mcs->mcs_ops->mcs_flowid_secy_map(mcs, &map, MCS_TX);
+
+ /* Enable Flowid entry */
+ mcs_ena_dis_flowid_entry(mcs, flow_id, MCS_RX, true);
+ mcs_ena_dis_flowid_entry(mcs, flow_id, MCS_TX, true);
+ return 0;
+}
+
+void mcs_clear_secy_plcy(struct mcs *mcs, int secy_id, int dir)
+{
+ struct mcs_rsrc_map *map;
+ int flow_id;
+
+ if (dir == MCS_RX)
+ map = &mcs->rx;
+ else
+ map = &mcs->tx;
+
+ /* Clear secy memory to zero */
+ mcs_secy_plcy_write(mcs, 0, secy_id, dir);
+
+ /* Disable the tcam entry using this secy */
+ for (flow_id = 0; flow_id < map->flow_ids.max; flow_id++) {
+ if (map->flowid2secy_map[flow_id] != secy_id)
+ continue;
+ mcs_ena_dis_flowid_entry(mcs, flow_id, dir, false);
+ }
+}
+
+int mcs_alloc_ctrlpktrule(struct rsrc_bmap *rsrc, u16 *pf_map, u16 offset, u16 pcifunc)
+{
+ int rsrc_id;
+
+ if (!rsrc->bmap)
+ return -EINVAL;
+
+ rsrc_id = bitmap_find_next_zero_area(rsrc->bmap, rsrc->max, offset, 1, 0);
+ if (rsrc_id >= rsrc->max)
+ return -ENOSPC;
+
+ bitmap_set(rsrc->bmap, rsrc_id, 1);
+ pf_map[rsrc_id] = pcifunc;
+
+ return rsrc_id;
+}
+
+int mcs_free_ctrlpktrule(struct mcs *mcs, struct mcs_free_ctrl_pkt_rule_req *req)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct mcs_rsrc_map *map;
+ u64 dis, reg;
+ int id, rc;
+
+ reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_RULE_ENABLE : MCSX_PEX_TX_SLAVE_RULE_ENABLE;
+ map = (req->dir == MCS_RX) ? &mcs->rx : &mcs->tx;
+
+ if (req->all) {
+ for (id = 0; id < map->ctrlpktrule.max; id++) {
+ if (map->ctrlpktrule2pf_map[id] != pcifunc)
+ continue;
+ mcs_free_rsrc(&map->ctrlpktrule, map->ctrlpktrule2pf_map, id, pcifunc);
+ dis = mcs_reg_read(mcs, reg);
+ dis &= ~BIT_ULL(id);
+ mcs_reg_write(mcs, reg, dis);
+ }
+ return 0;
+ }
+
+ rc = mcs_free_rsrc(&map->ctrlpktrule, map->ctrlpktrule2pf_map, req->rule_idx, pcifunc);
+ dis = mcs_reg_read(mcs, reg);
+ dis &= ~BIT_ULL(req->rule_idx);
+ mcs_reg_write(mcs, reg, dis);
+
+ return rc;
+}
+
+int mcs_ctrlpktrule_write(struct mcs *mcs, struct mcs_ctrl_pkt_rule_write_req *req)
+{
+ u64 reg, enb;
+ u64 idx;
+
+ switch (req->rule_type) {
+ case MCS_CTRL_PKT_RULE_TYPE_ETH:
+ req->data0 &= GENMASK(15, 0);
+ if (req->data0 != ETH_P_PAE)
+ return -EINVAL;
+
+ idx = req->rule_idx - MCS_CTRLPKT_ETYPE_RULE_OFFSET;
+ reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_RULE_ETYPE_CFGX(idx) :
+ MCSX_PEX_TX_SLAVE_RULE_ETYPE_CFGX(idx);
+
+ mcs_reg_write(mcs, reg, req->data0);
+ break;
+ case MCS_CTRL_PKT_RULE_TYPE_DA:
+ if (!(req->data0 & BIT_ULL(40)))
+ return -EINVAL;
+
+ idx = req->rule_idx - MCS_CTRLPKT_DA_RULE_OFFSET;
+ reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_RULE_DAX(idx) :
+ MCSX_PEX_TX_SLAVE_RULE_DAX(idx);
+
+ mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
+ break;
+ case MCS_CTRL_PKT_RULE_TYPE_RANGE:
+ if (!(req->data0 & BIT_ULL(40)) || !(req->data1 & BIT_ULL(40)))
+ return -EINVAL;
+
+ idx = req->rule_idx - MCS_CTRLPKT_DA_RANGE_RULE_OFFSET;
+ if (req->dir == MCS_RX) {
+ reg = MCSX_PEX_RX_SLAVE_RULE_DA_RANGE_MINX(idx);
+ mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
+ reg = MCSX_PEX_RX_SLAVE_RULE_DA_RANGE_MAXX(idx);
+ mcs_reg_write(mcs, reg, req->data1 & GENMASK_ULL(47, 0));
+ } else {
+ reg = MCSX_PEX_TX_SLAVE_RULE_DA_RANGE_MINX(idx);
+ mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
+ reg = MCSX_PEX_TX_SLAVE_RULE_DA_RANGE_MAXX(idx);
+ mcs_reg_write(mcs, reg, req->data1 & GENMASK_ULL(47, 0));
+ }
+ break;
+ case MCS_CTRL_PKT_RULE_TYPE_COMBO:
+ req->data2 &= GENMASK(15, 0);
+ if (req->data2 != ETH_P_PAE || !(req->data0 & BIT_ULL(40)) ||
+ !(req->data1 & BIT_ULL(40)))
+ return -EINVAL;
+
+ idx = req->rule_idx - MCS_CTRLPKT_COMBO_RULE_OFFSET;
+ if (req->dir == MCS_RX) {
+ reg = MCSX_PEX_RX_SLAVE_RULE_COMBO_MINX(idx);
+ mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
+ reg = MCSX_PEX_RX_SLAVE_RULE_COMBO_MAXX(idx);
+ mcs_reg_write(mcs, reg, req->data1 & GENMASK_ULL(47, 0));
+ reg = MCSX_PEX_RX_SLAVE_RULE_COMBO_ETX(idx);
+ mcs_reg_write(mcs, reg, req->data2);
+ } else {
+ reg = MCSX_PEX_TX_SLAVE_RULE_COMBO_MINX(idx);
+ mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
+ reg = MCSX_PEX_TX_SLAVE_RULE_COMBO_MAXX(idx);
+ mcs_reg_write(mcs, reg, req->data1 & GENMASK_ULL(47, 0));
+ reg = MCSX_PEX_TX_SLAVE_RULE_COMBO_ETX(idx);
+ mcs_reg_write(mcs, reg, req->data2);
+ }
+ break;
+ case MCS_CTRL_PKT_RULE_TYPE_MAC:
+ if (!(req->data0 & BIT_ULL(40)))
+ return -EINVAL;
+
+ idx = req->rule_idx - MCS_CTRLPKT_MAC_EN_RULE_OFFSET;
+ reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_RULE_MAC :
+ MCSX_PEX_TX_SLAVE_RULE_MAC;
+
+ mcs_reg_write(mcs, reg, req->data0 & GENMASK_ULL(47, 0));
+ break;
+ }
+
+ reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_RULE_ENABLE : MCSX_PEX_TX_SLAVE_RULE_ENABLE;
+
+ enb = mcs_reg_read(mcs, reg);
+ enb |= BIT_ULL(req->rule_idx);
+ mcs_reg_write(mcs, reg, enb);
+
+ return 0;
+}
+
+int mcs_free_rsrc(struct rsrc_bmap *rsrc, u16 *pf_map, int rsrc_id, u16 pcifunc)
+{
+ /* Check if the rsrc_id is mapped to PF/VF */
+ if (pf_map[rsrc_id] != pcifunc)
+ return -EINVAL;
+
+ rvu_free_rsrc(rsrc, rsrc_id);
+ pf_map[rsrc_id] = 0;
+ return 0;
+}
+
+/* Free all the cam resources mapped to pf */
+int mcs_free_all_rsrc(struct mcs *mcs, int dir, u16 pcifunc)
+{
+ struct mcs_rsrc_map *map;
+ int id;
+
+ if (dir == MCS_RX)
+ map = &mcs->rx;
+ else
+ map = &mcs->tx;
+
+ /* free tcam entries */
+ for (id = 0; id < map->flow_ids.max; id++) {
+ if (map->flowid2pf_map[id] != pcifunc)
+ continue;
+ mcs_free_rsrc(&map->flow_ids, map->flowid2pf_map,
+ id, pcifunc);
+ mcs_ena_dis_flowid_entry(mcs, id, dir, false);
+ }
+
+ /* free secy entries */
+ for (id = 0; id < map->secy.max; id++) {
+ if (map->secy2pf_map[id] != pcifunc)
+ continue;
+ mcs_free_rsrc(&map->secy, map->secy2pf_map,
+ id, pcifunc);
+ mcs_clear_secy_plcy(mcs, id, dir);
+ }
+
+ /* free sc entries */
+ for (id = 0; id < map->secy.max; id++) {
+ if (map->sc2pf_map[id] != pcifunc)
+ continue;
+ mcs_free_rsrc(&map->sc, map->sc2pf_map, id, pcifunc);
+
+ /* Disable SC CAM only on RX side */
+ if (dir == MCS_RX)
+ mcs_ena_dis_sc_cam_entry(mcs, id, false);
+ }
+
+ /* free sa entries */
+ for (id = 0; id < map->sa.max; id++) {
+ if (map->sa2pf_map[id] != pcifunc)
+ continue;
+ mcs_free_rsrc(&map->sa, map->sa2pf_map, id, pcifunc);
+ }
+ return 0;
+}
+
+int mcs_alloc_rsrc(struct rsrc_bmap *rsrc, u16 *pf_map, u16 pcifunc)
+{
+ int rsrc_id;
+
+ rsrc_id = rvu_alloc_rsrc(rsrc);
+ if (rsrc_id < 0)
+ return -ENOMEM;
+ pf_map[rsrc_id] = pcifunc;
+ return rsrc_id;
+}
+
+int mcs_alloc_all_rsrc(struct mcs *mcs, u8 *flow_id, u8 *secy_id,
+ u8 *sc_id, u8 *sa1_id, u8 *sa2_id, u16 pcifunc, int dir)
+{
+ struct mcs_rsrc_map *map;
+ int id;
+
+ if (dir == MCS_RX)
+ map = &mcs->rx;
+ else
+ map = &mcs->tx;
+
+ id = mcs_alloc_rsrc(&map->flow_ids, map->flowid2pf_map, pcifunc);
+ if (id < 0)
+ return -ENOMEM;
+ *flow_id = id;
+
+ id = mcs_alloc_rsrc(&map->secy, map->secy2pf_map, pcifunc);
+ if (id < 0)
+ return -ENOMEM;
+ *secy_id = id;
+
+ id = mcs_alloc_rsrc(&map->sc, map->sc2pf_map, pcifunc);
+ if (id < 0)
+ return -ENOMEM;
+ *sc_id = id;
+
+ id = mcs_alloc_rsrc(&map->sa, map->sa2pf_map, pcifunc);
+ if (id < 0)
+ return -ENOMEM;
+ *sa1_id = id;
+
+ id = mcs_alloc_rsrc(&map->sa, map->sa2pf_map, pcifunc);
+ if (id < 0)
+ return -ENOMEM;
+ *sa2_id = id;
+
+ return 0;
+}
+
+static void cn10kb_mcs_tx_pn_wrapped_handler(struct mcs *mcs)
+{
+ struct mcs_intr_event event = { 0 };
+ struct rsrc_bmap *sc_bmap;
+ u64 val;
+ int sc;
+
+ sc_bmap = &mcs->tx.sc;
+
+ event.mcs_id = mcs->mcs_id;
+ event.intr_mask = MCS_CPM_TX_PACKET_XPN_EQ0_INT;
+
+ for_each_set_bit(sc, sc_bmap->bmap, mcs->hw->sc_entries) {
+ val = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(sc));
+
+ if (mcs->tx_sa_active[sc])
+ /* SA_index1 was used and got expired */
+ event.sa_id = (val >> 9) & 0xFF;
+ else
+ /* SA_index0 was used and got expired */
+ event.sa_id = val & 0xFF;
+
+ event.pcifunc = mcs->tx.sa2pf_map[event.sa_id];
+ mcs_add_intr_wq_entry(mcs, &event);
+ }
+}
+
+static void cn10kb_mcs_tx_pn_thresh_reached_handler(struct mcs *mcs)
+{
+ struct mcs_intr_event event = { 0 };
+ struct rsrc_bmap *sc_bmap;
+ u64 val, status;
+ int sc;
+
+ sc_bmap = &mcs->tx.sc;
+
+ event.mcs_id = mcs->mcs_id;
+ event.intr_mask = MCS_CPM_TX_PN_THRESH_REACHED_INT;
+
+ /* TX SA interrupt is raised only if autorekey is enabled.
+ * MCS_CPM_TX_SLAVE_SA_MAP_MEM_0X[sc].tx_sa_active bit gets toggled if
+ * one of two SAs mapped to SC gets expired. If tx_sa_active=0 implies
+ * SA in SA_index1 got expired else SA in SA_index0 got expired.
+ */
+ for_each_set_bit(sc, sc_bmap->bmap, mcs->hw->sc_entries) {
+ val = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(sc));
+ /* Auto rekey is enabled */
+ if (!((val >> 18) & 0x1))
+ continue;
+
+ status = (val >> 21) & 0x1;
+
+ /* Check if tx_sa_active status had changed */
+ if (status == mcs->tx_sa_active[sc])
+ continue;
+ /* SA_index0 is expired */
+ if (status)
+ event.sa_id = val & 0xFF;
+ else
+ event.sa_id = (val >> 9) & 0xFF;
+
+ event.pcifunc = mcs->tx.sa2pf_map[event.sa_id];
+ mcs_add_intr_wq_entry(mcs, &event);
+ }
+}
+
+static void mcs_rx_pn_thresh_reached_handler(struct mcs *mcs)
+{
+ struct mcs_intr_event event = { 0 };
+ int sa, reg;
+ u64 intr;
+
+ /* Check expired SAs */
+ for (reg = 0; reg < (mcs->hw->sa_entries / 64); reg++) {
+ /* Bit high in *PN_THRESH_REACHEDX implies
+ * corresponding SAs are expired.
+ */
+ intr = mcs_reg_read(mcs, MCSX_CPM_RX_SLAVE_PN_THRESH_REACHEDX(reg));
+ for (sa = 0; sa < 64; sa++) {
+ if (!(intr & BIT_ULL(sa)))
+ continue;
+
+ event.mcs_id = mcs->mcs_id;
+ event.intr_mask = MCS_CPM_RX_PN_THRESH_REACHED_INT;
+ event.sa_id = sa + (reg * 64);
+ event.pcifunc = mcs->rx.sa2pf_map[event.sa_id];
+ mcs_add_intr_wq_entry(mcs, &event);
+ }
+ }
+}
+
+static void mcs_rx_misc_intr_handler(struct mcs *mcs, u64 intr)
+{
+ struct mcs_intr_event event = { 0 };
+
+ event.mcs_id = mcs->mcs_id;
+ event.pcifunc = mcs->pf_map[0];
+
+ if (intr & MCS_CPM_RX_INT_SECTAG_V_EQ1)
+ event.intr_mask = MCS_CPM_RX_SECTAG_V_EQ1_INT;
+ if (intr & MCS_CPM_RX_INT_SECTAG_E_EQ0_C_EQ1)
+ event.intr_mask |= MCS_CPM_RX_SECTAG_E_EQ0_C_EQ1_INT;
+ if (intr & MCS_CPM_RX_INT_SL_GTE48)
+ event.intr_mask |= MCS_CPM_RX_SECTAG_SL_GTE48_INT;
+ if (intr & MCS_CPM_RX_INT_ES_EQ1_SC_EQ1)
+ event.intr_mask |= MCS_CPM_RX_SECTAG_ES_EQ1_SC_EQ1_INT;
+ if (intr & MCS_CPM_RX_INT_SC_EQ1_SCB_EQ1)
+ event.intr_mask |= MCS_CPM_RX_SECTAG_SC_EQ1_SCB_EQ1_INT;
+ if (intr & MCS_CPM_RX_INT_PACKET_XPN_EQ0)
+ event.intr_mask |= MCS_CPM_RX_PACKET_XPN_EQ0_INT;
+
+ mcs_add_intr_wq_entry(mcs, &event);
+}
+
+static void mcs_tx_misc_intr_handler(struct mcs *mcs, u64 intr)
+{
+ struct mcs_intr_event event = { 0 };
+
+ if (!(intr & MCS_CPM_TX_INT_SA_NOT_VALID))
+ return;
+
+ event.mcs_id = mcs->mcs_id;
+ event.pcifunc = mcs->pf_map[0];
+
+ event.intr_mask = MCS_CPM_TX_SA_NOT_VALID_INT;
+
+ mcs_add_intr_wq_entry(mcs, &event);
+}
+
+static void mcs_bbe_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir)
+{
+ struct mcs_intr_event event = { 0 };
+ int i;
+
+ if (!(intr & MCS_BBE_INT_MASK))
+ return;
+
+ event.mcs_id = mcs->mcs_id;
+ event.pcifunc = mcs->pf_map[0];
+
+ for (i = 0; i < MCS_MAX_BBE_INT; i++) {
+ if (!(intr & BIT_ULL(i)))
+ continue;
+
+ /* Lower nibble denotes data fifo overflow interrupts and
+ * upper nibble indicates policy fifo overflow interrupts.
+ */
+ if (intr & 0xFULL)
+ event.intr_mask = (dir == MCS_RX) ?
+ MCS_BBE_RX_DFIFO_OVERFLOW_INT :
+ MCS_BBE_TX_DFIFO_OVERFLOW_INT;
+ else
+ event.intr_mask = (dir == MCS_RX) ?
+ MCS_BBE_RX_PLFIFO_OVERFLOW_INT :
+ MCS_BBE_TX_PLFIFO_OVERFLOW_INT;
+
+ /* Notify the lmac_id info which ran into BBE fatal error */
+ event.lmac_id = i & 0x3ULL;
+ mcs_add_intr_wq_entry(mcs, &event);
+ }
+}
+
+static void mcs_pab_intr_handler(struct mcs *mcs, u64 intr, enum mcs_direction dir)
+{
+ struct mcs_intr_event event = { 0 };
+ int i;
+
+ if (!(intr & MCS_PAB_INT_MASK))
+ return;
+
+ event.mcs_id = mcs->mcs_id;
+ event.pcifunc = mcs->pf_map[0];
+
+ for (i = 0; i < MCS_MAX_PAB_INT; i++) {
+ if (!(intr & BIT_ULL(i)))
+ continue;
+
+ event.intr_mask = (dir == MCS_RX) ? MCS_PAB_RX_CHAN_OVERFLOW_INT :
+ MCS_PAB_TX_CHAN_OVERFLOW_INT;
+
+ /* Notify the lmac_id info which ran into PAB fatal error */
+ event.lmac_id = i;
+ mcs_add_intr_wq_entry(mcs, &event);
+ }
+}
+
+static irqreturn_t mcs_ip_intr_handler(int irq, void *mcs_irq)
+{
+ struct mcs *mcs = (struct mcs *)mcs_irq;
+ u64 intr, cpm_intr, bbe_intr, pab_intr;
+
+ /* Disable and clear the interrupt */
+ mcs_reg_write(mcs, MCSX_IP_INT_ENA_W1C, BIT_ULL(0));
+ mcs_reg_write(mcs, MCSX_IP_INT, BIT_ULL(0));
+
+ /* Check which block has an interrupt */
+ intr = mcs_reg_read(mcs, MCSX_TOP_SLAVE_INT_SUM);
+
+ /* CPM RX */
+ if (intr & MCS_CPM_RX_INT_ENA) {
+ /* Check for PN thresh interrupt bit */
+ cpm_intr = mcs_reg_read(mcs, MCSX_CPM_RX_SLAVE_RX_INT);
+
+ if (cpm_intr & MCS_CPM_RX_INT_PN_THRESH_REACHED)
+ mcs_rx_pn_thresh_reached_handler(mcs);
+
+ if (cpm_intr & MCS_CPM_RX_INT_ALL)
+ mcs_rx_misc_intr_handler(mcs, cpm_intr);
+
+ /* Clear the interrupt */
+ mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_RX_INT, cpm_intr);
+ }
+
+ /* CPM TX */
+ if (intr & MCS_CPM_TX_INT_ENA) {
+ cpm_intr = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_TX_INT);
+
+ if (cpm_intr & MCS_CPM_TX_INT_PN_THRESH_REACHED) {
+ if (mcs->hw->mcs_blks > 1)
+ cnf10kb_mcs_tx_pn_thresh_reached_handler(mcs);
+ else
+ cn10kb_mcs_tx_pn_thresh_reached_handler(mcs);
+ }
+
+ if (cpm_intr & MCS_CPM_TX_INT_SA_NOT_VALID)
+ mcs_tx_misc_intr_handler(mcs, cpm_intr);
+
+ if (cpm_intr & MCS_CPM_TX_INT_PACKET_XPN_EQ0) {
+ if (mcs->hw->mcs_blks > 1)
+ cnf10kb_mcs_tx_pn_wrapped_handler(mcs);
+ else
+ cn10kb_mcs_tx_pn_wrapped_handler(mcs);
+ }
+ /* Clear the interrupt */
+ mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_TX_INT, cpm_intr);
+ }
+
+ /* BBE RX */
+ if (intr & MCS_BBE_RX_INT_ENA) {
+ bbe_intr = mcs_reg_read(mcs, MCSX_BBE_RX_SLAVE_BBE_INT);
+ mcs_bbe_intr_handler(mcs, bbe_intr, MCS_RX);
+
+ /* Clear the interrupt */
+ mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_BBE_INT_INTR_RW, 0);
+ mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_BBE_INT, bbe_intr);
+ }
+
+ /* BBE TX */
+ if (intr & MCS_BBE_TX_INT_ENA) {
+ bbe_intr = mcs_reg_read(mcs, MCSX_BBE_TX_SLAVE_BBE_INT);
+ mcs_bbe_intr_handler(mcs, bbe_intr, MCS_TX);
+
+ /* Clear the interrupt */
+ mcs_reg_write(mcs, MCSX_BBE_TX_SLAVE_BBE_INT_INTR_RW, 0);
+ mcs_reg_write(mcs, MCSX_BBE_TX_SLAVE_BBE_INT, bbe_intr);
+ }
+
+ /* PAB RX */
+ if (intr & MCS_PAB_RX_INT_ENA) {
+ pab_intr = mcs_reg_read(mcs, MCSX_PAB_RX_SLAVE_PAB_INT);
+ mcs_pab_intr_handler(mcs, pab_intr, MCS_RX);
+
+ /* Clear the interrupt */
+ mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PAB_INT_INTR_RW, 0);
+ mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PAB_INT, pab_intr);
+ }
+
+ /* PAB TX */
+ if (intr & MCS_PAB_TX_INT_ENA) {
+ pab_intr = mcs_reg_read(mcs, MCSX_PAB_TX_SLAVE_PAB_INT);
+ mcs_pab_intr_handler(mcs, pab_intr, MCS_TX);
+
+ /* Clear the interrupt */
+ mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT_INTR_RW, 0);
+ mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT, pab_intr);
+ }
+
+ /* Enable the interrupt */
+ mcs_reg_write(mcs, MCSX_IP_INT_ENA_W1S, BIT_ULL(0));
+
+ return IRQ_HANDLED;
+}
+
+static void *alloc_mem(struct mcs *mcs, int n)
+{
+ return devm_kcalloc(mcs->dev, n, sizeof(u16), GFP_KERNEL);
+}
+
+static int mcs_alloc_struct_mem(struct mcs *mcs, struct mcs_rsrc_map *res)
+{
+ struct hwinfo *hw = mcs->hw;
+ int err;
+
+ res->flowid2pf_map = alloc_mem(mcs, hw->tcam_entries);
+ if (!res->flowid2pf_map)
+ return -ENOMEM;
+
+ res->secy2pf_map = alloc_mem(mcs, hw->secy_entries);
+ if (!res->secy2pf_map)
+ return -ENOMEM;
+
+ res->sc2pf_map = alloc_mem(mcs, hw->sc_entries);
+ if (!res->sc2pf_map)
+ return -ENOMEM;
+
+ res->sa2pf_map = alloc_mem(mcs, hw->sa_entries);
+ if (!res->sa2pf_map)
+ return -ENOMEM;
+
+ res->flowid2secy_map = alloc_mem(mcs, hw->tcam_entries);
+ if (!res->flowid2secy_map)
+ return -ENOMEM;
+
+ res->ctrlpktrule2pf_map = alloc_mem(mcs, MCS_MAX_CTRLPKT_RULES);
+ if (!res->ctrlpktrule2pf_map)
+ return -ENOMEM;
+
+ res->flow_ids.max = hw->tcam_entries - MCS_RSRC_RSVD_CNT;
+ err = rvu_alloc_bitmap(&res->flow_ids);
+ if (err)
+ return err;
+
+ res->secy.max = hw->secy_entries - MCS_RSRC_RSVD_CNT;
+ err = rvu_alloc_bitmap(&res->secy);
+ if (err)
+ return err;
+
+ res->sc.max = hw->sc_entries;
+ err = rvu_alloc_bitmap(&res->sc);
+ if (err)
+ return err;
+
+ res->sa.max = hw->sa_entries;
+ err = rvu_alloc_bitmap(&res->sa);
+ if (err)
+ return err;
+
+ res->ctrlpktrule.max = MCS_MAX_CTRLPKT_RULES;
+ err = rvu_alloc_bitmap(&res->ctrlpktrule);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int mcs_register_interrupts(struct mcs *mcs)
+{
+ int ret = 0;
+
+ mcs->num_vec = pci_msix_vec_count(mcs->pdev);
+
+ ret = pci_alloc_irq_vectors(mcs->pdev, mcs->num_vec,
+ mcs->num_vec, PCI_IRQ_MSIX);
+ if (ret < 0) {
+ dev_err(mcs->dev, "MCS Request for %d msix vector failed err:%d\n",
+ mcs->num_vec, ret);
+ return ret;
+ }
+
+ ret = request_irq(pci_irq_vector(mcs->pdev, MCS_INT_VEC_IP),
+ mcs_ip_intr_handler, 0, "MCS_IP", mcs);
+ if (ret) {
+ dev_err(mcs->dev, "MCS IP irq registration failed\n");
+ goto exit;
+ }
+
+ /* MCS enable IP interrupts */
+ mcs_reg_write(mcs, MCSX_IP_INT_ENA_W1S, BIT_ULL(0));
+
+ /* Enable CPM Rx/Tx interrupts */
+ mcs_reg_write(mcs, MCSX_TOP_SLAVE_INT_SUM_ENB,
+ MCS_CPM_RX_INT_ENA | MCS_CPM_TX_INT_ENA |
+ MCS_BBE_RX_INT_ENA | MCS_BBE_TX_INT_ENA |
+ MCS_PAB_RX_INT_ENA | MCS_PAB_TX_INT_ENA);
+
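+	/* 0x7 covers the three CPM TX interrupt causes, 0x7F the seven CPM RX causes */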
+ mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_TX_INT_ENB, 0x7ULL);
+ mcs_reg_write(mcs, MCSX_CPM_RX_SLAVE_RX_INT_ENB, 0x7FULL);
+
+ mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_BBE_INT_ENB, 0xff);
+ mcs_reg_write(mcs, MCSX_BBE_TX_SLAVE_BBE_INT_ENB, 0xff);
+
+ mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PAB_INT_ENB, 0xff);
+ mcs_reg_write(mcs, MCSX_PAB_TX_SLAVE_PAB_INT_ENB, 0xff);
+
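+	/* Per-SC cache of the last seen TX_SA_ACTIVE state, used by the TX PN interrupt handlers */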
+ mcs->tx_sa_active = alloc_mem(mcs, mcs->hw->sc_entries);
+	if (!mcs->tx_sa_active) {
+		ret = -ENOMEM;
+		goto free_irq;
+	}
+
+	return ret;
+free_irq:
+	free_irq(pci_irq_vector(mcs->pdev, MCS_INT_VEC_IP), mcs);
+exit:
+ pci_free_irq_vectors(mcs->pdev);
+ mcs->num_vec = 0;
+ return ret;
+}
+
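+/* Number of MCS blocks discovered so far: highest probed mcs_id + 1 */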
+int mcs_get_blkcnt(void)
+{
+ struct mcs *mcs;
+ int idmax = -ENODEV;
+
+ /* Check MCS block is present in hardware */
+ if (!pci_dev_present(mcs_id_table))
+ return 0;
+
+ list_for_each_entry(mcs, &mcs_list, mcs_list)
+ if (mcs->mcs_id > idmax)
+ idmax = mcs->mcs_id;
+
+ if (idmax < 0)
+ return 0;
+
+ return idmax + 1;
+}
+
+struct mcs *mcs_get_pdata(int mcs_id)
+{
+ struct mcs *mcs_dev;
+
+ list_for_each_entry(mcs_dev, &mcs_list, mcs_list) {
+ if (mcs_dev->mcs_id == mcs_id)
+ return mcs_dev;
+ }
+ return NULL;
+}
+
+void mcs_set_port_cfg(struct mcs *mcs, struct mcs_port_cfg_set_req *req)
+{
+ u64 val = 0;
+
+ mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_PORT_CFGX(req->port_id),
+ req->port_mode & MCS_PORT_MODE_MASK);
+
+ req->cstm_tag_rel_mode_sel &= 0x3;
+
+ if (mcs->hw->mcs_blks > 1) {
+ req->fifo_skid &= MCS_PORT_FIFO_SKID_MASK;
+ val = (u32)req->fifo_skid << 0x10;
+ val |= req->fifo_skid;
+ mcs_reg_write(mcs, MCSX_PAB_RX_SLAVE_FIFO_SKID_CFGX(req->port_id), val);
+ mcs_reg_write(mcs, MCSX_PEX_TX_SLAVE_CUSTOM_TAG_REL_MODE_SEL(req->port_id),
+ req->cstm_tag_rel_mode_sel);
+ val = mcs_reg_read(mcs, MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION);
+
+ if (req->custom_hdr_enb)
+ val |= BIT_ULL(req->port_id);
+ else
+ val &= ~BIT_ULL(req->port_id);
+
+ mcs_reg_write(mcs, MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION, val);
+ } else {
+ val = mcs_reg_read(mcs, MCSX_PEX_TX_SLAVE_PORT_CONFIG(req->port_id));
+ val |= (req->cstm_tag_rel_mode_sel << 2);
+ mcs_reg_write(mcs, MCSX_PEX_TX_SLAVE_PORT_CONFIG(req->port_id), val);
+ }
+}
+
+void mcs_get_port_cfg(struct mcs *mcs, struct mcs_port_cfg_get_req *req,
+ struct mcs_port_cfg_get_rsp *rsp)
+{
+ u64 reg = 0;
+
+ rsp->port_mode = mcs_reg_read(mcs, MCSX_PAB_RX_SLAVE_PORT_CFGX(req->port_id)) &
+ MCS_PORT_MODE_MASK;
+
+ if (mcs->hw->mcs_blks > 1) {
+ reg = MCSX_PAB_RX_SLAVE_FIFO_SKID_CFGX(req->port_id);
+ rsp->fifo_skid = mcs_reg_read(mcs, reg) & MCS_PORT_FIFO_SKID_MASK;
+ reg = MCSX_PEX_TX_SLAVE_CUSTOM_TAG_REL_MODE_SEL(req->port_id);
+ rsp->cstm_tag_rel_mode_sel = mcs_reg_read(mcs, reg) & 0x3;
+ if (mcs_reg_read(mcs, MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION) & BIT_ULL(req->port_id))
+ rsp->custom_hdr_enb = 1;
+ } else {
+ reg = MCSX_PEX_TX_SLAVE_PORT_CONFIG(req->port_id);
+ rsp->cstm_tag_rel_mode_sel = mcs_reg_read(mcs, reg) >> 2;
+ }
+
+ rsp->port_id = req->port_id;
+ rsp->mcs_id = req->mcs_id;
+}
+
+void mcs_get_custom_tag_cfg(struct mcs *mcs, struct mcs_custom_tag_cfg_get_req *req,
+ struct mcs_custom_tag_cfg_get_rsp *rsp)
+{
+ u64 reg = 0, val = 0;
+ u8 idx;
+
+ for (idx = 0; idx < MCS_MAX_CUSTOM_TAGS; idx++) {
+ if (mcs->hw->mcs_blks > 1)
+ reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_CUSTOM_TAGX(idx) :
+ MCSX_PEX_TX_SLAVE_CUSTOM_TAGX(idx);
+ else
+ reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_VLAN_CFGX(idx) :
+ MCSX_PEX_TX_SLAVE_VLAN_CFGX(idx);
+
+ val = mcs_reg_read(mcs, reg);
+ if (mcs->hw->mcs_blks > 1) {
+ rsp->cstm_etype[idx] = val & GENMASK(15, 0);
+ rsp->cstm_indx[idx] = (val >> 0x16) & 0x3;
+ reg = (req->dir == MCS_RX) ? MCSX_PEX_RX_SLAVE_ETYPE_ENABLE :
+ MCSX_PEX_TX_SLAVE_ETYPE_ENABLE;
+ rsp->cstm_etype_en = mcs_reg_read(mcs, reg) & 0xFF;
+ } else {
+ rsp->cstm_etype[idx] = (val >> 0x1) & GENMASK(15, 0);
+ rsp->cstm_indx[idx] = (val >> 0x11) & 0x3;
+ rsp->cstm_etype_en |= (val & 0x1) << idx;
+ }
+ }
+
+ rsp->mcs_id = req->mcs_id;
+ rsp->dir = req->dir;
+}
+
+void mcs_reset_port(struct mcs *mcs, u8 port_id, u8 reset)
+{
+ u64 reg = MCSX_MCS_TOP_SLAVE_PORT_RESET(port_id);
+
+ mcs_reg_write(mcs, reg, reset & 0x1);
+}
+
+/* Set lmac to bypass/operational mode */
+void mcs_set_lmac_mode(struct mcs *mcs, int lmac_id, u8 mode)
+{
+ u64 reg;
+
+ reg = MCSX_MCS_TOP_SLAVE_CHANNEL_CFG(lmac_id * 2);
+ mcs_reg_write(mcs, reg, (u64)mode);
+}
+
+void mcs_pn_threshold_set(struct mcs *mcs, struct mcs_set_pn_threshold *pn)
+{
+ u64 reg;
+
+ if (pn->dir == MCS_RX)
+ reg = pn->xpn ? MCSX_CPM_RX_SLAVE_XPN_THRESHOLD : MCSX_CPM_RX_SLAVE_PN_THRESHOLD;
+ else
+ reg = pn->xpn ? MCSX_CPM_TX_SLAVE_XPN_THRESHOLD : MCSX_CPM_TX_SLAVE_PN_THRESHOLD;
+
+ mcs_reg_write(mcs, reg, pn->threshold);
+}
+
+void cn10kb_mcs_parser_cfg(struct mcs *mcs)
+{
+ u64 reg, val;
+
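+	/* Parser rule word: bit 0 = rule enable, bits 16:1 = ethertype, bit 17/18 = CTag/STag select */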
+ /* VLAN CTag */
+ val = BIT_ULL(0) | (0x8100ull & 0xFFFF) << 1 | BIT_ULL(17);
+ /* RX */
+ reg = MCSX_PEX_RX_SLAVE_VLAN_CFGX(0);
+ mcs_reg_write(mcs, reg, val);
+
+ /* TX */
+ reg = MCSX_PEX_TX_SLAVE_VLAN_CFGX(0);
+ mcs_reg_write(mcs, reg, val);
+
+ /* VLAN STag */
+ val = BIT_ULL(0) | (0x88a8ull & 0xFFFF) << 1 | BIT_ULL(18);
+ /* RX */
+ reg = MCSX_PEX_RX_SLAVE_VLAN_CFGX(1);
+ mcs_reg_write(mcs, reg, val);
+
+ /* TX */
+ reg = MCSX_PEX_TX_SLAVE_VLAN_CFGX(1);
+ mcs_reg_write(mcs, reg, val);
+}
+
+static void mcs_lmac_init(struct mcs *mcs, int lmac_id)
+{
+ u64 reg;
+
+ /* Port mode 25GB */
+ reg = MCSX_PAB_RX_SLAVE_PORT_CFGX(lmac_id);
+ mcs_reg_write(mcs, reg, 0);
+
+ if (mcs->hw->mcs_blks > 1) {
+ reg = MCSX_PAB_RX_SLAVE_FIFO_SKID_CFGX(lmac_id);
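+		/* Default FIFO skid of 0xe, programmed into both 16-bit halves as in mcs_set_port_cfg() */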
+ mcs_reg_write(mcs, reg, 0xe000e);
+ return;
+ }
+
+ reg = MCSX_PAB_TX_SLAVE_PORT_CFGX(lmac_id);
+ mcs_reg_write(mcs, reg, 0);
+}
+
+int mcs_set_lmac_channels(int mcs_id, u16 base)
+{
+ struct mcs *mcs;
+ int lmac;
+ u64 cfg;
+
+ mcs = mcs_get_pdata(mcs_id);
+ if (!mcs)
+ return -ENODEV;
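+	/* Assign each LMAC a contiguous range of 16 channels starting at 'base' */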
+ for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++) {
+ cfg = mcs_reg_read(mcs, MCSX_LINK_LMACX_CFG(lmac));
+ cfg &= ~(MCSX_LINK_LMAC_BASE_MASK | MCSX_LINK_LMAC_RANGE_MASK);
+ cfg |= FIELD_PREP(MCSX_LINK_LMAC_RANGE_MASK, ilog2(16));
+ cfg |= FIELD_PREP(MCSX_LINK_LMAC_BASE_MASK, base);
+ mcs_reg_write(mcs, MCSX_LINK_LMACX_CFG(lmac), cfg);
+ base += 16;
+ }
+ return 0;
+}
+
+static int mcs_x2p_calibration(struct mcs *mcs)
+{
+ unsigned long timeout = jiffies + usecs_to_jiffies(20000);
+ int i, err = 0;
+ u64 val;
+
+ /* set X2P calibration */
+ val = mcs_reg_read(mcs, MCSX_MIL_GLOBAL);
+ val |= BIT_ULL(5);
+ mcs_reg_write(mcs, MCSX_MIL_GLOBAL, val);
+
+ /* Wait for calibration to complete */
+ while (!(mcs_reg_read(mcs, MCSX_MIL_RX_GBL_STATUS) & BIT_ULL(0))) {
+ if (time_before(jiffies, timeout)) {
+ usleep_range(80, 100);
+ continue;
+ } else {
+ err = -EBUSY;
+ dev_err(mcs->dev, "MCS X2P calibration failed..ignoring\n");
+ return err;
+ }
+ }
+
+ val = mcs_reg_read(mcs, MCSX_MIL_RX_GBL_STATUS);
+ for (i = 0; i < mcs->hw->mcs_x2p_intf; i++) {
+ if (val & BIT_ULL(1 + i))
+ continue;
+ err = -EBUSY;
+ dev_err(mcs->dev, "MCS:%d didn't respond to X2P calibration\n", i);
+ }
+ /* Clear X2P calibrate */
+ mcs_reg_write(mcs, MCSX_MIL_GLOBAL, mcs_reg_read(mcs, MCSX_MIL_GLOBAL) & ~BIT_ULL(5));
+
+ return err;
+}
+
+static void mcs_set_external_bypass(struct mcs *mcs, u8 bypass)
+{
+ u64 val;
+
+ /* Set MCS to external bypass */
+ val = mcs_reg_read(mcs, MCSX_MIL_GLOBAL);
+ if (bypass)
+ val |= BIT_ULL(6);
+ else
+ val &= ~BIT_ULL(6);
+ mcs_reg_write(mcs, MCSX_MIL_GLOBAL, val);
+}
+
+static void mcs_global_cfg(struct mcs *mcs)
+{
+ /* Disable external bypass */
+ mcs_set_external_bypass(mcs, false);
+
+ /* Reset TX/RX stats memory */
+ mcs_reg_write(mcs, MCSX_CSE_RX_SLAVE_STATS_CLEAR, 0x1F);
+ mcs_reg_write(mcs, MCSX_CSE_TX_SLAVE_STATS_CLEAR, 0x1F);
+
+ /* Set MCS to perform standard IEEE802.1AE macsec processing */
+ if (mcs->hw->mcs_blks == 1) {
+ mcs_reg_write(mcs, MCSX_IP_MODE, BIT_ULL(3));
+ return;
+ }
+
+ mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_CAL_ENTRY, 0xe4);
+ mcs_reg_write(mcs, MCSX_BBE_RX_SLAVE_CAL_LEN, 4);
+}
+
+void cn10kb_mcs_set_hw_capabilities(struct mcs *mcs)
+{
+ struct hwinfo *hw = mcs->hw;
+
+ hw->tcam_entries = 128; /* TCAM entries */
+ hw->secy_entries = 128; /* SecY entries */
+ hw->sc_entries = 128; /* SC CAM entries */
+ hw->sa_entries = 256; /* SA entries */
+ hw->lmac_cnt = 20; /* lmacs/ports per mcs block */
+	hw->mcs_x2p_intf = 5;		/* x2p calibration intf */
+ hw->mcs_blks = 1; /* MCS blocks */
+}
+
+static struct mcs_ops cn10kb_mcs_ops = {
+ .mcs_set_hw_capabilities = cn10kb_mcs_set_hw_capabilities,
+ .mcs_parser_cfg = cn10kb_mcs_parser_cfg,
+ .mcs_tx_sa_mem_map_write = cn10kb_mcs_tx_sa_mem_map_write,
+ .mcs_rx_sa_mem_map_write = cn10kb_mcs_rx_sa_mem_map_write,
+ .mcs_flowid_secy_map = cn10kb_mcs_flowid_secy_map,
+};
+
+static int mcs_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct device *dev = &pdev->dev;
+ int lmac, err = 0;
+ struct mcs *mcs;
+
+ mcs = devm_kzalloc(dev, sizeof(*mcs), GFP_KERNEL);
+ if (!mcs)
+ return -ENOMEM;
+
+ mcs->hw = devm_kzalloc(dev, sizeof(struct hwinfo), GFP_KERNEL);
+ if (!mcs->hw)
+ return -ENOMEM;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Failed to enable PCI device\n");
+ pci_set_drvdata(pdev, NULL);
+ return err;
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_err(dev, "PCI request regions failed 0x%x\n", err);
+ goto exit;
+ }
+
+ mcs->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
+ if (!mcs->reg_base) {
+ dev_err(dev, "mcs: Cannot map CSR memory space, aborting\n");
+ err = -ENOMEM;
+ goto exit;
+ }
+
+ pci_set_drvdata(pdev, mcs);
+ mcs->pdev = pdev;
+ mcs->dev = &pdev->dev;
+
+ if (pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_B)
+ mcs->mcs_ops = &cn10kb_mcs_ops;
+ else
+ mcs->mcs_ops = cnf10kb_get_mac_ops();
+
+ /* Set hardware capabilities */
+ mcs->mcs_ops->mcs_set_hw_capabilities(mcs);
+
+ mcs_global_cfg(mcs);
+
+	/* Perform X2P calibration */
+ err = mcs_x2p_calibration(mcs);
+ if (err)
+ goto err_x2p;
+
+ mcs->mcs_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24)
+ & MCS_ID_MASK;
+
+ /* Set mcs tx side resources */
+ err = mcs_alloc_struct_mem(mcs, &mcs->tx);
+ if (err)
+ goto err_x2p;
+
+ /* Set mcs rx side resources */
+ err = mcs_alloc_struct_mem(mcs, &mcs->rx);
+ if (err)
+ goto err_x2p;
+
+ /* per port config */
+ for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++)
+ mcs_lmac_init(mcs, lmac);
+
+ /* Parser configuration */
+ mcs->mcs_ops->mcs_parser_cfg(mcs);
+
+ err = mcs_register_interrupts(mcs);
+ if (err)
+ goto exit;
+
+ list_add(&mcs->mcs_list, &mcs_list);
+ mutex_init(&mcs->stats_lock);
+
+ return 0;
+
+err_x2p:
+ /* Enable external bypass */
+ mcs_set_external_bypass(mcs, true);
+exit:
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ return err;
+}
+
+static void mcs_remove(struct pci_dev *pdev)
+{
+ struct mcs *mcs = pci_get_drvdata(pdev);
+
+ /* Set MCS to external bypass */
+ mcs_set_external_bypass(mcs, true);
+ pci_free_irq_vectors(pdev);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+struct pci_driver mcs_driver = {
+ .name = DRV_NAME,
+ .id_table = mcs_id_table,
+ .probe = mcs_probe,
+ .remove = mcs_remove,
+};
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs.h b/drivers/net/ethernet/marvell/octeontx2/af/mcs.h
new file mode 100644
index 000000000000..64dc2b80e15d
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs.h
@@ -0,0 +1,246 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell CN10K MCS driver
+ *
+ * Copyright (C) 2022 Marvell.
+ */
+
+#ifndef MCS_H
+#define MCS_H
+
+#include <linux/bits.h>
+#include "rvu.h"
+
+#define PCI_DEVID_CN10K_MCS 0xA096
+
+#define MCSX_LINK_LMAC_RANGE_MASK GENMASK_ULL(19, 16)
+#define MCSX_LINK_LMAC_BASE_MASK GENMASK_ULL(11, 0)
+
+#define MCS_ID_MASK 0x7
+#define MCS_MAX_PFS 128
+
+#define MCS_PORT_MODE_MASK 0x3
+#define MCS_PORT_FIFO_SKID_MASK 0x3F
+#define MCS_MAX_CUSTOM_TAGS 0x8
+
+#define MCS_CTRLPKT_ETYPE_RULE_MAX 8
+#define MCS_CTRLPKT_DA_RULE_MAX 8
+#define MCS_CTRLPKT_DA_RANGE_RULE_MAX 4
+#define MCS_CTRLPKT_COMBO_RULE_MAX 4
+#define MCS_CTRLPKT_MAC_RULE_MAX 1
+
+#define MCS_MAX_CTRLPKT_RULES (MCS_CTRLPKT_ETYPE_RULE_MAX + \
+ MCS_CTRLPKT_DA_RULE_MAX + \
+ MCS_CTRLPKT_DA_RANGE_RULE_MAX + \
+ MCS_CTRLPKT_COMBO_RULE_MAX + \
+ MCS_CTRLPKT_MAC_RULE_MAX)
+
+#define MCS_CTRLPKT_ETYPE_RULE_OFFSET 0
+#define MCS_CTRLPKT_DA_RULE_OFFSET 8
+#define MCS_CTRLPKT_DA_RANGE_RULE_OFFSET 16
+#define MCS_CTRLPKT_COMBO_RULE_OFFSET 20
+#define MCS_CTRLPKT_MAC_EN_RULE_OFFSET 24
+
+/* Reserved resources for default bypass entry */
+#define MCS_RSRC_RSVD_CNT 1
+
+/* MCS Interrupt Vector Enumeration */
+enum mcs_int_vec_e {
+ MCS_INT_VEC_MIL_RX_GBL = 0x0,
+ MCS_INT_VEC_MIL_RX_LMACX = 0x1,
+ MCS_INT_VEC_MIL_TX_LMACX = 0x5,
+ MCS_INT_VEC_HIL_RX_GBL = 0x9,
+ MCS_INT_VEC_HIL_RX_LMACX = 0xa,
+ MCS_INT_VEC_HIL_TX_GBL = 0xe,
+ MCS_INT_VEC_HIL_TX_LMACX = 0xf,
+ MCS_INT_VEC_IP = 0x13,
+ MCS_INT_VEC_CNT = 0x14,
+};
+
+#define MCS_MAX_BBE_INT 8ULL
+#define MCS_BBE_INT_MASK 0xFFULL
+
+#define MCS_MAX_PAB_INT 4ULL
+#define MCS_PAB_INT_MASK 0xFULL
+
+#define MCS_BBE_RX_INT_ENA BIT_ULL(0)
+#define MCS_BBE_TX_INT_ENA BIT_ULL(1)
+#define MCS_CPM_RX_INT_ENA BIT_ULL(2)
+#define MCS_CPM_TX_INT_ENA BIT_ULL(3)
+#define MCS_PAB_RX_INT_ENA BIT_ULL(4)
+#define MCS_PAB_TX_INT_ENA BIT_ULL(5)
+
+#define MCS_CPM_TX_INT_PACKET_XPN_EQ0 BIT_ULL(0)
+#define MCS_CPM_TX_INT_PN_THRESH_REACHED BIT_ULL(1)
+#define MCS_CPM_TX_INT_SA_NOT_VALID BIT_ULL(2)
+
+#define MCS_CPM_RX_INT_SECTAG_V_EQ1 BIT_ULL(0)
+#define MCS_CPM_RX_INT_SECTAG_E_EQ0_C_EQ1 BIT_ULL(1)
+#define MCS_CPM_RX_INT_SL_GTE48 BIT_ULL(2)
+#define MCS_CPM_RX_INT_ES_EQ1_SC_EQ1 BIT_ULL(3)
+#define MCS_CPM_RX_INT_SC_EQ1_SCB_EQ1 BIT_ULL(4)
+#define MCS_CPM_RX_INT_PACKET_XPN_EQ0 BIT_ULL(5)
+#define MCS_CPM_RX_INT_PN_THRESH_REACHED BIT_ULL(6)
+
+#define MCS_CPM_RX_INT_ALL (MCS_CPM_RX_INT_SECTAG_V_EQ1 | \
+ MCS_CPM_RX_INT_SECTAG_E_EQ0_C_EQ1 | \
+ MCS_CPM_RX_INT_SL_GTE48 | \
+ MCS_CPM_RX_INT_ES_EQ1_SC_EQ1 | \
+ MCS_CPM_RX_INT_SC_EQ1_SCB_EQ1 | \
+ MCS_CPM_RX_INT_PACKET_XPN_EQ0 | \
+ MCS_CPM_RX_INT_PN_THRESH_REACHED)
+
+struct mcs_pfvf {
+ u64 intr_mask; /* Enabled Interrupt mask */
+};
+
+struct mcs_intr_event {
+ u16 pcifunc;
+ u64 intr_mask;
+ u64 sa_id;
+ u8 mcs_id;
+ u8 lmac_id;
+};
+
+struct mcs_intrq_entry {
+ struct list_head node;
+ struct mcs_intr_event intr_event;
+};
+
+struct secy_mem_map {
+ u8 flow_id;
+ u8 secy;
+ u8 ctrl_pkt;
+ u8 sc;
+ u64 sci;
+};
+
+struct mcs_rsrc_map {
+ u16 *flowid2pf_map;
+ u16 *secy2pf_map;
+ u16 *sc2pf_map;
+ u16 *sa2pf_map;
+	u16 *flowid2secy_map;	/* flowid to secy mapping */
+ u16 *ctrlpktrule2pf_map;
+ struct rsrc_bmap flow_ids;
+ struct rsrc_bmap secy;
+ struct rsrc_bmap sc;
+ struct rsrc_bmap sa;
+ struct rsrc_bmap ctrlpktrule;
+};
+
+struct hwinfo {
+ u8 tcam_entries;
+ u8 secy_entries;
+ u8 sc_entries;
+ u16 sa_entries;
+ u8 mcs_x2p_intf;
+ u8 lmac_cnt;
+ u8 mcs_blks;
+ unsigned long lmac_bmap; /* bitmap of enabled mcs lmac */
+};
+
+struct mcs {
+ void __iomem *reg_base;
+ struct pci_dev *pdev;
+ struct device *dev;
+ struct hwinfo *hw;
+ struct mcs_rsrc_map tx;
+ struct mcs_rsrc_map rx;
+ u16 pf_map[MCS_MAX_PFS]; /* List of PCIFUNC mapped to MCS */
+ u8 mcs_id;
+ struct mcs_ops *mcs_ops;
+ struct list_head mcs_list;
+ /* Lock for mcs stats */
+ struct mutex stats_lock;
+ struct mcs_pfvf *pf;
+ struct mcs_pfvf *vf;
+ u16 num_vec;
+ void *rvu;
+ u16 *tx_sa_active;
+};
+
+struct mcs_ops {
+ void (*mcs_set_hw_capabilities)(struct mcs *mcs);
+ void (*mcs_parser_cfg)(struct mcs *mcs);
+ void (*mcs_tx_sa_mem_map_write)(struct mcs *mcs, struct mcs_tx_sc_sa_map *map);
+ void (*mcs_rx_sa_mem_map_write)(struct mcs *mcs, struct mcs_rx_sc_sa_map *map);
+ void (*mcs_flowid_secy_map)(struct mcs *mcs, struct secy_mem_map *map, int dir);
+};
+
+extern struct pci_driver mcs_driver;
+
+static inline void mcs_reg_write(struct mcs *mcs, u64 offset, u64 val)
+{
+ writeq(val, mcs->reg_base + offset);
+}
+
+static inline u64 mcs_reg_read(struct mcs *mcs, u64 offset)
+{
+ return readq(mcs->reg_base + offset);
+}
+
+/* MCS APIs */
+struct mcs *mcs_get_pdata(int mcs_id);
+int mcs_get_blkcnt(void);
+int mcs_set_lmac_channels(int mcs_id, u16 base);
+int mcs_alloc_rsrc(struct rsrc_bmap *rsrc, u16 *pf_map, u16 pcifunc);
+int mcs_free_rsrc(struct rsrc_bmap *rsrc, u16 *pf_map, int rsrc_id, u16 pcifunc);
+int mcs_alloc_all_rsrc(struct mcs *mcs, u8 *flowid, u8 *secy_id,
+ u8 *sc_id, u8 *sa1_id, u8 *sa2_id, u16 pcifunc, int dir);
+int mcs_free_all_rsrc(struct mcs *mcs, int dir, u16 pcifunc);
+void mcs_clear_secy_plcy(struct mcs *mcs, int secy_id, int dir);
+void mcs_ena_dis_flowid_entry(struct mcs *mcs, int id, int dir, int ena);
+void mcs_ena_dis_sc_cam_entry(struct mcs *mcs, int id, int ena);
+void mcs_flowid_entry_write(struct mcs *mcs, u64 *data, u64 *mask, int id, int dir);
+void mcs_secy_plcy_write(struct mcs *mcs, u64 plcy, int id, int dir);
+void mcs_rx_sc_cam_write(struct mcs *mcs, u64 sci, u64 secy, int sc_id);
+void mcs_sa_plcy_write(struct mcs *mcs, u64 *plcy, int sa, int dir);
+void mcs_map_sc_to_sa(struct mcs *mcs, u64 *sa_map, int sc, int dir);
+void mcs_pn_table_write(struct mcs *mcs, u8 pn_id, u64 next_pn, u8 dir);
+void mcs_tx_sa_mem_map_write(struct mcs *mcs, struct mcs_tx_sc_sa_map *map);
+void mcs_flowid_secy_map(struct mcs *mcs, struct secy_mem_map *map, int dir);
+void mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *map);
+void mcs_pn_threshold_set(struct mcs *mcs, struct mcs_set_pn_threshold *pn);
+int mcs_install_flowid_bypass_entry(struct mcs *mcs);
+void mcs_set_lmac_mode(struct mcs *mcs, int lmac_id, u8 mode);
+void mcs_reset_port(struct mcs *mcs, u8 port_id, u8 reset);
+void mcs_set_port_cfg(struct mcs *mcs, struct mcs_port_cfg_set_req *req);
+void mcs_get_port_cfg(struct mcs *mcs, struct mcs_port_cfg_get_req *req,
+ struct mcs_port_cfg_get_rsp *rsp);
+void mcs_get_custom_tag_cfg(struct mcs *mcs, struct mcs_custom_tag_cfg_get_req *req,
+ struct mcs_custom_tag_cfg_get_rsp *rsp);
+int mcs_alloc_ctrlpktrule(struct rsrc_bmap *rsrc, u16 *pf_map, u16 offset, u16 pcifunc);
+int mcs_free_ctrlpktrule(struct mcs *mcs, struct mcs_free_ctrl_pkt_rule_req *req);
+int mcs_ctrlpktrule_write(struct mcs *mcs, struct mcs_ctrl_pkt_rule_write_req *req);
+
+/* CN10K-B APIs */
+void cn10kb_mcs_set_hw_capabilities(struct mcs *mcs);
+void cn10kb_mcs_tx_sa_mem_map_write(struct mcs *mcs, struct mcs_tx_sc_sa_map *map);
+void cn10kb_mcs_flowid_secy_map(struct mcs *mcs, struct secy_mem_map *map, int dir);
+void cn10kb_mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *map);
+void cn10kb_mcs_parser_cfg(struct mcs *mcs);
+
+/* CNF10K-B APIs */
+struct mcs_ops *cnf10kb_get_mac_ops(void);
+void cnf10kb_mcs_set_hw_capabilities(struct mcs *mcs);
+void cnf10kb_mcs_tx_sa_mem_map_write(struct mcs *mcs, struct mcs_tx_sc_sa_map *map);
+void cnf10kb_mcs_flowid_secy_map(struct mcs *mcs, struct secy_mem_map *map, int dir);
+void cnf10kb_mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *map);
+void cnf10kb_mcs_parser_cfg(struct mcs *mcs);
+void cnf10kb_mcs_tx_pn_thresh_reached_handler(struct mcs *mcs);
+void cnf10kb_mcs_tx_pn_wrapped_handler(struct mcs *mcs);
+
+/* Stats APIs */
+void mcs_get_sc_stats(struct mcs *mcs, struct mcs_sc_stats *stats, int id, int dir);
+void mcs_get_sa_stats(struct mcs *mcs, struct mcs_sa_stats *stats, int id, int dir);
+void mcs_get_port_stats(struct mcs *mcs, struct mcs_port_stats *stats, int id, int dir);
+void mcs_get_flowid_stats(struct mcs *mcs, struct mcs_flowid_stats *stats, int id, int dir);
+void mcs_get_rx_secy_stats(struct mcs *mcs, struct mcs_secy_stats *stats, int id);
+void mcs_get_tx_secy_stats(struct mcs *mcs, struct mcs_secy_stats *stats, int id);
+void mcs_clear_stats(struct mcs *mcs, u8 type, u8 id, int dir);
+int mcs_clear_all_stats(struct mcs *mcs, u16 pcifunc, int dir);
+int mcs_set_force_clk_en(struct mcs *mcs, bool set);
+
+int mcs_add_intr_wq_entry(struct mcs *mcs, struct mcs_intr_event *event);
+
+#endif /* MCS_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs_cnf10kb.c b/drivers/net/ethernet/marvell/octeontx2/af/mcs_cnf10kb.c
new file mode 100644
index 000000000000..7b6205414428
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs_cnf10kb.c
@@ -0,0 +1,214 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell MCS driver
+ *
+ * Copyright (C) 2022 Marvell.
+ */
+
+#include "mcs.h"
+#include "mcs_reg.h"
+
+static struct mcs_ops cnf10kb_mcs_ops = {
+ .mcs_set_hw_capabilities = cnf10kb_mcs_set_hw_capabilities,
+ .mcs_parser_cfg = cnf10kb_mcs_parser_cfg,
+ .mcs_tx_sa_mem_map_write = cnf10kb_mcs_tx_sa_mem_map_write,
+ .mcs_rx_sa_mem_map_write = cnf10kb_mcs_rx_sa_mem_map_write,
+ .mcs_flowid_secy_map = cnf10kb_mcs_flowid_secy_map,
+};
+
+struct mcs_ops *cnf10kb_get_mac_ops(void)
+{
+ return &cnf10kb_mcs_ops;
+}
+
+void cnf10kb_mcs_set_hw_capabilities(struct mcs *mcs)
+{
+ struct hwinfo *hw = mcs->hw;
+
+ hw->tcam_entries = 64; /* TCAM entries */
+ hw->secy_entries = 64; /* SecY entries */
+ hw->sc_entries = 64; /* SC CAM entries */
+ hw->sa_entries = 128; /* SA entries */
+ hw->lmac_cnt = 4; /* lmacs/ports per mcs block */
+	hw->mcs_x2p_intf = 1;		/* x2p calibration intf */
+ hw->mcs_blks = 7; /* MCS blocks */
+}
+
+void cnf10kb_mcs_parser_cfg(struct mcs *mcs)
+{
+ u64 reg, val;
+
+ /* VLAN Ctag */
+ val = (0x8100ull & 0xFFFF) | BIT_ULL(20) | BIT_ULL(22);
+
+ reg = MCSX_PEX_RX_SLAVE_CUSTOM_TAGX(0);
+ mcs_reg_write(mcs, reg, val);
+
+ reg = MCSX_PEX_TX_SLAVE_CUSTOM_TAGX(0);
+ mcs_reg_write(mcs, reg, val);
+
+ /* VLAN STag */
+ val = (0x88a8ull & 0xFFFF) | BIT_ULL(20) | BIT_ULL(23);
+
+ /* RX */
+ reg = MCSX_PEX_RX_SLAVE_CUSTOM_TAGX(1);
+ mcs_reg_write(mcs, reg, val);
+
+ /* TX */
+ reg = MCSX_PEX_TX_SLAVE_CUSTOM_TAGX(1);
+ mcs_reg_write(mcs, reg, val);
+
+	/* Enable custom tags 0 and 1 and sectag */
+ val = BIT_ULL(0) | BIT_ULL(1) | BIT_ULL(12);
+
+ reg = MCSX_PEX_RX_SLAVE_ETYPE_ENABLE;
+ mcs_reg_write(mcs, reg, val);
+
+ reg = MCSX_PEX_TX_SLAVE_ETYPE_ENABLE;
+ mcs_reg_write(mcs, reg, val);
+}
+
+void cnf10kb_mcs_flowid_secy_map(struct mcs *mcs, struct secy_mem_map *map, int dir)
+{
+ u64 reg, val;
+
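+	/* SECY_MAP_MEM layout: bits 5:0 = secy, bit 6 = ctrl_pkt, bits 12:7 = sc (TX only) */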
+ val = (map->secy & 0x3F) | (map->ctrl_pkt & 0x1) << 6;
+ if (dir == MCS_RX) {
+ reg = MCSX_CPM_RX_SLAVE_SECY_MAP_MEMX(map->flow_id);
+ } else {
+ reg = MCSX_CPM_TX_SLAVE_SECY_MAP_MEM_0X(map->flow_id);
+ mcs_reg_write(mcs, reg, map->sci);
+ val |= (map->sc & 0x3F) << 7;
+ reg = MCSX_CPM_TX_SLAVE_SECY_MAP_MEM_1X(map->flow_id);
+ }
+
+ mcs_reg_write(mcs, reg, val);
+}
+
+void cnf10kb_mcs_tx_sa_mem_map_write(struct mcs *mcs, struct mcs_tx_sc_sa_map *map)
+{
+ u64 reg, val;
+
+ val = (map->sa_index0 & 0x7F) | (map->sa_index1 & 0x7F) << 7;
+
+ reg = MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(map->sc_id);
+ mcs_reg_write(mcs, reg, val);
+
+ reg = MCSX_CPM_TX_SLAVE_AUTO_REKEY_ENABLE_0;
+ val = mcs_reg_read(mcs, reg);
+
+ if (map->rekey_ena)
+ val |= BIT_ULL(map->sc_id);
+ else
+ val &= ~BIT_ULL(map->sc_id);
+
+ mcs_reg_write(mcs, reg, val);
+
+ mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_SA_INDEX0_VLDX(map->sc_id), map->sa_index0_vld);
+ mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_SA_INDEX1_VLDX(map->sc_id), map->sa_index1_vld);
+
+ mcs_reg_write(mcs, MCSX_CPM_TX_SLAVE_TX_SA_ACTIVEX(map->sc_id), map->tx_sa_active);
+}
+
+void cnf10kb_mcs_rx_sa_mem_map_write(struct mcs *mcs, struct mcs_rx_sc_sa_map *map)
+{
+ u64 val, reg;
+
+ val = (map->sa_index & 0x7F) | (map->sa_in_use << 7);
+
+ reg = MCSX_CPM_RX_SLAVE_SA_MAP_MEMX((4 * map->sc_id) + map->an);
+ mcs_reg_write(mcs, reg, val);
+}
+
+int mcs_set_force_clk_en(struct mcs *mcs, bool set)
+{
+ unsigned long timeout = jiffies + usecs_to_jiffies(2000);
+ u64 val;
+
+ val = mcs_reg_read(mcs, MCSX_MIL_GLOBAL);
+
+ if (set) {
+ val |= BIT_ULL(4);
+ mcs_reg_write(mcs, MCSX_MIL_GLOBAL, val);
+
+ /* Poll till mcsx_mil_ip_gbl_status.mcs_ip_stats_ready value is 1 */
+ while (!(mcs_reg_read(mcs, MCSX_MIL_IP_GBL_STATUS) & BIT_ULL(0))) {
+ if (time_after(jiffies, timeout)) {
+ dev_err(mcs->dev, "MCS set force clk enable failed\n");
+ break;
+ }
+ }
+ } else {
+ val &= ~BIT_ULL(4);
+ mcs_reg_write(mcs, MCSX_MIL_GLOBAL, val);
+ }
+
+ return 0;
+}
+
+/* TX SA interrupt is raised only if autorekey is enabled.
+ * MCS_CPM_TX_SLAVE_SA_MAP_MEM_0X[sc].tx_sa_active bit gets toggled when
+ * one of the two SAs mapped to the SC expires. tx_sa_active=0 implies the
+ * SA in SA_index1 expired, else the SA in SA_index0 expired.
+ */
+void cnf10kb_mcs_tx_pn_thresh_reached_handler(struct mcs *mcs)
+{
+ struct mcs_intr_event event;
+ struct rsrc_bmap *sc_bmap;
+ unsigned long rekey_ena;
+ u64 val, sa_status;
+ int sc;
+
+ sc_bmap = &mcs->tx.sc;
+
+ event.mcs_id = mcs->mcs_id;
+ event.intr_mask = MCS_CPM_TX_PN_THRESH_REACHED_INT;
+
+ rekey_ena = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_AUTO_REKEY_ENABLE_0);
+
+ for_each_set_bit(sc, sc_bmap->bmap, mcs->hw->sc_entries) {
+		/* Skip SCs on which auto rekey is not enabled */
+ if (!test_bit(sc, &rekey_ena))
+ continue;
+ sa_status = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_TX_SA_ACTIVEX(sc));
+ /* Check if tx_sa_active status had changed */
+ if (sa_status == mcs->tx_sa_active[sc])
+ continue;
+
+		/* If tx_sa_active is set, SA_index0 expired; else SA_index1 */
+ val = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(sc));
+ if (sa_status)
+ event.sa_id = val & 0x7F;
+ else
+ event.sa_id = (val >> 7) & 0x7F;
+
+ event.pcifunc = mcs->tx.sa2pf_map[event.sa_id];
+ mcs_add_intr_wq_entry(mcs, &event);
+ }
+}
+
+void cnf10kb_mcs_tx_pn_wrapped_handler(struct mcs *mcs)
+{
+ struct mcs_intr_event event = { 0 };
+ struct rsrc_bmap *sc_bmap;
+ u64 val;
+ int sc;
+
+ sc_bmap = &mcs->tx.sc;
+
+ event.mcs_id = mcs->mcs_id;
+ event.intr_mask = MCS_CPM_TX_PACKET_XPN_EQ0_INT;
+
+ for_each_set_bit(sc, sc_bmap->bmap, mcs->hw->sc_entries) {
+ val = mcs_reg_read(mcs, MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(sc));
+
+ if (mcs->tx_sa_active[sc])
+ /* SA_index1 was used and got expired */
+ event.sa_id = (val >> 7) & 0x7F;
+ else
+ /* SA_index0 was used and got expired */
+ event.sa_id = val & 0x7F;
+
+ event.pcifunc = mcs->tx.sa2pf_map[event.sa_id];
+ mcs_add_intr_wq_entry(mcs, &event);
+ }
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/mcs_reg.h
new file mode 100644
index 000000000000..c95a8b8f5eaf
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs_reg.h
@@ -0,0 +1,1102 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell MCS driver
+ *
+ * Copyright (C) 2022 Marvell.
+ */
+
+#ifndef MCS_REG_H
+#define MCS_REG_H
+
+#include <linux/bits.h>
+
+/* Registers */
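+/* Many register offsets differ between CN10K-B (single MCS block) and
+ * CNF10K-B (mcs_blks > 1). The macros below use statement expressions that
+ * pick the right offset from a local 'struct mcs *mcs' pointer, so they can
+ * only be used where such a variable is in scope.
+ */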
+#define MCSX_IP_MODE 0x900c8ull
+#define MCSX_MCS_TOP_SLAVE_PORT_RESET(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x408ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xa28ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_MCS_TOP_SLAVE_CHANNEL_CFG(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x808ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xa68ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_MIL_GLOBAL ({ \
+ u64 offset; \
+ \
+ offset = 0x80000ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x60000ull; \
+ offset; })
+
+#define MCSX_MIL_RX_LMACX_CFG(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x900a8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x700a8ull; \
+ offset += (a) * 0x800ull; \
+ offset; })
+
+#define MCSX_HIL_GLOBAL ({ \
+ u64 offset; \
+ \
+ offset = 0xc0000ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xa0000ull; \
+ offset; })
+
+#define MCSX_LINK_LMACX_CFG(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x90000ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x70000ull; \
+ offset += (a) * 0x800ull; \
+ offset; })
+
+#define MCSX_MIL_RX_GBL_STATUS ({ \
+ u64 offset; \
+ \
+ offset = 0x800c8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x600c8ull; \
+ offset; })
+
+#define MCSX_MIL_IP_GBL_STATUS ({ \
+ u64 offset; \
+ \
+ offset = 0x800d0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x600d0ull; \
+ offset; })
+
+/* PAB */
+#define MCSX_PAB_RX_SLAVE_PORT_CFGX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x1718ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x280ull; \
+ offset += (a) * 0x40ull; \
+ offset; })
+
+#define MCSX_PAB_TX_SLAVE_PORT_CFGX(a) (0x2930ull + (a) * 0x40ull)
+
+/* PEX registers */
+#define MCSX_PEX_RX_SLAVE_VLAN_CFGX(a) (0x3b58ull + (a) * 0x8ull)
+#define MCSX_PEX_TX_SLAVE_VLAN_CFGX(a) (0x46f8ull + (a) * 0x8ull)
+#define MCSX_PEX_TX_SLAVE_CUSTOM_TAG_REL_MODE_SEL(a) (0x788ull + (a) * 0x8ull)
+#define MCSX_PEX_TX_SLAVE_PORT_CONFIG(a) (0x4738ull + (a) * 0x8ull)
+#define MCSX_PEX_RX_SLAVE_RULE_ETYPE_CFGX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x3fc0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x558ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_RX_SLAVE_RULE_DAX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4000ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x598ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_RX_SLAVE_RULE_DA_RANGE_MINX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4040ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x5d8ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_RX_SLAVE_RULE_DA_RANGE_MAXX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4048ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x5e0ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_RX_SLAVE_RULE_COMBO_MINX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4080ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x648ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_RX_SLAVE_RULE_COMBO_MAXX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4088ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x650ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_RX_SLAVE_RULE_COMBO_ETX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4090ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x658ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_RX_SLAVE_RULE_MAC ({ \
+ u64 offset; \
+ \
+ offset = 0x40e0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x6d8ull; \
+ offset; })
+
+#define MCSX_PEX_RX_SLAVE_RULE_ENABLE ({ \
+ u64 offset; \
+ \
+ offset = 0x40e8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x6e0ull; \
+ offset; })
+
+#define MCSX_PEX_TX_SLAVE_RULE_ETYPE_CFGX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4b60ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x7d8ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_TX_SLAVE_RULE_DAX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4ba0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x818ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_TX_SLAVE_RULE_DA_RANGE_MINX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4be0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x858ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_TX_SLAVE_RULE_DA_RANGE_MAXX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4be8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x860ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_TX_SLAVE_RULE_COMBO_MINX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4c20ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x8c8ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_TX_SLAVE_RULE_COMBO_MAXX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4c28ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x8d0ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_TX_SLAVE_RULE_COMBO_ETX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x4c30ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x8d8ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_PEX_TX_SLAVE_RULE_MAC ({ \
+ u64 offset; \
+ \
+ offset = 0x4c80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x958ull; \
+ offset; })
+
+#define MCSX_PEX_TX_SLAVE_RULE_ENABLE ({ \
+ u64 offset; \
+ \
+ offset = 0x4c88ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x960ull; \
+ offset; })
+
+#define MCSX_PEX_RX_SLAVE_PEX_CONFIGURATION ({ \
+ u64 offset; \
+ \
+ offset = 0x3b50ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x4c0ull; \
+ offset; })
+
+/* CNF10K-B */
+#define MCSX_PEX_RX_SLAVE_CUSTOM_TAGX(a) (0x4c8ull + (a) * 0x8ull)
+#define MCSX_PEX_TX_SLAVE_CUSTOM_TAGX(a) (0x748ull + (a) * 0x8ull)
+#define MCSX_PEX_RX_SLAVE_ETYPE_ENABLE 0x6e8ull
+#define MCSX_PEX_TX_SLAVE_ETYPE_ENABLE 0x968ull
+
+/* BBE */
+#define MCSX_BBE_RX_SLAVE_PADDING_CTL 0xe08ull
+#define MCSX_BBE_TX_SLAVE_PADDING_CTL 0x12f8ull
+#define MCSX_BBE_RX_SLAVE_CAL_ENTRY 0x180ull
+#define MCSX_BBE_RX_SLAVE_CAL_LEN 0x188ull
+#define MCSX_PAB_RX_SLAVE_FIFO_SKID_CFGX(a) (0x290ull + (a) * 0x40ull)
+
+#define MCSX_BBE_RX_SLAVE_BBE_INT ({ \
+ u64 offset; \
+ \
+ offset = 0xe00ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x160ull; \
+ offset; })
+
+#define MCSX_BBE_RX_SLAVE_BBE_INT_ENB ({ \
+ u64 offset; \
+ \
+ offset = 0xe08ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x168ull; \
+ offset; })
+
+#define MCSX_BBE_RX_SLAVE_BBE_INT_INTR_RW ({ \
+ u64 offset; \
+ \
+ offset = 0xe08ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x178ull; \
+ offset; })
+
+#define MCSX_BBE_TX_SLAVE_BBE_INT ({ \
+ u64 offset; \
+ \
+ offset = 0x1278ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x1e0ull; \
+ offset; })
+
+#define MCSX_BBE_TX_SLAVE_BBE_INT_INTR_RW ({ \
+ u64 offset; \
+ \
+ offset = 0x1278ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x1f8ull; \
+ offset; })
+
+#define MCSX_BBE_TX_SLAVE_BBE_INT_ENB ({ \
+ u64 offset; \
+ \
+ offset = 0x1280ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x1e8ull; \
+ offset; })
+
+#define MCSX_PAB_RX_SLAVE_PAB_INT ({ \
+ u64 offset; \
+ \
+ offset = 0x16f0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x260ull; \
+ offset; })
+
+#define MCSX_PAB_RX_SLAVE_PAB_INT_ENB ({ \
+ u64 offset; \
+ \
+ offset = 0x16f8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x268ull; \
+ offset; })
+
+#define MCSX_PAB_RX_SLAVE_PAB_INT_INTR_RW ({ \
+ u64 offset; \
+ \
+ offset = 0x16f8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x278ull; \
+ offset; })
+
+#define MCSX_PAB_TX_SLAVE_PAB_INT ({ \
+ u64 offset; \
+ \
+ offset = 0x2908ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x380ull; \
+ offset; })
+
+#define MCSX_PAB_TX_SLAVE_PAB_INT_ENB ({ \
+ u64 offset; \
+ \
+ offset = 0x2910ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x388ull; \
+ offset; })
+
+#define MCSX_PAB_TX_SLAVE_PAB_INT_INTR_RW ({ \
+ u64 offset; \
+ \
+ offset = 0x16f8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x398ull; \
+ offset; })
+
+/* CPM registers */
+#define MCSX_CPM_RX_SLAVE_FLOWID_TCAM_DATAX(a, b) ({ \
+ u64 offset; \
+ \
+ offset = 0x30740ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x3bf8ull; \
+ offset += (a) * 0x8ull + (b) * 0x20ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_FLOWID_TCAM_MASKX(a, b) ({ \
+ u64 offset; \
+ \
+ offset = 0x34740ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x43f8ull; \
+ offset += (a) * 0x8ull + (b) * 0x20ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_FLOWID_TCAM_ENA_0 ({ \
+ u64 offset; \
+ \
+ offset = 0x30700ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x3bd8ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_SC_CAMX(a, b) ({ \
+ u64 offset; \
+ \
+ offset = 0x38780ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x4c08ull; \
+ offset += (a) * 0x8ull + (b) * 0x10ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_SC_CAM_ENA(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x38740ull + (a) * 0x8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x4bf8ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_SECY_MAP_MEMX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x23ee0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xbd0ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_SECY_PLCY_MEM_0X(a) ({ \
+ u64 offset; \
+ \
+ offset = (0x246e0ull + (a) * 0x10ull); \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = (0xdd0ull + (a) * 0x8ull); \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_SA_KEY_LOCKOUTX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x23E90ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xbb0ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_SA_MAP_MEMX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x256e0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xfd0ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_SA_PLCY_MEMX(a, b) ({ \
+ u64 offset; \
+ \
+ offset = 0x27700ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x17d8ull; \
+ offset += (a) * 0x8ull + (b) * 0x40ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_SA_PN_TABLE_MEMX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x2f700ull; \
+ if (mcs->hw->mcs_blks > 1) \
+		offset = 0x37d8ull;	\
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_XPN_THRESHOLD ({ \
+ u64 offset; \
+ \
+ offset = 0x23e40ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xb90ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_PN_THRESHOLD ({ \
+ u64 offset; \
+ \
+ offset = 0x23e48ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xb98ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_PN_THRESH_REACHEDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x23e50ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xba0ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_FLOWID_TCAM_ENA_1 0x30708ull
+#define MCSX_CPM_RX_SLAVE_SECY_PLCY_MEM_1X(a) (0x246e8ull + (a) * 0x10ull)
+
+/* TX registers */
+#define MCSX_CPM_TX_SLAVE_FLOWID_TCAM_DATAX(a, b) ({ \
+ u64 offset; \
+ \
+ offset = 0x51d50ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xa7c0ull; \
+ offset += (a) * 0x8ull + (b) * 0x20ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_FLOWID_TCAM_MASKX(a, b) ({ \
+ u64 offset; \
+ \
+ offset = 0x55d50ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xafc0ull; \
+ offset += (a) * 0x8ull + (b) * 0x20ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_FLOWID_TCAM_ENA_0 ({ \
+ u64 offset; \
+ \
+ offset = 0x51d10ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xa7a0ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_SECY_MAP_MEM_0X(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x3e508ull + (a) * 0x8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x5550ull + (a) * 0x10ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_SECY_PLCY_MEMX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x3ed08ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x5950ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_SA_KEY_LOCKOUTX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x3e4c0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x5538ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_SA_MAP_MEM_0X(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x3fd10ull + (a) * 0x10ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x6150ull + (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_SA_PLCY_MEMX(a, b) ({ \
+ u64 offset; \
+ \
+ offset = 0x40d10ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x63a0ull; \
+ offset += (a) * 0x8ull + (b) * 0x80ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_SA_PN_TABLE_MEMX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x50d10ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xa3a0ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_XPN_THRESHOLD ({ \
+ u64 offset; \
+ \
+ offset = 0x3e4b0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x5528ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_PN_THRESHOLD ({ \
+ u64 offset; \
+ \
+ offset = 0x3e4b8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x5530ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_SA_MAP_MEM_1X(a) (0x3fd18ull + (a) * 0x10ull)
+#define MCSX_CPM_TX_SLAVE_SECY_MAP_MEM_1X(a) (0x5558ull + (a) * 0x10ull)
+#define MCSX_CPM_TX_SLAVE_FLOWID_TCAM_ENA_1 0x51d18ull
+#define MCSX_CPM_TX_SLAVE_TX_SA_ACTIVEX(a) (0x5b50 + (a) * 0x8ull)
+#define MCSX_CPM_TX_SLAVE_SA_INDEX0_VLDX(a) (0x5d50 + (a) * 0x8ull)
+#define MCSX_CPM_TX_SLAVE_SA_INDEX1_VLDX(a) (0x5f50 + (a) * 0x8ull)
+#define MCSX_CPM_TX_SLAVE_AUTO_REKEY_ENABLE_0 0x5500ull
+
+/* CSE */
+#define MCSX_CSE_RX_MEM_SLAVE_IFINCTLBCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x9e80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xc218ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_IFINCTLMCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x9680ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xc018ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_IFINCTLOCTETSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x6e80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xbc18ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_IFINCTLUCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x8e80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xbe18ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLBCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x8680ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xca18ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLMCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x7e80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xc818ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLOCTETSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x6680ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xc418ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_IFINUNCTLUCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x7680ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xc618ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INOCTETSSECYDECRYPTEDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x5e80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xdc18ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INOCTETSSECYVALIDATEX(a)({ \
+ u64 offset; \
+ \
+ offset = 0x5680ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xda18ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSCTRLPORTDISABLEDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0xd680ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xce18ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSFLOWIDTCAMHITX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x16a80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xec78ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSFLOWIDTCAMMISSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x16680ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xec38ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSPARSEERRX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x16880ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xec18ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCCAMHITX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0xfe80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xde18ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCINVALIDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x10680ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xe418ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCNOTVALIDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x10e80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xe218ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYBADTAGX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0xae80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xd418ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYNOSAX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0xc680ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xd618ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYNOSAERRORX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0xce80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xd818ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYTAGGEDCTLX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0xbe80ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xcc18ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_SLAVE_CTRL ({ \
+ u64 offset; \
+ \
+ offset = 0x52a0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x9c0ull; \
+ offset; })
+
+#define MCSX_CSE_RX_SLAVE_STATS_CLEAR ({ \
+ u64 offset; \
+ \
+ offset = 0x52b8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x9d8ull; \
+ offset; })
+
+#define MCSX_CSE_RX_MEM_SLAVE_INOCTETSSCDECRYPTEDX(a) (0xe680ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INOCTETSSCVALIDATEX(a) (0xde80ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYUNTAGGEDORNOTAGX(a) (0xa680ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYNOTAGX(a) (0xd218 + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYUNTAGGEDX(a) (0xd018ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCUNCHECKEDOROKX(a) (0xee80ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSECYCTLX(a) (0xb680ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCLATEORDELAYEDX(a) (0xf680ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSAINVALIDX(a) (0x12680ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSANOTUSINGSAERRORX(a) (0x15680ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSANOTVALIDX(a) (0x13680ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSAOKX(a) (0x11680ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSAUNUSEDSAX(a) (0x14680ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSEARLYPREEMPTERRX(a) (0xec58ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCOKX(a) (0xea18ull + (a) * 0x8ull)
+#define MCSX_CSE_RX_MEM_SLAVE_INPKTSSCDELAYEDX(a) (0xe618ull + (a) * 0x8ull)
+
+/* CSE TX */
+#define MCSX_CSE_TX_MEM_SLAVE_IFOUTCOMMONOCTETSX(a) (0x18440ull + (a) * 0x8ull)
+#define MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLBCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x1c440ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xf478ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLMCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x1bc40ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xf278ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLOCTETSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x19440ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xee78ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_IFOUTCTLUCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x1b440ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xf078ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLBCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x1ac40ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xfc78ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLMCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x1a440ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xfa78ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLOCTETSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x18c40ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xf678ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_IFOUTUNCTLUCPKTSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x19c40ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xf878ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSECYENCRYPTEDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x17c40ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x10878ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSECYPROTECTEDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x17440ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x10678ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSCTRLPORTDISABLEDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x1e440ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xfe78ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSFLOWIDTCAMHITX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x23240ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x10ed8ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSFLOWIDTCAMMISSX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x22c40ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x10e98ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSPARSEERRX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x22e40ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x10e78ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSCENCRYPTEDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x20440ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x10c78ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSCPROTECTEDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x1fc40ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x10a78ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECTAGINSERTIONERRX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x23040ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x110d8ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECYNOACTIVESAX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x1dc40ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x10278ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECYTOOLONGX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x1d440ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x10478ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSECYUNTAGGEDX(a) ({ \
+ u64 offset; \
+ \
+ offset = 0x1cc40ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x10078ull; \
+ offset += (a) * 0x8ull; \
+ offset; })
+
+#define MCSX_CSE_TX_SLAVE_CTRL ({ \
+ u64 offset; \
+ \
+ offset = 0x54a0ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xa00ull; \
+ offset; })
+
+#define MCSX_CSE_TX_SLAVE_STATS_CLEAR ({ \
+ u64 offset; \
+ \
+ offset = 0x54b8ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xa18ull; \
+ offset; })
+
+#define MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSCENCRYPTEDX(a) (0x1f440ull + (a) * 0x8ull)
+#define MCSX_CSE_TX_MEM_SLAVE_OUTOCTETSSCPROTECTEDX(a) (0x1ec40ull + (a) * 0x8ull)
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSEARLYPREEMPTERRX(a) (0x10eb8ull + (a) * 0x8ull)
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSAENCRYPTEDX(a) (0x21c40ull + (a) * 0x8ull)
+#define MCSX_CSE_TX_MEM_SLAVE_OUTPKTSSAPROTECTEDX(a) (0x20c40ull + (a) * 0x8ull)
+
+#define MCSX_IP_INT ({ \
+ u64 offset; \
+ \
+ offset = 0x80028ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x60028ull; \
+ offset; })
+
+#define MCSX_IP_INT_ENA_W1S ({ \
+ u64 offset; \
+ \
+ offset = 0x80040ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x60040ull; \
+ offset; })
+
+#define MCSX_IP_INT_ENA_W1C ({ \
+ u64 offset; \
+ \
+ offset = 0x80038ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x60038ull; \
+ offset; })
+
+#define MCSX_TOP_SLAVE_INT_SUM ({ \
+ u64 offset; \
+ \
+ offset = 0xc20ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xab8ull; \
+ offset; })
+
+#define MCSX_TOP_SLAVE_INT_SUM_ENB ({ \
+ u64 offset; \
+ \
+ offset = 0xc28ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xac0ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_RX_INT ({ \
+ u64 offset; \
+ \
+ offset = 0x23c00ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x0ad8ull; \
+ offset; })
+
+#define MCSX_CPM_RX_SLAVE_RX_INT_ENB ({ \
+ u64 offset; \
+ \
+ offset = 0x23c08ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0xae0ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_TX_INT ({ \
+ u64 offset; \
+ \
+ offset = 0x3d490ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x54a0ull; \
+ offset; })
+
+#define MCSX_CPM_TX_SLAVE_TX_INT_ENB ({ \
+ u64 offset; \
+ \
+ offset = 0x3d498ull; \
+ if (mcs->hw->mcs_blks > 1) \
+ offset = 0x54a8ull; \
+ offset; })
+
+#endif
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
new file mode 100644
index 000000000000..fa8029a94068
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
@@ -0,0 +1,889 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell CN10K MCS driver
+ *
+ * Copyright (C) 2022 Marvell.
+ */
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+#include "mcs.h"
+#include "rvu.h"
+#include "lmac_common.h"
+
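+/* Generate otx2_mbox_alloc_msg_<fn_name>() helpers for each AF-to-PF MCS mailbox message */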
+#define M(_name, _id, _fn_name, _req_type, _rsp_type) \
+static struct _req_type __maybe_unused \
+*otx2_mbox_alloc_msg_ ## _fn_name(struct rvu *rvu, int devid) \
+{ \
+ struct _req_type *req; \
+ \
+ req = (struct _req_type *)otx2_mbox_alloc_msg_rsp( \
+ &rvu->afpf_wq_info.mbox_up, devid, sizeof(struct _req_type), \
+ sizeof(struct _rsp_type)); \
+ if (!req) \
+ return NULL; \
+ req->hdr.sig = OTX2_MBOX_REQ_SIG; \
+ req->hdr.id = _id; \
+ return req; \
+}
+
+MBOX_UP_MCS_MESSAGES
+#undef M
+
+int rvu_mbox_handler_mcs_set_lmac_mode(struct rvu *rvu,
+ struct mcs_set_lmac_mode *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ if (BIT_ULL(req->lmac_id) & mcs->hw->lmac_bmap)
+ mcs_set_lmac_mode(mcs, req->lmac_id, req->mode);
+
+ return 0;
+}
+
+int mcs_add_intr_wq_entry(struct mcs *mcs, struct mcs_intr_event *event)
+{
+ struct mcs_intrq_entry *qentry;
+ u16 pcifunc = event->pcifunc;
+ struct rvu *rvu = mcs->rvu;
+ struct mcs_pfvf *pfvf;
+
+ /* Check if it is PF or VF */
+ if (pcifunc & RVU_PFVF_FUNC_MASK)
+ pfvf = &mcs->vf[rvu_get_hwvf(rvu, pcifunc)];
+ else
+ pfvf = &mcs->pf[rvu_get_pf(pcifunc)];
+
+ event->intr_mask &= pfvf->intr_mask;
+
+ /* Check PF/VF interrupt notification is enabled */
+ if (!(pfvf->intr_mask && event->intr_mask))
+ return 0;
+
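+	/* May be called from hard IRQ context, so the allocation must not sleep */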
+ qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC);
+ if (!qentry)
+ return -ENOMEM;
+
+ qentry->intr_event = *event;
+ spin_lock(&rvu->mcs_intrq_lock);
+ list_add_tail(&qentry->node, &rvu->mcs_intrq_head);
+ spin_unlock(&rvu->mcs_intrq_lock);
+ queue_work(rvu->mcs_intr_wq, &rvu->mcs_intr_work);
+
+ return 0;
+}
+
+static int mcs_notify_pfvf(struct mcs_intr_event *event, struct rvu *rvu)
+{
+ struct mcs_intr_info *req;
+ int err, pf;
+
+ pf = rvu_get_pf(event->pcifunc);
+
+ req = otx2_mbox_alloc_msg_mcs_intr_notify(rvu, pf);
+ if (!req)
+ return -ENOMEM;
+
+ req->mcs_id = event->mcs_id;
+ req->intr_mask = event->intr_mask;
+ req->sa_id = event->sa_id;
+ req->hdr.pcifunc = event->pcifunc;
+ req->lmac_id = event->lmac_id;
+
+ otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, pf);
+ err = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pf);
+ if (err)
+ dev_warn(rvu->dev, "MCS notification to pf %d failed\n", pf);
+
+ return 0;
+}
+
+static void mcs_intr_handler_task(struct work_struct *work)
+{
+ struct rvu *rvu = container_of(work, struct rvu, mcs_intr_work);
+ struct mcs_intrq_entry *qentry;
+ struct mcs_intr_event *event;
+ unsigned long flags;
+
+ do {
+ spin_lock_irqsave(&rvu->mcs_intrq_lock, flags);
+ qentry = list_first_entry_or_null(&rvu->mcs_intrq_head,
+ struct mcs_intrq_entry,
+ node);
+ if (qentry)
+ list_del(&qentry->node);
+
+ spin_unlock_irqrestore(&rvu->mcs_intrq_lock, flags);
+ if (!qentry)
+ break; /* nothing more to process */
+
+ event = &qentry->intr_event;
+
+ mcs_notify_pfvf(event, rvu);
+ kfree(qentry);
+ } while (1);
+}
+
+int rvu_mbox_handler_mcs_intr_cfg(struct rvu *rvu,
+ struct mcs_intr_cfg *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct mcs_pfvf *pfvf;
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ /* Check if it is PF or VF */
+ if (pcifunc & RVU_PFVF_FUNC_MASK)
+ pfvf = &mcs->vf[rvu_get_hwvf(rvu, pcifunc)];
+ else
+ pfvf = &mcs->pf[rvu_get_pf(pcifunc)];
+
+ mcs->pf_map[0] = pcifunc;
+ pfvf->intr_mask = req->intr_mask;
+
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_get_hw_info(struct rvu *rvu,
+ struct msg_req *req,
+ struct mcs_hw_info *rsp)
+{
+ struct mcs *mcs;
+
+ if (!rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_NOT_MAPPED;
+
+ /* MCS resources are the same across all blocks */
+ mcs = mcs_get_pdata(0);
+ rsp->num_mcs_blks = rvu->mcs_blk_cnt;
+ rsp->tcam_entries = mcs->hw->tcam_entries;
+ rsp->secy_entries = mcs->hw->secy_entries;
+ rsp->sc_entries = mcs->hw->sc_entries;
+ rsp->sa_entries = mcs->hw->sa_entries;
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_port_reset(struct rvu *rvu, struct mcs_port_reset_req *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ mcs_reset_port(mcs, req->port_id, req->reset);
+
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_clear_stats(struct rvu *rvu,
+ struct mcs_clear_stats *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ mutex_lock(&mcs->stats_lock);
+ if (req->all)
+ mcs_clear_all_stats(mcs, pcifunc, req->dir);
+ else
+ mcs_clear_stats(mcs, req->type, req->id, req->dir);
+
+ mutex_unlock(&mcs->stats_lock);
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_get_flowid_stats(struct rvu *rvu,
+ struct mcs_stats_req *req,
+ struct mcs_flowid_stats *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ /* In CNF10K-B, before reading the statistics,
+ * MCSX_MIL_GLOBAL.FORCE_CLK_EN_IP needs to be set
+ * to get accurate statistics
+ */
+ if (mcs->hw->mcs_blks > 1)
+ mcs_set_force_clk_en(mcs, true);
+
+ mutex_lock(&mcs->stats_lock);
+ mcs_get_flowid_stats(mcs, rsp, req->id, req->dir);
+ mutex_unlock(&mcs->stats_lock);
+
+ /* Clear MCSX_MIL_GLOBAL.FORCE_CLK_EN_IP after reading
+ * the statistics
+ */
+ if (mcs->hw->mcs_blks > 1)
+ mcs_set_force_clk_en(mcs, false);
+
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_get_secy_stats(struct rvu *rvu,
+ struct mcs_stats_req *req,
+ struct mcs_secy_stats *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ if (mcs->hw->mcs_blks > 1)
+ mcs_set_force_clk_en(mcs, true);
+
+ mutex_lock(&mcs->stats_lock);
+
+ if (req->dir == MCS_RX)
+ mcs_get_rx_secy_stats(mcs, rsp, req->id);
+ else
+ mcs_get_tx_secy_stats(mcs, rsp, req->id);
+
+ mutex_unlock(&mcs->stats_lock);
+
+ if (mcs->hw->mcs_blks > 1)
+ mcs_set_force_clk_en(mcs, false);
+
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_get_sc_stats(struct rvu *rvu,
+ struct mcs_stats_req *req,
+ struct mcs_sc_stats *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ if (mcs->hw->mcs_blks > 1)
+ mcs_set_force_clk_en(mcs, true);
+
+ mutex_lock(&mcs->stats_lock);
+ mcs_get_sc_stats(mcs, rsp, req->id, req->dir);
+ mutex_unlock(&mcs->stats_lock);
+
+ if (mcs->hw->mcs_blks > 1)
+ mcs_set_force_clk_en(mcs, false);
+
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_get_sa_stats(struct rvu *rvu,
+ struct mcs_stats_req *req,
+ struct mcs_sa_stats *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ if (mcs->hw->mcs_blks > 1)
+ mcs_set_force_clk_en(mcs, true);
+
+ mutex_lock(&mcs->stats_lock);
+ mcs_get_sa_stats(mcs, rsp, req->id, req->dir);
+ mutex_unlock(&mcs->stats_lock);
+
+ if (mcs->hw->mcs_blks > 1)
+ mcs_set_force_clk_en(mcs, false);
+
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_get_port_stats(struct rvu *rvu,
+ struct mcs_stats_req *req,
+ struct mcs_port_stats *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ if (mcs->hw->mcs_blks > 1)
+ mcs_set_force_clk_en(mcs, true);
+
+ mutex_lock(&mcs->stats_lock);
+ mcs_get_port_stats(mcs, rsp, req->id, req->dir);
+ mutex_unlock(&mcs->stats_lock);
+
+ if (mcs->hw->mcs_blks > 1)
+ mcs_set_force_clk_en(mcs, false);
+
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_set_active_lmac(struct rvu *rvu,
+ struct mcs_set_active_lmac *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+ if (!mcs)
+ return MCS_AF_ERR_NOT_MAPPED;
+
+ mcs->hw->lmac_bmap = req->lmac_bmap;
+ mcs_set_lmac_channels(req->mcs_id, req->chan_base);
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_port_cfg_set(struct rvu *rvu, struct mcs_port_cfg_set_req *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ if (mcs->hw->lmac_cnt <= req->port_id || !(mcs->hw->lmac_bmap & BIT_ULL(req->port_id)))
+ return -EINVAL;
+
+ mcs_set_port_cfg(mcs, req);
+
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_port_cfg_get(struct rvu *rvu, struct mcs_port_cfg_get_req *req,
+ struct mcs_port_cfg_get_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ if (mcs->hw->lmac_cnt <= req->port_id || !(mcs->hw->lmac_bmap & BIT_ULL(req->port_id)))
+ return -EINVAL;
+
+ mcs_get_port_cfg(mcs, req, rsp);
+
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_custom_tag_cfg_get(struct rvu *rvu, struct mcs_custom_tag_cfg_get_req *req,
+ struct mcs_custom_tag_cfg_get_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ mcs_get_custom_tag_cfg(mcs, req, rsp);
+
+ return 0;
+}
+
+int rvu_mcs_flr_handler(struct rvu *rvu, u16 pcifunc)
+{
+ struct mcs *mcs;
+ int mcs_id;
+
+ /* CNF10K-B mcs0-6 are mapped to RPM2-8 */
+ if (rvu->mcs_blk_cnt > 1) {
+ for (mcs_id = 0; mcs_id < rvu->mcs_blk_cnt; mcs_id++) {
+ mcs = mcs_get_pdata(mcs_id);
+ mcs_free_all_rsrc(mcs, MCS_RX, pcifunc);
+ mcs_free_all_rsrc(mcs, MCS_TX, pcifunc);
+ }
+ } else {
+ /* CN10K-B has only one mcs block */
+ mcs = mcs_get_pdata(0);
+ mcs_free_all_rsrc(mcs, MCS_RX, pcifunc);
+ mcs_free_all_rsrc(mcs, MCS_TX, pcifunc);
+ }
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_flowid_ena_entry(struct rvu *rvu,
+ struct mcs_flowid_ena_dis_entry *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+ mcs_ena_dis_flowid_entry(mcs, req->flow_id, req->dir, req->ena);
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_pn_table_write(struct rvu *rvu,
+ struct mcs_pn_table_write_req *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+ mcs_pn_table_write(mcs, req->pn_id, req->next_pn, req->dir);
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_set_pn_threshold(struct rvu *rvu,
+ struct mcs_set_pn_threshold *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ mcs_pn_threshold_set(mcs, req);
+
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_rx_sc_sa_map_write(struct rvu *rvu,
+ struct mcs_rx_sc_sa_map *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+ mcs->mcs_ops->mcs_rx_sa_mem_map_write(mcs, req);
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_tx_sc_sa_map_write(struct rvu *rvu,
+ struct mcs_tx_sc_sa_map *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+ mcs->mcs_ops->mcs_tx_sa_mem_map_write(mcs, req);
+ mcs->tx_sa_active[req->sc_id] = req->tx_sa_active;
+
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_sa_plcy_write(struct rvu *rvu,
+ struct mcs_sa_plcy_write_req *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+ int i;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ for (i = 0; i < req->sa_cnt; i++)
+ mcs_sa_plcy_write(mcs, &req->plcy[i][0],
+ req->sa_index[i], req->dir);
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_rx_sc_cam_write(struct rvu *rvu,
+ struct mcs_rx_sc_cam_write_req *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+ mcs_rx_sc_cam_write(mcs, req->sci, req->secy_id, req->sc_id);
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_secy_plcy_write(struct rvu *rvu,
+ struct mcs_secy_plcy_write_req *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ mcs_secy_plcy_write(mcs, req->plcy,
+ req->secy_id, req->dir);
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_flowid_entry_write(struct rvu *rvu,
+ struct mcs_flowid_entry_write_req *req,
+ struct msg_rsp *rsp)
+{
+ struct secy_mem_map map;
+ struct mcs *mcs;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ /* TODO validate the flowid */
+ mcs_flowid_entry_write(mcs, req->data, req->mask,
+ req->flow_id, req->dir);
+ map.secy = req->secy_id;
+ map.sc = req->sc_id;
+ map.ctrl_pkt = req->ctrl_pkt;
+ map.flow_id = req->flow_id;
+ map.sci = req->sci;
+ mcs->mcs_ops->mcs_flowid_secy_map(mcs, &map, req->dir);
+ if (req->ena)
+ mcs_ena_dis_flowid_entry(mcs, req->flow_id,
+ req->dir, true);
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_free_resources(struct rvu *rvu,
+ struct mcs_free_rsrc_req *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct mcs_rsrc_map *map;
+ struct mcs *mcs;
+ int rc;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ if (req->dir == MCS_RX)
+ map = &mcs->rx;
+ else
+ map = &mcs->tx;
+
+ mutex_lock(&rvu->rsrc_lock);
+ /* Free all the cam resources mapped to PF/VF */
+ if (req->all) {
+ rc = mcs_free_all_rsrc(mcs, req->dir, pcifunc);
+ goto exit;
+ }
+
+ switch (req->rsrc_type) {
+ case MCS_RSRC_TYPE_FLOWID:
+ rc = mcs_free_rsrc(&map->flow_ids, map->flowid2pf_map, req->rsrc_id, pcifunc);
+ mcs_ena_dis_flowid_entry(mcs, req->rsrc_id, req->dir, false);
+ break;
+ case MCS_RSRC_TYPE_SECY:
+ rc = mcs_free_rsrc(&map->secy, map->secy2pf_map, req->rsrc_id, pcifunc);
+ mcs_clear_secy_plcy(mcs, req->rsrc_id, req->dir);
+ break;
+ case MCS_RSRC_TYPE_SC:
+ rc = mcs_free_rsrc(&map->sc, map->sc2pf_map, req->rsrc_id, pcifunc);
+ /* Disable SC CAM only on RX side */
+ if (req->dir == MCS_RX)
+ mcs_ena_dis_sc_cam_entry(mcs, req->rsrc_id, false);
+ break;
+ case MCS_RSRC_TYPE_SA:
+ rc = mcs_free_rsrc(&map->sa, map->sa2pf_map, req->rsrc_id, pcifunc);
+ break;
+ }
+exit:
+ mutex_unlock(&rvu->rsrc_lock);
+ return rc;
+}
+
+int rvu_mbox_handler_mcs_alloc_resources(struct rvu *rvu,
+ struct mcs_alloc_rsrc_req *req,
+ struct mcs_alloc_rsrc_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct mcs_rsrc_map *map;
+ struct mcs *mcs;
+ int rsrc_id, i;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ if (req->dir == MCS_RX)
+ map = &mcs->rx;
+ else
+ map = &mcs->tx;
+
+ mutex_lock(&rvu->rsrc_lock);
+
+ if (req->all) {
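+ /* Allocate one flow id, SECY, SC and a pair of SAs in a single request */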
+ rsrc_id = mcs_alloc_all_rsrc(mcs, &rsp->flow_ids[0],
+ &rsp->secy_ids[0],
+ &rsp->sc_ids[0],
+ &rsp->sa_ids[0],
+ &rsp->sa_ids[1],
+ pcifunc, req->dir);
+ goto exit;
+ }
+
+ switch (req->rsrc_type) {
+ case MCS_RSRC_TYPE_FLOWID:
+ for (i = 0; i < req->rsrc_cnt; i++) {
+ rsrc_id = mcs_alloc_rsrc(&map->flow_ids, map->flowid2pf_map, pcifunc);
+ if (rsrc_id < 0)
+ goto exit;
+ rsp->flow_ids[i] = rsrc_id;
+ rsp->rsrc_cnt++;
+ }
+ break;
+ case MCS_RSRC_TYPE_SECY:
+ for (i = 0; i < req->rsrc_cnt; i++) {
+ rsrc_id = mcs_alloc_rsrc(&map->secy, map->secy2pf_map, pcifunc);
+ if (rsrc_id < 0)
+ goto exit;
+ rsp->secy_ids[i] = rsrc_id;
+ rsp->rsrc_cnt++;
+ }
+ break;
+ case MCS_RSRC_TYPE_SC:
+ for (i = 0; i < req->rsrc_cnt; i++) {
+ rsrc_id = mcs_alloc_rsrc(&map->sc, map->sc2pf_map, pcifunc);
+ if (rsrc_id < 0)
+ goto exit;
+ rsp->sc_ids[i] = rsrc_id;
+ rsp->rsrc_cnt++;
+ }
+ break;
+ case MCS_RSRC_TYPE_SA:
+ for (i = 0; i < req->rsrc_cnt; i++) {
+ rsrc_id = mcs_alloc_rsrc(&map->sa, map->sa2pf_map, pcifunc);
+ if (rsrc_id < 0)
+ goto exit;
+ rsp->sa_ids[i] = rsrc_id;
+ rsp->rsrc_cnt++;
+ }
+ break;
+ }
+
+ rsp->rsrc_type = req->rsrc_type;
+ rsp->dir = req->dir;
+ rsp->mcs_id = req->mcs_id;
+ rsp->all = req->all;
+
+exit:
+ if (rsrc_id < 0)
+ dev_err(rvu->dev, "Failed to allocate the mcs resources for PCIFUNC:%d\n", pcifunc);
+ mutex_unlock(&rvu->rsrc_lock);
+ return 0;
+}
+
+int rvu_mbox_handler_mcs_alloc_ctrl_pkt_rule(struct rvu *rvu,
+ struct mcs_alloc_ctrl_pkt_rule_req *req,
+ struct mcs_alloc_ctrl_pkt_rule_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct mcs_rsrc_map *map;
+ struct mcs *mcs;
+ int rsrc_id;
+ u16 offset;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ map = (req->dir == MCS_RX) ? &mcs->rx : &mcs->tx;
+
+ mutex_lock(&rvu->rsrc_lock);
+
+ switch (req->rule_type) {
+ case MCS_CTRL_PKT_RULE_TYPE_ETH:
+ offset = MCS_CTRLPKT_ETYPE_RULE_OFFSET;
+ break;
+ case MCS_CTRL_PKT_RULE_TYPE_DA:
+ offset = MCS_CTRLPKT_DA_RULE_OFFSET;
+ break;
+ case MCS_CTRL_PKT_RULE_TYPE_RANGE:
+ offset = MCS_CTRLPKT_DA_RANGE_RULE_OFFSET;
+ break;
+ case MCS_CTRL_PKT_RULE_TYPE_COMBO:
+ offset = MCS_CTRLPKT_COMBO_RULE_OFFSET;
+ break;
+ case MCS_CTRL_PKT_RULE_TYPE_MAC:
+ offset = MCS_CTRLPKT_MAC_EN_RULE_OFFSET;
+ break;
+ }
+
+ rsrc_id = mcs_alloc_ctrlpktrule(&map->ctrlpktrule, map->ctrlpktrule2pf_map, offset,
+ pcifunc);
+ if (rsrc_id < 0)
+ goto exit;
+
+ rsp->rule_idx = rsrc_id;
+ rsp->rule_type = req->rule_type;
+ rsp->dir = req->dir;
+ rsp->mcs_id = req->mcs_id;
+
+ mutex_unlock(&rvu->rsrc_lock);
+ return 0;
+exit:
+ if (rsrc_id < 0)
+ dev_err(rvu->dev, "Failed to allocate the mcs ctrl pkt rule for PCIFUNC:%d\n",
+ pcifunc);
+ mutex_unlock(&rvu->rsrc_lock);
+ return rsrc_id;
+}
+
+int rvu_mbox_handler_mcs_free_ctrl_pkt_rule(struct rvu *rvu,
+ struct mcs_free_ctrl_pkt_rule_req *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+ int rc;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ mutex_lock(&rvu->rsrc_lock);
+
+ rc = mcs_free_ctrlpktrule(mcs, req);
+
+ mutex_unlock(&rvu->rsrc_lock);
+
+ return rc;
+}
+
+int rvu_mbox_handler_mcs_ctrl_pkt_rule_write(struct rvu *rvu,
+ struct mcs_ctrl_pkt_rule_write_req *req,
+ struct msg_rsp *rsp)
+{
+ struct mcs *mcs;
+ int rc;
+
+ if (req->mcs_id >= rvu->mcs_blk_cnt)
+ return MCS_AF_ERR_INVALID_MCSID;
+
+ mcs = mcs_get_pdata(req->mcs_id);
+
+ rc = mcs_ctrlpktrule_write(mcs, req);
+
+ return rc;
+}
+
+static void rvu_mcs_set_lmac_bmap(struct rvu *rvu)
+{
+ struct mcs *mcs = mcs_get_pdata(0);
+ unsigned long lmac_bmap = 0;
+ int cgx, lmac, port;
+
+ for (port = 0; port < mcs->hw->lmac_cnt; port++) {
+ cgx = port / rvu->hw->lmac_per_cgx;
+ lmac = port % rvu->hw->lmac_per_cgx;
+ if (!is_lmac_valid(rvu_cgx_pdata(cgx, rvu), lmac))
+ continue;
+ set_bit(port, &lmac_bmap);
+ }
+ mcs->hw->lmac_bmap = lmac_bmap;
+}
+
+int rvu_mcs_init(struct rvu *rvu)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ int lmac, err = 0, mcs_id;
+ struct mcs *mcs;
+
+ rvu->mcs_blk_cnt = mcs_get_blkcnt();
+
+ if (!rvu->mcs_blk_cnt)
+ return 0;
+
+ /* Needed only for CN10K-B */
+ if (rvu->mcs_blk_cnt == 1) {
+ err = mcs_set_lmac_channels(0, hw->cgx_chan_base);
+ if (err)
+ return err;
+ /* Set active lmacs */
+ rvu_mcs_set_lmac_bmap(rvu);
+ }
+
+ /* Install default tcam bypass entry and set port to operational mode */
+ for (mcs_id = 0; mcs_id < rvu->mcs_blk_cnt; mcs_id++) {
+ mcs = mcs_get_pdata(mcs_id);
+ mcs_install_flowid_bypass_entry(mcs);
+ for (lmac = 0; lmac < mcs->hw->lmac_cnt; lmac++)
+ mcs_set_lmac_mode(mcs, lmac, 0);
+
+ mcs->rvu = rvu;
+
+ /* Allocate memory for PFVF data */
+ mcs->pf = devm_kcalloc(mcs->dev, hw->total_pfs,
+ sizeof(struct mcs_pfvf), GFP_KERNEL);
+ if (!mcs->pf)
+ return -ENOMEM;
+
+ mcs->vf = devm_kcalloc(mcs->dev, hw->total_vfs,
+ sizeof(struct mcs_pfvf), GFP_KERNEL);
+ if (!mcs->vf)
+ return -ENOMEM;
+ }
+
+ /* Initialize the wq for handling mcs interrupts */
+ INIT_LIST_HEAD(&rvu->mcs_intrq_head);
+ INIT_WORK(&rvu->mcs_intr_work, mcs_intr_handler_task);
+ rvu->mcs_intr_wq = alloc_workqueue("mcs_intr_wq", 0, 0);
+ if (!rvu->mcs_intr_wq) {
+ dev_err(rvu->dev, "mcs alloc workqueue failed\n");
+ return -ENOMEM;
+ }
+
+ return err;
+}
+
+void rvu_mcs_exit(struct rvu *rvu)
+{
+ if (!rvu->mcs_intr_wq)
+ return;
+
+ flush_workqueue(rvu->mcs_intr_wq);
+ destroy_workqueue(rvu->mcs_intr_wq);
+ rvu->mcs_intr_wq = NULL;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
index 67a6821d2dff..3411e2e47d46 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
@@ -9,6 +9,8 @@
#include
#include
#include
+#include
+#include
#include "ptp.h"
#include "mbox.h"
@@ -50,12 +52,23 @@
#define PTP_CLOCK_COMP 0xF18ULL
#define PTP_TIMESTAMP 0xF20ULL
#define PTP_CLOCK_SEC 0xFD0ULL
+#define PTP_SEC_ROLLOVER 0xFD8ULL
#define CYCLE_MULT 1000
static struct ptp *first_ptp_block;
static const struct pci_device_id ptp_id_table[];
+static bool is_ptp_dev_cnf10kb(struct ptp *ptp)
+{
+ return (ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CNF10K_B_PTP) ? true : false;
+}
+
+static bool is_ptp_dev_cn10k(struct ptp *ptp)
+{
+ return (ptp->pdev->device == PCI_DEVID_CN10K_PTP) ? true : false;
+}
+
static bool cn10k_ptp_errata(struct ptp *ptp)
{
if (ptp->pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_A_PTP ||
@@ -72,6 +85,43 @@ static bool is_ptp_tsfmt_sec_nsec(struct ptp *ptp)
return false;
}
+static enum hrtimer_restart ptp_reset_thresh(struct hrtimer *hrtimer)
+{
+ struct ptp *ptp = container_of(hrtimer, struct ptp, hrtimer);
+ ktime_t curr_ts = ktime_get();
+ ktime_t delta_ns, period_ns;
+ u64 ptp_clock_hi;
+
+ /* calculate the elapsed time since last restart */
+ delta_ns = ktime_to_ns(ktime_sub(curr_ts, ptp->last_ts));
+
+ /* If the PTP clock value has crossed 0.5 seconds, it is too
+ * late to update the PPS threshold value, so update the
+ * threshold after 1 second.
+ */
+ ptp_clock_hi = readq(ptp->reg_base + PTP_CLOCK_HI);
+ if (ptp_clock_hi > 500000000) {
+ period_ns = ktime_set(0, (NSEC_PER_SEC + 100 - ptp_clock_hi));
+ } else {
+ writeq(500000000, ptp->reg_base + PTP_PPS_THRESH_HI);
+ period_ns = ktime_set(0, (NSEC_PER_SEC + 100 - delta_ns));
+ }
+
+ hrtimer_forward_now(hrtimer, period_ns);
+ ptp->last_ts = curr_ts;
+
+ return HRTIMER_RESTART;
+}
+
+static void ptp_hrtimer_start(struct ptp *ptp, ktime_t start_ns)
+{
+ ktime_t period_ns;
+
+ period_ns = ktime_set(0, (NSEC_PER_SEC + 100 - start_ns));
+ hrtimer_start(&ptp->hrtimer, period_ns, HRTIMER_MODE_REL);
+ ptp->last_ts = ktime_get();
+}
+
static u64 read_ptp_tstmp_sec_nsec(struct ptp *ptp)
{
u64 sec, sec1, nsec;
@@ -246,6 +296,10 @@ void ptp_start(struct ptp *ptp, u64 sclk, u32 ext_clk_freq, u32 extts)
/* sclk is in MHz */
ptp->clock_rate = sclk * 1000000;
+ /* Program the seconds rollover value to 1 second */
+ if (is_ptp_dev_cnf10kb(ptp))
+ writeq(0x3b9aca00, ptp->reg_base + PTP_SEC_ROLLOVER);
+
/* Enable PTP clock */
clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG);
@@ -270,6 +324,18 @@ void ptp_start(struct ptp *ptp, u64 sclk, u32 ext_clk_freq, u32 extts)
/* Set 50% duty cycle for 1Hz output */
writeq(0x1dcd650000000000, ptp->reg_base + PTP_PPS_HI_INCR);
writeq(0x1dcd650000000000, ptp->reg_base + PTP_PPS_LO_INCR);
+ if (cn10k_ptp_errata(ptp)) {
+ /* ptp_clock_hi rolls over to zero one clock cycle before it
+ * reaches the one second boundary. So, program pps_lo_incr in
+ * such a way that the pps threshold comparison at the one
+ * second boundary succeeds and the pps edge changes. After each
+ * one second boundary, the hrtimer handler is invoked to
+ * reprogram the pps threshold value.
+ */
+ ptp->clock_period = NSEC_PER_SEC / ptp->clock_rate;
+ writeq((0x1dcd6500ULL - ptp->clock_period) << 32,
+ ptp->reg_base + PTP_PPS_LO_INCR);
+ }
if (cn10k_ptp_errata(ptp))
clock_comp = ptp_calc_adjusted_comp(ptp->clock_rate);
@@ -282,14 +348,39 @@ void ptp_start(struct ptp *ptp, u64 sclk, u32 ext_clk_freq, u32 extts)
static int ptp_get_tstmp(struct ptp *ptp, u64 *clk)
{
- *clk = readq(ptp->reg_base + PTP_TIMESTAMP);
+ u64 timestamp;
+
+ if (is_ptp_dev_cn10k(ptp)) {
+ timestamp = readq(ptp->reg_base + PTP_TIMESTAMP);
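+ /* CN10K reports seconds in the upper 32 bits and nanoseconds in
+ * the lower 32 bits; fold the value into nanoseconds.
+ */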
+ *clk = (timestamp >> 32) * NSEC_PER_SEC + (timestamp & 0xFFFFFFFF);
+ } else {
+ *clk = readq(ptp->reg_base + PTP_TIMESTAMP);
+ }
return 0;
}
static int ptp_set_thresh(struct ptp *ptp, u64 thresh)
{
- writeq(thresh, ptp->reg_base + PTP_PPS_THRESH_HI);
+ if (!cn10k_ptp_errata(ptp))
+ writeq(thresh, ptp->reg_base + PTP_PPS_THRESH_HI);
+
+ return 0;
+}
+
+static int ptp_extts_on(struct ptp *ptp, int on)
+{
+ u64 ptp_clock_hi;
+
+ if (cn10k_ptp_errata(ptp)) {
+ if (on) {
+ ptp_clock_hi = readq(ptp->reg_base + PTP_CLOCK_HI);
+ ptp_hrtimer_start(ptp, (ktime_t)ptp_clock_hi);
+ } else {
+ if (hrtimer_active(&ptp->hrtimer))
+ hrtimer_cancel(&ptp->hrtimer);
+ }
+ }
return 0;
}
@@ -329,6 +420,11 @@ static int ptp_probe(struct pci_dev *pdev,
else
ptp->read_ptp_tstmp = &read_ptp_tstmp_nsec;
+ if (cn10k_ptp_errata(ptp)) {
+ hrtimer_init(&ptp->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ ptp->hrtimer.function = ptp_reset_thresh;
+ }
+
return 0;
error_free:
@@ -353,6 +449,9 @@ static void ptp_remove(struct pci_dev *pdev)
struct ptp *ptp = pci_get_drvdata(pdev);
u64 clock_cfg;
+ if (cn10k_ptp_errata(ptp) && hrtimer_active(&ptp->hrtimer))
+ hrtimer_cancel(&ptp->hrtimer);
+
if (IS_ERR_OR_NULL(ptp))
return;
@@ -420,6 +519,9 @@ int rvu_mbox_handler_ptp_op(struct rvu *rvu, struct ptp_req *req,
case PTP_OP_SET_THRESH:
err = ptp_set_thresh(rvu->ptp, req->thresh);
break;
+ case PTP_OP_EXTTS_ON:
+ err = ptp_extts_on(rvu->ptp, req->extts_on);
+ break;
default:
err = -EINVAL;
break;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.h b/drivers/net/ethernet/marvell/octeontx2/af/ptp.h
index 95a955159f40..b9d92abc3844 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/ptp.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.h
@@ -17,7 +17,10 @@ struct ptp {
void __iomem *reg_base;
u64 (*read_ptp_tstmp)(struct ptp *ptp);
spinlock_t ptp_lock; /* lock */
+ struct hrtimer hrtimer;
+ ktime_t last_ts;
u32 clock_rate;
+ u32 clock_period;
};
struct ptp *ptp_get(void);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
index ef59de43b11e..a70e1153fa04 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
@@ -415,11 +415,26 @@ void rpm_lmac_ptp_config(void *rpmd, int lmac_id, bool enable)
return;
cfg = rpm_read(rpm, lmac_id, RPMX_CMRX_CFG);
- if (enable)
+ if (enable) {
cfg |= RPMX_RX_TS_PREPEND;
- else
+ cfg |= RPMX_TX_PTP_1S_SUPPORT;
+ } else {
cfg &= ~RPMX_RX_TS_PREPEND;
+ cfg &= ~RPMX_TX_PTP_1S_SUPPORT;
+ }
+
rpm_write(rpm, lmac_id, RPMX_CMRX_CFG, cfg);
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_MTI_MAC100X_XIF_MODE);
+
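+ /* XIF_MODE holds the one-step PTP timestamp insertion control for this LMAC */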
+ if (enable) {
+ cfg |= RPMX_ONESTEP_ENABLE;
+ cfg &= ~RPMX_TS_BINARY_MODE;
+ } else {
+ cfg &= ~RPMX_ONESTEP_ENABLE;
+ }
+
+ rpm_write(rpm, lmac_id, RPMX_MTI_MAC100X_XIF_MODE, cfg);
}
int rpm_lmac_pfc_config(void *rpmd, int lmac_id, u8 tx_pause, u8 rx_pause, u16 pfc_en)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
index c2bd6e54ea51..77f2ef9e1425 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
@@ -16,6 +16,7 @@
/* Registers */
#define RPMX_CMRX_CFG 0x00
#define RPMX_RX_TS_PREPEND BIT_ULL(22)
+#define RPMX_TX_PTP_1S_SUPPORT BIT_ULL(17)
#define RPMX_CMRX_SW_INT 0x180
#define RPMX_CMRX_SW_INT_W1S 0x188
#define RPMX_CMRX_SW_INT_ENA_W1S 0x198
@@ -72,6 +73,10 @@
#define RPMX_MTI_MAC100X_CL89_PAUSE_QUANTA 0x8108
#define RPM_DEFAULT_PAUSE_TIME 0x7FF
+#define RPMX_MTI_MAC100X_XIF_MODE 0x8100
+#define RPMX_ONESTEP_ENABLE BIT_ULL(5)
+#define RPMX_TS_BINARY_MODE BIT_ULL(11)
+
/* Function Declarations */
int rpm_get_nr_lmacs(void *rpmd);
u8 rpm_get_lmac_type(void *rpmd, int lmac_id);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index 7282a826d81e..3f5e09b77d4b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -16,6 +16,7 @@
#include "rvu.h"
#include "rvu_reg.h"
#include "ptp.h"
+#include "mcs.h"
#include "rvu_trace.h"
#include "rvu_npc_hash.h"
@@ -23,8 +24,6 @@
#define DRV_NAME "rvu_af"
#define DRV_STRING "Marvell OcteonTX2 RVU Admin Function Driver"
-static int rvu_get_hwvf(struct rvu *rvu, int pcifunc);
-
static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
struct rvu_block *block, int lf);
static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf,
@@ -418,7 +417,7 @@ void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf)
*hwvf = cfg & 0xFFF;
}
-static int rvu_get_hwvf(struct rvu *rvu, int pcifunc)
+int rvu_get_hwvf(struct rvu *rvu, int pcifunc)
{
int pf, func;
u64 cfg;
@@ -1159,6 +1158,12 @@ cpt:
rvu_program_channels(rvu);
+ err = rvu_mcs_init(rvu);
+ if (err) {
+ dev_err(rvu->dev, "%s: Failed to initialize mcs\n", __func__);
+ goto nix_err;
+ }
+
return 0;
nix_err:
@@ -3293,6 +3298,7 @@ err_mbox:
err_hwsetup:
rvu_cgx_exit(rvu);
rvu_fwdata_exit(rvu);
+ rvu_mcs_exit(rvu);
rvu_reset_all_blocks(rvu);
rvu_free_hw_resources(rvu);
rvu_clear_rvum_blk_revid(rvu);
@@ -3319,6 +3325,7 @@ static void rvu_remove(struct pci_dev *pdev)
rvu_flr_wq_destroy(rvu);
rvu_cgx_exit(rvu);
rvu_fwdata_exit(rvu);
+ rvu_mcs_exit(rvu);
rvu_mbox_destroy(&rvu->afpf_wq_info);
rvu_disable_sriov(rvu);
rvu_reset_all_blocks(rvu);
@@ -3354,12 +3361,18 @@ static int __init rvu_init_module(void)
if (err < 0)
goto ptp_err;
+ err = pci_register_driver(&mcs_driver);
+ if (err < 0)
+ goto mcs_err;
+
err = pci_register_driver(&rvu_driver);
if (err < 0)
goto rvu_err;
return 0;
rvu_err:
+ pci_unregister_driver(&mcs_driver);
+mcs_err:
pci_unregister_driver(&ptp_driver);
ptp_err:
pci_unregister_driver(&cgx_driver);
@@ -3370,6 +3383,7 @@ ptp_err:
static void __exit rvu_cleanup_module(void)
{
pci_unregister_driver(&rvu_driver);
+ pci_unregister_driver(&mcs_driver);
pci_unregister_driver(&ptp_driver);
pci_unregister_driver(&cgx_driver);
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index d15bc443335d..76474385a602 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -25,6 +25,8 @@
/* Subsystem Device ID */
#define PCI_SUBSYS_DEVID_96XX 0xB200
#define PCI_SUBSYS_DEVID_CN10K_A 0xB900
+#define PCI_SUBSYS_DEVID_CNF10K_B 0xBC00
+#define PCI_SUBSYS_DEVID_CN10K_B 0xBD00
/* PCI BAR nos */
#define PCI_AF_REG_BAR_NUM 0
@@ -62,6 +64,10 @@ struct rvu_debugfs {
struct dentry *nix;
struct dentry *npc;
struct dentry *cpt;
+ struct dentry *mcs_root;
+ struct dentry *mcs;
+ struct dentry *mcs_rx;
+ struct dentry *mcs_tx;
struct dump_ctx npa_aura_ctx;
struct dump_ctx npa_pool_ctx;
struct dump_ctx nix_cq_ctx;
@@ -497,6 +503,8 @@ struct rvu {
struct ptp *ptp;
+ int mcs_blk_cnt;
+
#ifdef CONFIG_DEBUG_FS
struct rvu_debugfs rvu_dbg;
#endif
@@ -504,6 +512,12 @@ struct rvu {
/* RVU switch implementation over NPC with DMAC rules */
struct rvu_switch rswitch;
+
+ struct work_struct mcs_intr_work;
+ struct workqueue_struct *mcs_intr_wq;
+ struct list_head mcs_intrq_head;
+ /* mcs interrupt queue lock */
+ spinlock_t mcs_intrq_lock;
};
static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val)
@@ -868,4 +882,11 @@ void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc);
int rvu_npc_set_parse_mode(struct rvu *rvu, u16 pcifunc, u64 mode, u8 dir,
u64 pkind, u8 var_len_off, u8 var_len_off_mask,
u8 shift_dir);
+int rvu_get_hwvf(struct rvu *rvu, int pcifunc);
+
+/* CN10K MCS */
+int rvu_mcs_init(struct rvu *rvu);
+int rvu_mcs_flr_handler(struct rvu *rvu, u16 pcifunc);
+void rvu_mcs_exit(struct rvu *rvu);
+
#endif /* RVU_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index f42a09f04b25..a1970ebedf95 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -19,6 +19,7 @@
#include "lmac_common.h"
#include "npc.h"
#include "rvu_npc_hash.h"
+#include "mcs.h"
#define DEBUGFS_DIR_NAME "octeontx2"
@@ -227,6 +228,350 @@ static const struct file_operations rvu_dbg_##name##_fops = { \
static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf);
+static int rvu_dbg_mcs_port_stats_display(struct seq_file *filp, void *unused, int dir)
+{
+ struct mcs *mcs = filp->private;
+ struct mcs_port_stats stats;
+ int lmac;
+
+ seq_puts(filp, "\n port stats\n");
+ mutex_lock(&mcs->stats_lock);
+ for_each_set_bit(lmac, &mcs->hw->lmac_bmap, mcs->hw->lmac_cnt) {
+ mcs_get_port_stats(mcs, &stats, lmac, dir);
+ seq_printf(filp, "port%d: Tcam Miss: %lld\n", lmac, stats.tcam_miss_cnt);
+ seq_printf(filp, "port%d: Parser errors: %lld\n", lmac, stats.parser_err_cnt);
+
+ if (dir == MCS_RX && mcs->hw->mcs_blks > 1)
+ seq_printf(filp, "port%d: Preempt error: %lld\n", lmac,
+ stats.preempt_err_cnt);
+ if (dir == MCS_TX)
+ seq_printf(filp, "port%d: Sectag insert error: %lld\n", lmac,
+ stats.sectag_insert_err_cnt);
+ }
+ mutex_unlock(&mcs->stats_lock);
+ return 0;
+}
+
+static int rvu_dbg_mcs_rx_port_stats_display(struct seq_file *filp, void *unused)
+{
+ return rvu_dbg_mcs_port_stats_display(filp, unused, MCS_RX);
+}
+
+RVU_DEBUG_SEQ_FOPS(mcs_rx_port_stats, mcs_rx_port_stats_display, NULL);
+
+static int rvu_dbg_mcs_tx_port_stats_display(struct seq_file *filp, void *unused)
+{
+ return rvu_dbg_mcs_port_stats_display(filp, unused, MCS_TX);
+}
+
+RVU_DEBUG_SEQ_FOPS(mcs_tx_port_stats, mcs_tx_port_stats_display, NULL);
+
+static int rvu_dbg_mcs_sa_stats_display(struct seq_file *filp, void *unused, int dir)
+{
+ struct mcs *mcs = filp->private;
+ struct mcs_sa_stats stats;
+ struct rsrc_bmap *map;
+ int sa_id;
+
+ if (dir == MCS_TX) {
+ map = &mcs->tx.sa;
+ mutex_lock(&mcs->stats_lock);
+ for_each_set_bit(sa_id, map->bmap, mcs->hw->sa_entries) {
+ seq_puts(filp, "\n TX SA stats\n");
+ mcs_get_sa_stats(mcs, &stats, sa_id, MCS_TX);
+ seq_printf(filp, "sa%d: Pkts encrypted: %lld\n", sa_id,
+ stats.pkt_encrypt_cnt);
+
+ seq_printf(filp, "sa%d: Pkts protected: %lld\n", sa_id,
+ stats.pkt_protected_cnt);
+ }
+ mutex_unlock(&mcs->stats_lock);
+ return 0;
+ }
+
+ /* RX stats */
+ map = &mcs->rx.sa;
+ mutex_lock(&mcs->stats_lock);
+ for_each_set_bit(sa_id, map->bmap, mcs->hw->sa_entries) {
+ seq_puts(filp, "\n RX SA stats\n");
+ mcs_get_sa_stats(mcs, &stats, sa_id, MCS_RX);
+ seq_printf(filp, "sa%d: Invalid pkts: %lld\n", sa_id, stats.pkt_invalid_cnt);
+ seq_printf(filp, "sa%d: Pkts no sa error: %lld\n", sa_id, stats.pkt_nosaerror_cnt);
+ seq_printf(filp, "sa%d: Pkts not valid: %lld\n", sa_id, stats.pkt_notvalid_cnt);
+ seq_printf(filp, "sa%d: Pkts ok: %lld\n", sa_id, stats.pkt_ok_cnt);
+ seq_printf(filp, "sa%d: Pkts no sa: %lld\n", sa_id, stats.pkt_nosa_cnt);
+ }
+ mutex_unlock(&mcs->stats_lock);
+ return 0;
+}
+
+static int rvu_dbg_mcs_rx_sa_stats_display(struct seq_file *filp, void *unused)
+{
+ return rvu_dbg_mcs_sa_stats_display(filp, unused, MCS_RX);
+}
+
+RVU_DEBUG_SEQ_FOPS(mcs_rx_sa_stats, mcs_rx_sa_stats_display, NULL);
+
+static int rvu_dbg_mcs_tx_sa_stats_display(struct seq_file *filp, void *unused)
+{
+ return rvu_dbg_mcs_sa_stats_display(filp, unused, MCS_TX);
+}
+
+RVU_DEBUG_SEQ_FOPS(mcs_tx_sa_stats, mcs_tx_sa_stats_display, NULL);
+
+static int rvu_dbg_mcs_tx_sc_stats_display(struct seq_file *filp, void *unused)
+{
+ struct mcs *mcs = filp->private;
+ struct mcs_sc_stats stats;
+ struct rsrc_bmap *map;
+ int sc_id;
+
+ map = &mcs->tx.sc;
+ seq_puts(filp, "\n SC stats\n");
+
+ mutex_lock(&mcs->stats_lock);
+ for_each_set_bit(sc_id, map->bmap, mcs->hw->sc_entries) {
+ mcs_get_sc_stats(mcs, &stats, sc_id, MCS_TX);
+ seq_printf(filp, "\n=======sc%d======\n\n", sc_id);
+ seq_printf(filp, "sc%d: Pkts encrypted: %lld\n", sc_id, stats.pkt_encrypt_cnt);
+ seq_printf(filp, "sc%d: Pkts protected: %lld\n", sc_id, stats.pkt_protected_cnt);
+
+ if (mcs->hw->mcs_blks == 1) {
+ seq_printf(filp, "sc%d: Octets encrypted: %lld\n", sc_id,
+ stats.octet_encrypt_cnt);
+ seq_printf(filp, "sc%d: Octets protected: %lld\n", sc_id,
+ stats.octet_protected_cnt);
+ }
+ }
+ mutex_unlock(&mcs->stats_lock);
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(mcs_tx_sc_stats, mcs_tx_sc_stats_display, NULL);
+
+static int rvu_dbg_mcs_rx_sc_stats_display(struct seq_file *filp, void *unused)
+{
+ struct mcs *mcs = filp->private;
+ struct mcs_sc_stats stats;
+ struct rsrc_bmap *map;
+ int sc_id;
+
+ map = &mcs->rx.sc;
+ seq_puts(filp, "\n SC stats\n");
+
+ mutex_lock(&mcs->stats_lock);
+ for_each_set_bit(sc_id, map->bmap, mcs->hw->sc_entries) {
+ mcs_get_sc_stats(mcs, &stats, sc_id, MCS_RX);
+ seq_printf(filp, "\n=======sc%d======\n\n", sc_id);
+ seq_printf(filp, "sc%d: Cam hits: %lld\n", sc_id, stats.hit_cnt);
+ seq_printf(filp, "sc%d: Invalid pkts: %lld\n", sc_id, stats.pkt_invalid_cnt);
+ seq_printf(filp, "sc%d: Late pkts: %lld\n", sc_id, stats.pkt_late_cnt);
+ seq_printf(filp, "sc%d: Notvalid pkts: %lld\n", sc_id, stats.pkt_notvalid_cnt);
+ seq_printf(filp, "sc%d: Unchecked pkts: %lld\n", sc_id, stats.pkt_unchecked_cnt);
+
+ if (mcs->hw->mcs_blks > 1) {
+ seq_printf(filp, "sc%d: Delay pkts: %lld\n", sc_id, stats.pkt_delay_cnt);
+ seq_printf(filp, "sc%d: Pkts ok: %lld\n", sc_id, stats.pkt_ok_cnt);
+ }
+ if (mcs->hw->mcs_blks == 1) {
+ seq_printf(filp, "sc%d: Octets decrypted: %lld\n", sc_id,
+ stats.octet_decrypt_cnt);
+ seq_printf(filp, "sc%d: Octets validated: %lld\n", sc_id,
+ stats.octet_validate_cnt);
+ }
+ }
+ mutex_unlock(&mcs->stats_lock);
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(mcs_rx_sc_stats, mcs_rx_sc_stats_display, NULL);
+
+static int rvu_dbg_mcs_flowid_stats_display(struct seq_file *filp, void *unused, int dir)
+{
+ struct mcs *mcs = filp->private;
+ struct mcs_flowid_stats stats;
+ struct rsrc_bmap *map;
+ int flow_id;
+
+ seq_puts(filp, "\n Flowid stats\n");
+
+ if (dir == MCS_RX)
+ map = &mcs->rx.flow_ids;
+ else
+ map = &mcs->tx.flow_ids;
+
+ mutex_lock(&mcs->stats_lock);
+ for_each_set_bit(flow_id, map->bmap, mcs->hw->tcam_entries) {
+ mcs_get_flowid_stats(mcs, &stats, flow_id, dir);
+ seq_printf(filp, "Flowid%d: Hit:%lld\n", flow_id, stats.tcam_hit_cnt);
+ }
+ mutex_unlock(&mcs->stats_lock);
+ return 0;
+}
+
+static int rvu_dbg_mcs_tx_flowid_stats_display(struct seq_file *filp, void *unused)
+{
+ return rvu_dbg_mcs_flowid_stats_display(filp, unused, MCS_TX);
+}
+
+RVU_DEBUG_SEQ_FOPS(mcs_tx_flowid_stats, mcs_tx_flowid_stats_display, NULL);
+
+static int rvu_dbg_mcs_rx_flowid_stats_display(struct seq_file *filp, void *unused)
+{
+ return rvu_dbg_mcs_flowid_stats_display(filp, unused, MCS_RX);
+}
+
+RVU_DEBUG_SEQ_FOPS(mcs_rx_flowid_stats, mcs_rx_flowid_stats_display, NULL);
+
+static int rvu_dbg_mcs_tx_secy_stats_display(struct seq_file *filp, void *unused)
+{
+ struct mcs *mcs = filp->private;
+ struct mcs_secy_stats stats;
+ struct rsrc_bmap *map;
+ int secy_id;
+
+ map = &mcs->tx.secy;
+ seq_puts(filp, "\n MCS TX secy stats\n");
+
+ mutex_lock(&mcs->stats_lock);
+ for_each_set_bit(secy_id, map->bmap, mcs->hw->secy_entries) {
+ mcs_get_tx_secy_stats(mcs, &stats, secy_id);
+ seq_printf(filp, "\n=======Secy%d======\n\n", secy_id);
+ seq_printf(filp, "secy%d: Ctrl bcast pkts: %lld\n", secy_id,
+ stats.ctl_pkt_bcast_cnt);
+ seq_printf(filp, "secy%d: Ctrl Mcast pkts: %lld\n", secy_id,
+ stats.ctl_pkt_mcast_cnt);
+ seq_printf(filp, "secy%d: Ctrl ucast pkts: %lld\n", secy_id,
+ stats.ctl_pkt_ucast_cnt);
+ seq_printf(filp, "secy%d: Ctrl octets: %lld\n", secy_id, stats.ctl_octet_cnt);
+ seq_printf(filp, "secy%d: Unctrl bcast cnt: %lld\n", secy_id,
+ stats.unctl_pkt_bcast_cnt);
+ seq_printf(filp, "secy%d: Unctrl mcast pkts: %lld\n", secy_id,
+ stats.unctl_pkt_mcast_cnt);
+ seq_printf(filp, "secy%d: Unctrl ucast pkts: %lld\n", secy_id,
+ stats.unctl_pkt_ucast_cnt);
+ seq_printf(filp, "secy%d: Unctrl octets: %lld\n", secy_id, stats.unctl_octet_cnt);
+ seq_printf(filp, "secy%d: Octet encrypted: %lld\n", secy_id,
+ stats.octet_encrypted_cnt);
+ seq_printf(filp, "secy%d: octet protected: %lld\n", secy_id,
+ stats.octet_protected_cnt);
+ seq_printf(filp, "secy%d: Pkts on active sa: %lld\n", secy_id,
+ stats.pkt_noactivesa_cnt);
+ seq_printf(filp, "secy%d: Pkts too long: %lld\n", secy_id, stats.pkt_toolong_cnt);
+ seq_printf(filp, "secy%d: Pkts untagged: %lld\n", secy_id, stats.pkt_untagged_cnt);
+ }
+ mutex_unlock(&mcs->stats_lock);
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(mcs_tx_secy_stats, mcs_tx_secy_stats_display, NULL);
+
+static int rvu_dbg_mcs_rx_secy_stats_display(struct seq_file *filp, void *unused)
+{
+ struct mcs *mcs = filp->private;
+ struct mcs_secy_stats stats;
+ struct rsrc_bmap *map;
+ int secy_id;
+
+ map = &mcs->rx.secy;
+ seq_puts(filp, "\n MCS secy stats\n");
+
+ mutex_lock(&mcs->stats_lock);
+ for_each_set_bit(secy_id, map->bmap, mcs->hw->secy_entries) {
+ mcs_get_rx_secy_stats(mcs, &stats, secy_id);
+ seq_printf(filp, "\n=======Secy%d======\n\n", secy_id);
+ seq_printf(filp, "secy%d: Ctrl bcast pkts: %lld\n", secy_id,
+ stats.ctl_pkt_bcast_cnt);
+ seq_printf(filp, "secy%d: Ctrl Mcast pkts: %lld\n", secy_id,
+ stats.ctl_pkt_mcast_cnt);
+ seq_printf(filp, "secy%d: Ctrl ucast pkts: %lld\n", secy_id,
+ stats.ctl_pkt_ucast_cnt);
+ seq_printf(filp, "secy%d: Ctrl octets: %lld\n", secy_id, stats.ctl_octet_cnt);
+ seq_printf(filp, "secy%d: Unctrl bcast cnt: %lld\n", secy_id,
+ stats.unctl_pkt_bcast_cnt);
+ seq_printf(filp, "secy%d: Unctrl mcast pkts: %lld\n", secy_id,
+ stats.unctl_pkt_mcast_cnt);
+ seq_printf(filp, "secy%d: Unctrl ucast pkts: %lld\n", secy_id,
+ stats.unctl_pkt_ucast_cnt);
+ seq_printf(filp, "secy%d: Unctrl octets: %lld\n", secy_id, stats.unctl_octet_cnt);
+ seq_printf(filp, "secy%d: Octet decrypted: %lld\n", secy_id,
+ stats.octet_decrypted_cnt);
+ seq_printf(filp, "secy%d: octet validated: %lld\n", secy_id,
+ stats.octet_validated_cnt);
+ seq_printf(filp, "secy%d: Pkts on disable port: %lld\n", secy_id,
+ stats.pkt_port_disabled_cnt);
+ seq_printf(filp, "secy%d: Octets validated: %lld\n", secy_id, stats.pkt_badtag_cnt);
+ seq_printf(filp, "secy%d: Octets validated: %lld\n", secy_id, stats.pkt_nosa_cnt);
+ seq_printf(filp, "secy%d: Pkts with nosaerror: %lld\n", secy_id,
+ stats.pkt_nosaerror_cnt);
+ seq_printf(filp, "secy%d: Tagged ctrl pkts: %lld\n", secy_id,
+ stats.pkt_tagged_ctl_cnt);
+ seq_printf(filp, "secy%d: Untaged pkts: %lld\n", secy_id, stats.pkt_untaged_cnt);
+ seq_printf(filp, "secy%d: Ctrl pkts: %lld\n", secy_id, stats.pkt_ctl_cnt);
+ if (mcs->hw->mcs_blks > 1)
+ seq_printf(filp, "secy%d: pkts notag: %lld\n", secy_id,
+ stats.pkt_notag_cnt);
+ }
+ mutex_unlock(&mcs->stats_lock);
+ return 0;
+}
+
+RVU_DEBUG_SEQ_FOPS(mcs_rx_secy_stats, mcs_rx_secy_stats_display, NULL);
+
+static void rvu_dbg_mcs_init(struct rvu *rvu)
+{
+ struct mcs *mcs;
+ char dname[10];
+ int i;
+
+ if (!rvu->mcs_blk_cnt)
+ return;
+
+ rvu->rvu_dbg.mcs_root = debugfs_create_dir("mcs", rvu->rvu_dbg.root);
+
+ for (i = 0; i < rvu->mcs_blk_cnt; i++) {
+ mcs = mcs_get_pdata(i);
+
+ sprintf(dname, "mcs%d", i);
+ rvu->rvu_dbg.mcs = debugfs_create_dir(dname,
+ rvu->rvu_dbg.mcs_root);
+
+ rvu->rvu_dbg.mcs_rx = debugfs_create_dir("rx_stats", rvu->rvu_dbg.mcs);
+
+ debugfs_create_file("flowid", 0600, rvu->rvu_dbg.mcs_rx, mcs,
+ &rvu_dbg_mcs_rx_flowid_stats_fops);
+
+ debugfs_create_file("secy", 0600, rvu->rvu_dbg.mcs_rx, mcs,
+ &rvu_dbg_mcs_rx_secy_stats_fops);
+
+ debugfs_create_file("sc", 0600, rvu->rvu_dbg.mcs_rx, mcs,
+ &rvu_dbg_mcs_rx_sc_stats_fops);
+
+ debugfs_create_file("sa", 0600, rvu->rvu_dbg.mcs_rx, mcs,
+ &rvu_dbg_mcs_rx_sa_stats_fops);
+
+ debugfs_create_file("port", 0600, rvu->rvu_dbg.mcs_rx, mcs,
+ &rvu_dbg_mcs_rx_port_stats_fops);
+
+ rvu->rvu_dbg.mcs_tx = debugfs_create_dir("tx_stats", rvu->rvu_dbg.mcs);
+
+ debugfs_create_file("flowid", 0600, rvu->rvu_dbg.mcs_tx, mcs,
+ &rvu_dbg_mcs_tx_flowid_stats_fops);
+
+ debugfs_create_file("secy", 0600, rvu->rvu_dbg.mcs_tx, mcs,
+ &rvu_dbg_mcs_tx_secy_stats_fops);
+
+ debugfs_create_file("sc", 0600, rvu->rvu_dbg.mcs_tx, mcs,
+ &rvu_dbg_mcs_tx_sc_stats_fops);
+
+ debugfs_create_file("sa", 0600, rvu->rvu_dbg.mcs_tx, mcs,
+ &rvu_dbg_mcs_tx_sa_stats_fops);
+
+ debugfs_create_file("port", 0600, rvu->rvu_dbg.mcs_tx, mcs,
+ &rvu_dbg_mcs_tx_port_stats_fops);
+ }
+}
+
#define LMT_MAPTBL_ENTRY_SIZE 16
/* Dump LMTST map table */
static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
@@ -3053,6 +3398,7 @@ create:
rvu_dbg_npc_init(rvu);
rvu_dbg_cpt_init(rvu, BLKADDR_CPT0);
rvu_dbg_cpt_init(rvu, BLKADDR_CPT1);
+ rvu_dbg_mcs_init(rvu);
}
void rvu_dbg_exit(struct rvu *rvu)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 0879a48411f3..7646bb2ec89b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -4296,8 +4296,14 @@ static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw)
/* Restore CINT timer delay to HW reset values */
rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL);
+ cfg = rvu_read64(rvu, blkaddr, NIX_AF_SEB_CFG);
+
/* For better performance use NDC TX instead of NDC RX for SQ's SQEs" */
- rvu_write64(rvu, blkaddr, NIX_AF_SEB_CFG, 0x1ULL);
+ cfg |= 1ULL;
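+ /* Enable one-step PTP timestamping on CN10K (non-OcteonTx2) silicon */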
+ if (!is_rvu_otx2(rvu))
+ cfg |= NIX_PTP_1STEP_EN;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_SEB_CFG, cfg);
if (is_block_implemented(hw, blkaddr)) {
err = nix_setup_txschq(rvu, nix_hw, blkaddr);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
index 77a9ade91f3e..0e0d536645ac 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
@@ -266,6 +266,7 @@
#define NIX_AF_TX_NPC_CAPTURE_CONFIG (0x0660)
#define NIX_AF_TX_NPC_CAPTURE_INFO (0x0670)
#define NIX_AF_SEB_CFG (0x05F0)
+#define NIX_PTP_1STEP_EN BIT_ULL(2)
#define NIX_AF_DEBUG_NPC_RESP_DATAX(a) (0x680 | (a) << 3)
#define NIX_AF_SMQX_CFG(a) (0x700 | (a) << 16)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
index d463dc72d80a..73fdb8798614 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
@@ -13,5 +13,6 @@ rvu_nicvf-y := otx2_vf.o otx2_devlink.o
rvu_nicpf-$(CONFIG_DCB) += otx2_dcbnl.o
rvu_nicvf-$(CONFIG_DCB) += otx2_dcbnl.o
+rvu_nicpf-$(CONFIG_MACSEC) += cn10k_macsec.o
ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
index fd4f083c699e..826f691de259 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
@@ -86,8 +86,7 @@ int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
aq->sq.max_sqe_size = NIX_MAXSQESZ_W16; /* 128 byte */
aq->sq.cq_ena = 1;
aq->sq.ena = 1;
- /* Only one SMQ is allocated, map all SQ's to that SMQ */
- aq->sq.smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0];
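+ /* Pick the SMQ for this queue index instead of mapping all SQs to SMQ 0 */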
+ aq->sq.smq = otx2_get_smq_idx(pfvf, qidx);
aq->sq.smq_rr_weight = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen);
aq->sq.default_chan = pfvf->hw.tx_chan_base;
aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
new file mode 100644
index 000000000000..64f3acd7f67b
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c
@@ -0,0 +1,1668 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell MACSEC hardware offload driver
+ *
+ * Copyright (C) 2022 Marvell.
+ */
+
+#include
+#include
+#include
+#include "otx2_common.h"
+
+#define MCS_TCAM0_MAC_SA_MASK GENMASK_ULL(63, 48)
+#define MCS_TCAM1_MAC_SA_MASK GENMASK_ULL(31, 0)
+#define MCS_TCAM1_ETYPE_MASK GENMASK_ULL(47, 32)
+
+#define MCS_SA_MAP_MEM_SA_USE BIT_ULL(9)
+
+#define MCS_RX_SECY_PLCY_RW_MASK GENMASK_ULL(49, 18)
+#define MCS_RX_SECY_PLCY_RP BIT_ULL(17)
+#define MCS_RX_SECY_PLCY_AUTH_ENA BIT_ULL(16)
+#define MCS_RX_SECY_PLCY_CIP GENMASK_ULL(8, 5)
+#define MCS_RX_SECY_PLCY_VAL GENMASK_ULL(2, 1)
+#define MCS_RX_SECY_PLCY_ENA BIT_ULL(0)
+
+#define MCS_TX_SECY_PLCY_MTU GENMASK_ULL(43, 28)
+#define MCS_TX_SECY_PLCY_ST_TCI GENMASK_ULL(27, 22)
+#define MCS_TX_SECY_PLCY_ST_OFFSET GENMASK_ULL(21, 15)
+#define MCS_TX_SECY_PLCY_INS_MODE BIT_ULL(14)
+#define MCS_TX_SECY_PLCY_AUTH_ENA BIT_ULL(13)
+#define MCS_TX_SECY_PLCY_CIP GENMASK_ULL(5, 2)
+#define MCS_TX_SECY_PLCY_PROTECT BIT_ULL(1)
+#define MCS_TX_SECY_PLCY_ENA BIT_ULL(0)
+
+#define MCS_GCM_AES_128 0
+#define MCS_GCM_AES_256 1
+#define MCS_GCM_AES_XPN_128 2
+#define MCS_GCM_AES_XPN_256 3
+
+#define MCS_TCI_ES 0x40 /* end station */
+#define MCS_TCI_SC 0x20 /* SCI present */
+#define MCS_TCI_SCB 0x10 /* epon */
+#define MCS_TCI_E 0x08 /* encryption */
+#define MCS_TCI_C 0x04 /* changed text */
+
+static struct cn10k_mcs_txsc *cn10k_mcs_get_txsc(struct cn10k_mcs_cfg *cfg,
+ struct macsec_secy *secy)
+{
+ struct cn10k_mcs_txsc *txsc;
+
+ list_for_each_entry(txsc, &cfg->txsc_list, entry) {
+ if (txsc->sw_secy == secy)
+ return txsc;
+ }
+
+ return NULL;
+}
+
+static struct cn10k_mcs_rxsc *cn10k_mcs_get_rxsc(struct cn10k_mcs_cfg *cfg,
+ struct macsec_secy *secy,
+ struct macsec_rx_sc *rx_sc)
+{
+ struct cn10k_mcs_rxsc *rxsc;
+
+ list_for_each_entry(rxsc, &cfg->rxsc_list, entry) {
+ if (rxsc->sw_rxsc == rx_sc && rxsc->sw_secy == secy)
+ return rxsc;
+ }
+
+ return NULL;
+}
+
+static const char *rsrc_name(enum mcs_rsrc_type rsrc_type)
+{
+ switch (rsrc_type) {
+ case MCS_RSRC_TYPE_FLOWID:
+ return "FLOW";
+ case MCS_RSRC_TYPE_SC:
+ return "SC";
+ case MCS_RSRC_TYPE_SECY:
+ return "SECY";
+ case MCS_RSRC_TYPE_SA:
+ return "SA";
+ default:
+ return "Unknown";
+ }
+
+ return "Unknown";
+}
+
+static int cn10k_mcs_alloc_rsrc(struct otx2_nic *pfvf, enum mcs_direction dir,
+ enum mcs_rsrc_type type, u16 *rsrc_id)
+{
+ struct mbox *mbox = &pfvf->mbox;
+ struct mcs_alloc_rsrc_req *req;
+ struct mcs_alloc_rsrc_rsp *rsp;
+ int ret = -ENOMEM;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_alloc_resources(mbox);
+ if (!req)
+ goto fail;
+
+ req->rsrc_type = type;
+ req->rsrc_cnt = 1;
+ req->dir = dir;
+
+ ret = otx2_sync_mbox_msg(mbox);
+ if (ret)
+ goto fail;
+
+ rsp = (struct mcs_alloc_rsrc_rsp *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
+ 0, &req->hdr);
+ if (IS_ERR(rsp) || req->rsrc_cnt != rsp->rsrc_cnt ||
+ req->rsrc_type != rsp->rsrc_type || req->dir != rsp->dir) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ switch (rsp->rsrc_type) {
+ case MCS_RSRC_TYPE_FLOWID:
+ *rsrc_id = rsp->flow_ids[0];
+ break;
+ case MCS_RSRC_TYPE_SC:
+ *rsrc_id = rsp->sc_ids[0];
+ break;
+ case MCS_RSRC_TYPE_SECY:
+ *rsrc_id = rsp->secy_ids[0];
+ break;
+ case MCS_RSRC_TYPE_SA:
+ *rsrc_id = rsp->sa_ids[0];
+ break;
+ default:
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ mutex_unlock(&mbox->lock);
+
+ return 0;
+fail:
+ dev_err(pfvf->dev, "Failed to allocate %s %s resource\n",
+ dir == MCS_TX ? "TX" : "RX", rsrc_name(type));
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static void cn10k_mcs_free_rsrc(struct otx2_nic *pfvf, enum mcs_direction dir,
+ enum mcs_rsrc_type type, u16 hw_rsrc_id,
+ bool all)
+{
+ struct mbox *mbox = &pfvf->mbox;
+ struct mcs_free_rsrc_req *req;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_free_resources(mbox);
+ if (!req)
+ goto fail;
+
+ req->rsrc_id = hw_rsrc_id;
+ req->rsrc_type = type;
+ req->dir = dir;
+ if (all)
+ req->all = 1;
+
+ if (otx2_sync_mbox_msg(&pfvf->mbox))
+ goto fail;
+
+ mutex_unlock(&mbox->lock);
+
+ return;
+fail:
+ dev_err(pfvf->dev, "Failed to free %s %s resource\n",
+ dir == MCS_TX ? "TX" : "RX", rsrc_name(type));
+ mutex_unlock(&mbox->lock);
+}
+
+static int cn10k_mcs_alloc_txsa(struct otx2_nic *pfvf, u16 *hw_sa_id)
+{
+ return cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SA, hw_sa_id);
+}
+
+static int cn10k_mcs_alloc_rxsa(struct otx2_nic *pfvf, u16 *hw_sa_id)
+{
+ return cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SA, hw_sa_id);
+}
+
+static void cn10k_mcs_free_txsa(struct otx2_nic *pfvf, u16 hw_sa_id)
+{
+ cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SA, hw_sa_id, false);
+}
+
+static void cn10k_mcs_free_rxsa(struct otx2_nic *pfvf, u16 hw_sa_id)
+{
+ cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SA, hw_sa_id, false);
+}
+
+static int cn10k_mcs_write_rx_secy(struct otx2_nic *pfvf,
+ struct macsec_secy *secy, u8 hw_secy_id)
+{
+ struct mcs_secy_plcy_write_req *req;
+ struct mbox *mbox = &pfvf->mbox;
+ u64 policy;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_secy_plcy_write(mbox);
+ if (!req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ policy = FIELD_PREP(MCS_RX_SECY_PLCY_RW_MASK, secy->replay_window);
+ if (secy->replay_protect)
+ policy |= MCS_RX_SECY_PLCY_RP;
+
+ policy |= MCS_RX_SECY_PLCY_AUTH_ENA;
+ policy |= FIELD_PREP(MCS_RX_SECY_PLCY_CIP, MCS_GCM_AES_128);
+ policy |= FIELD_PREP(MCS_RX_SECY_PLCY_VAL, secy->validate_frames);
+
+ policy |= MCS_RX_SECY_PLCY_ENA;
+
+ req->plcy = policy;
+ req->secy_id = hw_secy_id;
+ req->dir = MCS_RX;
+
+ ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_mcs_write_rx_flowid(struct otx2_nic *pfvf,
+ struct cn10k_mcs_rxsc *rxsc, u8 hw_secy_id)
+{
+ struct macsec_rx_sc *sw_rx_sc = rxsc->sw_rxsc;
+ struct mcs_flowid_entry_write_req *req;
+ struct mbox *mbox = &pfvf->mbox;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_flowid_entry_write(mbox);
+ if (!req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
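+ /* Match only on the MACsec Ethertype; all other TCAM fields are masked out */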
+ req->data[1] = FIELD_PREP(MCS_TCAM1_ETYPE_MASK, ETH_P_MACSEC);
+ req->mask[1] = ~0ULL;
+ req->mask[1] &= ~MCS_TCAM1_ETYPE_MASK;
+
+ req->mask[0] = ~0ULL;
+ req->mask[2] = ~0ULL;
+ req->mask[3] = ~0ULL;
+
+ req->flow_id = rxsc->hw_flow_id;
+ req->secy_id = hw_secy_id;
+ req->sc_id = rxsc->hw_sc_id;
+ req->dir = MCS_RX;
+
+ if (sw_rx_sc->active)
+ req->ena = 1;
+
+ ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_mcs_write_sc_cam(struct otx2_nic *pfvf,
+ struct cn10k_mcs_rxsc *rxsc, u8 hw_secy_id)
+{
+ struct macsec_rx_sc *sw_rx_sc = rxsc->sw_rxsc;
+ struct mcs_rx_sc_cam_write_req *sc_req;
+ struct mbox *mbox = &pfvf->mbox;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ sc_req = otx2_mbox_alloc_msg_mcs_rx_sc_cam_write(mbox);
+ if (!sc_req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ sc_req->sci = (__force u64)cpu_to_be64((__force u64)sw_rx_sc->sci);
+ sc_req->sc_id = rxsc->hw_sc_id;
+ sc_req->secy_id = hw_secy_id;
+
+ ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_mcs_write_rx_sa_plcy(struct otx2_nic *pfvf,
+ struct macsec_secy *secy,
+ struct cn10k_mcs_rxsc *rxsc,
+ u8 assoc_num, bool sa_in_use)
+{
+ unsigned char *src = rxsc->sa_key[assoc_num];
+ struct mcs_sa_plcy_write_req *plcy_req;
+ struct mcs_rx_sc_sa_map *map_req;
+ struct mbox *mbox = &pfvf->mbox;
+ u8 reg, key_len;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ plcy_req = otx2_mbox_alloc_msg_mcs_sa_plcy_write(mbox);
+ if (!plcy_req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ map_req = otx2_mbox_alloc_msg_mcs_rx_sc_sa_map_write(mbox);
+ if (!map_req) {
+ otx2_mbox_reset(&mbox->mbox, 0);
+ ret = -ENOMEM;
+ goto fail;
+ }
+
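+ /* Copy the SA key into the policy, 8 bytes (one 64-bit word) per iteration */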
+ for (reg = 0, key_len = 0; key_len < secy->key_len; key_len += 8) {
+ memcpy((u8 *)&plcy_req->plcy[0][reg],
+ (src + reg * 8), 8);
+ reg++;
+ }
+
+ plcy_req->sa_index[0] = rxsc->hw_sa_id[assoc_num];
+ plcy_req->sa_cnt = 1;
+ plcy_req->dir = MCS_RX;
+
+ map_req->sa_index = rxsc->hw_sa_id[assoc_num];
+ map_req->sa_in_use = sa_in_use;
+ map_req->sc_id = rxsc->hw_sc_id;
+ map_req->an = assoc_num;
+
+ /* Send two messages together */
+ ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_mcs_write_rx_sa_pn(struct otx2_nic *pfvf,
+ struct cn10k_mcs_rxsc *rxsc,
+ u8 assoc_num, u64 next_pn)
+{
+ struct mcs_pn_table_write_req *req;
+ struct mbox *mbox = &pfvf->mbox;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_pn_table_write(mbox);
+ if (!req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ req->pn_id = rxsc->hw_sa_id[assoc_num];
+ req->next_pn = next_pn;
+ req->dir = MCS_RX;
+
+ ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_mcs_write_tx_secy(struct otx2_nic *pfvf,
+ struct macsec_secy *secy,
+ struct cn10k_mcs_txsc *txsc)
+{
+ struct mcs_secy_plcy_write_req *req;
+ struct mbox *mbox = &pfvf->mbox;
+ struct macsec_tx_sc *sw_tx_sc;
+ /* Insert SecTag after 12 bytes (DA + SA) */
+ u8 tag_offset = 12;
+ u8 sectag_tci = 0;
+ u64 policy;
+ int ret;
+
+ sw_tx_sc = &secy->tx_sc;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_secy_plcy_write(mbox);
+ if (!req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ if (sw_tx_sc->send_sci) {
+ sectag_tci |= MCS_TCI_SC;
+ } else {
+ if (sw_tx_sc->end_station)
+ sectag_tci |= MCS_TCI_ES;
+ if (sw_tx_sc->scb)
+ sectag_tci |= MCS_TCI_SCB;
+ }
+
+ if (sw_tx_sc->encrypt)
+ sectag_tci |= (MCS_TCI_E | MCS_TCI_C);
+
+ policy = FIELD_PREP(MCS_TX_SECY_PLCY_MTU, secy->netdev->mtu);
+ /* Write SecTag excluding AN bits (1..0) */
+ policy |= FIELD_PREP(MCS_TX_SECY_PLCY_ST_TCI, sectag_tci >> 2);
+ policy |= FIELD_PREP(MCS_TX_SECY_PLCY_ST_OFFSET, tag_offset);
+ policy |= MCS_TX_SECY_PLCY_INS_MODE;
+ policy |= MCS_TX_SECY_PLCY_AUTH_ENA;
+ policy |= FIELD_PREP(MCS_TX_SECY_PLCY_CIP, MCS_GCM_AES_128);
+
+ if (secy->protect_frames)
+ policy |= MCS_TX_SECY_PLCY_PROTECT;
+
+ /* If the encoding SA does not exist or is not active and protect
+ * is not set, then frames can be sent out as they are. Hence
+ * enable the policy irrespective of secy operational when !protect.
+ */
+ if (!secy->protect_frames || secy->operational)
+ policy |= MCS_TX_SECY_PLCY_ENA;
+
+ req->plcy = policy;
+ req->secy_id = txsc->hw_secy_id_tx;
+ req->dir = MCS_TX;
+
+ ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_mcs_write_tx_flowid(struct otx2_nic *pfvf,
+ struct macsec_secy *secy,
+ struct cn10k_mcs_txsc *txsc)
+{
+ struct mcs_flowid_entry_write_req *req;
+ struct mbox *mbox = &pfvf->mbox;
+ u64 mac_sa;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_flowid_entry_write(mbox);
+ if (!req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ mac_sa = ether_addr_to_u64(secy->netdev->dev_addr);
+
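+ /* The 48-bit source MAC spans two TCAM words: low 16 bits in
+ * data[0][63:48], upper 32 bits in data[1][31:0].
+ */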
+ req->data[0] = FIELD_PREP(MCS_TCAM0_MAC_SA_MASK, mac_sa);
+ req->data[1] = FIELD_PREP(MCS_TCAM1_MAC_SA_MASK, mac_sa >> 16);
+
+ req->mask[0] = ~0ULL;
+ req->mask[0] &= ~MCS_TCAM0_MAC_SA_MASK;
+
+ req->mask[1] = ~0ULL;
+ req->mask[1] &= ~MCS_TCAM1_MAC_SA_MASK;
+
+ req->mask[2] = ~0ULL;
+ req->mask[3] = ~0ULL;
+
+ req->flow_id = txsc->hw_flow_id;
+ req->secy_id = txsc->hw_secy_id_tx;
+ req->sc_id = txsc->hw_sc_id;
+ req->sci = (__force u64)cpu_to_be64((__force u64)secy->sci);
+ req->dir = MCS_TX;
+ /* This can be enabled since stack xmits packets only when interface is up */
+ req->ena = 1;
+
+ ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_mcs_link_tx_sa2sc(struct otx2_nic *pfvf,
+ struct macsec_secy *secy,
+ struct cn10k_mcs_txsc *txsc,
+ u8 sa_num, bool sa_active)
+{
+ struct mcs_tx_sc_sa_map *map_req;
+ struct mbox *mbox = &pfvf->mbox;
+ int ret;
+
+ /* Out of all the SAs, link only the encoding_sa to the SC */
+ if (txsc->encoding_sa != sa_num)
+ return 0;
+
+ mutex_lock(&mbox->lock);
+
+ map_req = otx2_mbox_alloc_msg_mcs_tx_sc_sa_map_write(mbox);
+ if (!map_req) {
+ otx2_mbox_reset(&mbox->mbox, 0);
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ map_req->sa_index0 = txsc->hw_sa_id[sa_num];
+ map_req->sa_index0_vld = sa_active;
+ map_req->sectag_sci = (__force u64)cpu_to_be64((__force u64)secy->sci);
+ map_req->sc_id = txsc->hw_sc_id;
+
+ ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_mcs_write_tx_sa_plcy(struct otx2_nic *pfvf,
+ struct macsec_secy *secy,
+ struct cn10k_mcs_txsc *txsc,
+ u8 assoc_num)
+{
+ unsigned char *src = txsc->sa_key[assoc_num];
+ struct mcs_sa_plcy_write_req *plcy_req;
+ struct mbox *mbox = &pfvf->mbox;
+ u8 reg, key_len;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ plcy_req = otx2_mbox_alloc_msg_mcs_sa_plcy_write(mbox);
+ if (!plcy_req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
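+ /* Copy the SA key into the policy words, 8 bytes per register */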
+ for (reg = 0, key_len = 0; key_len < secy->key_len; key_len += 8) {
+ memcpy((u8 *)&plcy_req->plcy[0][reg], (src + reg * 8), 8);
+ reg++;
+ }
+
+ plcy_req->plcy[0][8] = assoc_num;
+ plcy_req->sa_index[0] = txsc->hw_sa_id[assoc_num];
+ plcy_req->sa_cnt = 1;
+ plcy_req->dir = MCS_TX;
+
+ ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_write_tx_sa_pn(struct otx2_nic *pfvf,
+ struct cn10k_mcs_txsc *txsc,
+ u8 assoc_num, u64 next_pn)
+{
+ struct mcs_pn_table_write_req *req;
+ struct mbox *mbox = &pfvf->mbox;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_pn_table_write(mbox);
+ if (!req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ req->pn_id = txsc->hw_sa_id[assoc_num];
+ req->next_pn = next_pn;
+ req->dir = MCS_TX;
+
+ ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_mcs_ena_dis_flowid(struct otx2_nic *pfvf, u16 hw_flow_id,
+ bool enable, enum mcs_direction dir)
+{
+ struct mcs_flowid_ena_dis_entry *req;
+ struct mbox *mbox = &pfvf->mbox;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_flowid_ena_entry(mbox);
+ if (!req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ req->flow_id = hw_flow_id;
+ req->ena = enable;
+ req->dir = dir;
+
+ ret = otx2_sync_mbox_msg(mbox);
+
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_mcs_sa_stats(struct otx2_nic *pfvf, u8 hw_sa_id,
+ struct mcs_sa_stats *rsp_p,
+ enum mcs_direction dir, bool clear)
+{
+ struct mcs_clear_stats *clear_req;
+ struct mbox *mbox = &pfvf->mbox;
+ struct mcs_stats_req *req;
+ struct mcs_sa_stats *rsp;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_get_sa_stats(mbox);
+ if (!req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ req->id = hw_sa_id;
+ req->dir = dir;
+
+ if (!clear)
+ goto send_msg;
+
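+ /* Queue a clear-stats request in the same mailbox batch so the
+ * counters are reset after being read
+ */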
+ clear_req = otx2_mbox_alloc_msg_mcs_clear_stats(mbox);
+ if (!clear_req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+ clear_req->id = hw_sa_id;
+ clear_req->dir = dir;
+ clear_req->type = MCS_RSRC_TYPE_SA;
+
+send_msg:
+ ret = otx2_sync_mbox_msg(mbox);
+ if (ret)
+ goto fail;
+
+ rsp = (struct mcs_sa_stats *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
+ 0, &req->hdr);
+ if (IS_ERR(rsp)) {
+ ret = PTR_ERR(rsp);
+ goto fail;
+ }
+
+ memcpy(rsp_p, rsp, sizeof(*rsp_p));
+
+ mutex_unlock(&mbox->lock);
+
+ return 0;
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_mcs_sc_stats(struct otx2_nic *pfvf, u8 hw_sc_id,
+ struct mcs_sc_stats *rsp_p,
+ enum mcs_direction dir, bool clear)
+{
+ struct mcs_clear_stats *clear_req;
+ struct mbox *mbox = &pfvf->mbox;
+ struct mcs_stats_req *req;
+ struct mcs_sc_stats *rsp;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_get_sc_stats(mbox);
+ if (!req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ req->id = hw_sc_id;
+ req->dir = dir;
+
+ if (!clear)
+ goto send_msg;
+
+ clear_req = otx2_mbox_alloc_msg_mcs_clear_stats(mbox);
+ if (!clear_req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+ clear_req->id = hw_sc_id;
+ clear_req->dir = dir;
+ clear_req->type = MCS_RSRC_TYPE_SC;
+
+send_msg:
+ ret = otx2_sync_mbox_msg(mbox);
+ if (ret)
+ goto fail;
+
+ rsp = (struct mcs_sc_stats *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
+ 0, &req->hdr);
+ if (IS_ERR(rsp)) {
+ ret = PTR_ERR(rsp);
+ goto fail;
+ }
+
+ memcpy(rsp_p, rsp, sizeof(*rsp_p));
+
+ mutex_unlock(&mbox->lock);
+
+ return 0;
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static int cn10k_mcs_secy_stats(struct otx2_nic *pfvf, u8 hw_secy_id,
+ struct mcs_secy_stats *rsp_p,
+ enum mcs_direction dir, bool clear)
+{
+ struct mcs_clear_stats *clear_req;
+ struct mbox *mbox = &pfvf->mbox;
+ struct mcs_secy_stats *rsp;
+ struct mcs_stats_req *req;
+ int ret;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_get_secy_stats(mbox);
+ if (!req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ req->id = hw_secy_id;
+ req->dir = dir;
+
+ if (!clear)
+ goto send_msg;
+
+ clear_req = otx2_mbox_alloc_msg_mcs_clear_stats(mbox);
+ if (!clear_req) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+ clear_req->id = hw_secy_id;
+ clear_req->dir = dir;
+ clear_req->type = MCS_RSRC_TYPE_SECY;
+
+send_msg:
+ ret = otx2_sync_mbox_msg(mbox);
+ if (ret)
+ goto fail;
+
+ rsp = (struct mcs_secy_stats *)otx2_mbox_get_rsp(&pfvf->mbox.mbox,
+ 0, &req->hdr);
+ if (IS_ERR(rsp)) {
+ ret = PTR_ERR(rsp);
+ goto fail;
+ }
+
+ memcpy(rsp_p, rsp, sizeof(*rsp_p));
+
+ mutex_unlock(&mbox->lock);
+
+ return 0;
+fail:
+ mutex_unlock(&mbox->lock);
+ return ret;
+}
+
+static struct cn10k_mcs_txsc *cn10k_mcs_create_txsc(struct otx2_nic *pfvf)
+{
+ struct cn10k_mcs_txsc *txsc;
+ int ret;
+
+ txsc = kzalloc(sizeof(*txsc), GFP_KERNEL);
+ if (!txsc)
+ return ERR_PTR(-ENOMEM);
+
+ ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_FLOWID,
+ &txsc->hw_flow_id);
+ if (ret)
+ goto fail;
+
+ /* For each SecY, one Tx secy and one Rx secy HW resource are needed */
+ ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY,
+ &txsc->hw_secy_id_tx);
+ if (ret)
+ goto free_flowid;
+
+ ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY,
+ &txsc->hw_secy_id_rx);
+ if (ret)
+ goto free_tx_secy;
+
+ ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SC,
+ &txsc->hw_sc_id);
+ if (ret)
+ goto free_rx_secy;
+
+ return txsc;
+free_rx_secy:
+ cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY,
+ txsc->hw_secy_id_rx, false);
+free_tx_secy:
+ cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY,
+ txsc->hw_secy_id_tx, false);
+free_flowid:
+ cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_FLOWID,
+ txsc->hw_flow_id, false);
+fail:
+ return ERR_PTR(ret);
+}
+
+/* Free the Tx SC and its SAs (if any) resources back to the AF */
+static void cn10k_mcs_delete_txsc(struct otx2_nic *pfvf,
+ struct cn10k_mcs_txsc *txsc)
+{
+ u8 sa_bmap = txsc->sa_bmap;
+ u8 sa_num = 0;
+
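+ /* Free any SAs still attached to this SC before releasing the SC itself */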
+ while (sa_bmap) {
+ if (sa_bmap & 1) {
+ cn10k_mcs_write_tx_sa_plcy(pfvf, txsc->sw_secy,
+ txsc, sa_num);
+ cn10k_mcs_free_txsa(pfvf, txsc->hw_sa_id[sa_num]);
+ }
+ sa_num++;
+ sa_bmap >>= 1;
+ }
+
+ cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SC,
+ txsc->hw_sc_id, false);
+ cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY,
+ txsc->hw_secy_id_rx, false);
+ cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY,
+ txsc->hw_secy_id_tx, false);
+ cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_FLOWID,
+ txsc->hw_flow_id, false);
+}
+
+static struct cn10k_mcs_rxsc *cn10k_mcs_create_rxsc(struct otx2_nic *pfvf)
+{
+ struct cn10k_mcs_rxsc *rxsc;
+ int ret;
+
+ rxsc = kzalloc(sizeof(*rxsc), GFP_KERNEL);
+ if (!rxsc)
+ return ERR_PTR(-ENOMEM);
+
+ ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_FLOWID,
+ &rxsc->hw_flow_id);
+ if (ret)
+ goto fail;
+
+ ret = cn10k_mcs_alloc_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SC,
+ &rxsc->hw_sc_id);
+ if (ret)
+ goto free_flowid;
+
+ return rxsc;
+free_flowid:
+ cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_FLOWID,
+ rxsc->hw_flow_id, false);
+fail:
+ return ERR_PTR(ret);
+}
+
+/* Free the Rx SC and its SAs (if any) resources back to the AF */
+static void cn10k_mcs_delete_rxsc(struct otx2_nic *pfvf,
+ struct cn10k_mcs_rxsc *rxsc)
+{
+ u8 sa_bmap = rxsc->sa_bmap;
+ u8 sa_num = 0;
+
+ while (sa_bmap) {
+ if (sa_bmap & 1) {
+ cn10k_mcs_write_rx_sa_plcy(pfvf, rxsc->sw_secy, rxsc,
+ sa_num, false);
+ cn10k_mcs_free_rxsa(pfvf, rxsc->hw_sa_id[sa_num]);
+ }
+ sa_num++;
+ sa_bmap >>= 1;
+ }
+
+ cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SC,
+ rxsc->hw_sc_id, false);
+ cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_FLOWID,
+ rxsc->hw_flow_id, false);
+}
+
+static int cn10k_mcs_secy_tx_cfg(struct otx2_nic *pfvf, struct macsec_secy *secy,
+ struct cn10k_mcs_txsc *txsc,
+ struct macsec_tx_sa *sw_tx_sa, u8 sa_num)
+{
+ if (sw_tx_sa) {
+ cn10k_mcs_write_tx_sa_plcy(pfvf, secy, txsc, sa_num);
+ cn10k_write_tx_sa_pn(pfvf, txsc, sa_num,
+ sw_tx_sa->next_pn_halves.lower);
+ cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc, sa_num,
+ sw_tx_sa->active);
+ }
+
+ cn10k_mcs_write_tx_secy(pfvf, secy, txsc);
+ cn10k_mcs_write_tx_flowid(pfvf, secy, txsc);
+ /* When updating secy, change RX secy also */
+ cn10k_mcs_write_rx_secy(pfvf, secy, txsc->hw_secy_id_rx);
+
+ return 0;
+}
+
+static int cn10k_mcs_secy_rx_cfg(struct otx2_nic *pfvf,
+ struct macsec_secy *secy, u8 hw_secy_id)
+{
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct cn10k_mcs_rxsc *mcs_rx_sc;
+ struct macsec_rx_sc *sw_rx_sc;
+ struct macsec_rx_sa *sw_rx_sa;
+ u8 sa_num;
+
+ for (sw_rx_sc = rcu_dereference_bh(secy->rx_sc); sw_rx_sc && sw_rx_sc->active;
+ sw_rx_sc = rcu_dereference_bh(sw_rx_sc->next)) {
+ mcs_rx_sc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc);
+ if (unlikely(!mcs_rx_sc))
+ continue;
+
+ for (sa_num = 0; sa_num < CN10K_MCS_SA_PER_SC; sa_num++) {
+ sw_rx_sa = rcu_dereference_bh(sw_rx_sc->sa[sa_num]);
+ if (!sw_rx_sa)
+ continue;
+
+ cn10k_mcs_write_rx_sa_plcy(pfvf, secy, mcs_rx_sc,
+ sa_num, sw_rx_sa->active);
+ cn10k_mcs_write_rx_sa_pn(pfvf, mcs_rx_sc, sa_num,
+ sw_rx_sa->next_pn_halves.lower);
+ }
+
+ cn10k_mcs_write_rx_flowid(pfvf, mcs_rx_sc, hw_secy_id);
+ cn10k_mcs_write_sc_cam(pfvf, mcs_rx_sc, hw_secy_id);
+ }
+
+ return 0;
+}
+
+static int cn10k_mcs_disable_rxscs(struct otx2_nic *pfvf,
+ struct macsec_secy *secy,
+ bool delete)
+{
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct cn10k_mcs_rxsc *mcs_rx_sc;
+ struct macsec_rx_sc *sw_rx_sc;
+ int ret;
+
+ for (sw_rx_sc = rcu_dereference_bh(secy->rx_sc); sw_rx_sc && sw_rx_sc->active;
+ sw_rx_sc = rcu_dereference_bh(sw_rx_sc->next)) {
+ mcs_rx_sc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc);
+ if (unlikely(!mcs_rx_sc))
+ continue;
+
+ ret = cn10k_mcs_ena_dis_flowid(pfvf, mcs_rx_sc->hw_flow_id,
+ false, MCS_RX);
+ if (ret)
+ dev_err(pfvf->dev, "Failed to disable TCAM for SC %d\n",
+ mcs_rx_sc->hw_sc_id);
+ if (delete) {
+ cn10k_mcs_delete_rxsc(pfvf, mcs_rx_sc);
+ list_del(&mcs_rx_sc->entry);
+ kfree(mcs_rx_sc);
+ }
+ }
+
+ return 0;
+}
+
+static void cn10k_mcs_sync_stats(struct otx2_nic *pfvf, struct macsec_secy *secy,
+ struct cn10k_mcs_txsc *txsc)
+{
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct mcs_secy_stats rx_rsp = { 0 };
+ struct mcs_sc_stats sc_rsp = { 0 };
+ struct cn10k_mcs_rxsc *rxsc;
+
+ /* Because some stats share hardware counters, take a snapshot of the
+ * current stats and reset them when updating the secy policy.
+ * The stats below are affected by the shared counters.
+ */
+
+ /* Check if sync is really needed */
+ if (secy->validate_frames == txsc->last_validate_frames &&
+ secy->protect_frames == txsc->last_protect_frames)
+ return;
+
+ cn10k_mcs_secy_stats(pfvf, txsc->hw_secy_id_rx, &rx_rsp, MCS_RX, true);
+
+ txsc->stats.InPktsBadTag += rx_rsp.pkt_badtag_cnt;
+ txsc->stats.InPktsUnknownSCI += rx_rsp.pkt_nosa_cnt;
+ txsc->stats.InPktsNoSCI += rx_rsp.pkt_nosaerror_cnt;
+ if (txsc->last_validate_frames == MACSEC_VALIDATE_STRICT)
+ txsc->stats.InPktsNoTag += rx_rsp.pkt_untaged_cnt;
+ else
+ txsc->stats.InPktsUntagged += rx_rsp.pkt_untaged_cnt;
+
+ list_for_each_entry(rxsc, &cfg->rxsc_list, entry) {
+ cn10k_mcs_sc_stats(pfvf, rxsc->hw_sc_id, &sc_rsp, MCS_RX, true);
+
+ rxsc->stats.InOctetsValidated += sc_rsp.octet_validate_cnt;
+ rxsc->stats.InOctetsDecrypted += sc_rsp.octet_decrypt_cnt;
+
+ rxsc->stats.InPktsInvalid += sc_rsp.pkt_invalid_cnt;
+ rxsc->stats.InPktsNotValid += sc_rsp.pkt_notvalid_cnt;
+
+ if (txsc->last_protect_frames)
+ rxsc->stats.InPktsLate += sc_rsp.pkt_late_cnt;
+ else
+ rxsc->stats.InPktsDelayed += sc_rsp.pkt_late_cnt;
+
+ if (txsc->last_validate_frames == MACSEC_VALIDATE_CHECK)
+ rxsc->stats.InPktsUnchecked += sc_rsp.pkt_unchecked_cnt;
+ else
+ rxsc->stats.InPktsOK += sc_rsp.pkt_unchecked_cnt;
+ }
+
+ txsc->last_validate_frames = secy->validate_frames;
+ txsc->last_protect_frames = secy->protect_frames;
+}
+
+static int cn10k_mdo_open(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_secy *secy = ctx->secy;
+ struct macsec_tx_sa *sw_tx_sa;
+ struct cn10k_mcs_txsc *txsc;
+ u8 sa_num;
+ int err;
+
+ txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
+ if (!txsc)
+ return -ENOENT;
+
+ sa_num = txsc->encoding_sa;
+ sw_tx_sa = rcu_dereference_bh(secy->tx_sc.sa[sa_num]);
+
+ err = cn10k_mcs_secy_tx_cfg(pfvf, secy, txsc, sw_tx_sa, sa_num);
+ if (err)
+ return err;
+
+ return cn10k_mcs_secy_rx_cfg(pfvf, secy, txsc->hw_secy_id_rx);
+}
+
+static int cn10k_mdo_stop(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct cn10k_mcs_txsc *txsc;
+ int err;
+
+ txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
+ if (!txsc)
+ return -ENOENT;
+
+ err = cn10k_mcs_ena_dis_flowid(pfvf, txsc->hw_flow_id, false, MCS_TX);
+ if (err)
+ return err;
+
+ return cn10k_mcs_disable_rxscs(pfvf, ctx->secy, false);
+}
+
+static int cn10k_mdo_add_secy(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_secy *secy = ctx->secy;
+ struct cn10k_mcs_txsc *txsc;
+
+ if (secy->icv_len != MACSEC_DEFAULT_ICV_LEN)
+ return -EOPNOTSUPP;
+
+ /* Stick to 16 bytes key len until XPN support is added */
+ if (secy->key_len != 16)
+ return -EOPNOTSUPP;
+
+ if (secy->xpn)
+ return -EOPNOTSUPP;
+
+ txsc = cn10k_mcs_create_txsc(pfvf);
+ if (IS_ERR(txsc))
+ return -ENOSPC;
+
+ txsc->sw_secy = secy;
+ txsc->encoding_sa = secy->tx_sc.encoding_sa;
+ txsc->last_validate_frames = secy->validate_frames;
+ txsc->last_protect_frames = secy->protect_frames;
+
+ list_add(&txsc->entry, &cfg->txsc_list);
+
+ if (netif_running(secy->netdev))
+ return cn10k_mcs_secy_tx_cfg(pfvf, secy, txsc, NULL, 0);
+
+ return 0;
+}
+
+static int cn10k_mdo_upd_secy(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_secy *secy = ctx->secy;
+ struct macsec_tx_sa *sw_tx_sa;
+ struct cn10k_mcs_txsc *txsc;
+ u8 sa_num;
+ int err;
+
+ txsc = cn10k_mcs_get_txsc(cfg, secy);
+ if (!txsc)
+ return -ENOENT;
+
+ txsc->encoding_sa = secy->tx_sc.encoding_sa;
+
+ sa_num = txsc->encoding_sa;
+ sw_tx_sa = rcu_dereference_bh(secy->tx_sc.sa[sa_num]);
+
+ if (netif_running(secy->netdev)) {
+ cn10k_mcs_sync_stats(pfvf, secy, txsc);
+
+ err = cn10k_mcs_secy_tx_cfg(pfvf, secy, txsc, sw_tx_sa, sa_num);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int cn10k_mdo_del_secy(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct cn10k_mcs_txsc *txsc;
+
+ txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
+ if (!txsc)
+ return -ENOENT;
+
+ cn10k_mcs_ena_dis_flowid(pfvf, txsc->hw_flow_id, false, MCS_TX);
+ cn10k_mcs_disable_rxscs(pfvf, ctx->secy, true);
+ cn10k_mcs_delete_txsc(pfvf, txsc);
+ list_del(&txsc->entry);
+ kfree(txsc);
+
+ return 0;
+}
+
+static int cn10k_mdo_add_txsa(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct macsec_tx_sa *sw_tx_sa = ctx->sa.tx_sa;
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_secy *secy = ctx->secy;
+ u8 sa_num = ctx->sa.assoc_num;
+ struct cn10k_mcs_txsc *txsc;
+ int err;
+
+ txsc = cn10k_mcs_get_txsc(cfg, secy);
+ if (!txsc)
+ return -ENOENT;
+
+ if (sa_num >= CN10K_MCS_SA_PER_SC)
+ return -EOPNOTSUPP;
+
+ if (cn10k_mcs_alloc_txsa(pfvf, &txsc->hw_sa_id[sa_num]))
+ return -ENOSPC;
+
+ memcpy(&txsc->sa_key[sa_num], ctx->sa.key, secy->key_len);
+ txsc->sa_bmap |= 1 << sa_num;
+
+ if (netif_running(secy->netdev)) {
+ err = cn10k_mcs_write_tx_sa_plcy(pfvf, secy, txsc, sa_num);
+ if (err)
+ return err;
+
+ err = cn10k_write_tx_sa_pn(pfvf, txsc, sa_num,
+ sw_tx_sa->next_pn_halves.lower);
+ if (err)
+ return err;
+
+ err = cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc,
+ sa_num, sw_tx_sa->active);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int cn10k_mdo_upd_txsa(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct macsec_tx_sa *sw_tx_sa = ctx->sa.tx_sa;
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_secy *secy = ctx->secy;
+ u8 sa_num = ctx->sa.assoc_num;
+ struct cn10k_mcs_txsc *txsc;
+ int err;
+
+ txsc = cn10k_mcs_get_txsc(cfg, secy);
+ if (!txsc)
+ return -ENOENT;
+
+ if (sa_num >= CN10K_MCS_SA_PER_SC)
+ return -EOPNOTSUPP;
+
+ if (netif_running(secy->netdev)) {
+ /* Keys cannot be changed after creation */
+ err = cn10k_write_tx_sa_pn(pfvf, txsc, sa_num,
+ sw_tx_sa->next_pn_halves.lower);
+ if (err)
+ return err;
+
+ err = cn10k_mcs_link_tx_sa2sc(pfvf, secy, txsc,
+ sa_num, sw_tx_sa->active);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int cn10k_mdo_del_txsa(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ u8 sa_num = ctx->sa.assoc_num;
+ struct cn10k_mcs_txsc *txsc;
+
+ txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
+ if (!txsc)
+ return -ENOENT;
+
+ if (sa_num >= CN10K_MCS_SA_PER_SC)
+ return -EOPNOTSUPP;
+
+ cn10k_mcs_free_txsa(pfvf, txsc->hw_sa_id[sa_num]);
+ txsc->sa_bmap &= ~(1 << sa_num);
+
+ return 0;
+}
+
+static int cn10k_mdo_add_rxsc(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_secy *secy = ctx->secy;
+ struct cn10k_mcs_rxsc *rxsc;
+ struct cn10k_mcs_txsc *txsc;
+ int err;
+
+ txsc = cn10k_mcs_get_txsc(cfg, secy);
+ if (!txsc)
+ return -ENOENT;
+
+ rxsc = cn10k_mcs_create_rxsc(pfvf);
+ if (IS_ERR(rxsc))
+ return -ENOSPC;
+
+ rxsc->sw_secy = ctx->secy;
+ rxsc->sw_rxsc = ctx->rx_sc;
+ list_add(&rxsc->entry, &cfg->rxsc_list);
+
+ if (netif_running(secy->netdev)) {
+ err = cn10k_mcs_write_rx_flowid(pfvf, rxsc, txsc->hw_secy_id_rx);
+ if (err)
+ return err;
+
+ err = cn10k_mcs_write_sc_cam(pfvf, rxsc, txsc->hw_secy_id_rx);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int cn10k_mdo_upd_rxsc(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_secy *secy = ctx->secy;
+ bool enable = ctx->rx_sc->active;
+ struct cn10k_mcs_rxsc *rxsc;
+
+ rxsc = cn10k_mcs_get_rxsc(cfg, secy, ctx->rx_sc);
+ if (!rxsc)
+ return -ENOENT;
+
+ if (netif_running(secy->netdev))
+ return cn10k_mcs_ena_dis_flowid(pfvf, rxsc->hw_flow_id,
+ enable, MCS_RX);
+
+ return 0;
+}
+
+static int cn10k_mdo_del_rxsc(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct cn10k_mcs_rxsc *rxsc;
+
+ rxsc = cn10k_mcs_get_rxsc(cfg, ctx->secy, ctx->rx_sc);
+ if (!rxsc)
+ return -ENOENT;
+
+ cn10k_mcs_ena_dis_flowid(pfvf, rxsc->hw_flow_id, false, MCS_RX);
+ cn10k_mcs_delete_rxsc(pfvf, rxsc);
+ list_del(&rxsc->entry);
+ kfree(rxsc);
+
+ return 0;
+}
+
+static int cn10k_mdo_add_rxsa(struct macsec_context *ctx)
+{
+ struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc;
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_rx_sa *rx_sa = ctx->sa.rx_sa;
+ u64 next_pn = rx_sa->next_pn_halves.lower;
+ struct macsec_secy *secy = ctx->secy;
+ bool sa_in_use = rx_sa->active;
+ u8 sa_num = ctx->sa.assoc_num;
+ struct cn10k_mcs_rxsc *rxsc;
+ int err;
+
+ rxsc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc);
+ if (!rxsc)
+ return -ENOENT;
+
+ if (sa_num >= CN10K_MCS_SA_PER_SC)
+ return -EOPNOTSUPP;
+
+ if (cn10k_mcs_alloc_rxsa(pfvf, &rxsc->hw_sa_id[sa_num]))
+ return -ENOSPC;
+
+ memcpy(&rxsc->sa_key[sa_num], ctx->sa.key, ctx->secy->key_len);
+ rxsc->sa_bmap |= 1 << sa_num;
+
+ if (netif_running(secy->netdev)) {
+ err = cn10k_mcs_write_rx_sa_plcy(pfvf, secy, rxsc,
+ sa_num, sa_in_use);
+ if (err)
+ return err;
+
+ err = cn10k_mcs_write_rx_sa_pn(pfvf, rxsc, sa_num, next_pn);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int cn10k_mdo_upd_rxsa(struct macsec_context *ctx)
+{
+ struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc;
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_rx_sa *rx_sa = ctx->sa.rx_sa;
+ u64 next_pn = rx_sa->next_pn_halves.lower;
+ struct macsec_secy *secy = ctx->secy;
+ bool sa_in_use = rx_sa->active;
+ u8 sa_num = ctx->sa.assoc_num;
+ struct cn10k_mcs_rxsc *rxsc;
+ int err;
+
+ rxsc = cn10k_mcs_get_rxsc(cfg, secy, sw_rx_sc);
+ if (!rxsc)
+ return -ENOENT;
+
+ if (sa_num >= CN10K_MCS_SA_PER_SC)
+ return -EOPNOTSUPP;
+
+ if (netif_running(secy->netdev)) {
+ err = cn10k_mcs_write_rx_sa_plcy(pfvf, secy, rxsc, sa_num, sa_in_use);
+ if (err)
+ return err;
+
+ err = cn10k_mcs_write_rx_sa_pn(pfvf, rxsc, sa_num, next_pn);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int cn10k_mdo_del_rxsa(struct macsec_context *ctx)
+{
+ struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc;
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ u8 sa_num = ctx->sa.assoc_num;
+ struct cn10k_mcs_rxsc *rxsc;
+
+ rxsc = cn10k_mcs_get_rxsc(cfg, ctx->secy, sw_rx_sc);
+ if (!rxsc)
+ return -ENOENT;
+
+ if (sa_num >= CN10K_MCS_SA_PER_SC)
+ return -EOPNOTSUPP;
+
+ cn10k_mcs_write_rx_sa_plcy(pfvf, ctx->secy, rxsc, sa_num, false);
+ cn10k_mcs_free_rxsa(pfvf, rxsc->hw_sa_id[sa_num]);
+
+ rxsc->sa_bmap &= ~(1 << sa_num);
+
+ return 0;
+}
+
+static int cn10k_mdo_get_dev_stats(struct macsec_context *ctx)
+{
+ struct mcs_secy_stats tx_rsp = { 0 }, rx_rsp = { 0 };
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_secy *secy = ctx->secy;
+ struct cn10k_mcs_txsc *txsc;
+
+ txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
+ if (!txsc)
+ return -ENOENT;
+
+ cn10k_mcs_secy_stats(pfvf, txsc->hw_secy_id_tx, &tx_rsp, MCS_TX, false);
+ ctx->stats.dev_stats->OutPktsUntagged = tx_rsp.pkt_untagged_cnt;
+ ctx->stats.dev_stats->OutPktsTooLong = tx_rsp.pkt_toolong_cnt;
+
+ cn10k_mcs_secy_stats(pfvf, txsc->hw_secy_id_rx, &rx_rsp, MCS_RX, true);
+ txsc->stats.InPktsBadTag += rx_rsp.pkt_badtag_cnt;
+ txsc->stats.InPktsUnknownSCI += rx_rsp.pkt_nosa_cnt;
+ txsc->stats.InPktsNoSCI += rx_rsp.pkt_nosaerror_cnt;
+ if (secy->validate_frames == MACSEC_VALIDATE_STRICT)
+ txsc->stats.InPktsNoTag += rx_rsp.pkt_untaged_cnt;
+ else
+ txsc->stats.InPktsUntagged += rx_rsp.pkt_untaged_cnt;
+ txsc->stats.InPktsOverrun = 0;
+
+ ctx->stats.dev_stats->InPktsNoTag = txsc->stats.InPktsNoTag;
+ ctx->stats.dev_stats->InPktsUntagged = txsc->stats.InPktsUntagged;
+ ctx->stats.dev_stats->InPktsBadTag = txsc->stats.InPktsBadTag;
+ ctx->stats.dev_stats->InPktsUnknownSCI = txsc->stats.InPktsUnknownSCI;
+ ctx->stats.dev_stats->InPktsNoSCI = txsc->stats.InPktsNoSCI;
+ ctx->stats.dev_stats->InPktsOverrun = txsc->stats.InPktsOverrun;
+
+ return 0;
+}
+
+static int cn10k_mdo_get_tx_sc_stats(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct mcs_sc_stats rsp = { 0 };
+ struct cn10k_mcs_txsc *txsc;
+
+ txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
+ if (!txsc)
+ return -ENOENT;
+
+ cn10k_mcs_sc_stats(pfvf, txsc->hw_sc_id, &rsp, MCS_TX, false);
+
+ ctx->stats.tx_sc_stats->OutPktsProtected = rsp.pkt_protected_cnt;
+ ctx->stats.tx_sc_stats->OutPktsEncrypted = rsp.pkt_encrypt_cnt;
+ ctx->stats.tx_sc_stats->OutOctetsProtected = rsp.octet_protected_cnt;
+ ctx->stats.tx_sc_stats->OutOctetsEncrypted = rsp.octet_encrypt_cnt;
+
+ return 0;
+}
+
+static int cn10k_mdo_get_tx_sa_stats(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct mcs_sa_stats rsp = { 0 };
+ u8 sa_num = ctx->sa.assoc_num;
+ struct cn10k_mcs_txsc *txsc;
+
+ txsc = cn10k_mcs_get_txsc(cfg, ctx->secy);
+ if (!txsc)
+ return -ENOENT;
+
+ if (sa_num >= CN10K_MCS_SA_PER_SC)
+ return -EOPNOTSUPP;
+
+ cn10k_mcs_sa_stats(pfvf, txsc->hw_sa_id[sa_num], &rsp, MCS_TX, false);
+
+ ctx->stats.tx_sa_stats->OutPktsProtected = rsp.pkt_protected_cnt;
+ ctx->stats.tx_sa_stats->OutPktsEncrypted = rsp.pkt_encrypt_cnt;
+
+ return 0;
+}
+
+static int cn10k_mdo_get_rx_sc_stats(struct macsec_context *ctx)
+{
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_secy *secy = ctx->secy;
+ struct mcs_sc_stats rsp = { 0 };
+ struct cn10k_mcs_rxsc *rxsc;
+
+ rxsc = cn10k_mcs_get_rxsc(cfg, secy, ctx->rx_sc);
+ if (!rxsc)
+ return -ENOENT;
+
+ cn10k_mcs_sc_stats(pfvf, rxsc->hw_sc_id, &rsp, MCS_RX, true);
+
+ rxsc->stats.InOctetsValidated += rsp.octet_validate_cnt;
+ rxsc->stats.InOctetsDecrypted += rsp.octet_decrypt_cnt;
+
+ rxsc->stats.InPktsInvalid += rsp.pkt_invalid_cnt;
+ rxsc->stats.InPktsNotValid += rsp.pkt_notvalid_cnt;
+
+ if (secy->protect_frames)
+ rxsc->stats.InPktsLate += rsp.pkt_late_cnt;
+ else
+ rxsc->stats.InPktsDelayed += rsp.pkt_late_cnt;
+
+ if (secy->validate_frames == MACSEC_VALIDATE_CHECK)
+ rxsc->stats.InPktsUnchecked += rsp.pkt_unchecked_cnt;
+ else
+ rxsc->stats.InPktsOK += rsp.pkt_unchecked_cnt;
+
+ ctx->stats.rx_sc_stats->InOctetsValidated = rxsc->stats.InOctetsValidated;
+ ctx->stats.rx_sc_stats->InOctetsDecrypted = rxsc->stats.InOctetsDecrypted;
+ ctx->stats.rx_sc_stats->InPktsInvalid = rxsc->stats.InPktsInvalid;
+ ctx->stats.rx_sc_stats->InPktsNotValid = rxsc->stats.InPktsNotValid;
+ ctx->stats.rx_sc_stats->InPktsLate = rxsc->stats.InPktsLate;
+ ctx->stats.rx_sc_stats->InPktsDelayed = rxsc->stats.InPktsDelayed;
+ ctx->stats.rx_sc_stats->InPktsUnchecked = rxsc->stats.InPktsUnchecked;
+ ctx->stats.rx_sc_stats->InPktsOK = rxsc->stats.InPktsOK;
+
+ return 0;
+}
+
+static int cn10k_mdo_get_rx_sa_stats(struct macsec_context *ctx)
+{
+ struct macsec_rx_sc *sw_rx_sc = ctx->sa.rx_sa->sc;
+ struct otx2_nic *pfvf = netdev_priv(ctx->netdev);
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct mcs_sa_stats rsp = { 0 };
+ u8 sa_num = ctx->sa.assoc_num;
+ struct cn10k_mcs_rxsc *rxsc;
+
+ rxsc = cn10k_mcs_get_rxsc(cfg, ctx->secy, sw_rx_sc);
+ if (!rxsc)
+ return -ENOENT;
+
+ if (sa_num >= CN10K_MCS_SA_PER_SC)
+ return -EOPNOTSUPP;
+
+ cn10k_mcs_sa_stats(pfvf, rxsc->hw_sa_id[sa_num], &rsp, MCS_RX, false);
+
+ ctx->stats.rx_sa_stats->InPktsOK = rsp.pkt_ok_cnt;
+ ctx->stats.rx_sa_stats->InPktsInvalid = rsp.pkt_invalid_cnt;
+ ctx->stats.rx_sa_stats->InPktsNotValid = rsp.pkt_notvalid_cnt;
+ ctx->stats.rx_sa_stats->InPktsNotUsingSA = rsp.pkt_nosaerror_cnt;
+ ctx->stats.rx_sa_stats->InPktsUnusedSA = rsp.pkt_nosa_cnt;
+
+ return 0;
+}
+
+static const struct macsec_ops cn10k_mcs_ops = {
+ .mdo_dev_open = cn10k_mdo_open,
+ .mdo_dev_stop = cn10k_mdo_stop,
+ .mdo_add_secy = cn10k_mdo_add_secy,
+ .mdo_upd_secy = cn10k_mdo_upd_secy,
+ .mdo_del_secy = cn10k_mdo_del_secy,
+ .mdo_add_rxsc = cn10k_mdo_add_rxsc,
+ .mdo_upd_rxsc = cn10k_mdo_upd_rxsc,
+ .mdo_del_rxsc = cn10k_mdo_del_rxsc,
+ .mdo_add_rxsa = cn10k_mdo_add_rxsa,
+ .mdo_upd_rxsa = cn10k_mdo_upd_rxsa,
+ .mdo_del_rxsa = cn10k_mdo_del_rxsa,
+ .mdo_add_txsa = cn10k_mdo_add_txsa,
+ .mdo_upd_txsa = cn10k_mdo_upd_txsa,
+ .mdo_del_txsa = cn10k_mdo_del_txsa,
+ .mdo_get_dev_stats = cn10k_mdo_get_dev_stats,
+ .mdo_get_tx_sc_stats = cn10k_mdo_get_tx_sc_stats,
+ .mdo_get_tx_sa_stats = cn10k_mdo_get_tx_sa_stats,
+ .mdo_get_rx_sc_stats = cn10k_mdo_get_rx_sc_stats,
+ .mdo_get_rx_sa_stats = cn10k_mdo_get_rx_sa_stats,
+};
+
+void cn10k_handle_mcs_event(struct otx2_nic *pfvf, struct mcs_intr_info *event)
+{
+ struct cn10k_mcs_cfg *cfg = pfvf->macsec_cfg;
+ struct macsec_tx_sa *sw_tx_sa = NULL;
+ struct macsec_secy *secy = NULL;
+ struct cn10k_mcs_txsc *txsc;
+ u8 an;
+
+ if (!test_bit(CN10K_HW_MACSEC, &pfvf->hw.cap_flag))
+ return;
+
+ if (!(event->intr_mask & MCS_CPM_TX_PACKET_XPN_EQ0_INT))
+ return;
+
+ /* Find the SecY to which the expired hardware SA is mapped */
+ list_for_each_entry(txsc, &cfg->txsc_list, entry) {
+ for (an = 0; an < CN10K_MCS_SA_PER_SC; an++)
+ if (txsc->hw_sa_id[an] == event->sa_id) {
+ secy = txsc->sw_secy;
+ sw_tx_sa = rcu_dereference_bh(secy->tx_sc.sa[an]);
+ }
+ }
+
+ if (secy && sw_tx_sa)
+ macsec_pn_wrapped(secy, sw_tx_sa);
+}
+
+int cn10k_mcs_init(struct otx2_nic *pfvf)
+{
+ struct mbox *mbox = &pfvf->mbox;
+ struct cn10k_mcs_cfg *cfg;
+ struct mcs_intr_cfg *req;
+
+ if (!test_bit(CN10K_HW_MACSEC, &pfvf->hw.cap_flag))
+ return 0;
+
+ cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
+ if (!cfg)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&cfg->txsc_list);
+ INIT_LIST_HEAD(&cfg->rxsc_list);
+ pfvf->macsec_cfg = cfg;
+
+ pfvf->netdev->features |= NETIF_F_HW_MACSEC;
+ pfvf->netdev->macsec_ops = &cn10k_mcs_ops;
+
+ mutex_lock(&mbox->lock);
+
+ req = otx2_mbox_alloc_msg_mcs_intr_cfg(mbox);
+ if (!req)
+ goto fail;
+
+ req->intr_mask = MCS_CPM_TX_PACKET_XPN_EQ0_INT;
+
+ if (otx2_sync_mbox_msg(mbox))
+ goto fail;
+
+ mutex_unlock(&mbox->lock);
+
+ return 0;
+fail:
+ dev_err(pfvf->dev, "Cannot notify PN wrapped event\n");
+ mutex_unlock(&mbox->lock);
+ return 0;
+}
+
+void cn10k_mcs_free(struct otx2_nic *pfvf)
+{
+ if (!test_bit(CN10K_HW_MACSEC, &pfvf->hw.cap_flag))
+ return;
+
+ cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_SECY, 0, true);
+ cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_SECY, 0, true);
+ kfree(pfvf->macsec_cfg);
+ pfvf->macsec_cfg = NULL;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index d686c7b6252f..9ac9e6615ae7 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -586,8 +586,9 @@ void otx2_get_mac_from_af(struct net_device *netdev)
}
EXPORT_SYMBOL(otx2_get_mac_from_af);
-int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
+int otx2_txschq_config(struct otx2_nic *pfvf, int lvl, int prio, bool txschq_for_pfc)
{
+ u16 (*schq_list)[MAX_TXSCHQ_PER_FUNC];
struct otx2_hw *hw = &pfvf->hw;
struct nix_txschq_config *req;
u64 schq, parent;
@@ -602,7 +603,13 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
req->lvl = lvl;
req->num_regs = 1;
- schq = hw->txschq_list[lvl][0];
+ schq_list = hw->txschq_list;
+#ifdef CONFIG_DCB
+ if (txschq_for_pfc)
+ schq_list = pfvf->pfc_schq_list;
+#endif
+
+ schq = schq_list[lvl][prio];
/* Set topology e.t.c configuration */
if (lvl == NIX_TXSCH_LVL_SMQ) {
req->reg[0] = NIX_AF_SMQX_CFG(schq);
@@ -611,7 +618,7 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
(0x2ULL << 36);
req->num_regs++;
/* MDQ config */
- parent = hw->txschq_list[NIX_TXSCH_LVL_TL4][0];
+ parent = schq_list[NIX_TXSCH_LVL_TL4][prio];
req->reg[1] = NIX_AF_MDQX_PARENT(schq);
req->regval[1] = parent << 16;
req->num_regs++;
@@ -619,14 +626,14 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
req->reg[2] = NIX_AF_MDQX_SCHEDULE(schq);
req->regval[2] = dwrr_val;
} else if (lvl == NIX_TXSCH_LVL_TL4) {
- parent = hw->txschq_list[NIX_TXSCH_LVL_TL3][0];
+ parent = schq_list[NIX_TXSCH_LVL_TL3][prio];
req->reg[0] = NIX_AF_TL4X_PARENT(schq);
req->regval[0] = parent << 16;
req->num_regs++;
req->reg[1] = NIX_AF_TL4X_SCHEDULE(schq);
req->regval[1] = dwrr_val;
} else if (lvl == NIX_TXSCH_LVL_TL3) {
- parent = hw->txschq_list[NIX_TXSCH_LVL_TL2][0];
+ parent = schq_list[NIX_TXSCH_LVL_TL2][prio];
req->reg[0] = NIX_AF_TL3X_PARENT(schq);
req->regval[0] = parent << 16;
req->num_regs++;
@@ -635,11 +642,13 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
if (lvl == hw->txschq_link_cfg_lvl) {
req->num_regs++;
req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, hw->tx_link);
- /* Enable this queue and backpressure */
- req->regval[2] = BIT_ULL(13) | BIT_ULL(12);
+ /* Enable this queue and backpressure,
+ * and set the relative channel
+ */
+ req->regval[2] = BIT_ULL(13) | BIT_ULL(12) | prio;
}
} else if (lvl == NIX_TXSCH_LVL_TL2) {
- parent = hw->txschq_list[NIX_TXSCH_LVL_TL1][0];
+ parent = schq_list[NIX_TXSCH_LVL_TL1][prio];
req->reg[0] = NIX_AF_TL2X_PARENT(schq);
req->regval[0] = parent << 16;
@@ -650,8 +659,10 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
if (lvl == hw->txschq_link_cfg_lvl) {
req->num_regs++;
req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, hw->tx_link);
- /* Enable this queue and backpressure */
- req->regval[2] = BIT_ULL(13) | BIT_ULL(12);
+ /* Enable this queue and backpressure,
+ * and set the relative channel
+ */
+ req->regval[2] = BIT_ULL(13) | BIT_ULL(12) | prio;
}
} else if (lvl == NIX_TXSCH_LVL_TL1) {
/* Default config for TL1.
@@ -676,6 +687,31 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
return otx2_sync_mbox_msg(&pfvf->mbox);
}
+EXPORT_SYMBOL(otx2_txschq_config);
+
+int otx2_smq_flush(struct otx2_nic *pfvf, int smq)
+{
+ struct nix_txschq_config *req;
+ int rc;
+
+ mutex_lock(&pfvf->mbox.lock);
+
+ req = otx2_mbox_alloc_msg_nix_txschq_cfg(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->lvl = NIX_TXSCH_LVL_SMQ;
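+ /* Set the flush bit (bit 49) in NIX_AF_SMQX_CFG for this SMQ */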
+ req->reg[0] = NIX_AF_SMQX_CFG(smq);
+ req->regval[0] |= BIT_ULL(49);
+ req->num_regs++;
+
+ rc = otx2_sync_mbox_msg(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
+ return rc;
+}
+EXPORT_SYMBOL(otx2_smq_flush);
int otx2_txsch_alloc(struct otx2_nic *pfvf)
{
@@ -806,8 +842,7 @@ int otx2_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
aq->sq.max_sqe_size = NIX_MAXSQESZ_W16; /* 128 byte */
aq->sq.cq_ena = 1;
aq->sq.ena = 1;
- /* Only one SMQ is allocated, map all SQ's to that SMQ */
- aq->sq.smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0];
+ aq->sq.smq = otx2_get_smq_idx(pfvf, qidx);
aq->sq.smq_rr_quantum = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen);
aq->sq.default_chan = pfvf->hw.tx_chan_base;
aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */
@@ -1792,4 +1827,5 @@ otx2_mbox_up_handler_ ## _fn_name(struct otx2_nic *pfvf, \
} \
EXPORT_SYMBOL(otx2_mbox_up_handler_ ## _fn_name);
MBOX_UP_CGX_MESSAGES
+MBOX_UP_MCS_MESSAGES
#undef M
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index b28029cc4316..282db6fe3b08 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -19,6 +19,7 @@
#include
#include
#include
+#include
#include