net: mvpp2: add FCA RXQ non occupied descriptor threshold

The firmware needs to monitor the RX non-occupied descriptor bits so that
flow control can move to XOFF mode. These bits must be unmasked to be
functional, but they will not raise interrupts, since we leave the RX
exception summary bit in MVPP2_ISR_RX_TX_MASK_REG clear.

Signed-off-by: Stefan Chulski <stefanc@marvell.com>
Acked-by: Marcin Wojtas <mw@semihalf.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit bf270fa3c4 (parent 2788d8418a)
Author:    Stefan Chulski <stefanc@marvell.com>
Date:      2021-02-11 12:48:55 +02:00
Committer: David S. Miller <davem@davemloft.net>

 2 files changed, 40 insertions(+), 7 deletions(-)
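To make the masking relationship described in the commit message concrete, here is a small standalone model in plain C (illustration only, not driver code). Only the 0x00ff value of MVPP2_ISR_RX_ERR_CAUSE_NONOCC_MASK and the register names come from the patch below; the summary-enable bit position and the gating logic are simplifying assumptions.

/* Illustration only: an unmasked cause bit is visible to a poller (the
 * firmware) even when the summary enable that feeds the CPU interrupt
 * line is left clear.  Only the 0x00ff value mirrors
 * MVPP2_ISR_RX_ERR_CAUSE_NONOCC_MASK from the patch; the rest is a
 * simplified model, not the real interrupt tree.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RX_ERR_CAUSE_NONOCC_MASK  0x00ffu     /* per-RXQ "non-occupied" cause bits */
#define RX_SUMMARY_ENABLE_BIT     (1u << 31)  /* hypothetical summary enable bit   */

struct isr_model {
        uint32_t rx_err_cause;       /* cause bits latched by "hardware"          */
        uint32_t rx_err_cause_mask;  /* MVPP2_ISR_RX_ERR_CAUSE_REG-style unmask   */
        uint32_t rx_tx_mask;         /* MVPP2_ISR_RX_TX_MASK_REG-style enables    */
};

/* What a firmware poller sees: gated only by the per-cause unmask. */
static uint32_t firmware_view(const struct isr_model *m)
{
        return m->rx_err_cause & m->rx_err_cause_mask;
}

/* What the CPU sees: additionally gated by the summary enable bit. */
static bool cpu_interrupt(const struct isr_model *m)
{
        return firmware_view(m) && (m->rx_tx_mask & RX_SUMMARY_ENABLE_BIT);
}

int main(void)
{
        struct isr_model m = {
                .rx_err_cause      = 0x0004,                   /* RXQ 2 is short of free descriptors */
                .rx_err_cause_mask = RX_ERR_CAUSE_NONOCC_MASK, /* unmasked, as the patch does        */
                .rx_tx_mask        = 0,                        /* RX exception summary left clear    */
        };

        printf("causes visible to firmware: 0x%04x\n", firmware_view(&m));
        printf("CPU interrupt raised:       %s\n", cpu_interrupt(&m) ? "yes" : "no");
        return 0;
}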

diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2.h
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2.h

@@ -295,6 +295,8 @@
 #define MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK 0x3fc00000
 #define MVPP2_PON_CAUSE_MISC_SUM_MASK BIT(31)
 #define MVPP2_ISR_MISC_CAUSE_REG 0x55b0
+#define MVPP2_ISR_RX_ERR_CAUSE_REG(port) (0x5520 + 4 * (port))
+#define MVPP2_ISR_RX_ERR_CAUSE_NONOCC_MASK 0x00ff
 
 /* Buffer Manager registers */
 #define MVPP2_BM_POOL_BASE_REG(pool) (0x6000 + ((pool) * 4))
@@ -763,6 +765,7 @@
 /* MSS Flow control */
 #define FC_QUANTA 0xFFFF
 #define FC_CLK_DIVIDER 100
+#define MSS_THRESHOLD_STOP 768
 
 /* RX buffer constants */
 #define MVPP2_SKB_SHINFO_SIZE \

diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c

@@ -1133,14 +1133,19 @@ static inline void mvpp2_qvec_interrupt_disable(struct mvpp2_queue_vector *qvec)
 static void mvpp2_interrupts_mask(void *arg)
 {
         struct mvpp2_port *port = arg;
+        int cpu = smp_processor_id();
+        u32 thread;
 
         /* If the thread isn't used, don't do anything */
-        if (smp_processor_id() > port->priv->nthreads)
+        if (cpu > port->priv->nthreads)
                 return;
 
-        mvpp2_thread_write(port->priv,
-                           mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
+        thread = mvpp2_cpu_to_thread(port->priv, cpu);
+
+        mvpp2_thread_write(port->priv, thread,
                            MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
+        mvpp2_thread_write(port->priv, thread,
+                           MVPP2_ISR_RX_ERR_CAUSE_REG(port->id), 0);
 }
 
 /* Unmask the current thread's Rx/Tx interrupts.
@@ -1150,20 +1155,25 @@ static void mvpp2_interrupts_mask(void *arg)
 static void mvpp2_interrupts_unmask(void *arg)
 {
         struct mvpp2_port *port = arg;
-        u32 val;
+        int cpu = smp_processor_id();
+        u32 val, thread;
 
         /* If the thread isn't used, don't do anything */
-        if (smp_processor_id() > port->priv->nthreads)
+        if (cpu > port->priv->nthreads)
                 return;
 
+        thread = mvpp2_cpu_to_thread(port->priv, cpu);
+
         val = MVPP2_CAUSE_MISC_SUM_MASK |
                 MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version);
         if (port->has_tx_irqs)
                 val |= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
 
-        mvpp2_thread_write(port->priv,
-                           mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
+        mvpp2_thread_write(port->priv, thread,
                            MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
+        mvpp2_thread_write(port->priv, thread,
+                           MVPP2_ISR_RX_ERR_CAUSE_REG(port->id),
+                           MVPP2_ISR_RX_ERR_CAUSE_NONOCC_MASK);
 }
 
 static void
@@ -1188,6 +1198,9 @@ mvpp2_shared_interrupt_mask_unmask(struct mvpp2_port *port, bool mask)
                 mvpp2_thread_write(port->priv, v->sw_thread_id,
                                    MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
+                mvpp2_thread_write(port->priv, v->sw_thread_id,
+                                   MVPP2_ISR_RX_ERR_CAUSE_REG(port->id),
+                                   MVPP2_ISR_RX_ERR_CAUSE_NONOCC_MASK);
         }
 }
@@ -2393,6 +2406,20 @@ static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
         }
 }
 
+/* Set the number of non-occupied descriptors threshold */
+static void mvpp2_set_rxq_free_tresh(struct mvpp2_port *port,
+                                     struct mvpp2_rx_queue *rxq)
+{
+        u32 val;
+
+        mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
+
+        val = mvpp2_read(port->priv, MVPP2_RXQ_THRESH_REG);
+        val &= ~MVPP2_RXQ_NON_OCCUPIED_MASK;
+        val |= MSS_THRESHOLD_STOP << MVPP2_RXQ_NON_OCCUPIED_OFFSET;
+        mvpp2_write(port->priv, MVPP2_RXQ_THRESH_REG, val);
+}
+
 /* Set the number of packets that will be received before Rx interrupt
  * will be generated by HW.
  */
@@ -2656,6 +2683,9 @@ static int mvpp2_rxq_init(struct mvpp2_port *port,
         mvpp2_rx_pkts_coal_set(port, rxq);
         mvpp2_rx_time_coal_set(port, rxq);
 
+        /* Set the number of non occupied descriptors threshold */
+        mvpp2_set_rxq_free_tresh(port, rxq);
+
         /* Add number of descriptors ready for receiving packets */
         mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);
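For reference, the read-modify-write that mvpp2_set_rxq_free_tresh() performs on MVPP2_RXQ_THRESH_REG can be sketched standalone as below. The field offset and width are assumptions (the patch only references the MVPP2_RXQ_NON_OCCUPIED_* names); only MSS_THRESHOLD_STOP = 768 comes from the diff, and the "STOP" in its name suggests it is the level tied to the firmware's XOFF decision.

/* Standalone illustration of the MVPP2_RXQ_THRESH_REG update done by
 * mvpp2_set_rxq_free_tresh().  The field offset (16) and 14-bit mask are
 * assumptions made for this example; only MSS_THRESHOLD_STOP (768) is
 * taken from the patch.
 */
#include <stdint.h>
#include <stdio.h>

#define MSS_THRESHOLD_STOP        768
#define RXQ_NON_OCCUPIED_OFFSET   16            /* assumed field offset */
#define RXQ_NON_OCCUPIED_MASK     0x3fff0000u   /* assumed field mask   */

int main(void)
{
        uint32_t val = 0x000000ffu;  /* pretend current MVPP2_RXQ_THRESH_REG value */

        val &= ~RXQ_NON_OCCUPIED_MASK;                         /* clear the old threshold */
        val |= MSS_THRESHOLD_STOP << RXQ_NON_OCCUPIED_OFFSET;  /* program the new one     */

        printf("MVPP2_RXQ_THRESH_REG <- 0x%08x\n", val);
        return 0;
}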