net: Add lockdep asserts to ____napi_schedule().
____napi_schedule() needs to be invoked with interrupts disabled because of __raise_softirq_irqoff() (in order not to corrupt the per-CPU list). ____napi_schedule() also needs to be invoked from interrupt context so that the raised softirq is processed when the interrupt context is left. Add lockdep asserts for both conditions. Since this is the second place the irq/softirq check is needed, provide a generic lockdep_assert_softirq_will_run() which is used by both callers. Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
committed by
David S. Miller
parent
d96657dc92
commit
fbd9a2ceba
@@ -4265,6 +4265,9 @@ static inline void ____napi_schedule(struct softnet_data *sd,
 {
 	struct task_struct *thread;

+	lockdep_assert_softirq_will_run();
+	lockdep_assert_irqs_disabled();
+
 	if (test_bit(NAPI_STATE_THREADED, &napi->state)) {
 		/* Paired with smp_mb__before_atomic() in
 		 * napi_enable()/dev_set_threaded().
@@ -4872,7 +4875,7 @@ int __netif_rx(struct sk_buff *skb)
 {
 	int ret;

-	lockdep_assert_once(hardirq_count() | softirq_count());
+	lockdep_assert_softirq_will_run();

 	trace_netif_rx_entry(skb);
 	ret = netif_rx_internal(skb);
Reference in New Issue
Block a user