mirror of
https://github.com/torvalds/linux.git
synced 2024-11-24 05:02:12 +00:00
net: dev: rename queue selection helpers.
With the following patches, we are going to use __netdev_pick_tx() in many modules. Rename it to netdev_pick_tx(), to make it clear it is a public API. Also rename the existing netdev_pick_tx() to netdev_core_pick_tx(), to avoid name clashes. Suggested-by: Eric Dumazet <edumazet@google.com> Suggested-by: David Miller <davem@davemloft.net> Signed-off-by: Paolo Abeni <pabeni@redhat.com> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
0b963ef20c
commit
4bd97d51a5
@ -2152,9 +2152,9 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev,
|
||||
&qdisc_xmit_lock_key); \
|
||||
}
|
||||
|
||||
struct netdev_queue *netdev_pick_tx(struct net_device *dev,
|
||||
struct sk_buff *skb,
|
||||
struct net_device *sb_dev);
|
||||
struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
|
||||
struct sk_buff *skb,
|
||||
struct net_device *sb_dev);
|
||||
|
||||
/* returns the headroom that the master device needs to take in account
|
||||
* when forwarding to this dev
|
||||
|
@ -3704,8 +3704,8 @@ u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
|
||||
}
|
||||
EXPORT_SYMBOL(dev_pick_tx_cpu_id);
|
||||
|
||||
static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
|
||||
struct net_device *sb_dev)
|
||||
static u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
|
||||
struct net_device *sb_dev)
|
||||
{
|
||||
struct sock *sk = skb->sk;
|
||||
int queue_index = sk_tx_queue_get(sk);
|
||||
@ -3730,9 +3730,9 @@ static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
|
||||
return queue_index;
|
||||
}
|
||||
|
||||
struct netdev_queue *netdev_pick_tx(struct net_device *dev,
|
||||
struct sk_buff *skb,
|
||||
struct net_device *sb_dev)
|
||||
struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
|
||||
struct sk_buff *skb,
|
||||
struct net_device *sb_dev)
|
||||
{
|
||||
int queue_index = 0;
|
||||
|
||||
@ -3748,9 +3748,9 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev,
|
||||
|
||||
if (ops->ndo_select_queue)
|
||||
queue_index = ops->ndo_select_queue(dev, skb, sb_dev,
|
||||
__netdev_pick_tx);
|
||||
netdev_pick_tx);
|
||||
else
|
||||
queue_index = __netdev_pick_tx(dev, skb, sb_dev);
|
||||
queue_index = netdev_pick_tx(dev, skb, sb_dev);
|
||||
|
||||
queue_index = netdev_cap_txqueue(dev, queue_index);
|
||||
}
|
||||
@ -3824,7 +3824,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
|
||||
else
|
||||
skb_dst_force(skb);
|
||||
|
||||
txq = netdev_pick_tx(dev, skb, sb_dev);
|
||||
txq = netdev_core_pick_tx(dev, skb, sb_dev);
|
||||
q = rcu_dereference_bh(txq->qdisc);
|
||||
|
||||
trace_net_dev_queue(skb);
|
||||
@ -4429,7 +4429,7 @@ void generic_xdp_tx(struct sk_buff *skb, struct bpf_prog *xdp_prog)
|
||||
bool free_skb = true;
|
||||
int cpu, rc;
|
||||
|
||||
txq = netdev_pick_tx(dev, skb, NULL);
|
||||
txq = netdev_core_pick_tx(dev, skb, NULL);
|
||||
cpu = smp_processor_id();
|
||||
HARD_TX_LOCK(dev, txq, cpu);
|
||||
if (!netif_xmit_stopped(txq)) {
|
||||
|
@ -323,7 +323,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
|
||||
if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) {
|
||||
struct netdev_queue *txq;
|
||||
|
||||
txq = netdev_pick_tx(dev, skb, NULL);
|
||||
txq = netdev_core_pick_tx(dev, skb, NULL);
|
||||
|
||||
/* try until next clock tick */
|
||||
for (tries = jiffies_to_usecs(1)/USEC_PER_POLL;
|
||||
|
@ -247,7 +247,7 @@ void xfrm_dev_resume(struct sk_buff *skb)
|
||||
unsigned long flags;
|
||||
|
||||
rcu_read_lock();
|
||||
txq = netdev_pick_tx(dev, skb, NULL);
|
||||
txq = netdev_core_pick_tx(dev, skb, NULL);
|
||||
|
||||
HARD_TX_LOCK(dev, txq, smp_processor_id());
|
||||
if (!netif_xmit_frozen_or_stopped(txq))
|
||||
|
Loading…
Reference in New Issue
Block a user