packet: rework packet_pick_tx_queue() to use common code selection
Currently packet_pick_tx_queue() is the only caller of
ndo_select_queue() using a fallback argument other than
netdev_pick_tx.

Leveraging the rx queue, we can obtain a similar queue selection
behavior using core helpers. After this change, ndo_select_queue()
is always invoked with netdev_pick_tx() as fallback. We can change
the ndo_select_queue() signature in a followup patch, dropping an
indirect call per transmitted packet in some scenarios (e.g. TCP
syn and XDP generic xmit).

This slightly changes how af_packet queue selection happens when
PACKET_QDISC_BYPASS is set. It's now more similar to plain
dev_queue_xmit(), taking into account both XPS and TC mapping.

v1 -> v2:
 - rebased after helper name change

RFC -> v1:
 - initialize sender_cpu to the expected value

Signed-off-by: Paolo Abeni <pabeni@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit b71b5837f8 (parent 4bd97d51a5)
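The key to the rework is visible in the af_packet.c hunk below: before deferring to the common helpers, packet_pick_tx_queue() records cpu % real_num_tx_queues as the skb's rx queue, so the generic selection (XPS mapping first, then the recorded rx queue, then the flow hash) lands on the same per-CPU queue the old __packet_pick_tx_queue() fallback would have chosen whenever no XPS mapping applies. Below is a minimal stand-alone model of that ordering; model_dev, model_skb and model_pick_tx() are made-up stand-ins for illustration, not the kernel implementation.

/* Stand-alone model of the selection order used by the common
 * helpers: an XPS mapping wins if present, otherwise a recorded
 * rx queue, otherwise the flow hash. All types and helpers here
 * are simplified stand-ins, not kernel structures.
 */
#include <stdbool.h>
#include <stdio.h>

struct model_dev {
	unsigned int real_num_tx_queues;
	int xps_queue;			/* -1: no XPS mapping for this CPU */
};

struct model_skb {
	bool rx_queue_recorded;
	unsigned int rx_queue;
	unsigned int hash;
};

static unsigned int model_pick_tx(const struct model_dev *dev,
				  const struct model_skb *skb)
{
	if (dev->xps_queue >= 0)		/* XPS/TC mapping first */
		return (unsigned int)dev->xps_queue;
	if (skb->rx_queue_recorded)		/* then the recorded rx queue */
		return skb->rx_queue % dev->real_num_tx_queues;
	return skb->hash % dev->real_num_tx_queues;	/* finally, flow hash */
}

int main(void)
{
	struct model_dev dev = { .real_num_tx_queues = 4, .xps_queue = -1 };
	struct model_skb skb = { .hash = 0x5ca1ab1e };
	unsigned int cpu = 6;	/* pretend this is raw_smp_processor_id() */

	/* What the reworked packet_pick_tx_queue() does before
	 * deferring to the common code: record a pseudo rx queue.
	 */
	skb.rx_queue_recorded = true;
	skb.rx_queue = cpu % dev.real_num_tx_queues;

	/* With no XPS mapping this reproduces the old per-CPU pick:
	 * queue 2 for CPU 6 on a 4-queue device.
	 */
	printf("selected tx queue: %u\n", model_pick_tx(&dev, &skb));
	return 0;
}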
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2152,6 +2152,8 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev,
 				    &qdisc_xmit_lock_key);	\
 }
 
+u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
+		   struct net_device *sb_dev);
 struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
 					 struct sk_buff *skb,
 					 struct net_device *sb_dev);
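With netdev_pick_tx() declared here and exported below, a driver whose ndo_select_queue() only needs to special-case some traffic can defer everything else to the same helper the core now passes as the fallback argument. A hypothetical sketch (foo_select_queue and its priority check are invented for illustration; the four-argument signature is the one in effect at the time of this patch):

/* Hypothetical driver callback, for illustration only. The
 * fallback argument becomes redundant once every caller passes
 * netdev_pick_tx(), which is what the followup patch relies on.
 */
static u16 foo_select_queue(struct net_device *dev, struct sk_buff *skb,
			    struct net_device *sb_dev,
			    select_queue_fallback_t fallback)
{
	/* Made-up policy: steer control-priority frames to queue 0. */
	if (skb->priority == TC_PRIO_CONTROL)
		return 0;

	/* Everything else: the common XPS/hash based selection. */
	return netdev_pick_tx(dev, skb, sb_dev);
}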
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3704,8 +3704,8 @@ u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
 }
 EXPORT_SYMBOL(dev_pick_tx_cpu_id);
 
-static u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
-			  struct net_device *sb_dev)
+u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
+		   struct net_device *sb_dev)
 {
 	struct sock *sk = skb->sk;
 	int queue_index = sk_tx_queue_get(sk);
@@ -3729,6 +3729,7 @@ static u16 netdev_pick_tx(struct net_device *dev, struct sk_buff *skb,
 
 	return queue_index;
 }
+EXPORT_SYMBOL(netdev_pick_tx);
 
 struct netdev_queue *netdev_core_pick_tx(struct net_device *dev,
 					 struct sk_buff *skb,
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -275,24 +275,23 @@ static bool packet_use_direct_xmit(const struct packet_sock *po)
 	return po->xmit == packet_direct_xmit;
 }
 
-static u16 __packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb,
-				  struct net_device *sb_dev)
-{
-	return dev_pick_tx_cpu_id(dev, skb, sb_dev, NULL);
-}
-
 static u16 packet_pick_tx_queue(struct sk_buff *skb)
 {
 	struct net_device *dev = skb->dev;
 	const struct net_device_ops *ops = dev->netdev_ops;
+	int cpu = raw_smp_processor_id();
 	u16 queue_index;
 
+#ifdef CONFIG_XPS
+	skb->sender_cpu = cpu + 1;
+#endif
+	skb_record_rx_queue(skb, cpu % dev->real_num_tx_queues);
 	if (ops->ndo_select_queue) {
 		queue_index = ops->ndo_select_queue(dev, skb, NULL,
-						    __packet_pick_tx_queue);
+						    netdev_pick_tx);
 		queue_index = netdev_cap_txqueue(dev, queue_index);
 	} else {
-		queue_index = __packet_pick_tx_queue(dev, skb, NULL);
+		queue_index = netdev_pick_tx(dev, skb, NULL);
 	}
 
 	return queue_index;
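For reference, the path changed above is only taken by sockets that opted into qdisc bypass; a minimal user-space snippet to put a packet socket into that mode (requires CAP_NET_RAW, error handling trimmed) could look like this:

/* Minimal example of enabling PACKET_QDISC_BYPASS on a packet
 * socket, the mode whose queue selection this patch changes.
 */
#include <arpa/inet.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int one = 1;
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	/* Subsequent sends on this socket go through
	 * packet_direct_xmit(), i.e. packet_pick_tx_queue() decides
	 * the tx queue instead of the qdisc layer.
	 */
	if (setsockopt(fd, SOL_PACKET, PACKET_QDISC_BYPASS,
		       &one, sizeof(one)) < 0)
		perror("setsockopt(PACKET_QDISC_BYPASS)");

	close(fd);
	return 0;
}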