forked from Minki/linux
RDMA/ipoib: drop skb on path record lookup failure
In the unicast_arp_send function there is an inconsistency in the error handling of the path_rec_start call. If path_rec_start is called because of an absent ah field, the skb will be dropped. But if it is called on the creation of a new path, or if the path is invalid, the skb will be added to the tail of the path queue. In the case of a new path it will be dropped on path_free, but in the case of an invalid path it can stay in the queue forever. This patch unifies the behavior, dropping the skb in all cases of path_rec_start failure. Signed-off-by: Evgenii Smirnov <evgenii.smirnov@profitbricks.com> Signed-off-by: Doug Ledford <dledford@redhat.com>
This commit is contained in:
parent
fa9391dbad
commit
15517080c5
@ -1054,62 +1054,42 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
|
|||||||
|
|
||||||
path = __path_find(dev, phdr->hwaddr + 4);
|
path = __path_find(dev, phdr->hwaddr + 4);
|
||||||
if (!path || !path->ah || !path->ah->valid) {
|
if (!path || !path->ah || !path->ah->valid) {
|
||||||
int new_path = 0;
|
|
||||||
|
|
||||||
if (!path) {
|
if (!path) {
|
||||||
path = path_rec_create(dev, phdr->hwaddr + 4);
|
path = path_rec_create(dev, phdr->hwaddr + 4);
|
||||||
new_path = 1;
|
if (!path)
|
||||||
}
|
goto drop_and_unlock;
|
||||||
if (path) {
|
__path_add(dev, path);
|
||||||
if (!new_path)
|
} else {
|
||||||
/* make sure there is no changes in the existing path record */
|
/*
|
||||||
|
* make sure there are no changes in the existing
|
||||||
|
* path record
|
||||||
|
*/
|
||||||
init_path_rec(priv, path, phdr->hwaddr + 4);
|
init_path_rec(priv, path, phdr->hwaddr + 4);
|
||||||
|
}
|
||||||
|
if (!path->query && path_rec_start(dev, path)) {
|
||||||
|
goto drop_and_unlock;
|
||||||
|
}
|
||||||
|
|
||||||
if (skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
|
if (skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
|
||||||
push_pseudo_header(skb, phdr->hwaddr);
|
push_pseudo_header(skb, phdr->hwaddr);
|
||||||
__skb_queue_tail(&path->queue, skb);
|
__skb_queue_tail(&path->queue, skb);
|
||||||
} else {
|
goto unlock;
|
||||||
++dev->stats.tx_dropped;
|
|
||||||
dev_kfree_skb_any(skb);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!path->query && path_rec_start(dev, path)) {
|
|
||||||
spin_unlock_irqrestore(&priv->lock, flags);
|
|
||||||
if (new_path)
|
|
||||||
path_free(dev, path);
|
|
||||||
return;
|
|
||||||
} else
|
|
||||||
__path_add(dev, path);
|
|
||||||
} else {
|
} else {
|
||||||
goto drop_and_unlock;
|
goto drop_and_unlock;
|
||||||
}
|
}
|
||||||
|
|
||||||
spin_unlock_irqrestore(&priv->lock, flags);
|
|
||||||
return;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if (path->ah && path->ah->valid) {
|
spin_unlock_irqrestore(&priv->lock, flags);
|
||||||
ipoib_dbg(priv, "Send unicast ARP to %08x\n",
|
ipoib_dbg(priv, "Send unicast ARP to %08x\n",
|
||||||
be32_to_cpu(sa_path_get_dlid(&path->pathrec)));
|
be32_to_cpu(sa_path_get_dlid(&path->pathrec)));
|
||||||
|
|
||||||
spin_unlock_irqrestore(&priv->lock, flags);
|
|
||||||
path->ah->last_send = rn->send(dev, skb, path->ah->ah,
|
path->ah->last_send = rn->send(dev, skb, path->ah->ah,
|
||||||
IPOIB_QPN(phdr->hwaddr));
|
IPOIB_QPN(phdr->hwaddr));
|
||||||
return;
|
return;
|
||||||
} else if ((path->query || !path_rec_start(dev, path)) &&
|
|
||||||
skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
|
|
||||||
push_pseudo_header(skb, phdr->hwaddr);
|
|
||||||
__skb_queue_tail(&path->queue, skb);
|
|
||||||
} else {
|
|
||||||
goto drop_and_unlock;
|
|
||||||
}
|
|
||||||
|
|
||||||
spin_unlock_irqrestore(&priv->lock, flags);
|
|
||||||
return;
|
|
||||||
|
|
||||||
drop_and_unlock:
|
drop_and_unlock:
|
||||||
++dev->stats.tx_dropped;
|
++dev->stats.tx_dropped;
|
||||||
dev_kfree_skb_any(skb);
|
dev_kfree_skb_any(skb);
|
||||||
|
unlock:
|
||||||
spin_unlock_irqrestore(&priv->lock, flags);
|
spin_unlock_irqrestore(&priv->lock, flags);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
Loading…
Reference in New Issue
Block a user