net: more spelling fixes
Various spelling fixes in networking stack

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Committed by David S. Miller
parent 22a9321614
commit 8e3bff96af
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -373,7 +373,7 @@ int br_setlink(struct net_device *dev, struct nlmsghdr *nlh)
 	p = br_port_get_rtnl(dev);
 	/* We want to accept dev as bridge itself if the AF_SPEC
-	 * is set to see if someone is setting vlan info on the brigde
+	 * is set to see if someone is setting vlan info on the bridge
 	 */
 	if (!p && !afspec)
 		return -EINVAL;
@@ -389,7 +389,7 @@ int br_setlink(struct net_device *dev, struct nlmsghdr *nlh)
 		err = br_setport(p, tb);
 		spin_unlock_bh(&p->br->lock);
 	} else {
-		/* Binary compatability with old RSTP */
+		/* Binary compatibility with old RSTP */
 		if (nla_len(protinfo) < sizeof(u8))
 			return -EINVAL;
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -676,8 +676,8 @@ static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
 		while ((mask | (mask >> 1)) != mask)
 			mask |= (mask >> 1);
 		/* On 64 bit arches, must check mask fits in table->mask (u32),
-		 * and on 32bit arches, must check RPS_DEV_FLOW_TABLE_SIZE(mask + 1)
-		 * doesnt overflow.
+		 * and on 32bit arches, must check
+		 * RPS_DEV_FLOW_TABLE_SIZE(mask + 1) doesn't overflow.
 		 */
 #if BITS_PER_LONG > 32
 		if (mask > (unsigned long)(u32)mask)
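
A note on the context lines in this hunk: the while loop rounds mask up
to the next 2^k - 1 by smearing its highest set bit into every lower
position, and the #if BITS_PER_LONG > 32 branch then rejects values
that would truncate when stored in the table's u32 mask field. A
minimal userspace sketch of both steps (illustrative values, not part
of the patch):

	/* Smear the top bit of 'mask' downward so it becomes 2^k - 1,
	 * then mimic the 64-bit check that the result still fits in a
	 * u32. Demo values only; not kernel code. */
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned long mask = 100;		/* 0b1100100 */

		while ((mask | (mask >> 1)) != mask)
			mask |= (mask >> 1);		/* 100 -> 118 -> 127 */

		if (mask > (unsigned long)(uint32_t)mask)
			return 1;			/* would not fit in a u32 mask */

		printf("%lu\n", mask);			/* prints 127 */
		return 0;
	}
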
--- a/net/core/netprio_cgroup.c
+++ b/net/core/netprio_cgroup.c
@@ -30,7 +30,7 @@
 #define PRIOMAP_MIN_SZ		128
 
 /*
- * Extend @dev->priomap so that it's large enough to accomodate
+ * Extend @dev->priomap so that it's large enough to accommodate
  * @target_idx.  @dev->priomap.priomap_len > @target_idx after successful
  * return.  Must be called under rtnl lock.
  */
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -1051,7 +1051,7 @@ e_inval:
  *
  * To support IP_CMSG_PKTINFO option, we store rt_iif and specific
  * destination in skb->cb[] before dst drop.
- * This way, receiver doesnt make cache line misses to read rtable.
+ * This way, receiver doesn't make cache line misses to read rtable.
  */
 void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb)
 {
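
The comment being fixed here documents the skb->cb[] trick: every
sk_buff carries a small scratch array that the layer currently owning
the skb may overlay with its own struct, so per-packet data (here,
rt_iif and the specific destination) travels inside the buffer itself
instead of behind an extra pointer dereference. A rough userspace
analog of that pattern, with hypothetical stand-in types:

	/* Stash per-packet metadata in a scratch area inside the packet
	 * descriptor itself, so readers touch no extra cache line.
	 * Stand-in types; not kernel code. */
	#include <assert.h>
	#include <string.h>

	struct pkt {			/* stand-in for struct sk_buff */
		char cb[48];		/* control buffer, owned by the current layer */
	};

	struct pktinfo {		/* stand-in for the data stored before dst drop */
		int iif;		/* input interface index */
	};

	static void stash(struct pkt *p, int iif)
	{
		struct pktinfo info = { .iif = iif };

		memcpy(p->cb, &info, sizeof(info));	/* must fit in cb[] */
	}

	static int read_iif(const struct pkt *p)
	{
		struct pktinfo info;

		memcpy(&info, p->cb, sizeof(info));
		return info.iif;
	}

	int main(void)
	{
		struct pkt p;

		stash(&p, 3);
		assert(read_iif(&p) == 3);
		return 0;
	}
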
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -408,7 +408,7 @@ struct tcp_out_options {
  * Beware: Something in the Internet is very sensitive to the ordering of
  * TCP options, we learned this through the hard way, so be careful here.
  * Luckily we can at least blame others for their non-compliance but from
- * inter-operatibility perspective it seems that we're somewhat stuck with
+ * inter-operability perspective it seems that we're somewhat stuck with
  * the ordering which we have been using if we want to keep working with
  * those broken things (not that it currently hurts anybody as there isn't
  * particular reason why the ordering would need to be changed).
@@ -681,7 +681,7 @@ static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb
  *
  * Its important tcp_wfree() can be replaced by sock_wfree() in the event skb
  * needs to be reallocated in a driver.
- * The invariant being skb->truesize substracted from sk->sk_wmem_alloc
+ * The invariant being skb->truesize subtracted from sk->sk_wmem_alloc
  *
  * Since transmit from skb destructor is forbidden, we use a tasklet
  * to process all sockets that eventually need to send more skbs.
@@ -701,9 +701,9 @@ static void tcp_tsq_handler(struct sock *sk)
 		tcp_write_xmit(sk, tcp_current_mss(sk), 0, 0, GFP_ATOMIC);
 }
 /*
- * One tasklest per cpu tries to send more skbs.
+ * One tasklet per cpu tries to send more skbs.
  * We run in tasklet context but need to disable irqs when
- * transfering tsq->head because tcp_wfree() might
+ * transferring tsq->head because tcp_wfree() might
  * interrupt us (non NAPI drivers)
  */
 static void tcp_tasklet_func(unsigned long data)
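
For readers of the comment fixed above: the handler steals the whole
pending list in one irq-safe step because the producer, tcp_wfree(),
can queue entries from hard-irq context on the same CPU; processing
then happens with nothing held. A userspace analog of that
steal-then-process shape, using a mutex where the kernel disables irqs
(names are illustrative, not the kernel's):

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct node {
		struct node *next;
		int id;
	};

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static struct node *head;		/* stand-in for tsq->head */

	static void producer_add(int id)	/* stand-in for tcp_wfree() */
	{
		struct node *n = malloc(sizeof(*n));

		n->id = id;
		pthread_mutex_lock(&lock);
		n->next = head;
		head = n;
		pthread_mutex_unlock(&lock);
	}

	static void consumer_run(void)		/* stand-in for tcp_tasklet_func() */
	{
		struct node *list;

		pthread_mutex_lock(&lock);	/* short: just steal the list */
		list = head;
		head = NULL;
		pthread_mutex_unlock(&lock);

		while (list) {			/* process with no lock held */
			struct node *n = list;

			list = list->next;
			printf("more skbs for socket %d\n", n->id);
			free(n);
		}
	}

	int main(void)
	{
		producer_add(1);
		producer_add(2);
		consumer_run();
		return 0;
	}
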
@@ -797,7 +797,7 @@ void __init tcp_tasklet_init(void)
 
 /*
  * Write buffer destructor automatically called from kfree_skb.
- * We cant xmit new skbs from this context, as we might already
+ * We can't xmit new skbs from this context, as we might already
  * hold qdisc lock.
  */
 void tcp_wfree(struct sk_buff *skb)